Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:10:52 +07:00)
xfs: Correct comment tyops -> typos
Just fix the typos checkpatch notices...

Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Bill O'Donnell <billodo@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
commit cf085a1b5d
parent d6abecb825
@@ -32,7 +32,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 /*
- * __vmalloc() will allocate data pages and auxillary structures (e.g.
+ * __vmalloc() will allocate data pages and auxiliary structures (e.g.
  * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
  * we need to tell memory reclaim that we are in such a context via
  * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
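For context on the comment this hunk touches, here is a minimal sketch of the NOFS-scoped allocation pattern it describes, using the memalloc_nofs_save()/memalloc_nofs_restore() API from <linux/sched/mm.h>; the helper name and the plain vmalloc() call are illustrative, not the actual kmem.c code:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

/* Illustrative only: allocate via vmalloc() while running in GFP_NOFS context. */
static void *nofs_safe_vmalloc(size_t size)
{
	unsigned int nofs_flag;
	void *ptr;

	/*
	 * Flag this task as PF_MEMALLOC_NOFS so GFP_KERNEL allocations made
	 * internally (e.g. for page tables) cannot recurse into filesystem
	 * reclaim and deadlock.
	 */
	nofs_flag = memalloc_nofs_save();
	ptr = vmalloc(size);
	memalloc_nofs_restore(nofs_flag);

	return ptr;
}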
@@ -1488,7 +1488,7 @@ xfs_alloc_ag_vextent_near(
 	dofirst = prandom_u32() & 1;
 #endif
 
-	/* handle unitialized agbno range so caller doesn't have to */
+	/* handle uninitialized agbno range so caller doesn't have to */
 	if (!args->min_agbno && !args->max_agbno)
 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
 	ASSERT(args->min_agbno <= args->max_agbno);
@@ -829,7 +829,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
 }
 
 /*
- * Retreive the attribute value and length.
+ * Retrieve the attribute value and length.
  *
  * If ATTR_KERNOVAL is specified, only the length needs to be returned.
  * Unlike a lookup, we only return an error if the attribute does not
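As a hedged illustration of the ATTR_KERNOVAL behaviour described above (the helper and its parameters are invented for this sketch; only the ATTR_KERNOVAL flag and the length-only semantics come from the comment):

#include <linux/errno.h>
#include <linux/string.h>
/* ATTR_KERNOVAL itself comes from the XFS attr interface headers. */

/*
 * Illustrative only: return just the value length when ATTR_KERNOVAL is
 * set, otherwise copy the value into the caller's buffer as well.
 */
static int demo_attr_getvalue(unsigned int flags, const void *value,
			      int valuelen, void *buffer, int *buflen)
{
	if (flags & ATTR_KERNOVAL) {		/* length only, no copy */
		*buflen = valuelen;
		return 0;
	}
	if (*buflen < valuelen)
		return -ERANGE;			/* caller's buffer too small */
	*buflen = valuelen;
	memcpy(buffer, value, valuelen);
	return 0;
}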
@@ -482,7 +482,7 @@ xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
 }
 
 /*
- * Free space block defintions for the node format.
+ * Free space block definitions for the node format.
  */
 
 /*
@@ -416,7 +416,7 @@ struct xfs_bulkstat {
 
 /*
  * Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
  * to retain compatibility with "old" filesystems).
  */
 static inline uint32_t
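A small sketch of the two-halves scheme the comment refers to: the 32-bit project id is carried in a 16-bit low word and a 16-bit high word for ABI compatibility. The helper names here are invented; only the split itself mirrors the comment:

#include <linux/types.h>

/* Illustrative only: rebuild a 32-bit project id from its two 16-bit halves. */
static inline uint32_t demo_get_projid(uint16_t projid_hi, uint16_t projid_lo)
{
	return ((uint32_t)projid_hi << 16) | projid_lo;
}

/* ... and split it back for the legacy two-field layout. */
static inline void demo_set_projid(uint32_t projid,
				   uint16_t *projid_hi, uint16_t *projid_lo)
{
	*projid_hi = projid >> 16;
	*projid_lo = projid & 0xffff;
}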
@@ -432,9 +432,9 @@ static inline uint xfs_log_dinode_size(int version)
 }
 
 /*
- * Buffer Log Format defintions
+ * Buffer Log Format definitions
  *
- * These are the physical dirty bitmap defintions for the log format structure.
+ * These are the physical dirty bitmap definitions for the log format structure.
  */
 #define XFS_BLF_CHUNK	128
 #define XFS_BLF_SHIFT	7
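To make the chunk constants above concrete, a hedged sketch of how a dirtied byte range maps onto the dirty bitmap: one bit covers XFS_BLF_CHUNK (128) bytes, so byte offsets are converted to bit numbers with XFS_BLF_SHIFT. The helper name is invented; only the constants mirror the diff:

#include <linux/bitmap.h>

#define XFS_BLF_CHUNK	128
#define XFS_BLF_SHIFT	7	/* log2(XFS_BLF_CHUNK) */

/*
 * Illustrative only: mark every 128-byte chunk covering the byte range
 * [first, last] as dirty in the log format bitmap.
 */
static void demo_mark_dirty_range(unsigned long *data_map,
				  unsigned int first, unsigned int last)
{
	unsigned int first_bit = first >> XFS_BLF_SHIFT;
	unsigned int last_bit = last >> XFS_BLF_SHIFT;

	bitmap_set(data_map, first_bit, last_bit - first_bit + 1);
}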
@@ -461,7 +461,7 @@ _xfs_buf_map_pages(
 		unsigned nofs_flag;
 
 		/*
-		 * vm_map_ram() will allocate auxillary structures (e.g.
+		 * vm_map_ram() will allocate auxiliary structures (e.g.
 		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 		 * GFP_NOFS context here. Hence we need to tell memory reclaim
 		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
@@ -179,7 +179,7 @@ xlog_cil_alloc_shadow_bufs(
 			/*
 			 * We free and allocate here as a realloc would copy
-			 * unecessary data. We don't use kmem_zalloc() for the
+			 * unnecessary data. We don't use kmem_zalloc() for the
 			 * same reason - we don't need to zero the data area in
 			 * the buffer, only the log vector header and the iovec
 			 * storage.
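The free-then-allocate rationale in this comment, as a hedged sketch; kvfree()/kvmalloc() stand in for the XFS kmem wrappers and the helper name is invented:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative only: grow a scratch buffer without krealloc(). A realloc
 * would copy the old contents, which the caller is about to overwrite
 * anyway, and no zeroing is needed beyond the small header the caller
 * initialises itself.
 */
static void *demo_grow_shadow_buf(void *old_buf, size_t new_size)
{
	kvfree(old_buf);			/* drop old contents, no copy */
	return kvmalloc(new_size, GFP_NOFS);	/* no zeroing either */
}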
@@ -682,7 +682,7 @@ xlog_cil_push(
 	}
 
 
-	/* check for a previously pushed seqeunce */
+	/* check for a previously pushed sequence */
 	if (push_seq < cil->xc_ctx->sequence) {
 		spin_unlock(&cil->xc_push_lock);
 		goto out_skip;
@@ -5,7 +5,7 @@
 #ifndef __XFS_SYMLINK_H
 #define __XFS_SYMLINK_H 1
 
-/* Kernel only symlink defintions */
+/* Kernel only symlink definitions */
 
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
 		const char *target_path, umode_t mode, struct xfs_inode **ipp);
@@ -427,15 +427,15 @@ xfsaild_push(
 
 		case XFS_ITEM_FLUSHING:
 			/*
-			 * The item or its backing buffer is already beeing
+			 * The item or its backing buffer is already being
 			 * flushed. The typical reason for that is that an
 			 * inode buffer is locked because we already pushed the
 			 * updates to it as part of inode clustering.
 			 *
 			 * We do not want to to stop flushing just because lots
-			 * of items are already beeing flushed, but we need to
+			 * of items are already being flushed, but we need to
 			 * re-try the flushing relatively soon if most of the
-			 * AIL is beeing flushed.
+			 * AIL is being flushed.
 			 */
 			XFS_STATS_INC(mp, xs_push_ail_flushing);
 			trace_xfs_ail_flushing(lip);
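A hedged sketch of the retry policy the comment argues for: keep pushing when only some items are mid-flush, but requeue sooner when most of the scanned AIL is already being flushed. The counters, thresholds, and delays below are invented for illustration and do not reproduce xfs_trans_ail.c:

/*
 * Illustrative only: pick the requeue delay (in milliseconds) for the
 * next push based on how much of what we scanned was already being
 * flushed.
 */
static unsigned long demo_push_requeue_delay(unsigned int scanned,
					     unsigned int flushing)
{
	if (!flushing)
		return 50;	/* nothing mid-flush: normal long tick */
	if (flushing > scanned / 2)
		return 1;	/* most of the AIL is being flushed: retry soon */
	return 10;		/* some flushing in progress: moderate delay */
}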
@@ -612,7 +612,7 @@ xfsaild(
  * The push is run asynchronously in a workqueue, which means the caller needs
  * to handle waiting on the async flush for space to become available.
  * We don't want to interrupt any push that is in progress, hence we only queue
- * work if we set the pushing bit approriately.
+ * work if we set the pushing bit appropriately.
  *
  * We do this unlocked - we only need to know whether there is anything in the
  * AIL at the time we are called. We don't need to access the contents of
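The "pushing bit" idea in this comment, sketched with generic kernel primitives; the structure, flag bit, and workqueue below are assumptions for illustration, not the actual AIL fields:

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_PUSHING_BIT	0

struct demo_pusher {
	unsigned long		flags;
	struct work_struct	work;
	struct workqueue_struct	*wq;
};

/*
 * Illustrative only: queue the background push at most once. The
 * emptiness check is done unlocked because only a point-in-time answer
 * is needed, and test_and_set_bit() is atomic, so no further locking is
 * required; the worker clears the bit when its push completes.
 */
static void demo_push_async(struct demo_pusher *p, bool ail_empty)
{
	if (ail_empty)
		return;

	if (!test_and_set_bit(DEMO_PUSHING_BIT, &p->flags))
		queue_work(p->wq, &p->work);
}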