/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_BMAP_UTIL_H__
#define	__XFS_BMAP_UTIL_H__

/* Kernel only BMAP related definitions and functions */

struct xfs_bmbt_irec;
struct xfs_bmap_free_item;
struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;

/*
 * Argument structure for xfs_bmap_alloc.
 */
struct xfs_bmalloca {
	xfs_fsblock_t		*firstblock; /* i/o first block allocated */
	struct xfs_bmap_free	*flist;	/* bmap freelist */
	struct xfs_trans	*tp;	/* transaction pointer */
	struct xfs_inode	*ip;	/* incore inode pointer */
	struct xfs_bmbt_irec	prev;	/* extent before the new one */
	struct xfs_bmbt_irec	got;	/* extent after, or delayed */

	xfs_fileoff_t		offset;	/* offset in file filling in */
	xfs_extlen_t		length;	/* i/o length asked/allocated */
	xfs_fsblock_t		blkno;	/* starting block of new extent */

	struct xfs_btree_cur	*cur;	/* btree cursor */
	xfs_extnum_t		idx;	/* current extent index */
	int			nallocs; /* number of extents alloc'd */
	int			logflags; /* flags for transaction logging */

	xfs_extlen_t		total;	/* total blocks needed for xaction */
	xfs_extlen_t		minlen;	/* minimum allocation size (blocks) */
	xfs_extlen_t		minleft; /* amount must be left after alloc */
	bool			eof;	/* set if allocating past last extent */
	bool			wasdel;	/* replacing a delayed allocation */
	bool			userdata; /* set if is user data */
	bool			aeof;	/* allocated space at eof */
	bool			conv;	/* overwriting unwritten extents */
	int			flags;	/* XFS_BMAPI_* flags for the worker */
	struct completion	*done;	/* signalled when the worker finishes */
	struct work_struct	work;	/* workqueue item for stack switching */
	int			result;	/* error returned from the worker */
};
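
/*
 * Illustrative sketch (disabled, not part of this header): roughly how the
 * allocation arguments above get filled in before an allocation attempt.
 * The helper name and values are hypothetical; the authoritative setup
 * lives with the bmapi code in xfs_bmap.c.
 */
#if 0
static void
example_setup_bmalloca(
	struct xfs_bmalloca	*bma,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_extlen_t		len)
{
	bma->tp = tp;		/* transaction the allocation is part of */
	bma->ip = ip;		/* inode whose fork is being mapped */
	bma->offset = bno;	/* file offset (in fsblocks) to map */
	bma->length = len;	/* number of blocks requested */
	bma->minlen = 1;	/* accept any extent of at least one block */
	bma->wasdel = false;	/* not converting a delalloc reservation */
}
#endif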

int	xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
			int *committed);
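
/*
 * Illustrative sketch (disabled): the usual pattern around
 * xfs_bmap_finish().  Callers accumulate extents to free on the flist
 * while modifying the bmap, then finish the list before committing the
 * transaction.  The goto label is hypothetical; tp is assumed in scope.
 */
#if 0
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		firstfsb;
	int			committed;
	int			error;

	xfs_bmap_init(&free_list, &firstfsb);
	/* ... bmap modifications that queue blocks onto free_list ... */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);	/* drop the unfinished list */
		goto out_trans_cancel;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
#endif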

int	xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
int	xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
		     int whichfork, int *eof);
int	xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
			      int whichfork, int *count);
int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
		xfs_fileoff_t start_fsb, xfs_fileoff_t length);

/* bmap to userspace formatter - copy to user & advance pointer */
typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *);
int	xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
		xfs_bmap_format_t formatter, void *arg);
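
/*
 * Illustrative sketch (disabled): a minimal xfs_bmap_format_t callback,
 * modelled on the ioctl formatter in xfs_ioctl.c.  It copies only the
 * getbmap prefix of each getbmapx record to userspace and advances the
 * opaque cursor.  The function name is hypothetical.
 */
#if 0
static int
example_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
{
	struct getbmap __user	*base = *ap;

	/* copy only the getbmap portion, not the getbmapx extension */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return XFS_ERROR(EFAULT);	/* positive error convention */

	/* advance the cursor to the next output record */
	*ap += sizeof(struct getbmap);
	return 0;
}
#endif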

/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
void	xfs_bmap_del_free(struct xfs_bmap_free *flist,
		struct xfs_bmap_free_item *prev,
		struct xfs_bmap_free_item *free);
int	xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
		struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
		int rt, int eof, int delay, int convert,
		xfs_fileoff_t *offp, xfs_extlen_t *lenp);
void	xfs_bmap_adjacent(struct xfs_bmalloca *ap);
int	xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
			     int whichfork, struct xfs_bmbt_irec *rec,
			     int *is_empty);

/* preallocation and hole punch interface */
int	xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
			     xfs_off_t len, int alloc_type);
int	xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
			    xfs_off_t len);
int	xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
			    xfs_off_t len);
int	xfs_collapse_file_space(struct xfs_inode *ip, xfs_off_t offset,
				xfs_off_t len);
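
/*
 * Illustrative sketch (disabled): how the four interfaces above line up
 * with the fallocate() modes dispatched from the XFS file operations.
 * The real xfs_file_fallocate() does more (locking, size updates); this
 * only shows the mapping.  mode, ip, offset and len are assumed in scope.
 */
#if 0
	if (mode & FALLOC_FL_PUNCH_HOLE)
		error = xfs_free_file_space(ip, offset, len);
	else if (mode & FALLOC_FL_COLLAPSE_RANGE)
		error = xfs_collapse_file_space(ip, offset, len);
	else if (mode & FALLOC_FL_ZERO_RANGE)
		error = xfs_zero_file_space(ip, offset, len);
	else	/* plain preallocation */
		error = xfs_alloc_file_space(ip, offset, len,
					     XFS_BMAPI_PREALLOC);
#endif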

/* EOF block manipulation functions */
bool	xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
int	xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
			   bool need_iolock);

int	xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
			 struct xfs_swapext *sx);

xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);

#endif	/* __XFS_BMAP_UTIL_H__ */