writeback: introduce .tagged_writepages for the WB_SYNC_NONE sync stage
sync(2) is performed in two stages: the WB_SYNC_NONE sync and the WB_SYNC_ALL sync. Identify the first stage with .tagged_writepages and do livelock prevention for it, too.

Jan's commit f446daaea9 ("mm: implement writeback livelock avoidance using page tagging") is a partial fix in that it only fixed the WB_SYNC_ALL phase livelock.

Although ext4 is tested to no longer livelock with commit f446daaea9, that may be due to some "redirty_tail() after pages_skipped" effect, which is by no means a guarantee for _all_ the filesystems.

Note that writeback_inodes_sb() is called not only by sync(); the other callers are treated the same because they also need livelock prevention.

Impact: it changes the order in which pages/inodes are synced to disk. Now, in the WB_SYNC_NONE stage, it won't proceed to write the next inode until finished with the current one.

Acked-by: Jan Kara <jack@suse.cz>
CC: Dave Chinner <david@fromorbit.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
parent 59c5f46fbe
commit 6e6938b6d3
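The idea being generalized here is tag-and-write livelock avoidance: quickly tag the pages that are dirty at the start of the pass, then write back only the tagged pages, so pages dirtied during the pass are left for a later one. Below is a minimal, self-contained userspace sketch of that pattern; the boolean arrays and helper names are illustrative stand-ins, not the kernel's radix-tree tags or real API.

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 8

/* Illustrative page state; the kernel keeps these as radix-tree tags. */
static bool dirty[NR_PAGES];
static bool towrite[NR_PAGES];	/* models PAGECACHE_TAG_TOWRITE */

/* Pass 1 (quick): tag every page that is dirty right now. */
static void tag_pages_sketch(void)
{
	for (int i = 0; i < NR_PAGES; i++)
		if (dirty[i])
			towrite[i] = true;
}

/*
 * Pass 2 (maybe slow): write back only the tagged pages.  A page
 * dirtied after pass 1 carries no TOWRITE tag, so a writer that
 * keeps redirtying pages cannot make this loop run forever.
 */
static void write_tagged_pages_sketch(void)
{
	for (int i = 0; i < NR_PAGES; i++) {
		if (!towrite[i])
			continue;
		towrite[i] = false;
		dirty[i] = false;			/* "write" the page */
		printf("wrote page %d\n", i);
		dirty[(i + 7) % NR_PAGES] = true;	/* simulated concurrent redirty */
	}
}

int main(void)
{
	dirty[0] = dirty[3] = true;
	tag_pages_sketch();
	write_tagged_pages_sketch();	/* terminates despite the redirtying */
	return 0;
}

Any C99 compiler builds this; the simulated redirtying inside the write loop shows why the second pass still terminates under a concurrent dirtier, which is exactly the property this commit extends to the WB_SYNC_NONE stage.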
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
@@ -2741,7 +2741,7 @@ static int write_cache_pages_da(struct address_space *mapping,
 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
 	end = wbc->range_end >> PAGE_CACHE_SHIFT;

-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
@@ -2973,7 +2973,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 	}

 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);

 	while (!ret && wbc->nr_to_write > 0) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
@@ -36,6 +36,7 @@ struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
+	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
@@ -650,6 +651,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 {
 	struct writeback_control wbc = {
 		.sync_mode		= work->sync_mode,
+		.tagged_writepages	= work->tagged_writepages,
 		.older_than_this	= NULL,
 		.for_kupdate		= work->for_kupdate,
 		.for_background		= work->for_background,
@@ -657,7 +659,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
-	long write_chunk;
+	long write_chunk = MAX_WRITEBACK_PAGES;
 	struct inode *inode;

 	if (wbc.for_kupdate) {
@@ -683,9 +685,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	 *                   (quickly) tag currently dirty pages
 	 *                   (maybe slowly) sync all tagged pages
 	 */
-	if (wbc.sync_mode == WB_SYNC_NONE)
-		write_chunk = MAX_WRITEBACK_PAGES;
-	else
+	if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
 		write_chunk = LONG_MAX;

 	wbc.wb_start = jiffies; /* livelock avoidance */
@@ -1188,10 +1188,11 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
-		.sb		= sb,
-		.sync_mode	= WB_SYNC_NONE,
-		.done		= &done,
-		.nr_pages	= nr,
+		.sb			= sb,
+		.sync_mode		= WB_SYNC_NONE,
+		.tagged_writepages	= 1,
+		.done			= &done,
+		.nr_pages		= nr,
 	};

 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
@@ -47,6 +47,7 @@ struct writeback_control {
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
+	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
 	unsigned more_io:1;		/* more io to be dispatched */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
@@ -892,12 +892,12 @@ int write_cache_pages(struct address_space *mapping,
 		range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && (index <= end)) {
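For orientation, the WB_SYNC_NONE pass that now sets .tagged_writepages is the first of sync(2)'s two stages. A hedged sketch of the per-superblock shape follows, simplified from the era's __sync_filesystem() in fs/sync.c, with quota sync and block-device flushing omitted; it is not the literal kernel code.

/*
 * Sketch of sync(2)'s two stages per super_block (simplified).
 */
static void sync_filesystem_sketch(struct super_block *sb)
{
	/* Stage 1: WB_SYNC_NONE with tagged_writepages = 1 --
	 * start writeback without waiting; livelock-proof after
	 * this patch. */
	writeback_inodes_sb(sb);

	/* Stage 2: WB_SYNC_ALL -- write and wait on everything;
	 * livelock-proof since commit f446daaea9. */
	sync_inodes_sb(sb);
}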