f2fs: split sync_node_pages with fsync_node_pages

This patch splits the existing sync_node_pages() into sync_node_pages() and fsync_node_pages().
fsync_node_pages() is used by f2fs_sync_file() only.

Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author: Jaegeuk Kim
Date:   2016-04-13 16:24:44 -07:00
Parent: e6e5f5610d
Commit: 5268137564

5 changed files with 84 additions and 33 deletions
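
At a glance, the split leaves two entry points. The sketch below merely paraphrases the prototypes and call sites from the hunks that follow; it introduces nothing beyond what the patch itself changes:

/*
 * fsync path: write only the dirty dnode pages that belong to @ino and
 * set the fsync/dentry marks; called from f2fs_sync_file() only.
 */
int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc);

/*
 * Writeback/checkpoint/GC path: flush dirty node pages in passes,
 * clearing the fsync/dentry marks; the nid_t argument is gone.
 */
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc);

/*
 * Call sites after this patch:
 *   f2fs_sync_file()        -> fsync_node_pages(sbi, ino, &wbc)
 *   block_operations()      -> sync_node_pages(sbi, &wbc)
 *   do_garbage_collect()    -> sync_node_pages(sbi, &wbc)
 *   f2fs_write_node_pages() -> sync_node_pages(sbi, wbc)
 */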

--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c

@@ -892,7 +892,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
         if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                 up_write(&sbi->node_write);
-                err = sync_node_pages(sbi, 0, &wbc);
+                err = sync_node_pages(sbi, &wbc);
                 if (err) {
                         f2fs_unlock_all(sbi);
                         goto out;

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h

@@ -1784,7 +1784,8 @@ void ra_node_page(struct f2fs_sb_info *, nid_t);
 struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_node_page_ra(struct page *, int);
 void sync_inode_page(struct dnode_of_data *);
-int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+int fsync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);

--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c

@@ -256,7 +256,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                 goto out;
         }
 sync_nodes:
-        sync_node_pages(sbi, ino, &wbc);
+        fsync_node_pages(sbi, ino, &wbc);
 
         /* if cp_error was enabled, we should avoid infinite loop */
         if (unlikely(f2fs_cp_error(sbi))) {

--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c

@@ -841,7 +841,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
                         .nr_to_write = LONG_MAX,
                         .for_reclaim = 0,
                 };
-                sync_node_pages(sbi, 0, &wbc);
+                sync_node_pages(sbi, &wbc);
         } else {
                 f2fs_submit_merged_bio(sbi, DATA, WRITE);
         }

--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c

@@ -1222,12 +1222,84 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
         iput(inode);
 }
 
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
+int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                         struct writeback_control *wbc)
 {
         pgoff_t index, end;
         struct pagevec pvec;
-        int step = ino ? 2 : 0;
         int nwritten = 0;
 
         pagevec_init(&pvec, 0);
+        index = 0;
+        end = ULONG_MAX;
+
+        while (index <= end) {
+                int i, nr_pages;
+                nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+                                PAGECACHE_TAG_DIRTY,
+                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+                if (nr_pages == 0)
+                        break;
+
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+
+                        if (unlikely(f2fs_cp_error(sbi))) {
+                                pagevec_release(&pvec);
+                                return -EIO;
+                        }
+
+                        if (!IS_DNODE(page) || !is_cold_node(page))
+                                continue;
+                        if (ino_of_node(page) != ino)
+                                continue;
+
+                        lock_page(page);
+
+                        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+                                unlock_page(page);
+                                continue;
+                        }
+                        if (ino_of_node(page) != ino)
+                                goto continue_unlock;
+
+                        if (!PageDirty(page)) {
+                                /* someone wrote it for us */
+                                goto continue_unlock;
+                        }
+
+                        f2fs_wait_on_page_writeback(page, NODE, true);
+                        BUG_ON(PageWriteback(page));
+                        if (!clear_page_dirty_for_io(page))
+                                goto continue_unlock;
+
+                        set_fsync_mark(page, 1);
+                        if (IS_INODE(page))
+                                set_dentry_mark(page,
+                                                need_dentry_mark(sbi, ino));
+                        nwritten++;
+
+                        if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
+                                unlock_page(page);
+
+                        if (--wbc->nr_to_write == 0)
+                                break;
+                }
+                pagevec_release(&pvec);
+                cond_resched();
+
+                if (wbc->nr_to_write == 0)
+                        break;
+        }
+        return nwritten;
+}
+
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
+{
+        pgoff_t index, end;
+        struct pagevec pvec;
+        int step = 0;
+        int nwritten = 0;
+
+        pagevec_init(&pvec, 0);
@@ -1266,28 +1338,15 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                         if (step == 2 && (!IS_DNODE(page) ||
                                                 !is_cold_node(page)))
                                 continue;
-                        /*
-                         * If an fsync mode,
-                         * we should not skip writing node pages.
-                         */
 lock_node:
-                        if (ino) {
-                                if (ino_of_node(page) == ino)
-                                        lock_page(page);
-                                else
-                                        continue;
-                        } else if (!trylock_page(page)) {
+                        if (!trylock_page(page))
                                 continue;
-                        }
 
                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
 continue_unlock:
                                 unlock_page(page);
                                 continue;
                         }
-                        if (ino && ino_of_node(page) != ino)
-                                goto continue_unlock;
 
                         if (!PageDirty(page)) {
                                 /* someone wrote it for us */
@@ -1295,7 +1354,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                         }
 
                         /* flush inline_data */
-                        if (!ino && is_inline_node(page)) {
+                        if (is_inline_node(page)) {
                                 clear_inline_node(page);
                                 unlock_page(page);
                                 flush_inline_data(sbi, ino_of_node(page));
@@ -1308,17 +1367,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                         if (!clear_page_dirty_for_io(page))
                                 goto continue_unlock;
 
-                        /* called by fsync() */
-                        if (ino && IS_DNODE(page)) {
-                                set_fsync_mark(page, 1);
-                                if (IS_INODE(page))
-                                        set_dentry_mark(page,
-                                                need_dentry_mark(sbi, ino));
-                                nwritten++;
-                        } else {
-                                set_fsync_mark(page, 0);
-                                set_dentry_mark(page, 0);
-                        }
+                        set_fsync_mark(page, 0);
+                        set_dentry_mark(page, 0);
 
                         if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
                                 unlock_page(page);
@@ -1466,7 +1516,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
         diff = nr_pages_to_write(sbi, NODE, wbc);
         wbc->sync_mode = WB_SYNC_NONE;
-        sync_node_pages(sbi, 0, wbc);
+        sync_node_pages(sbi, wbc);
         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
         return 0;