f2fs: Avoid double lock for cp_rwsem during checkpoint

There is a scenario in which f2fs_sync_node_pages() gets called
during checkpoint, tries to flush inline data, and ends up calling
iput(). This results in a deadlock: iput() tries to take cp_rwsem
for reading, but the checkpoint's block_operations() already holds
it for writing.

Call stack:

Thread A		Thread B
f2fs_write_checkpoint()
- block_operations(sbi)
 - f2fs_lock_all(sbi);
  - down_write(&sbi->cp_rwsem);

                        - open()
                         - igrab()
                        - write() write inline data
                        - unlink()
- f2fs_sync_node_pages()
 - if (is_inline_node(page))
  - flush_inline_data()
   - ilookup()
     page = f2fs_pagecache_get_page()
     if (!page)
      goto iput_out;
     iput_out:
			- close()
			- iput()
       iput(inode);
       - f2fs_evict_inode()
        - f2fs_truncate_blocks()
         - f2fs_lock_op()
           - down_read(&sbi->cp_rwsem);
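
The underlying failure is a single-task self-deadlock on a
non-recursive rw_semaphore: Thread A already holds cp_rwsem for
writing when its own iput() path asks for it again for reading. A
minimal sketch of the pattern (illustrative kernel-style code, not
f2fs source; the function name is made up):

	static DECLARE_RWSEM(sem);

	static void checkpoint_like_path(void)
	{
		down_write(&sem);	/* f2fs_lock_all(): write side taken */

		/*
		 * Deep in the same call chain, an inode gets evicted
		 * and f2fs_lock_op() asks for the read side:
		 */
		down_read(&sem);	/* blocks forever: rw_semaphore is
					 * not recursive, and the writer
					 * it waits for is this very task
					 */
		up_read(&sem);		/* never reached */
		up_write(&sem);
	}

Because the blocked reader is the same task that owns the write lock,
nothing can ever call up_write(), and the checkpoint hangs.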

Fixes: 2049d4fcb0 ("f2fs: avoid multiple node page writes due to inline_data")
Signed-off-by: Sayali Lokhande <sayalil@codeaurora.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c

@@ -1168,6 +1168,11 @@ static int block_operations(struct f2fs_sb_info *sbi)
 	};
 	int err = 0, cnt = 0;
 
+	/*
+	 * Let's flush inline_data in dirty node pages.
+	 */
+	f2fs_flush_inline_data(sbi);
+
 retry_flush_quotas:
 	f2fs_lock_all(sbi);
 	if (__need_flush_quota(sbi)) {
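
The placement is the point of this hunk: inline data is now flushed
before f2fs_lock_all() takes cp_rwsem for writing, so an iput()
triggered by the flush can still take the read side. Condensed, the
new ordering in block_operations() looks like this (a sketch; quota
and node flushing, retries and error handling are elided):

	static int block_operations(struct f2fs_sb_info *sbi)
	{
		/*
		 * 1. Flush inline_data while cp_rwsem is still free; a
		 *    resulting iput() -> f2fs_evict_inode() ->
		 *    f2fs_lock_op() can take the read lock safely.
		 */
		f2fs_flush_inline_data(sbi);

		/* 2. Only now freeze all operations for the checkpoint. */
		f2fs_lock_all(sbi);	/* down_write(&sbi->cp_rwsem) */

		/* ... quota/node flushing, retries, error handling ... */
	}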

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h

@@ -3282,6 +3282,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 int f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			struct writeback_control *wbc, bool atomic,
 			unsigned int *seq_id);

--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c

@@ -1807,6 +1807,53 @@ static bool flush_dirty_inode(struct page *page)
 	return true;
 }
 
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+{
+	pgoff_t index = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	int ret = 0;
+
+	pagevec_init(&pvec);
+
+	while ((nr_pages = pagevec_lookup_tag(&pvec,
+			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (!IS_DNODE(page))
+				continue;
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+				unlock_page(page);
+				continue;
+			}
+
+			if (!PageDirty(page)) {
+				/* someone wrote it for us */
+				goto continue_unlock;
+			}
+
+			/* flush inline_data, if it's async context. */
+			if (is_inline_node(page)) {
+				clear_inline_node(page);
+				unlock_page(page);
+				flush_inline_data(sbi, ino_of_node(page));
+				continue;
+			}
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	return ret;
+}
+
 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 				struct writeback_control *wbc,
 				bool do_balance, enum iostat_type io_type)
@@ -1870,8 +1917,8 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 				goto continue_unlock;
 			}
 
-			/* flush inline_data */
-			if (is_inline_node(page)) {
+			/* flush inline_data, if it's async context. */
+			if (do_balance && is_inline_node(page)) {
 				clear_inline_node(page);
 				unlock_page(page);
 				flush_inline_data(sbi, ino_of_node(page));
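
The do_balance guard closes the same window from inside
f2fs_sync_node_pages(): the checkpoint path calls this function with
do_balance == false, so the flush_inline_data() branch above is
skipped while cp_rwsem is write-held, and inline data is only flushed
from balancing/writeback contexts, which take cp_rwsem for reading at
most. A sketch of the checkpoint-side call site (as issued from
block_operations() in this kernel version):

	/* do_balance == false: no inline flush, hence no iput(),
	 * under the write-held cp_rwsem.
	 */
	err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);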