2012-11-29 11:28:09 +07:00
|
|
|
/*
|
2012-11-02 15:08:18 +07:00
|
|
|
* fs/f2fs/checkpoint.c
|
|
|
|
*
|
|
|
|
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
|
|
|
|
* http://www.samsung.com/
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/bio.h>
|
|
|
|
#include <linux/mpage.h>
|
|
|
|
#include <linux/writeback.h>
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/f2fs_fs.h>
|
|
|
|
#include <linux/pagevec.h>
|
|
|
|
#include <linux/swap.h>
|
|
|
|
|
|
|
|
#include "f2fs.h"
|
|
|
|
#include "node.h"
|
|
|
|
#include "segment.h"
|
2014-12-18 10:58:58 +07:00
|
|
|
#include "trace.h"
|
2013-04-23 16:26:54 +07:00
|
|
|
#include <trace/events/f2fs.h>
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2014-07-26 05:47:17 +07:00
|
|
|
static struct kmem_cache *ino_entry_slab;
|
2014-12-29 14:56:18 +07:00
|
|
|
struct kmem_cache *inode_entry_slab;
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2016-05-19 04:07:56 +07:00
|
|
|
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	/* Record the checkpoint failure and force the filesystem read-only. */
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;

	/*
	 * Flushing merged bios may block; callers running in end-IO
	 * (interrupt) context pass end_io=true to skip it.
	 */
	if (end_io)
		return;
	f2fs_flush_merged_bios(sbi);
}
|
|
|
|
|
2012-11-29 11:28:09 +07:00
|
|
|
/*
|
2012-11-02 15:08:18 +07:00
|
|
|
* We guarantee no failure on the returned page.
|
|
|
|
*/
|
|
|
|
/*
 * Grab (and lock) a meta page for writing; never fails.
 * Retries until the page-cache allocation succeeds.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;

	for (;;) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page)
			break;
		cond_resched();
	}

	/* Wait for any in-flight writeback before the caller modifies it. */
	f2fs_wait_on_page_writeback(page, META, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}
|
|
|
|
|
2012-11-29 11:28:09 +07:00
|
|
|
/*
|
2012-11-02 15:08:18 +07:00
|
|
|
* We guarantee no failure on the returned page.
|
|
|
|
*/
|
2015-10-12 16:04:21 +07:00
|
|
|
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
|
|
|
|
bool is_meta)
|
2012-11-02 15:08:18 +07:00
|
|
|
{
|
2014-01-20 17:37:04 +07:00
|
|
|
struct address_space *mapping = META_MAPPING(sbi);
|
2012-11-02 15:08:18 +07:00
|
|
|
struct page *page;
|
2014-12-18 10:33:13 +07:00
|
|
|
struct f2fs_io_info fio = {
|
2015-04-24 04:38:15 +07:00
|
|
|
.sbi = sbi,
|
2014-12-18 10:33:13 +07:00
|
|
|
.type = META,
|
2016-06-06 02:31:55 +07:00
|
|
|
.op = REQ_OP_READ,
|
2016-11-01 20:40:10 +07:00
|
|
|
.op_flags = REQ_META | REQ_PRIO,
|
f2fs: trace old block address for CoWed page
This patch enables to trace old block address of CoWed page for better
debugging.
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4f0, oldaddr = 0xfe8ab, newaddr = 0xfee90 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4f8, oldaddr = 0xfe8b0, newaddr = 0xfee91 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4fa, oldaddr = 0xfe8ae, newaddr = 0xfee92 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x96, oldaddr = 0xf049b, newaddr = 0x2bbe rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x97, oldaddr = 0xf049c, newaddr = 0x2bbf rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x98, oldaddr = 0xf049d, newaddr = 0x2bc0 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x47, oldaddr = 0xffffffff, newaddr = 0xf2631 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x48, oldaddr = 0xffffffff, newaddr = 0xf2632 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x49, oldaddr = 0xffffffff, newaddr = 0xf2633 rw = WRITE, type = DATA
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-22 17:36:38 +07:00
|
|
|
.old_blkaddr = index,
|
|
|
|
.new_blkaddr = index,
|
2015-04-24 02:04:33 +07:00
|
|
|
.encrypted_page = NULL,
|
2014-12-18 10:33:13 +07:00
|
|
|
};
|
2015-10-12 16:04:21 +07:00
|
|
|
|
|
|
|
if (unlikely(!is_meta))
|
2016-06-06 02:31:55 +07:00
|
|
|
fio.op_flags &= ~REQ_META;
|
2012-11-02 15:08:18 +07:00
|
|
|
repeat:
|
2016-04-30 06:11:53 +07:00
|
|
|
page = f2fs_grab_cache_page(mapping, index, false);
|
2012-11-02 15:08:18 +07:00
|
|
|
if (!page) {
|
|
|
|
cond_resched();
|
|
|
|
goto repeat;
|
|
|
|
}
|
2013-03-08 19:29:23 +07:00
|
|
|
if (PageUptodate(page))
|
|
|
|
goto out;
|
|
|
|
|
2015-04-24 04:38:15 +07:00
|
|
|
fio.page = page;
|
|
|
|
|
2015-07-16 03:08:21 +07:00
|
|
|
if (f2fs_submit_page_bio(&fio)) {
|
|
|
|
f2fs_put_page(page, 1);
|
2012-11-02 15:08:18 +07:00
|
|
|
goto repeat;
|
2015-07-16 03:08:21 +07:00
|
|
|
}
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2013-03-08 19:29:23 +07:00
|
|
|
lock_page(page);
|
2013-12-06 13:00:58 +07:00
|
|
|
if (unlikely(page->mapping != mapping)) {
|
2013-04-26 09:55:17 +07:00
|
|
|
f2fs_put_page(page, 1);
|
|
|
|
goto repeat;
|
|
|
|
}
|
2015-07-29 16:33:13 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if there is any IO error when accessing device, make our filesystem
|
|
|
|
* readonly and make sure do not write checkpoint with non-uptodate
|
|
|
|
* meta page.
|
|
|
|
*/
|
|
|
|
if (unlikely(!PageUptodate(page)))
|
2016-05-19 04:07:56 +07:00
|
|
|
f2fs_stop_checkpoint(sbi, false);
|
2013-03-08 19:29:23 +07:00
|
|
|
out:
|
2012-11-02 15:08:18 +07:00
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
2015-10-12 16:04:21 +07:00
|
|
|
/* Read a regular meta page (NAT/SIT/SSA/CP) with REQ_META priority. */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}
|
|
|
|
|
|
|
|
/* for POR only */
|
|
|
|
/* for POR only: read a page through the meta mapping without REQ_META */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}
|
|
|
|
|
2015-04-18 17:05:36 +07:00
|
|
|
/*
 * Check whether @blkaddr lies inside the on-disk area implied by @type.
 * META_NAT addresses are not range-checked here.
 */
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		return true;
	case META_SIT:
		return blkaddr < SIT_BLK_CNT(sbi);
	case META_SSA:
		return blkaddr >= SM_I(sbi)->ssa_blkaddr &&
			blkaddr < MAIN_BLKADDR(sbi);
	case META_CP:
		return blkaddr >= __start_cp_addr(sbi) &&
			blkaddr < SIT_I(sbi)->sit_base_addr;
	case META_POR:
		return blkaddr >= MAIN_BLKADDR(sbi) &&
			blkaddr < MAX_BLKADDR(sbi);
	default:
		BUG();
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
|
2014-02-27 18:12:24 +07:00
|
|
|
* Readahead CP/NAT/SIT/SSA pages
|
2014-02-07 15:11:53 +07:00
|
|
|
*/
|
2015-10-12 16:05:59 +07:00
|
|
|
/*
 * Readahead CP/NAT/SIT/SSA pages starting at block @start.
 *
 * Issues up to @nrpages read requests through the meta mapping under a
 * block plug so consecutive blocks can merge into larger bios.
 * @sync selects REQ_META|REQ_PRIO vs. plain REQ_RAHEAD.
 * Returns the number of block addresses walked (blkno - start), which may
 * be less than @nrpages if an out-of-range address stops the loop early.
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
	};
	struct blk_plug plug;

	/* POR reads are not metadata-priority IO */
	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!is_valid_blkaddr(sbi, blkno, type))
			goto out;

		/* translate the logical blkno into an on-disk block address */
		switch (type) {
		case META_NAT:
			/* NAT readahead wraps to block 0 past max_nid */
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			/* these areas are addressed directly by blkno */
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			/* already cached; nothing to read */
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		fio.old_blkaddr = fio.new_blkaddr;
		/* queue into the merged-bio cache; page stays locked for IO */
		f2fs_submit_page_mbio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	/* flush whatever is still cached in the META read bio */
	f2fs_submit_merged_bio(sbi, META, READ);
	blk_finish_plug(&plug);
	return blkno - start;
}
|
|
|
|
|
2014-12-08 14:02:52 +07:00
|
|
|
/*
 * Kick off POR meta readahead at @index, but only if the page is not
 * already cached and uptodate.
 */
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page = find_get_page(META_MAPPING(sbi), index);
	bool uptodate = page && PageUptodate(page);

	f2fs_put_page(page, 0);

	if (!uptodate)
		ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
|
|
|
|
|
2012-11-02 15:08:18 +07:00
|
|
|
/*
 * ->writepage for the meta mapping.
 *
 * Redirties the page (returning AOP_WRITEPAGE_ACTIVATE) while recovery is
 * in progress, when reclaim targets a pre-summary block, or after a
 * checkpoint error; otherwise writes the page and drops the dirty count.
 */
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	/* don't touch meta pages while power-on recovery is running */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;
	/* after a checkpoint error, no meta page may reach disk */
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page);
	dec_page_count(sbi, F2FS_DIRTY_META);

	/* reclaim wants the IO out now; conditionally flush the merged bio */
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
						0, page->index, META, WRITE);

	unlock_page(page);

	/* if an error surfaced meanwhile, push out pending META writes */
	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, META, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
|
|
|
|
|
|
|
|
static int f2fs_write_meta_pages(struct address_space *mapping,
|
|
|
|
struct writeback_control *wbc)
|
|
|
|
{
|
2014-09-03 05:31:18 +07:00
|
|
|
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
|
2014-03-18 11:47:11 +07:00
|
|
|
long diff, written;
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2013-12-17 15:28:41 +07:00
|
|
|
/* collect a number of dirty meta pages and write together */
|
2014-03-18 11:47:11 +07:00
|
|
|
if (wbc->for_kupdate ||
|
|
|
|
get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
|
2014-03-18 11:43:05 +07:00
|
|
|
goto skip_write;
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2016-02-04 15:14:00 +07:00
|
|
|
trace_f2fs_writepages(mapping->host, wbc, META);
|
|
|
|
|
2012-11-02 15:08:18 +07:00
|
|
|
/* if mounting is failed, skip writing node pages */
|
|
|
|
mutex_lock(&sbi->cp_mutex);
|
2014-03-18 11:47:11 +07:00
|
|
|
diff = nr_pages_to_write(sbi, META, wbc);
|
|
|
|
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
|
2012-11-02 15:08:18 +07:00
|
|
|
mutex_unlock(&sbi->cp_mutex);
|
2014-03-18 11:47:11 +07:00
|
|
|
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
|
2012-11-02 15:08:18 +07:00
|
|
|
return 0;
|
2014-03-18 11:43:05 +07:00
|
|
|
|
|
|
|
skip_write:
|
|
|
|
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
|
2016-02-04 15:14:00 +07:00
|
|
|
trace_f2fs_writepages(mapping->host, wbc, META);
|
2014-03-18 11:43:05 +07:00
|
|
|
return 0;
|
2012-11-02 15:08:18 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Write back up to @nr_to_write dirty meta pages of @sbi and return the
 * number actually submitted.  Background writeback (nr_to_write != LONG_MAX)
 * stops as soon as the dirty pages are no longer block-contiguous, so the
 * submitted bios stay mergeable.
 */
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	/* prev tracks the last written index to detect discontiguity */
	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec, 0);

	/* plug so consecutive meta writes merge into large bios */
	blk_start_plug(&plug);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			/* background flush: stop at the first index gap */
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			/* page may have been truncated/migrated meanwhile */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (mapping->a_ops->writepage(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	blk_finish_plug(&plug);

	return nwritten;
}
|
|
|
|
|
|
|
|
static int f2fs_set_meta_page_dirty(struct page *page)
|
|
|
|
{
|
2013-10-24 15:53:29 +07:00
|
|
|
trace_f2fs_set_page_dirty(page, META);
|
|
|
|
|
2016-07-01 08:49:15 +07:00
|
|
|
if (!PageUptodate(page))
|
|
|
|
SetPageUptodate(page);
|
2012-11-02 15:08:18 +07:00
|
|
|
if (!PageDirty(page)) {
|
2016-07-01 08:40:10 +07:00
|
|
|
f2fs_set_page_dirty_nobuffers(page);
|
2014-09-03 05:31:18 +07:00
|
|
|
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
|
2015-01-19 19:24:37 +07:00
|
|
|
SetPagePrivate(page);
|
2014-12-18 10:58:58 +07:00
|
|
|
f2fs_trace_pid(page);
|
2012-11-02 15:08:18 +07:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* address_space operations for the meta inode (CP/SIT/NAT/SSA pages) */
const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage = f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
|
|
|
|
|
2014-07-26 05:47:17 +07:00
|
|
|
/*
 * Insert @ino into the per-type inode management tree/list, unless it is
 * already present.  The slab object is allocated up front so the radix-tree
 * insert can run under the spinlock; on a lost race the preallocated entry
 * is freed at the end.
 */
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	/* __GFP_NOFAIL-style slab alloc: done outside the lock */
	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
	/* preload radix tree nodes so insertion under spinlock cannot sleep */
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (radix_tree_insert(&im->ino_root, ino, e)) {
			/* insertion failed (e.g. preload consumed): retry */
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		/* ORPHAN_INO counts are managed by acquire_orphan_inode() */
		if (type != ORPHAN_INO)
			im->ino_num++;
	}
	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	/* entry already existed: drop the unused preallocation */
	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}
|
|
|
|
|
2014-07-26 05:47:17 +07:00
|
|
|
/*
 * Remove @ino from the per-type inode management structures, if present.
 * No-op when the ino is not tracked.
 */
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *entry;

	spin_lock(&im->ino_lock);
	entry = radix_tree_lookup(&im->ino_root, ino);
	if (!entry) {
		spin_unlock(&im->ino_lock);
		return;
	}

	list_del(&entry->list);
	radix_tree_delete(&im->ino_root, ino);
	im->ino_num--;
	spin_unlock(&im->ino_lock);

	/* free outside the spinlock */
	kmem_cache_free(ino_entry_slab, entry);
}
|
|
|
|
|
2015-12-15 12:29:47 +07:00
|
|
|
/* Public wrapper: track @ino in the dirty-ino list of the given @type. */
void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, type);
}
|
|
|
|
|
2015-12-15 12:29:47 +07:00
|
|
|
/* Public wrapper: stop tracking @ino in the dirty-ino list of @type. */
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}
|
|
|
|
|
|
|
|
/* mode should be APPEND_INO or UPDATE_INO */
|
|
|
|
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
|
|
|
|
{
|
2014-11-18 10:18:36 +07:00
|
|
|
struct inode_management *im = &sbi->im[mode];
|
2014-07-25 21:40:59 +07:00
|
|
|
struct ino_entry *e;
|
2014-11-18 10:18:36 +07:00
|
|
|
|
|
|
|
spin_lock(&im->ino_lock);
|
|
|
|
e = radix_tree_lookup(&im->ino_root, ino);
|
|
|
|
spin_unlock(&im->ino_lock);
|
2014-07-25 21:40:59 +07:00
|
|
|
return e ? true : false;
|
|
|
|
}
|
|
|
|
|
2016-05-03 12:09:56 +07:00
|
|
|
/*
 * Drop every tracked ino entry of the append/update lists; when @all is
 * true the orphan list is torn down as well (used on unmount/failure).
 */
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}
|
|
|
|
|
2013-07-30 09:36:53 +07:00
|
|
|
/*
 * Reserve one slot in the orphan inode space.  Returns 0 on success or
 * -ENOSPC when the checkpoint cannot hold another orphan (or when fault
 * injection forces the failure path).
 */
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* simulate orphan-slot exhaustion for testing */
	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(FAULT_ORPHAN);
		return -ENOSPC;
	}
#endif
	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}
|
|
|
|
|
2013-07-30 09:36:53 +07:00
|
|
|
/* Return a slot reserved by acquire_orphan_inode() without using it. */
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	/* releasing without a matching acquire is a bug */
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}
|
|
|
|
|
2016-06-14 08:27:02 +07:00
|
|
|
/*
 * Register @inode as an orphan (nlink reached zero while still open) and
 * persist its inode page so the orphan state survives a checkpoint.
 */
void add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
	update_inode_page(inode);
}
|
|
|
|
|
|
|
|
/* Drop @ino from the in-memory orphan list (e.g. after final iput). */
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}
|
|
|
|
|
2015-08-07 16:58:43 +07:00
|
|
|
/*
 * Recover a single orphan @ino found in the checkpoint's orphan blocks:
 * re-reserve an orphan slot, iget the inode, drop its link count and iput
 * it so eviction truncates the data.  Afterwards the node block must be
 * gone; if it is not, fsck is requested and -EIO returned.
 */
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err = acquire_orphan_inode(sbi);

	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: orphan failed (ino=%x), run fsck to fix.",
				__func__, ino);
		return err;
	}

	/* track it while recovery is in flight */
	__add_ino_entry(sbi, ino, ORPHAN_INO);

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * there should be a bug that we can't find the entry
		 * to orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	get_node_info(sbi, ino, &ni);

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		/* eviction failed to free the node block: inconsistent */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: orphan failed (ino=%x), run fsck to fix.",
				__func__, ino);
		return -EIO;
	}
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
	return 0;
}
|
|
|
|
|
2015-08-07 16:58:43 +07:00
|
|
|
/*
 * Mount-time recovery: walk every orphan block recorded in the valid
 * checkpoint pack and recover each listed ino.  Returns 0 on success or
 * the first recovery error.
 */
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	/* orphan blocks sit right after the cp block (+payload) */
	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	/* readahead all orphan blocks before the serial scan */
	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Serialize the in-memory orphan ino list into on-disk orphan blocks
 * starting at @start_blk, packing F2FS_ORPHANS_PER_BLOCK entries per block
 * and dirtying each filled page for the checkpoint write.
 */
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop for each orphan inode entry and write them in journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			/* start a fresh, zeroed orphan block */
			page = grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * an orphan block is full of 1020 entries,
			 * then we need to flush current orphan blocks
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	/* flush the final, partially-filled block, if any */
	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
|
|
|
|
|
2016-09-30 07:24:53 +07:00
|
|
|
/*
 * Read the checkpoint block at @cp_addr, validate its CRC and return its
 * version via @version.  On success (and on CRC/offset failure) *cp_page
 * holds a reference that the caller must put.
 */
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	unsigned long blk_size = sbi->blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	*cp_page = get_meta_page(sbi, cp_addr);
	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	/* checksum must lie inside the block */
	if (crc_offset >= blk_size) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = cur_cp_crc(*cp_block);
	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2016-09-30 07:24:53 +07:00
|
|
|
/*
 * Validate one checkpoint pack starting at @cp_addr: its first and last
 * blocks must both pass CRC checks and carry the same version.  Returns
 * the (referenced) first cp page on success, NULL otherwise.
 */
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	/* Read the 1st cp block in this CP pack */
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		goto invalid_cp1;
	pre_version = *version;

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp2;
	cur_version = *version;

	/* matching versions prove the pack was written completely */
	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Load the newest valid checkpoint of the two on-disk packs into
 * sbi->ckpt (plus any cp payload blocks).  Returns 0 on success,
 * -ENOMEM on allocation failure, -EINVAL when no valid pack exists or
 * sanity checks fail.
 */
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding out valid cp block involves read both
	 * sets( cp pack1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	/* pick the newer of the valid packs */
	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	/* Sanity checking of checkpoint */
	if (sanity_check_ckpt(sbi))
		goto free_fail_no_cp;

	/* remember which pack is active so the next CP goes to the other */
	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	if (cp_blks <= 1)
		goto done;

	/* copy the extra cp payload blocks following the cp block */
	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}
|
|
|
|
|
2015-12-16 12:09:20 +07:00
|
|
|
/*
 * Link @inode into the per-sb dirty list of @type (DIR_INODE/FILE_INODE)
 * and set the matching FI_DIRTY_* flag.  Idempotent: already-flagged
 * inodes are left alone.  Caller holds sbi->inode_lock[type].
 */
static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}
|
|
|
|
|
2015-12-16 12:09:20 +07:00
|
|
|
/*
 * Unlink @inode from the dirty list of @type and clear its FI_DIRTY_*
 * flag, but only once it has no dirty pages left.  Caller holds
 * sbi->inode_lock[type].
 */
static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	/* still has dirty pages, or was never on the list */
	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
|
|
|
|
|
2014-09-13 05:53:45 +07:00
|
|
|
/*
 * Account @page as a dirty page of @inode and, where tracked, put the
 * inode onto the superblock's dirty-inode list.
 *
 * Only directories, regular files and symlinks are handled; regular
 * files are listed only when the DATA_FLUSH mount option is set, while
 * directories are always listed.  The page is additionally tagged with
 * PagePrivate and recorded for IO tracing.
 */
void update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	/* plain data inodes are listed only with the DATA_FLUSH option */
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	SetPagePrivate(page);
	f2fs_trace_pid(page);
}
|
|
|
|
|
2015-12-16 12:09:20 +07:00
|
|
|
/*
 * Drop @inode from the superblock's dirty-inode list, mirroring
 * update_dirty_page(): only directories, regular files and symlinks are
 * considered, and regular files only when DATA_FLUSH is set (otherwise
 * they were never listed in the first place).
 */
void remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	/* non-DATA_FLUSH file inodes are not tracked; nothing to remove */
	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}
|
|
|
|
|
2015-12-24 17:04:56 +07:00
|
|
|
/*
 * Write back every inode currently on sbi->inode_list[@type].
 *
 * Repeatedly takes the first list entry, pins it with igrab() outside
 * the spinlock, flushes its mapping and releases it, until the list is
 * empty.  If igrab() fails the inode is being freed, so the merged DATA
 * bio is submitted to push out its in-flight dentry pages and we retry.
 *
 * Returns 0 when the list drains, or -EIO if a checkpoint error has
 * been raised on the filesystem.
 */
int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit bio, since there exist several
		 * writebacking dentry pages in the freeing inode.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		cond_resched();
	}
	goto retry;
}
|
|
|
|
|
2016-05-21 01:10:10 +07:00
|
|
|
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
|
|
|
|
{
|
|
|
|
struct list_head *head = &sbi->inode_list[DIRTY_META];
|
|
|
|
struct inode *inode;
|
|
|
|
struct f2fs_inode_info *fi;
|
|
|
|
s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
|
|
|
|
|
|
|
|
while (total--) {
|
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
spin_lock(&sbi->inode_lock[DIRTY_META]);
|
|
|
|
if (list_empty(head)) {
|
|
|
|
spin_unlock(&sbi->inode_lock[DIRTY_META]);
|
|
|
|
return 0;
|
|
|
|
}
|
2017-01-07 17:49:42 +07:00
|
|
|
fi = list_first_entry(head, struct f2fs_inode_info,
|
2016-05-21 01:10:10 +07:00
|
|
|
gdirty_list);
|
|
|
|
inode = igrab(&fi->vfs_inode);
|
|
|
|
spin_unlock(&sbi->inode_lock[DIRTY_META]);
|
|
|
|
if (inode) {
|
2016-10-20 08:27:56 +07:00
|
|
|
sync_inode_metadata(inode, 0);
|
|
|
|
|
|
|
|
/* it's on eviction */
|
|
|
|
if (is_inode_flag_set(inode, FI_DIRTY_INODE))
|
|
|
|
update_inode_page(inode);
|
2016-05-21 01:10:10 +07:00
|
|
|
iput(inode);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-11-29 11:28:09 +07:00
|
|
|
/*
|
2012-11-02 15:08:18 +07:00
|
|
|
* Freeze all the FS-operations for checkpoint.
|
|
|
|
*/
|
2014-08-12 08:37:46 +07:00
|
|
|
/*
 * Freeze all the FS-operations for checkpoint.
 *
 * Takes f2fs_lock_all() and keeps retrying until there are no dirty
 * dentry pages and no dirty inode metadata: each flush pass must drop
 * the global lock (the writeback paths take it for reading), so after a
 * pass we re-acquire it and re-check from the top.  Once the dentry and
 * inode-meta sides are clean, node_write is taken for writing and dirty
 * node pages are flushed the same retry-loop way, guaranteeing no dirty
 * node pages remain until the nat/sit flush finishes (needed for POR).
 *
 * On success the caller holds f2fs_lock_all() and sbi->node_write, to
 * be released by unblock_operations().  On error both are dropped and
 * the error from the flush path is returned.  The whole sequence runs
 * under a blk_plug so the flush IOs can be merged by the IO scheduler.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush.
	 */
retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		err = sync_node_pages(sbi, &wbc);
		if (err) {
			f2fs_unlock_all(sbi);
			goto out;
		}
		goto retry_flush_nodes;
	}
out:
	blk_finish_plug(&plug);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Undo block_operations(): release the node-write semaphore first, then
 * the global FS lock, letting normal FS operations resume after the
 * checkpoint.  The release order is the reverse of the acquire order.
 */
static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}
|
|
|
|
|
2013-11-07 10:48:25 +07:00
|
|
|
/*
 * Sleep on sbi->cp_wait until no checkpoint data pages are under
 * writeback (F2FS_WB_CP_DATA reaches zero).
 *
 * Standard wait-queue pattern: prepare_to_wait() must precede the
 * condition check on every iteration so a wakeup between the check and
 * the sleep is not lost.  The 5*HZ timeout is a safety net that forces
 * a periodic re-check even if a wakeup is missed.
 */
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WB_CP_DATA))
			break;

		io_schedule_timeout(5*HZ);
	}
	finish_wait(&sbi->cp_wait, &wait);
}
|
|
|
|
|
2016-10-01 07:37:43 +07:00
|
|
|
/*
 * Refresh the flag word of the in-memory checkpoint under cp_lock,
 * based on the reason for this checkpoint (@cpc->reason), the current
 * orphan-inode count, and the SBI state:
 *  - UMOUNT/FASTBOOT flags mirror cpc->reason;
 *  - ORPHAN_PRESENT mirrors whether any orphan inodes are recorded;
 *  - FSCK is set (never cleared here) when SBI_NEED_FSCK is pending;
 *  - CRC_RECOVERY is always set to activate crc|cp_ver for recovery.
 * If the cp pack would collide with the nat_bits blocks at the end of
 * the segment on unmount, nat_bits is disabled first.
 */
static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	spin_lock(&sbi->cp_lock);

	/*
	 * NOTE(review): cp_pack_total_block_count looks like an on-disk
	 * little-endian field; comparing it without le32_to_cpu() seems
	 * endian-unsafe on big-endian hosts - confirm against f2fs_fs.h.
	 */
	if (cpc->reason == CP_UMOUNT && ckpt->cp_pack_total_block_count >
			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
		disable_nat_bits(sbi, false);

	if (cpc->reason == CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason == CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);

	spin_unlock(&sbi->cp_lock);
}
|
|
|
|
|
2015-12-23 16:50:30 +07:00
|
|
|
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
|
2012-11-02 15:08:18 +07:00
|
|
|
{
|
|
|
|
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
|
2014-09-12 19:19:48 +07:00
|
|
|
struct f2fs_nm_info *nm_i = NM_I(sbi);
|
2014-11-18 10:18:36 +07:00
|
|
|
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
|
2014-09-12 19:19:48 +07:00
|
|
|
nid_t last_nid = nm_i->next_scan_nid;
|
2012-11-02 15:08:18 +07:00
|
|
|
block_t start_blk;
|
|
|
|
unsigned int data_sum_blocks, orphan_blocks;
|
2013-06-19 18:47:19 +07:00
|
|
|
__u32 crc32 = 0;
|
2012-11-02 15:08:18 +07:00
|
|
|
int i;
|
2015-02-26 06:57:20 +07:00
|
|
|
int cp_payload_blks = __cp_payload(sbi);
|
2016-01-27 08:57:30 +07:00
|
|
|
struct super_block *sb = sbi->sb;
|
|
|
|
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
|
|
|
|
u64 kbytes_written;
|
2012-11-02 15:08:18 +07:00
|
|
|
|
|
|
|
/* Flush all the NAT/SIT pages */
|
2014-08-12 08:37:46 +07:00
|
|
|
while (get_pages(sbi, F2FS_DIRTY_META)) {
|
2012-11-02 15:08:18 +07:00
|
|
|
sync_meta_pages(sbi, META, LONG_MAX);
|
2014-08-12 08:37:46 +07:00
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
2015-12-23 16:50:30 +07:00
|
|
|
return -EIO;
|
2014-08-12 08:37:46 +07:00
|
|
|
}
|
2012-11-02 15:08:18 +07:00
|
|
|
|
|
|
|
next_free_nid(sbi, &last_nid);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* modify checkpoint
|
|
|
|
* version number is already updated
|
|
|
|
*/
|
|
|
|
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
|
|
|
|
ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
|
|
|
|
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
|
2014-08-22 15:17:38 +07:00
|
|
|
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
|
2012-11-02 15:08:18 +07:00
|
|
|
ckpt->cur_node_segno[i] =
|
|
|
|
cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
|
|
|
|
ckpt->cur_node_blkoff[i] =
|
|
|
|
cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
|
|
|
|
ckpt->alloc_type[i + CURSEG_HOT_NODE] =
|
|
|
|
curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
|
|
|
|
}
|
2014-08-22 15:17:38 +07:00
|
|
|
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
|
2012-11-02 15:08:18 +07:00
|
|
|
ckpt->cur_data_segno[i] =
|
|
|
|
cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
|
|
|
|
ckpt->cur_data_blkoff[i] =
|
|
|
|
cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
|
|
|
|
ckpt->alloc_type[i + CURSEG_HOT_DATA] =
|
|
|
|
curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
|
|
|
|
}
|
|
|
|
|
|
|
|
ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
|
|
|
|
ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
|
|
|
|
ckpt->next_free_nid = cpu_to_le32(last_nid);
|
|
|
|
|
|
|
|
/* 2 cp + n data seg summary + orphan inode blocks */
|
2014-12-09 13:21:46 +07:00
|
|
|
data_sum_blocks = npages_for_summary_flush(sbi, false);
|
2016-09-20 10:04:18 +07:00
|
|
|
spin_lock(&sbi->cp_lock);
|
2014-08-22 15:17:38 +07:00
|
|
|
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
|
2016-09-20 10:04:18 +07:00
|
|
|
__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
|
2012-11-02 15:08:18 +07:00
|
|
|
else
|
2016-09-20 10:04:18 +07:00
|
|
|
__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
|
|
|
|
spin_unlock(&sbi->cp_lock);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2014-11-18 10:18:36 +07:00
|
|
|
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
|
2014-05-12 10:27:43 +07:00
|
|
|
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
|
|
|
|
orphan_blocks);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2015-01-30 02:45:33 +07:00
|
|
|
if (__remain_node_summaries(cpc->reason))
|
2014-08-22 15:17:38 +07:00
|
|
|
ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
|
2014-05-12 10:27:43 +07:00
|
|
|
cp_payload_blks + data_sum_blocks +
|
|
|
|
orphan_blocks + NR_CURSEG_NODE_TYPE);
|
2015-01-30 02:45:33 +07:00
|
|
|
else
|
2014-08-22 15:17:38 +07:00
|
|
|
ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
|
2014-05-12 10:27:43 +07:00
|
|
|
cp_payload_blks + data_sum_blocks +
|
|
|
|
orphan_blocks);
|
2015-01-30 02:45:33 +07:00
|
|
|
|
2016-10-01 07:37:43 +07:00
|
|
|
/* update ckpt flag for checkpoint */
|
|
|
|
update_ckpt_flags(sbi, cpc);
|
2016-09-20 07:55:10 +07:00
|
|
|
|
2012-11-02 15:08:18 +07:00
|
|
|
/* update SIT/NAT bitmap */
|
|
|
|
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
|
|
|
|
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
|
|
|
|
|
2016-03-03 03:04:24 +07:00
|
|
|
crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
|
2013-06-19 18:47:19 +07:00
|
|
|
*((__le32 *)((unsigned char *)ckpt +
|
|
|
|
le32_to_cpu(ckpt->checksum_offset)))
|
2012-11-02 15:08:18 +07:00
|
|
|
= cpu_to_le32(crc32);
|
|
|
|
|
2016-11-25 03:45:15 +07:00
|
|
|
start_blk = __start_cp_next_addr(sbi);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2017-02-10 01:38:09 +07:00
|
|
|
/* write nat bits */
|
|
|
|
if (enabled_nat_bits(sbi, cpc)) {
|
|
|
|
__u64 cp_ver = cur_cp_version(ckpt);
|
|
|
|
unsigned int i;
|
|
|
|
block_t blk;
|
|
|
|
|
|
|
|
cp_ver |= ((__u64)crc32 << 32);
|
|
|
|
*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
|
|
|
|
|
|
|
|
blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
|
|
|
|
for (i = 0; i < nm_i->nat_bits_blocks; i++)
|
|
|
|
update_meta_page(sbi, nm_i->nat_bits +
|
|
|
|
(i << F2FS_BLKSIZE_BITS), blk + i);
|
|
|
|
|
|
|
|
/* Flush all the NAT BITS pages */
|
|
|
|
while (get_pages(sbi, F2FS_DIRTY_META)) {
|
|
|
|
sync_meta_pages(sbi, META, LONG_MAX);
|
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-17 04:06:54 +07:00
|
|
|
/* need to wait for end_io results */
|
|
|
|
wait_on_all_pages_writeback(sbi);
|
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
2015-12-23 16:50:30 +07:00
|
|
|
return -EIO;
|
2015-09-17 04:06:54 +07:00
|
|
|
|
2012-11-02 15:08:18 +07:00
|
|
|
/* write out checkpoint buffer at block 0 */
|
2015-05-19 16:40:04 +07:00
|
|
|
update_meta_page(sbi, ckpt, start_blk++);
|
|
|
|
|
|
|
|
for (i = 1; i < 1 + cp_payload_blks; i++)
|
|
|
|
update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
|
|
|
|
start_blk++);
|
2014-05-12 10:27:43 +07:00
|
|
|
|
2014-11-18 10:18:36 +07:00
|
|
|
if (orphan_num) {
|
2012-11-02 15:08:18 +07:00
|
|
|
write_orphan_inodes(sbi, start_blk);
|
|
|
|
start_blk += orphan_blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
write_data_summaries(sbi, start_blk);
|
|
|
|
start_blk += data_sum_blocks;
|
2016-01-27 08:57:30 +07:00
|
|
|
|
|
|
|
/* Record write statistics in the hot node summary */
|
|
|
|
kbytes_written = sbi->kbytes_written;
|
|
|
|
if (sb->s_bdev->bd_part)
|
|
|
|
kbytes_written += BD_PART_WRITTEN(sbi);
|
|
|
|
|
f2fs: split journal cache from curseg cache
In curseg cache, f2fs caches two different parts:
- datas of current summay block, i.e. summary entries, footer info.
- journal info, i.e. sparse nat/sit entries or io stat info.
With this approach, 1) it may cause higher lock contention when we access
or update both of the parts of cache since we use the same mutex lock
curseg_mutex to protect the cache. 2) current summary block with last
journal info will be writebacked into device as a normal summary block
when flushing, however, we treat journal info as valid one only in current
summary, so most normal summary blocks contain junk journal data, it wastes
remaining space of summary block.
So, in order to fix above issues, we split curseg cache into two parts:
a) current summary block, protected by original mutex lock curseg_mutex
b) journal cache, protected by newly introduced r/w semaphore journal_rwsem
When loading curseg cache during ->mount, we store summary info and
journal info into different caches; When doing checkpoint, we combine
datas of two cache into current summary block for persisting.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-19 17:08:46 +07:00
|
|
|
seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
|
2016-01-27 08:57:30 +07:00
|
|
|
|
2015-01-30 02:45:33 +07:00
|
|
|
if (__remain_node_summaries(cpc->reason)) {
|
2012-11-02 15:08:18 +07:00
|
|
|
write_node_summaries(sbi, start_blk);
|
|
|
|
start_blk += NR_CURSEG_NODE_TYPE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* writeout checkpoint block */
|
2015-05-19 16:40:04 +07:00
|
|
|
update_meta_page(sbi, ckpt, start_blk);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
|
|
|
/* wait for previous submitted node/meta pages writeback */
|
2013-11-07 10:48:25 +07:00
|
|
|
wait_on_all_pages_writeback(sbi);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2014-08-12 08:37:46 +07:00
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
2015-12-23 16:50:30 +07:00
|
|
|
return -EIO;
|
2014-08-12 08:37:46 +07:00
|
|
|
|
f2fs: fix incorrect upper bound when iterating inode mapping tree
1. Inode mapping tree can index page in range of [0, ULONG_MAX], however,
in some places, f2fs only search or iterate page in ragne of [0, LONG_MAX],
result in miss hitting in page cache.
2. filemap_fdatawait_range accepts range parameters in unit of bytes, so
the max range it covers should be [0, LLONG_MAX], if we use [0, LONG_MAX]
as range for waiting on writeback, big number of pages will not be covered.
This patch corrects above two issues.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-24 16:20:44 +07:00
|
|
|
filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
|
|
|
|
filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
|
|
|
/* update user_block_counts */
|
|
|
|
sbi->last_valid_block_count = sbi->total_valid_block_count;
|
2016-05-17 01:06:50 +07:00
|
|
|
percpu_counter_set(&sbi->alloc_valid_block_count, 0);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
|
|
|
/* Here, we only have one bio having CP pack */
|
f2fs: prevent checkpoint once any IO failure is detected
This patch enhances the checkpoint routine to cope with IO errors.
Basically f2fs detects IO errors from end_io_write, and the errors are able to
be occurred during one of data, node, and meta page writes.
In the previous code, when an IO error is occurred during writes, f2fs sets a
flag, CP_ERROR_FLAG, in the raw ckeckpoint buffer which will be written to disk.
Afterwards, write_checkpoint() will check the flag and remount f2fs as a
read-only (ro) mode.
However, even once f2fs is remounted as a ro mode, dirty checkpoint pages are
freely able to be written to disk by flusher or kswapd in background.
In such a case, after cold reboot, f2fs would restore the checkpoint data having
CP_ERROR_FLAG, resulting in disabling write_checkpoint and remounting f2fs as
a ro mode again.
Therefore, let's prevent any checkpoint page (meta) writes once an IO error is
occurred, and remount f2fs as a ro mode right away at that moment.
Reported-by: Oliver Winker <oliver@oli1170.net>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
2013-01-24 17:56:11 +07:00
|
|
|
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
|
2012-11-02 15:08:18 +07:00
|
|
|
|
2014-10-30 04:37:22 +07:00
|
|
|
/* wait for previous submitted meta pages writeback */
|
|
|
|
wait_on_all_pages_writeback(sbi);
|
|
|
|
|
2016-05-03 12:09:56 +07:00
|
|
|
release_ino_entry(sbi, false);
|
2014-08-12 08:37:46 +07:00
|
|
|
|
|
|
|
if (unlikely(f2fs_cp_error(sbi)))
|
2015-12-23 16:50:30 +07:00
|
|
|
return -EIO;
|
2014-08-12 08:37:46 +07:00
|
|
|
|
2015-01-28 16:48:42 +07:00
|
|
|
clear_sbi_flag(sbi, SBI_IS_DIRTY);
|
2016-08-30 08:23:45 +07:00
|
|
|
clear_sbi_flag(sbi, SBI_NEED_CP);
|
2016-11-25 03:45:15 +07:00
|
|
|
__set_cp_next_pack(sbi);
|
2015-12-23 16:50:30 +07:00
|
|
|
|
2016-08-31 09:43:19 +07:00
|
|
|
/*
|
|
|
|
* redirty superblock if metadata like node page or inode cache is
|
|
|
|
* updated during writing checkpoint.
|
|
|
|
*/
|
|
|
|
if (get_pages(sbi, F2FS_DIRTY_NODES) ||
|
|
|
|
get_pages(sbi, F2FS_DIRTY_IMETA))
|
|
|
|
set_sbi_flag(sbi, SBI_IS_DIRTY);
|
|
|
|
|
|
|
|
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
|
|
|
|
|
2015-12-23 16:50:30 +07:00
|
|
|
return 0;
|
2012-11-02 15:08:18 +07:00
|
|
|
}
|
|
|
|
|
2012-11-29 11:28:09 +07:00
|
|
|
/*
 * We guarantee that this checkpoint procedure will not fail.
 *
 * Top-level checkpoint entry point: freezes filesystem operations, flushes
 * cached NAT/SIT entries, then delegates the actual checkpoint-pack write to
 * do_checkpoint().  Runs under sbi->cp_mutex so only one checkpoint can be
 * in flight at a time.
 *
 * @sbi: per-superblock f2fs info
 * @cpc: checkpoint control; cpc->reason selects the trigger
 *       (CP_SYNC / CP_FASTBOOT / CP_DISCARD / CP_RECOVERY, ...)
 *
 * Returns 0 on success, -EIO if a prior checkpoint error was latched,
 * -EROFS on a read-only filesystem, or the error from block_operations()/
 * do_checkpoint().
 */
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	/* serialize checkpoint writers; released at "out" on every path */
	mutex_lock(&sbi->cp_mutex);

	/*
	 * Nothing dirty: skip the checkpoint for periodic/fastboot syncs,
	 * and for fstrim when no blocks were discard-tracked.
	 */
	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
		(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
		goto out;
	/* a previously detected checkpoint error blocks further checkpoints */
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}
	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	/* quiesce ongoing fs operations; paired with unblock_operations() */
	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	/* push out any bios still sitting in the merge queues */
	f2fs_flush_merged_bios(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason == CP_DISCARD) {
		/* no trim candidates at all: undo the freeze and bail out */
		if (!exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		/*
		 * No dirty NAT/SIT entries and no prefree segments: discard
		 * processing alone suffices, no full checkpoint needed.
		 */
		if (NM_I(sbi)->dirty_nat_cnt == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			flush_sit_entries(sbi, cpc);
			clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi, cpc);
	flush_sit_entries(sbi, cpc);

	/* unlock all the fs_lock[] in do_checkpoint() */
	err = do_checkpoint(sbi, cpc);
	if (err)
		/* checkpoint failed: keep discard addrs for a later retry */
		release_discard_addrs(sbi);
	else
		/* checkpoint persisted: prefree segments become reusable */
		clear_prefree_segments(sbi, cpc);

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason == CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);

	/* do checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	mutex_unlock(&sbi->cp_mutex);
	return err;
}
|
|
|
|
|
2014-07-26 05:47:17 +07:00
|
|
|
void init_ino_entry_info(struct f2fs_sb_info *sbi)
|
2012-11-02 15:08:18 +07:00
|
|
|
{
|
2014-07-26 05:47:17 +07:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_INO_ENTRY; i++) {
|
2014-11-18 10:18:36 +07:00
|
|
|
struct inode_management *im = &sbi->im[i];
|
|
|
|
|
|
|
|
INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
|
|
|
|
spin_lock_init(&im->ino_lock);
|
|
|
|
INIT_LIST_HEAD(&im->ino_list);
|
|
|
|
im->ino_num = 0;
|
2014-07-26 05:47:17 +07:00
|
|
|
}
|
|
|
|
|
2014-08-22 15:17:38 +07:00
|
|
|
sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
|
2015-02-27 16:38:13 +07:00
|
|
|
NR_CURSEG_TYPE - __cp_payload(sbi)) *
|
|
|
|
F2FS_ORPHANS_PER_BLOCK;
|
2012-11-02 15:08:18 +07:00
|
|
|
}
|
|
|
|
|
2013-01-16 22:08:30 +07:00
|
|
|
/*
 * Create the slab caches used by the checkpoint code (ino entries and
 * dirty-inode entries).  Returns 0 on success or -ENOMEM, releasing any
 * partially created cache on the failure path.
 */
int __init create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
					sizeof(struct ino_entry));
	if (!ino_entry_slab)
		goto fail;

	inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
					sizeof(struct inode_entry));
	if (!inode_entry_slab)
		goto free_ino_entry;

	return 0;

free_ino_entry:
	kmem_cache_destroy(ino_entry_slab);
fail:
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
/*
 * Tear down the slab caches created by create_checkpoint_caches().
 * All ino/inode entries must have been released before this is called.
 */
void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}
|