Merge tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "No major kernel updates for this round since I'm fully diving into
  LZMA algorithm internals now to provide high CR XZ algorihm support.
  That needs more work and time for me to get a better compression time.

  Summary:

   - Introduce superblock checksum support

   - Set iowait when waiting I/O for sync decompression path

   - Several code cleanups"

* tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: remove unnecessary output in erofs_show_options()
  erofs: drop all vle annotations for runtime names
  erofs: support superblock checksum
  erofs: set iowait for sync decompression
  erofs: clean up decompress queue stuffs
  erofs: get rid of __stagingpage_alloc helper
  erofs: remove dead code since managed cache is now built-in
  erofs: clean up collection handling routines
commit e2d73c302b
Linus Torvalds, 2019-11-30 11:13:33 -08:00
9 changed files with 190 additions and 203 deletions

fs/erofs/Kconfig

@ -3,6 +3,7 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
select LIBCRC32C
help
EROFS (Enhanced Read-Only File System) is a lightweight
read-only file system with modern designs (eg. page-sized

fs/erofs/decompressor.c

@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
victim = availables[--top];
get_page(victim);
} else {
victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
victim = erofs_allocpage(pagepool, GFP_KERNEL);
if (!victim)
return -ENOMEM;
victim->mapping = Z_EROFS_MAPPING_STAGING;

fs/erofs/erofs_fs.h

@ -11,6 +11,8 @@
#define EROFS_SUPER_OFFSET 1024
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
* be incompatible with this kernel version.
@ -37,7 +39,6 @@ struct erofs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__u8 volume_name[16]; /* volume name */
__le32 feature_incompat;
__u8 reserved2[44];
};

fs/erofs/internal.h

@ -85,6 +85,7 @@ struct erofs_sb_info {
u8 uuid[16]; /* 128-bit uuid for volume */
u8 volume_name[16]; /* volume name */
u32 feature_compat;
u32 feature_incompat;
unsigned int mount_opt;
@ -278,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value)
extern const struct super_operations erofs_sops;
extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif
extern const struct address_space_operations z_erofs_aops;
/*
* Logical to physical block mapping, used by erofs_map_blocks()
@ -382,7 +381,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
extern const struct file_operations erofs_dir_fops;
/* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);

fs/erofs/super.c

@ -9,6 +9,7 @@
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function,
va_end(args);
}
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
struct erofs_super_block *dsb;
u32 expected_crc, crc;
dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
if (!dsb)
return -ENOMEM;
expected_crc = le32_to_cpu(dsb->checksum);
dsb->checksum = 0;
/* to allow for x86 boot sectors and other oddities. */
crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
kfree(dsb);
if (crc != expected_crc) {
erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
crc, expected_crc);
return -EBADMSG;
}
return 0;
}
static void erofs_inode_init_once(void *ptr)
{
struct erofs_inode *vi = ptr;
@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi = EROFS_SB(sb);
data = kmap_atomic(page);
data = kmap(page);
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
ret = -EINVAL;
@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb)
goto out;
}
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
ret = erofs_superblock_csum_verify(sb, data);
if (ret)
goto out;
}
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb)
}
ret = 0;
out:
kunmap_atomic(data);
kunmap(page);
put_page(page);
return ret;
}
@ -566,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",cache_strategy=readahead");
} else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
seq_puts(seq, ",cache_strategy=readaround");
} else {
seq_puts(seq, ",cache_strategy=(unknown)");
DBG_BUGON(1);
}
#endif
return 0;
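
For reference, the checksum added in the erofs_superblock_csum_verify()
hunk above can be recomputed outside the kernel.  The sketch below is a
minimal userspace analogue, assuming the kernel crc32c() convention
(reflected Castagnoli polynomial 0x82F63B78, seed ~0, no final XOR) and
assuming the 32-bit checksum field sits right after the magic number at
byte offset 4 of the superblock.  All names are illustrative, not kernel
or erofs-utils API.

#include <stdint.h>
#include <string.h>

#define EROFS_BLKSIZ		4096
#define EROFS_SUPER_OFFSET	1024
#define SB_CHKSUM_OFFSET	4	/* assumed: __le32 checksum after magic */

/* bitwise CRC-32C matching crc32c(~0, buf, len) as used above */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? 0x82F63B78u : 0);
	}
	return crc;
}

/* blk: the first EROFS_BLKSIZ bytes of an image (little-endian host assumed) */
static int sb_csum_verify(const uint8_t *blk)
{
	uint8_t copy[EROFS_BLKSIZ - EROFS_SUPER_OFFSET];
	uint32_t expected;

	memcpy(copy, blk + EROFS_SUPER_OFFSET, sizeof(copy));
	memcpy(&expected, copy + SB_CHKSUM_OFFSET, sizeof(expected));
	memset(copy + SB_CHKSUM_OFFSET, 0, sizeof(expected));	/* zero stored checksum */
	return crc32c_sw(~0u, copy, sizeof(copy)) == expected ? 0 : -1;
}

Presumably mkfs-side tooling writes the field the same way: compute the
CRC with the checksum bytes cleared, then store the result.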

fs/erofs/utils.c

@ -7,7 +7,7 @@
#include "internal.h"
#include <linux/pagevec.h>
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
struct page *page;
@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
DBG_BUGON(page_ref_count(page) != 1);
list_del(&page->lru);
} else {
page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
page = alloc_page(gfp);
}
return page;
}
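
A rough userspace analogue of the slimmed-down allocator above, only to
illustrate the new contract: take from the per-request pool first, fall
back to the general allocator, and return NULL on failure.  The old bool
nofail parameter is gone, so kernel callers that must not fail now pass
__GFP_NOFAIL themselves, as the zdata.c hunks below do.  The names here
are invented for illustration.

#include <stdlib.h>

struct pool_page {
	struct pool_page *next;
	unsigned char data[4096];
};

static struct pool_page *pool_allocpage(struct pool_page **pool)
{
	struct pool_page *page = *pool;

	if (page)
		*pool = page->next;		/* reuse a pooled page */
	else
		page = malloc(sizeof(*page));	/* may be NULL: caller decides */
	return page;
}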
@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
}
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp,
bool cleanup)
struct erofs_workgroup *grp)
{
/*
* If managed cache is on, refcount of workgroups
@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
}
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned long nr_shrink,
bool cleanup)
unsigned long nr_shrink)
{
pgoff_t first_index = 0;
void *batch[PAGEVEC_SIZE];
@ -208,7 +206,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
first_index = grp->index + 1;
/* try to shrink each valid workgroup */
if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
if (!erofs_try_to_release_workgroup(sbi, grp))
continue;
++freed;
@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
erofs_shrink_workstation(sbi, ~0UL, true);
/* clean up all remaining workgroups in memory */
erofs_shrink_workstation(sbi, ~0UL);
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
spin_unlock(&erofs_sb_list_lock);
sbi->shrinker_run_no = run_no;
freed += erofs_shrink_workstation(sbi, nr, false);
freed += erofs_shrink_workstation(sbi, nr);
spin_lock(&erofs_sb_list_lock);
/* Get the next list element before we move this one */

fs/erofs/zdata.c

@ -337,9 +337,9 @@ try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
return COLLECT_PRIMARY; /* :( better luck next time */
}
static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
{
struct erofs_workgroup *grp;
struct z_erofs_pcluster *pcl;
@ -349,20 +349,20 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
if (!grp)
return NULL;
return -ENOENT;
pcl = container_of(grp, struct z_erofs_pcluster, obj);
if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
return ERR_PTR(-EFSCORRUPTED);
return -EFSCORRUPTED;
}
cl = z_erofs_primarycollection(pcl);
if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
return ERR_PTR(-EFSCORRUPTED);
return -EFSCORRUPTED;
}
length = READ_ONCE(pcl->length);
@ -370,7 +370,7 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
return ERR_PTR(-EFSCORRUPTED);
return -EFSCORRUPTED;
}
} else {
unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
@ -394,12 +394,12 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
clt->tailpcl = NULL;
clt->pcl = pcl;
clt->cl = cl;
return cl;
return 0;
}
static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
static int z_erofs_register_collection(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
{
struct z_erofs_pcluster *pcl;
struct z_erofs_collection *cl;
@ -408,7 +408,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
/* no available workgroup, let's allocate one */
pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
if (!pcl)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
z_erofs_pcluster_init_always(pcl);
pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@ -442,7 +442,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
if (err) {
mutex_unlock(&cl->lock);
kmem_cache_free(pcluster_cachep, pcl);
return ERR_PTR(-EAGAIN);
return -EAGAIN;
}
/* used to check tail merging loop due to corrupted images */
if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
@ -450,14 +450,14 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
clt->owned_head = &pcl->next;
clt->pcl = pcl;
clt->cl = cl;
return cl;
return 0;
}
static int z_erofs_collector_begin(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
{
struct z_erofs_collection *cl;
int ret;
DBG_BUGON(clt->cl);
@ -471,19 +471,22 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
}
repeat:
cl = cllookup(clt, inode, map);
if (!cl) {
cl = clregister(clt, inode, map);
ret = z_erofs_lookup_collection(clt, inode, map);
if (ret == -ENOENT) {
ret = z_erofs_register_collection(clt, inode, map);
if (cl == ERR_PTR(-EAGAIN))
/* someone registered at the same time, give another try */
if (ret == -EAGAIN) {
cond_resched();
goto repeat;
}
}
if (IS_ERR(cl))
return PTR_ERR(cl);
if (ret)
return ret;
z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
cl->pagevec, cl->vcnt);
clt->cl->pagevec, clt->cl->vcnt);
clt->compressedpages = clt->pcl->compressed_pages;
if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
@ -543,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
return true;
}
static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
gfp_t gfp)
{
struct page *page = erofs_allocpage(pagepool, gfp, true);
page->mapping = Z_EROFS_MAPPING_STAGING;
return page;
}
static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
unsigned int cachestrategy,
erofs_off_t la)
@ -571,7 +565,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct list_head *pagepool)
{
struct inode *const inode = fe->inode;
struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct erofs_map_blocks *const map = &fe->map;
struct z_erofs_collector *const clt = &fe->clt;
const loff_t offset = page_offset(page);
@ -658,8 +652,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* should allocate an additional staging page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
__stagingpage_alloc(pagepool, GFP_NOFS);
erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
newpage->mapping = Z_EROFS_MAPPING_STAGING;
err = z_erofs_attach_page(clt, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (!err)
@ -698,13 +693,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
goto out;
}
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
bool sync, int bios)
{
tagptr1_t t = tagptr_init(tagptr1_t, ptr);
struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
bool background = tagptr_unfold_tags(t);
if (!background) {
/* wake up the caller thread for sync decompression */
if (sync) {
unsigned long flags;
spin_lock_irqsave(&io->u.wait.lock, flags);
@ -718,37 +711,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
queue_work(z_erofs_workqueue, &io->u.work);
}
static inline void z_erofs_vle_read_endio(struct bio *bio)
static void z_erofs_decompressqueue_endio(struct bio *bio)
{
struct erofs_sb_info *sbi = NULL;
tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
blk_status_t err = bio->bi_status;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
DBG_BUGON(!page->mapping);
if (!sbi && !z_erofs_page_is_staging(page))
sbi = EROFS_SB(page->mapping->host->i_sb);
/* sbi should already be gotten if the page is managed */
if (sbi)
cachemngd = erofs_page_is_managed(sbi, page);
if (err)
SetPageError(page);
else if (cachemngd)
SetPageUptodate(page);
if (cachemngd)
if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
if (!err)
SetPageUptodate(page);
unlock_page(page);
}
}
z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
bio_put(bio);
}
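
The endio handler above recovers both the queue pointer and the sync
flag from bio->bi_private through the tagptr helpers.  Below is a
minimal userspace sketch of that one-bit pointer tagging, assuming only
that the queue object is at least 2-byte aligned (which kernel
allocations guarantee); the names are illustrative, not the kernel
tagptr API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct queue { int pending_bios; };

static void *tag_fold(struct queue *q, unsigned int sync)
{
	return (void *)((uintptr_t)q | (sync & 1));	/* flag rides in bit 0 */
}

static struct queue *tag_ptr(void *p)
{
	return (struct queue *)((uintptr_t)p & ~(uintptr_t)1);
}

static unsigned int tag_bit(void *p)
{
	return (uintptr_t)p & 1;
}

int main(void)
{
	struct queue q = { .pending_bios = 2 };
	void *bi_private = tag_fold(&q, 1);	/* what the submit path stores */

	assert(tag_ptr(bi_private) == &q);	/* what the endio handler unfolds */
	printf("sync=%u pending=%d\n", tag_bit(bi_private),
	       tag_ptr(bi_private)->pending_bios);
	return 0;
}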
@ -953,9 +939,8 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
return err;
}
static void z_erofs_vle_unzip_all(struct super_block *sb,
struct z_erofs_unzip_io *io,
struct list_head *pagepool)
static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct list_head *pagepool)
{
z_erofs_next_pcluster_t owned = io->head;
@ -971,21 +956,21 @@ static void z_erofs_vle_unzip_all(struct super_block *sb,
pcl = container_of(owned, struct z_erofs_pcluster, next);
owned = READ_ONCE(pcl->next);
z_erofs_decompress_pcluster(sb, pcl, pagepool);
z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
}
}
static void z_erofs_vle_unzip_wq(struct work_struct *work)
static void z_erofs_decompressqueue_work(struct work_struct *work)
{
struct z_erofs_unzip_io_sb *iosb =
container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
struct z_erofs_decompressqueue *bgq =
container_of(work, struct z_erofs_decompressqueue, u.work);
LIST_HEAD(pagepool);
DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
z_erofs_decompress_queue(bgq, &pagepool);
put_pages_list(&pagepool);
kvfree(iosb);
kvfree(bgq);
}
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
@ -994,8 +979,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
struct address_space *mc,
gfp_t gfp)
{
/* determined at compile time to avoid too many #ifdefs */
const bool nocache = __builtin_constant_p(mc) ? !mc : false;
const pgoff_t index = pcl->obj.index;
bool tocache = false;
@ -1016,7 +999,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
* the cached page has not been allocated and
* an placeholder is out there, prepare it now.
*/
if (!nocache && page == PAGE_UNALLOCATED) {
if (page == PAGE_UNALLOCATED) {
tocache = true;
goto out_allocpage;
}
@ -1028,21 +1011,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
mapping = READ_ONCE(page->mapping);
/*
* if managed cache is disabled, it's no way to
* get such a cached-like page.
*/
if (nocache) {
/* if managed cache is disabled, it is impossible `justfound' */
DBG_BUGON(justfound);
/* and it should be locked, not uptodate, and not truncated */
DBG_BUGON(!PageLocked(page));
DBG_BUGON(PageUptodate(page));
DBG_BUGON(!mapping);
goto out;
}
/*
* unmanaged (file) pages are all locked solidly,
* therefore it is impossible for `mapping' to be NULL.
@ -1093,50 +1061,52 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unlock_page(page);
put_page(page);
out_allocpage:
page = __stagingpage_alloc(pagepool, gfp);
if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
list_add(&page->lru, pagepool);
cpu_relax();
goto repeat;
}
if (nocache || !tocache)
goto out;
if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
/* non-LRU / non-movable temporary page is needed */
page->mapping = Z_EROFS_MAPPING_STAGING;
goto out;
tocache = false;
}
if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
if (tocache) {
/* since it added to managed cache successfully */
unlock_page(page);
put_page(page);
} else {
list_add(&page->lru, pagepool);
}
cond_resched();
goto repeat;
}
set_page_private(page, (unsigned long)pcl);
SetPagePrivate(page);
out: /* the only exit (for tracing and debugging) */
return page;
}
static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
struct z_erofs_unzip_io *io,
bool foreground)
static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
struct z_erofs_decompressqueue *fgq, bool *fg)
{
struct z_erofs_unzip_io_sb *iosb;
struct z_erofs_decompressqueue *q;
if (foreground) {
/* waitqueue available for foreground io */
DBG_BUGON(!io);
init_waitqueue_head(&io->u.wait);
atomic_set(&io->pending_bios, 0);
goto out;
if (fg && !*fg) {
q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
if (!q) {
*fg = true;
goto fg_out;
}
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
} else {
fg_out:
q = fgq;
init_waitqueue_head(&fgq->u.wait);
atomic_set(&fgq->pending_bios, 0);
}
iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
DBG_BUGON(!iosb);
/* initialize fields in the allocated descriptor */
io = &iosb->io;
iosb->sb = sb;
INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
return io;
q->sb = sb;
q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
return q;
}
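
The reworked jobqueue_init() above prefers a heap-allocated background
queue but quietly falls back to the caller's on-stack foreground queue,
forcing synchronous mode, when that allocation fails.  A compact
userspace sketch of the same fallback, with invented names and none of
the kernel details:

#include <stdbool.h>
#include <stdlib.h>

struct dqueue { int head; };

static struct dqueue *queue_init(struct dqueue *fgq, bool *fg)
{
	struct dqueue *q = NULL;

	if (fg && !*fg)
		q = calloc(1, sizeof(*q));	/* try a background queue */
	if (!q) {
		q = fgq;			/* fall back to the on-stack queue */
		if (fg)
			*fg = true;		/* caller must now wait synchronously */
	}
	q->head = -1;	/* stands in for Z_EROFS_PCLUSTER_TAIL_CLOSED */
	return q;
}

As in the kernel hunk, passing fg == NULL (the bypass queue case) always
yields the caller-provided queue.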
/* define decompression jobqueue types */
@ -1147,22 +1117,17 @@ enum {
};
static void *jobqueueset_init(struct super_block *sb,
z_erofs_next_pcluster_t qtail[],
struct z_erofs_unzip_io *q[],
struct z_erofs_unzip_io *fgq,
bool forcefg)
struct z_erofs_decompressqueue *q[],
struct z_erofs_decompressqueue *fgq, bool *fg)
{
/*
* if managed cache is enabled, bypass jobqueue is needed,
* no need to read from device for all pclusters in this queue.
*/
q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
@ -1184,9 +1149,8 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
unsigned int nr_bios,
bool force_fg)
static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
unsigned int nr_bios, bool force_fg)
{
/*
* although background is preferred, no one is pending for submission.
@ -1195,19 +1159,19 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
if (force_fg || nr_bios)
return false;
kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
kvfree(q[JQ_SUBMIT]);
return true;
}
static bool z_erofs_vle_submit_all(struct super_block *sb,
z_erofs_next_pcluster_t owned_head,
struct list_head *pagepool,
struct z_erofs_unzip_io *fgq,
bool force_fg)
static bool z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t owned_head,
struct list_head *pagepool,
struct z_erofs_decompressqueue *fgq,
bool *force_fg)
{
struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
struct erofs_sb_info *const sbi = EROFS_SB(sb);
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_unzip_io *q[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
struct bio *bio;
void *bi_private;
/* since bio will be NULL, no need to initialize last_index */
@ -1221,7 +1185,9 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
force_submit = false;
bio = NULL;
nr_bios = 0;
bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
bi_private = jobqueueset_init(sb, q, fgq, force_fg);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
q[JQ_SUBMIT]->head = owned_head;
@ -1268,7 +1234,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
if (!bio) {
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_end_io = z_erofs_vle_read_endio;
bio->bi_end_io = z_erofs_decompressqueue_endio;
bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
LOG_SECTORS_PER_BLOCK;
@ -1297,40 +1263,38 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
if (bio)
submit_bio(bio);
if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
return true;
z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
return true;
}
static void z_erofs_submit_and_unzip(struct super_block *sb,
struct z_erofs_collector *clt,
struct list_head *pagepool,
bool force_fg)
static void z_erofs_runqueue(struct super_block *sb,
struct z_erofs_collector *clt,
struct list_head *pagepool, bool force_fg)
{
struct z_erofs_unzip_io io[NR_JOBQUEUES];
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
if (!z_erofs_vle_submit_all(sb, clt->owned_head,
pagepool, io, force_fg))
if (!z_erofs_submit_queue(sb, clt->owned_head,
pagepool, io, &force_fg))
return;
/* decompress no I/O pclusters immediately */
z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
/* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
if (!force_fg)
return;
/* wait until all bios are completed */
wait_event(io[JQ_SUBMIT].u.wait,
!atomic_read(&io[JQ_SUBMIT].pending_bios));
io_wait_event(io[JQ_SUBMIT].u.wait,
!atomic_read(&io[JQ_SUBMIT].pending_bios));
/* let's synchronous decompression */
z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
/* handle synchronous decompress queue in the caller context */
z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}
static int z_erofs_vle_normalaccess_readpage(struct file *file,
struct page *page)
static int z_erofs_readpage(struct file *file, struct page *page)
{
struct inode *const inode = page->mapping->host;
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@ -1345,7 +1309,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
(void)z_erofs_collector_end(&f.clt);
/* if some compressed cluster ready, need submit them anyway */
z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
if (err)
erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@ -1364,10 +1328,8 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
return nr <= sbi->max_sync_decompress_pages;
}
static int z_erofs_vle_normalaccess_readpages(struct file *filp,
struct address_space *mapping,
struct list_head *pages,
unsigned int nr_pages)
static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned int nr_pages)
{
struct inode *const inode = mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@ -1422,7 +1384,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
(void)z_erofs_collector_end(&f.clt);
z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
if (f.map.mpage)
put_page(f.map.mpage);
@ -1432,8 +1394,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
return 0;
}
const struct address_space_operations z_erofs_vle_normalaccess_aops = {
.readpage = z_erofs_vle_normalaccess_readpage,
.readpages = z_erofs_vle_normalaccess_readpages,
const struct address_space_operations z_erofs_aops = {
.readpage = z_erofs_readpage,
.readpages = z_erofs_readpages,
};

fs/erofs/zdata.h

@ -84,7 +84,8 @@ struct z_erofs_pcluster {
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
struct z_erofs_unzip_io {
struct z_erofs_decompressqueue {
struct super_block *sb;
atomic_t pending_bios;
z_erofs_next_pcluster_t head;
@ -94,11 +95,6 @@ struct z_erofs_unzip_io {
} u;
};
struct z_erofs_unzip_io_sb {
struct z_erofs_unzip_io io;
struct super_block *sb;
};
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
struct page *page)

fs/erofs/zmap.c

@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode)
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
}
inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
inode->i_mapping->a_ops = &z_erofs_aops;
return 0;
}
static int fill_inode_lazy(struct inode *inode)
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
return 0;
}
static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned long lcn)
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned long lcn)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
@ -311,13 +311,13 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
}
static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn)
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn)
{
const unsigned int datamode = EROFS_I(m->inode)->datalayout;
if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
return vle_legacy_load_cluster_from_disk(m, lcn);
return legacy_load_cluster_from_disk(m, lcn);
if (datamode == EROFS_INODE_FLAT_COMPRESSION)
return compacted_load_cluster_from_disk(m, lcn);
@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
return -EINVAL;
}
static int vle_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance)
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance)
{
struct erofs_inode *const vi = EROFS_I(m->inode);
struct erofs_map_blocks *const map = m->map;
@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
/* load extent head logical cluster if needed */
lcn -= lookback_distance;
err = vle_load_cluster_from_disk(m, lcn);
err = z_erofs_load_cluster_from_disk(m, lcn);
if (err)
return err;
@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
DBG_BUGON(1);
return -EFSCORRUPTED;
}
return vle_extent_lookback(m, m->delta[0]);
return z_erofs_extent_lookback(m, m->delta[0]);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
map->m_flags &= ~EROFS_MAP_ZIPPED;
/* fallthrough */
@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
goto out;
}
err = fill_inode_lazy(inode);
err = z_erofs_fill_inode_lazy(inode);
if (err)
goto out;
@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
m.lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
err = vle_load_cluster_from_disk(&m, m.lcn);
err = z_erofs_load_cluster_from_disk(&m, m.lcn);
if (err)
goto unmap_out;
@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the correspoinding first chunk */
err = vle_extent_lookback(&m, m.delta[0]);
err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
goto unmap_out;
break;