mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 11:38:14 +07:00
Changes since last update:

- fix an out-of-bound read access introduced in v5.3, which could rarely cause data corruption
- various cleanup patches

-----BEGIN PGP SIGNATURE-----

iIwEABYIADQWIQThPAmQN9sSA0DVxtI5NzHcH7XmBAUCXjCB9xYcZ2FveGlhbmcy
NUBodWF3ZWkuY29tAAoJEDk3MdwfteYEW+0BALDobAWvVw7Bxkz0tkFkmoZKF4eG
Otdzi/EhLq6baeyGAQCtSbTblu9/hAL53p6++RsTXazzzWDUyZZKtZj8MkR5Ag==
=c1Fs
-----END PGP SIGNATURE-----

Merge tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "A regression fix, several cleanups and (maybe) an upcoming new mount
  api convert patch as part of the vfs update are considered available
  for this cycle. All commits have been in linux-next and tested with
  no smoke out.

  Summary:

  - fix an out-of-bound read access introduced in v5.3, which could
    rarely cause data corruption

  - various cleanup patches"

* tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: clean up z_erofs_submit_queue()
  erofs: fold in postsubmit_is_all_bypassed()
  erofs: fix out-of-bound read for shifted uncompressed block
  erofs: remove void tagging/untagging of workgroup pointers
  erofs: remove unused tag argument while registering a workgroup
  erofs: remove unused tag argument while finding a workgroup
  erofs: correct indentation of an assigned structure inside a function
This commit is contained in:
commit 3893c2025f
fs/erofs/decompressor.c
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
 	}
 
 	src = kmap_atomic(*rq->in);
-	if (!rq->out[0]) {
-		dst = NULL;
-	} else {
+	if (rq->out[0]) {
 		dst = kmap_atomic(rq->out[0]);
 		memcpy(dst + rq->pageofs_out, src, righthalf);
+		kunmap_atomic(dst);
 	}
 
-	if (rq->out[1] == *rq->in) {
-		memmove(src, src + righthalf, rq->pageofs_out);
-	} else if (nrpages_out == 2) {
-		if (dst)
-			kunmap_atomic(dst);
+	if (nrpages_out == 2) {
 		DBG_BUGON(!rq->out[1]);
-		dst = kmap_atomic(rq->out[1]);
-		memcpy(dst, src + righthalf, rq->pageofs_out);
+		if (rq->out[1] == *rq->in) {
+			memmove(src, src + righthalf, rq->pageofs_out);
+		} else {
+			dst = kmap_atomic(rq->out[1]);
+			memcpy(dst, src + righthalf, rq->pageofs_out);
+			kunmap_atomic(dst);
+		}
 	}
 
-	if (dst)
-		kunmap_atomic(dst);
-
 	kunmap_atomic(src);
 	return 0;
 }
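The hunk above is the actual data-corruption fix: the old code compared rq->out[1] against the input page before ever checking nrpages_out, so a stale out[1] value could trigger the in-place memmove even when the output was a single page. Below is a minimal userspace model of the fixed copy logic; the buffer names and flat-buffer setup are illustrative only (the kernel operates on pages mapped via kmap_atomic(), which this sketch omits).

/* Userspace model of the fixed shifted-copy logic; not the kernel code. */
#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096u

static void shifted_copy(unsigned char *out0, unsigned char *out1,
			 unsigned char *src, unsigned int pageofs_out,
			 unsigned int nrpages_out)
{
	unsigned int righthalf = PAGE_SIZE - pageofs_out;

	/* the head of the block lands at pageofs_out in the first page */
	if (out0)
		memcpy(out0 + pageofs_out, src, righthalf);

	/*
	 * Touch the tail page only when the output really spans two
	 * pages; guarding on nrpages_out first is exactly what the
	 * fix above introduces.
	 */
	if (nrpages_out == 2) {
		assert(out1);
		if (out1 == src)	/* in-place: shift the tail down */
			memmove(src, src + righthalf, pageofs_out);
		else
			memcpy(out1, src + righthalf, pageofs_out);
	}
}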
fs/erofs/internal.h
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag);
+					     pgoff_t index);
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp, bool tag);
+			     struct erofs_workgroup *grp);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
fs/erofs/utils.c
@@ -59,7 +59,7 @@ static int erofs_workgroup_get(struct erofs_workgroup *grp)
 }
 
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag)
+					     pgoff_t index)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		*tag = xa_pointer_tag(grp);
-		grp = xa_untag_pointer(grp);
-
 		if (erofs_workgroup_get(grp)) {
 			/* prefer to relax rcu read side */
 			rcu_read_unlock();
@@ -84,8 +81,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 }
 
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp,
-			     bool tag)
+			     struct erofs_workgroup *grp)
 {
 	struct erofs_sb_info *sbi;
 	int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);
 
-	grp = xa_tag_pointer(grp, tag);
-
 	/*
 	 * Bump up reference count before making this workgroup
 	 * visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
 	 */
-	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
-						     grp->index)) != grp);
+	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
 
 	/*
 	 * If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 					   batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+		struct erofs_workgroup *grp = batch[i];
 
 		first_index = grp->index + 1;
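For context on what the tag removal undoes: workgroup pointers used to be stored in the radix tree with a boolean tag folded into the pointer's low bits, but every caller passed a tag of false/0, so tagging and untagging degenerated to identity operations. A rough userspace sketch of the low-bit tagging idea, assuming pointers aligned to at least 4 bytes; it mirrors the xa_tag_pointer()/xa_untag_pointer() helpers only loosely and is not the kernel implementation.

/* Illustrative low-bit pointer tagging; not the kernel's xarray code. */
#include <stdint.h>

#define TAG_MASK ((uintptr_t)3)	/* low two bits are free on >=4-byte alignment */

static inline void *tag_pointer(void *p, uintptr_t tag)
{
	return (void *)((uintptr_t)p | (tag & TAG_MASK));
}

static inline uintptr_t pointer_tag(void *entry)
{
	return (uintptr_t)entry & TAG_MASK;
}

static inline void *untag_pointer(void *entry)
{
	/* with tag == 0 this returns the pointer unchanged, which is
	 * why dropping the always-false tag argument is safe here */
	return (void *)((uintptr_t)entry & ~TAG_MASK);
}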
fs/erofs/xattr.h
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
 
 static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
 {
-static const struct xattr_handler *xattr_handler_map[] = {
-	[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+	static const struct xattr_handler *xattr_handler_map[] = {
+		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
-	[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
-	[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
-		&posix_acl_default_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+			&posix_acl_access_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+			&posix_acl_default_xattr_handler,
 #endif
-	[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+		[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
 #ifdef CONFIG_EROFS_FS_SECURITY
-	[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+		[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
 #endif
-};
+	};
 
 	return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
 		xattr_handler_map[idx] : NULL;
fs/erofs/zdata.c
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	struct z_erofs_pcluster *pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
-	bool tag;
 
-	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (!grp)
 		return -ENOENT;
 
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	 */
 	mutex_trylock(&cl->lock);
 
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
 	if (err) {
 		mutex_unlock(&cl->lock);
 		kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
-				       unsigned int nr_bios, bool force_fg)
-{
-	/*
-	 * although background is preferred, no one is pending for submission.
-	 * don't issue workqueue for decompression but drop it directly instead.
-	 */
-	if (force_fg || nr_bios)
-		return false;
-
-	kvfree(q[JQ_SUBMIT]);
-	return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
 				 z_erofs_next_pcluster_t owned_head,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t uninitialized_var(last_index);
-	bool force_submit = false;
-	unsigned int nr_bios;
+	unsigned int nr_bios = 0;
+	struct bio *bio = NULL;
 
-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
-		return false;
-
-	force_submit = false;
-	bio = NULL;
-	nr_bios = 0;
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 	do {
 		struct z_erofs_pcluster *pcl;
-		unsigned int clusterpages;
-		pgoff_t first_index;
-		struct page *page;
-		unsigned int i = 0, bypass = 0;
-		int err;
+		pgoff_t cur, end;
+		unsigned int i = 0;
+		bool bypass = true;
 
 		/* no possible 'owned_head' equals the following */
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
 
-		clusterpages = BIT(pcl->clusterbits);
+		cur = pcl->obj.index;
+		end = cur + BIT(pcl->clusterbits);
 
 		/* close the main owned chain at first */
 		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-		first_index = pcl->obj.index;
-		force_submit |= (first_index != last_index + 1);
+		do {
+			struct page *page;
+			int err;
 
-repeat:
-		page = pickup_page_for_submission(pcl, i, pagepool,
-						  MNGD_MAPPING(sbi),
-						  GFP_NOFS);
-		if (!page) {
-			force_submit = true;
-			++bypass;
-			goto skippage;
-		}
+			page = pickup_page_for_submission(pcl, i++, pagepool,
+							  MNGD_MAPPING(sbi),
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
+			if (bio && cur != last_index + 1) {
 submit_bio_retry:
-			submit_bio(bio);
-			bio = NULL;
-		}
+				submit_bio(bio);
+				bio = NULL;
+			}
 
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
-			++nr_bios;
-		}
-
-		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (err < PAGE_SIZE)
-			goto submit_bio_retry;
+			err = bio_add_page(bio, page, PAGE_SIZE, 0);
+			if (err < PAGE_SIZE)
+				goto submit_bio_retry;
+
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
-
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
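The restructured loop above is, at heart, a run-coalescing batcher: it keeps appending physically adjacent page indices to the open bio and flushes whenever the run breaks (cur != last_index + 1) or bio_add_page() refuses the page. A self-contained model of that pattern with hypothetical names follows; it is a sketch of the batching idea, not the block-layer API.

/* Standalone model of run-coalescing submission; names are illustrative. */
#include <stdio.h>

#define BATCH_MAX 16	/* stand-in for BIO_MAX_PAGES */

struct batch {
	unsigned long start;	/* first index of the current batch */
	unsigned int cnt;	/* indices queued so far */
};

static void submit(struct batch *b)
{
	printf("submit [%lu, %lu)\n", b->start, b->start + b->cnt);
	b->cnt = 0;
}

static void queue_index(struct batch *b, unsigned long cur,
			unsigned long *last_index, unsigned int *nr_bios)
{
	/* discontiguous with the last queued index, or batch full: flush */
	if (b->cnt && (cur != *last_index + 1 || b->cnt == BATCH_MAX))
		submit(b);

	if (!b->cnt) {		/* open a fresh batch, like bio_alloc() */
		b->start = cur;
		++*nr_bios;
	}
	b->cnt++;
	*last_index = cur;
}

int main(void)
{
	struct batch b = { 0, 0 };
	unsigned long last_index = 0;
	unsigned int nr_bios = 0;
	/* two adjacent clusters coalesce into one batch; 40..41 does not */
	unsigned long idx[] = { 10, 11, 12, 13, 14, 15, 16, 17, 40, 41 };

	for (unsigned int i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
		queue_index(&b, idx[i], &last_index, &nr_bios);
	if (b.cnt)		/* final flush, like the trailing submit_bio() */
		submit(&b);
	printf("nr_bios = %u\n", nr_bios);
	return 0;
}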
@@ -1263,11 +1234,15 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	if (bio)
 		submit_bio(bio);
 
-	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
-		return true;
-
+	/*
+	 * although background is preferred, no one is pending for submission.
+	 * don't issue workqueue for decompression but drop it directly instead.
+	 */
+	if (!*force_fg && !nr_bios) {
+		kvfree(q[JQ_SUBMIT]);
+		return;
+	}
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);