mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-01-20 00:06:14 +07:00
btrfs: Cleanup compress_file_range()
Remove unnecessary checks in compress_file_range(). Signed-off-by: Ashish Samant <ashish.samant@oracle.com> [ minor coding style fixups ] Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
6f034ece34
commit
c8bb0c8bd2
@@ -585,9 +585,27 @@ static noinline void compress_file_range(struct inode *inode,
|
||||
will_compress = 0;
|
||||
} else {
|
||||
num_bytes = total_in;
|
||||
*num_added += 1;
|
||||
|
||||
/*
|
||||
* The async work queues will take care of doing actual
|
||||
* allocation on disk for these compressed pages, and
|
||||
* will submit them to the elevator.
|
||||
*/
|
||||
add_async_extent(async_cow, start, num_bytes,
|
||||
total_compressed, pages, nr_pages_ret,
|
||||
compress_type);
|
||||
|
||||
if (start + num_bytes < end) {
|
||||
start += num_bytes;
|
||||
pages = NULL;
|
||||
cond_resched();
|
||||
goto again;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!will_compress && pages) {
|
||||
if (pages) {
|
||||
/*
|
||||
* the compression code ran but failed to make things smaller,
|
||||
* free any pages it allocated and our page pointer array
|
||||
@@ -607,43 +625,23 @@ static noinline void compress_file_range(struct inode *inode,
|
||||
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
|
||||
}
|
||||
}
|
||||
if (will_compress) {
|
||||
*num_added += 1;
|
||||
|
||||
/* the async work queues will take care of doing actual
|
||||
* allocation on disk for these compressed pages,
|
||||
* and will submit them to the elevator.
|
||||
*/
|
||||
add_async_extent(async_cow, start, num_bytes,
|
||||
total_compressed, pages, nr_pages_ret,
|
||||
compress_type);
|
||||
|
||||
if (start + num_bytes < end) {
|
||||
start += num_bytes;
|
||||
pages = NULL;
|
||||
cond_resched();
|
||||
goto again;
|
||||
}
|
||||
} else {
|
||||
cleanup_and_bail_uncompressed:
|
||||
/*
|
||||
* No compression, but we still need to write the pages in
|
||||
* the file we've been given so far. redirty the locked
|
||||
* page if it corresponds to our extent and set things up
|
||||
* for the async work queue to run cow_file_range to do
|
||||
* the normal delalloc dance
|
||||
*/
|
||||
if (page_offset(locked_page) >= start &&
|
||||
page_offset(locked_page) <= end) {
|
||||
__set_page_dirty_nobuffers(locked_page);
|
||||
/* unlocked later on in the async handlers */
|
||||
}
|
||||
if (redirty)
|
||||
extent_range_redirty_for_io(inode, start, end);
|
||||
add_async_extent(async_cow, start, end - start + 1,
|
||||
0, NULL, 0, BTRFS_COMPRESS_NONE);
|
||||
*num_added += 1;
|
||||
}
|
||||
/*
|
||||
* No compression, but we still need to write the pages in the file
|
||||
* we've been given so far. redirty the locked page if it corresponds
|
||||
* to our extent and set things up for the async work queue to run
|
||||
* cow_file_range to do the normal delalloc dance.
|
||||
*/
|
||||
if (page_offset(locked_page) >= start &&
|
||||
page_offset(locked_page) <= end)
|
||||
__set_page_dirty_nobuffers(locked_page);
|
||||
/* unlocked later on in the async handlers */
|
||||
|
||||
if (redirty)
|
||||
extent_range_redirty_for_io(inode, start, end);
|
||||
add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
|
||||
BTRFS_COMPRESS_NONE);
|
||||
*num_added += 1;
|
||||
|
||||
return;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user