zram: optimize memory operations with clear_page()/copy_page()

Some architectures provide architecture-specific, optimized versions of
clear_page()/copy_page(), which may perform better than
memset()/memcpy(). Use clear_page()/copy_page() to optimize zram
performance where possible.
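
For reference, on architectures without optimized implementations the
generic fallbacks are plain wrappers around memset()/memcpy() (a sketch
of the asm-generic definitions; the exact form can vary by kernel
version):

	/* Generic fallbacks (cf. include/asm-generic/page.h); architectures
	 * such as x86 and ARM provide hand-tuned implementations instead. */
	#define clear_page(page)	memset((page), 0, PAGE_SIZE)
	#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)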

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author:    Jiang Liu <jiang.liu@huawei.com>, 2013-06-07 00:07:30 +08:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0f0e3ba346
commit 42e99bd975

@@ -128,23 +128,26 @@ static void zram_free_page(struct zram *zram, size_t index)
 	meta->table[index].size = 0;
 }
 
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+	return bvec->bv_len != PAGE_SIZE;
+}
+
 static void handle_zero_page(struct bio_vec *bvec)
 {
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
 	user_mem = kmap_atomic(page);
-	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	if (is_partial_io(bvec))
+		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	else
+		clear_page(user_mem);
 	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
-{
-	return bvec->bv_len != PAGE_SIZE;
-}
-
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
 	int ret = LZO_E_OK;
@@ -154,13 +157,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle = meta->table[index].handle;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		memset(mem, 0, PAGE_SIZE);
+		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (meta->table[index].size == PAGE_SIZE)
-		memcpy(mem, cmem, PAGE_SIZE);
+		copy_page(mem, cmem);
 	else
 		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
 						mem, &clen);
@@ -309,11 +312,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-	memcpy(cmem, src, clen);
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+		copy_page(cmem, src);
 		kunmap_atomic(src);
+	} else {
+		memcpy(cmem, src, clen);
+	}
 
 	zs_unmap_object(meta->mem_pool, handle);
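
Note: clear_page()/copy_page() always operate on a full PAGE_SIZE buffer
(and may assume page alignment), which is why the hunks above use them
only when the request is not a partial I/O and keep memset()/memcpy()
for the partial case. A minimal sketch of that pattern, with a
hypothetical helper name (zero_user_buffer() is not part of this patch):

	#include <linux/mm.h>		/* clear_page(), PAGE_SIZE */
	#include <linux/string.h>	/* memset() */

	/* Zero a mapped page: take the arch-optimized whole-page path when
	 * the range covers the full page, otherwise memset() the sub-range. */
	static void zero_user_buffer(void *user_mem, unsigned int offset,
				     unsigned int len)
	{
		if (offset == 0 && len == PAGE_SIZE)
			clear_page(user_mem);
		else
			memset(user_mem + offset, 0, len);
	}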