linux_dsm_epyc7002/net/ceph/pagevec.c
Kirill A. Shutemov 09cbfeaf1a mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with chunks bigger than PAGE_SIZE.

This promise never materialized, and it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it is a constant source of confusion whether the
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.

Switching globally to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special.  They are
not.

The changes are pretty straightforward (a small before/after illustration follows the list):

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();
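
As a purely illustrative before/after (not part of the original commit; the
variable names are hypothetical), the rules above turn code like

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;
	page_cache_get(page);
	page_cache_release(page);

into

	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	get_page(page);
	put_page(page);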

This patch contains automated changes generated with coccinelle using the
script below.  For some reason, coccinelle doesn't patch header files;
I've run spatch on them manually.

The only manual adjustment after coccinelle is a revert of the changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach.  I'll
fix them manually in a separate patch.  Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-04 10:41:08 -07:00


#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					   int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	while (got < num_pages) {
		rc = get_user_pages_unlocked(
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
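
An illustrative caller, not part of this file: pinning a user buffer for a
direct write and dropping it again.  example_send_direct() is hypothetical;
calc_pages_for() is assumed to be the libceph helper from
<linux/ceph/libceph.h> that counts the pages a byte range spans, and the
includes at the top of this file are assumed.

static int example_send_direct(const void __user *buf, size_t len)
{
	int num_pages = calc_pages_for((unsigned long)buf, len);
	struct page **pages;

	/* pin the pages backing the user buffer (we only read from it) */
	pages = ceph_get_direct_page_vector(buf, num_pages, false);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... hand the pinned pages to an OSD request here ... */

	/* data only flowed out of the buffer, so nothing to dirty */
	ceph_put_page_vector(pages, num_pages, false);
	return 0;
}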
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const void __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
void ceph_copy_to_page_vector(struct page **pages,
			      const void *data,
			      loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
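
Again purely a sketch, not part of pagevec.c: staging an in-kernel payload in
a freshly allocated page vector.  example_stage_payload() is hypothetical;
calc_pages_for() is assumed as above, and the caller is expected to free the
result with ceph_release_page_vector().

static struct page **example_stage_payload(const void *payload, size_t len)
{
	int num_pages = calc_pages_for(0, len);
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return pages;

	/* copy the payload into the new pages, starting at offset 0 */
	ceph_copy_to_page_vector(pages, payload, 0, len);
	return pages;
}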
void ceph_copy_from_page_vector(struct page **pages,
				void *data,
				loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_SHIFT;

	off &= ~PAGE_MASK;
	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_SIZE, off + len);

		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_SIZE);
		len -= PAGE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
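
To make the page/offset arithmetic above concrete, assuming 4 KiB pages (the
numbers are only an example):

	/* off = 1000, len = 10000, PAGE_SIZE = 4096 (assumed) */
	ceph_zero_page_vector_range(1000, 10000, pages);
	/* head: page 0, bytes [1000, 4096) -> 3096 bytes zeroed */
	/* full: page 1, bytes [0, 4096)    -> 4096 bytes zeroed */
	/* tail: page 2, bytes [0, 2808)    -> 2808 bytes zeroed */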