linux_dsm_epyc7002/net/ceph/pagevec.c
Dave Hansen d4edcf0d56 mm/gup: Switch all callers of get_user_pages() to not pass tsk/mm
We will soon modify the vanilla get_user_pages() so it can no
longer be used on mm/tasks other than 'current/current->mm',
which is by far the most common way it is called.  For now,
we allow the old-style calls, but warn when they are used.
(implemented in the previous patch)

This patch switches all callers of:

	get_user_pages()
	get_user_pages_unlocked()
	get_user_pages_locked()

to stop passing tsk/mm so they will no longer see the warnings.
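
For illustration (a schematic before/after; the exact arguments vary
per call site), a typical conversion under the new API is:

	get_user_pages_unlocked(current, current->mm, start,
				nr_pages, write, force, pages);

becoming:

	get_user_pages_unlocked(start, nr_pages, write, force, pages);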

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: jack@suse.cz
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210156.113E9407@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-02-16 10:11:12 +01:00

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	while (got < num_pages) {
		rc = get_user_pages_unlocked(
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
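
/*
 * Example usage (an illustrative sketch, not a real in-tree caller):
 * pin a user buffer for direct I/O and drop the references afterwards.
 *
 *	struct page **pages;
 *
 *	pages = ceph_get_direct_page_vector(ubuf, num_pages, true);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... perform I/O against the pinned pages ...
 *	ceph_put_page_vector(pages, num_pages, true);
 */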

void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
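
/*
 * Example usage (an illustrative sketch): allocate a vector of kernel
 * pages and free it again.  Note that vectors from
 * ceph_alloc_page_vector() pair with ceph_release_page_vector(), not
 * with ceph_put_page_vector().
 *
 *	struct page **pages = ceph_alloc_page_vector(n, GFP_NOFS);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... fill the pages, e.g. with ceph_copy_to_page_vector() ...
 *	ceph_release_page_vector(pages, n);
 */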

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const void __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
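
/*
 * Example (an illustrative sketch): copy a userspace buffer into the
 * start of a page vector.  The function returns len on success and
 * -EFAULT if a chunk of the copy makes no progress.
 *
 *	int ret = ceph_copy_user_to_page_vector(pages, ubuf, 0, len);
 *
 *	if (ret < 0)
 *		return ret;
 */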

void ceph_copy_to_page_vector(struct page **pages,
			      const void *data,
			      loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

void ceph_copy_from_page_vector(struct page **pages,
				void *data,
				loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
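
/*
 * Example (an illustrative sketch): a kernel-buffer round trip through
 * a page vector, writing at byte offset 'off' and reading it back:
 *
 *	ceph_copy_to_page_vector(pages, src, off, len);
 *	ceph_copy_from_page_vector(pages, dst, off, len);
 */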

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
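
/*
 * Example (an illustrative sketch): after a short read that filled only
 * 'read' of the expected 'len' bytes from the head of the vector, zero
 * the remainder of the extent:
 *
 *	ceph_zero_page_vector_range(read, len - read, pages);
 */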