Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:50:53 +07:00)
Merge branch 'mm-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull mm gup cleanup from Ingo Molnar:
 "This removes the ugly get-user-pages API hack, now that all upstream
  code has been migrated to it"

("ugly" is putting it mildly. But it worked.. - Linus)

* 'mm-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm/gup: Remove the macro overload API migration helpers from the get_user*() APIs
commit a1f983174d
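For callers, the practical effect of the series this merge completes is that the current-task get_user_pages*() variants lose their leading tsk/mm arguments, while get_user_pages_remote() keeps them for operating on another task's mm. A minimal, hypothetical call-site sketch (not taken from this commit; it assumes a kernel build context of this era, where get_user_pages() expects mmap_sem held for read):

/* Hypothetical driver-style helper, for illustration only. */
#include <linux/mm.h>
#include <linux/sched.h>

static long pin_own_buffer(unsigned long start, unsigned long nr_pages,
			   struct page **pages)
{
	long ret;

	down_read(&current->mm->mmap_sem);
	/*
	 * Old form (removed by this series):
	 *	get_user_pages(current, current->mm, start, nr_pages,
	 *		       1, 0, pages, NULL);
	 * New form: current/current->mm are implied, so the two leading
	 * arguments are simply dropped.
	 */
	ret = get_user_pages(start, nr_pages, 1 /* write */, 0 /* force */,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);

	/*
	 * Pinning pages of *another* task's mm is what
	 * get_user_pages_remote(tsk, mm, ...) is for; it keeps the
	 * explicit task/mm arguments.
	 */
	return ret;
}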
@@ -1250,78 +1250,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
 			       int write, int force, struct page **pages,
 			       unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			      int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm.  These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build.  The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages().  If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments.  The behavior will be the same
- * as it is now.  If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions?  Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
-		struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
-	get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
-		get_user_pages8, x, \
-		get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
-		int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
-	get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
-		get_user_pages_locked8, x, \
-		get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
-	get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
-		get_user_pages_unlocked7, x, \
-		get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
 /* Container for pinned pfns / pages */
 struct frame_vector {
 	unsigned int nr_allocated;	/* Number of frames we have space for */
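The block deleted above is the arity-overload shim: GUP_MACRO counts the arguments passed to get_user_pages() and expands to either the deprecated 8-argument get_user_pages8() (explicit tsk/mm form) or the 6-argument get_user_pages6(), so both calling conventions could coexist during the migration. A self-contained userspace sketch of the same preprocessor trick, using made-up demo names rather than the kernel functions:

/* Standalone illustration of argument-count dispatch; demo names only. */
#include <stdio.h>

static long gup_demo6(unsigned long start, unsigned long nr, int write,
		      int force, void **pages, void **vmas)
{
	(void)write; (void)force; (void)pages; (void)vmas;
	printf("6-arg variant: start=%#lx nr=%lu\n", start, nr);
	return (long)nr;
}

static long gup_demo8(void *tsk, void *mm, unsigned long start,
		      unsigned long nr, int write, int force,
		      void **pages, void **vmas)
{
	(void)tsk; (void)mm;
	printf("8-arg (deprecated) variant: start=%#lx nr=%lu\n", start, nr);
	return gup_demo6(start, nr, write, force, pages, vmas);
}

/*
 * PICK always expands to its 9th argument.  A gup_demo() call with eight
 * arguments pushes gup_demo8 into that slot; a call with six arguments
 * pushes gup_demo6 there instead.  The trailing "x" tokens are padding
 * that is never evaluated.
 */
#define PICK(_1, _2, _3, _4, _5, _6, _7, _8, fn, ...) fn
#define gup_demo(...) \
	PICK(__VA_ARGS__, gup_demo8, x, gup_demo6, x, x, x, x, x)(__VA_ARGS__)

int main(void)
{
	void *pages[1] = { 0 }, *vmas[1] = { 0 };

	gup_demo(0x1000UL, 1UL, 1, 0, pages, vmas);       /* -> gup_demo6 */
	gup_demo(0, 0, 0x1000UL, 1UL, 1, 0, pages, vmas); /* -> gup_demo8 */
	return 0;
}

Built with any C99 compiler, the first call resolves to the 6-argument variant and the second to the 8-argument one, which is how the removed helpers let old tsk/mm call sites keep building (while emitting __deprecated warnings) until they were converted.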
mm/gup.c: 52 changed lines
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *      if (locked)
  *          up_read(&mm->mmap_sem);
  */
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			   int write, int force, struct page **pages,
 			   int *locked)
 {
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
 				       write, force, pages, NULL, locked, true,
 				       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  * or if "force" shall be set to 1 (get_user_pages_fast misses the
  * "force" parameter).
  */
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /*
  * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * and mm being operated on are the current task's.  We also
  * obviously don't pass FOLL_REMOTE in here.
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
 			       write, force, pages, vmas, NULL, false,
 			       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
 /**
  * populate_vma_page_range() - populate a range of pages in the vma.
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
-	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
 	start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
-					      nr_pages - nr, write, 0, pages);
+		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 }
 
 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages, int *locked)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
-	return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, unsigned long nr_pages,
-			      int write, int force, struct page **pages)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
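The tail of get_user_pages_fast() above keeps the same fast-path/slow-path split: pin what the lockless walk can, then hand the remainder to the (now 5-argument) get_user_pages_unlocked() and combine the two counts. A compilable userspace model of that control flow, where the helper functions and the exact merge of return values are stand-ins for illustration, not the kernel's code:

/* Simplified model of the fast-then-slow pinning fallback shown above. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

/* Pretend the lockless walk can only pin the first half of the range. */
static int fast_path_pin(unsigned long start, int nr_pages, int **pages)
{
	(void)start; (void)pages;
	return nr_pages / 2;
}

/* Pretend the slow path pins everything it is asked for. */
static long slow_path_pin(unsigned long start, unsigned long nr_pages,
			  int **pages)
{
	(void)start; (void)pages;
	return (long)nr_pages;
}

static int pin_pages_fast(unsigned long start, int nr_pages, int **pages)
{
	int nr, ret;

	nr = fast_path_pin(start, nr_pages, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Retry the remainder through the slow path. */
		start += (unsigned long)nr << DEMO_PAGE_SHIFT;
		pages += nr;
		ret = (int)slow_path_pin(start, (unsigned long)(nr_pages - nr),
					 pages);

		/* Be careful with return values: a slow-path error must not
		 * hide pages already pinned by the fast path. */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}
	return ret;
}

int main(void)
{
	int *pages[8] = { 0 };

	printf("pinned %d of 8 pages\n", pin_pages_fast(0x10000UL, 8, pages));
	return 0;
}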
mm/nommu.c: 44 changed lines
@@ -15,8 +15,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define __DISABLE_GUP_DEPRECATED
-
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
@@ -161,7 +159,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
 	return __get_user_pages(current, current->mm, start, nr_pages, flags,
 				pages, vmas, NULL);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    int *locked)
 {
-	return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, 0);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
 	return 0;
 }
 subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
-			    int *locked)
-{
-	return get_user_pages_locked6(start, nr_pages, write,
-				      force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, unsigned long nr_pages,
-			      int write, int force, struct page **pages)
-{
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-