mm: remove unnecessary wrapper function do_mmap_pgoff()
The current split between do_mmap() and do_mmap_pgoff() was introduced in
commit 1fcfd8db7f ("mm, mpx: add "vm_flags_t vm_flags" arg to
do_mmap_pgoff()") to support MPX.
The wrapper function do_mmap_pgoff() always passed 0 as the value of the
vm_flags argument to do_mmap(). However, MPX support has subsequently
been removed from the kernel and there were no more direct callers of
do_mmap(); all calls were going via do_mmap_pgoff().
Simplify the code by removing do_mmap_pgoff() and changing all callers to
directly call do_mmap(), which now no longer takes a vm_flags argument.
Signed-off-by: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Link: http://lkml.kernel.org/r/20200727194109.1371462-1-pcc@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d70cec8983
commit 45e55300f1
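The change is mechanical: a one-line forwarding shim is deleted and its
callers are renamed. As a condensed before/after sketch of the shape
involved, pulled from the include/linux/mm.h hunk in the diff below (the
comments are editorial, not part of the patch):

/* Before: every caller went through the wrapper, which hard-coded the
 * vm_flags argument to 0 once MPX, the only user of a non-zero value,
 * had been removed. */
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

/* After: the wrapper is gone, do_mmap() drops the vm_flags parameter and
 * computes it internally, and callers invoke do_mmap() directly with an
 * otherwise unchanged argument list. */
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate, struct list_head *uf);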
fs/aio.c (6 changed lines)

@@ -525,9 +525,9 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 		return -EINTR;
 	}
 
-	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
-				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED, 0, &unused, NULL);
+	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
+				 PROT_READ | PROT_WRITE,
+				 MAP_SHARED, 0, &unused, NULL);
 	mmap_write_unlock(mm);
 	if (IS_ERR((void *)ctx->mmap_base)) {
 		ctx->mmap_size = 0;
fs/hugetlbfs/inode.c

@@ -140,7 +140,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
-	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
include/linux/fs.h

@@ -528,7 +528,7 @@ static inline int mapping_mapped(struct address_space *mapping)
 
 /*
  * Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
  * marks vma as VM_SHARED if it is shared, and the file was opened for
  * writing i.e. vma may be mprotected writable even if now readonly.
  *
include/linux/mm.h

@@ -2546,23 +2546,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct list_head *uf);
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf);
+	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot, unsigned long flags,
-	unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf)
-{
-	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
-
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
 			 int ignore_errors);
ipc/shm.c

@@ -1558,7 +1558,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
 			goto invalid;
 	}
 
-	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
+	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
 	*raddr = addr;
 	err = 0;
 	if (IS_ERR_VALUE(addr))
mm/mmap.c (16 changed lines)

@@ -1030,7 +1030,7 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
  *
  * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
+ * indices (16TB on ia32) because do_mmap() does not permit mmap's which
  * wrap, nor mmaps which cover the final page at index -1UL.
  */
 static int
@@ -1365,11 +1365,11 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
-			unsigned long flags, vm_flags_t vm_flags,
-			unsigned long pgoff, unsigned long *populate,
-			struct list_head *uf)
+			unsigned long flags, unsigned long pgoff,
+			unsigned long *populate, struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
+	vm_flags_t vm_flags;
 	int pkey = 0;
 
 	*populate = 0;
@@ -1431,7 +1431,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * to. we assume access permissions have been handled by the open
 	 * of the memory object, so we don't do any here.
 	 */
-	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
 	if (flags & MAP_LOCKED)
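The operator change in this hunk follows directly from the new signature:
vm_flags used to arrive as a caller-supplied parameter (always 0 via the
deleted wrapper), so the function accumulated bits with |=; now vm_flags is
a fresh local inside do_mmap() and must be assigned outright. The same |=
to = switch appears in the mm/nommu.c version of do_mmap() further down.
Condensed (comments editorial):

	vm_flags_t vm_flags;	/* now a local, not a caller-seeded parameter */

	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;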
@@ -2230,7 +2230,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		/*
 		 * mmap_region() will call shmem_zero_setup() to create a file,
 		 * so use shmem's get_unmapped_area in case it can be huge.
-		 * do_mmap_pgoff() will clear pgoff, so match alignment.
+		 * do_mmap() will clear pgoff, so match alignment.
 		 */
 		pgoff = 0;
 		get_area = shmem_get_unmapped_area;
@@ -3003,7 +3003,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	}
 
 	file = get_file(vma->vm_file);
-	ret = do_mmap_pgoff(vma->vm_file, start, size,
+	ret = do_mmap(vma->vm_file, start, size,
 			prot, flags, pgoff, &populate, NULL);
 	fput(file);
 out:
@@ -3223,7 +3223,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 	 * By setting it to reflect the virtual start address of the
 	 * vma, merges and splits can happen in a seamless way, just
 	 * using the existing file pgoff checks and manipulations.
-	 * Similarly in do_mmap_pgoff and in do_brk.
+	 * Similarly in do_mmap and in do_brk.
 	 */
 	if (vma_is_anonymous(vma)) {
 		BUG_ON(vma->anon_vma);
mm/nommu.c

@@ -1078,7 +1078,6 @@ unsigned long do_mmap(struct file *file,
 			unsigned long len,
 			unsigned long prot,
 			unsigned long flags,
-			vm_flags_t vm_flags,
 			unsigned long pgoff,
 			unsigned long *populate,
 			struct list_head *uf)
@@ -1086,6 +1085,7 @@ unsigned long do_mmap(struct file *file,
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *rb;
+	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
 
@@ -1104,7 +1104,7 @@ unsigned long do_mmap(struct file *file,
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
-	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
+	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping */
 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ EXPORT_SYMBOL_GPL(access_process_vm);
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
  * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * vm_regions that extend that beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
mm/shmem.c

@@ -4245,7 +4245,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
- * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
+ * @vma: the vma to be mmapped is prepared by do_mmap
  */
 int shmem_zero_setup(struct vm_area_struct *vma)
 {
mm/util.c

@@ -503,8 +503,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	if (!ret) {
 		if (mmap_write_lock_killable(mm))
 			return -EINTR;
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
-				    &populate, &uf);
+		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
+			      &uf);
 		mmap_write_unlock(mm);
 		userfaultfd_unmap_complete(mm, &uf);
 		if (populate)