5beb493052
The old anon_vma code can lead to scalability issues with heavily forking workloads. Specifically, each anon_vma is shared between the parent process and all its child processes.

In a workload with 1000 child processes and a VMA with 1000 anonymous pages per process that get COWed, this leads to a system with a million anonymous pages in the same anon_vma, each of which is mapped in just one of the 1000 processes. However, the current rmap code needs to walk them all, leading to O(N) scanning complexity for each page.

This can result in systems where one CPU is walking the page tables of 1000 processes in page_referenced_one, while all other CPUs are stuck on the anon_vma lock. This leads to catastrophic failure for a benchmark like AIM7, where the total number of processes can reach into the tens of thousands. Real workloads are still a factor of 10 less process-intensive than AIM7, but they are catching up.

This patch changes the way anon_vmas and VMAs are linked, which allows us to associate multiple anon_vmas with a VMA. At fork time, each child process gets its own anon_vmas, in which its COWed pages will be instantiated. The parents' anon_vma is also linked to the VMA, because non-COWed pages could be present in any of the children.

This reduces rmap scanning complexity to O(1) for the pages of the 1000 child processes, with O(N) complexity for at most 1/N pages in the system. This reduces the average scanning cost in heavily forking workloads from O(N) to 2.

The only real complexity in this patch stems from the fact that linking a VMA to anon_vmas now involves memory allocations. This means vma_adjust can fail, if it needs to attach a VMA to anon_vma structures. This in turn means error handling needs to be added to the calling functions.

A second source of complexity is that, because there can be multiple anon_vmas, the anon_vma linking in vma_adjust can no longer be done under "the" anon_vma lock. To prevent the rmap code from walking up an incomplete VMA, this patch introduces the VM_LOCK_RMAP VMA flag. This bit flag uses the same slot as the NOMMU VM_MAPPED_COPY, with an ifdef in mm.h to make sure it is impossible to compile a kernel that needs both symbolic values for the same bit flag.

Some test results: without the anon_vma changes, when AIM7 hits around 9.7k users (on a test box with 16GB RAM and not quite enough IO), the system ends up running >99% in system time, with every CPU on the same anon_vma lock in the pageout code. With these changes, AIM7 hits the cross-over point around 29.7k users. This happens with ~99% IO wait time; there never seems to be any spike in system time. The anon_vma lock contention appears to be resolved.

[akpm@linux-foundation.org: cleanups]
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
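For context, the many-to-many linking the message describes is built from small chain objects that sit between a VMA and each anon_vma it is associated with. The sketch below is illustrative only and is not part of mm/mremap.c; the field names follow the anon_vma_chain structure this patch adds elsewhere in mm, slightly simplified.

/*
 * Illustrative sketch (not part of this file): one chain object per
 * VMA <-> anon_vma association, so a VMA can reference several
 * anon_vmas and an anon_vma can be referenced by several VMAs.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;     /* the VMA this link belongs to */
        struct anon_vma *anon_vma;      /* one of possibly several anon_vmas */
        struct list_head same_vma;      /* all anon_vmas linked to this VMA */
        struct list_head same_anon_vma; /* all VMAs sharing this anon_vma */
};

At fork, the child allocates a fresh anon_vma plus chain objects for it and for each of the parent's anon_vmas, so rmap only has to walk the processes that can actually map a given page.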
522 lines
13 KiB
C
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;

        return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                return NULL;

        return pmd;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr)
{
        struct address_space *mapping = NULL;
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        unsigned long old_start;

        old_start = old_addr;
        mmu_notifier_invalidate_range_start(vma->vm_mm,
                                            old_start, old_end);
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
                 * moving file-based ptes, we must lock truncate_pagecache
                 * out, since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                if (new_vma->vm_truncate_count &&
                    new_vma->vm_truncate_count != vma->vm_truncate_count)
                        new_vma->vm_truncate_count = 0;
        }

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map_nested(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
                pte = ptep_clear_flush(vma, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap_nested(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
        mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}

#define LATENCY_LIMIT (64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                if (next - 1 > old_end)
                        next = old_end;
                extent = next - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
                if (!new_pmd)
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                                new_vma, new_pmd, new_addr);
        }

        return len + old_addr - old_end;        /* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped.  But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                                                MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        mm->total_vm += new_len >> PAGE_SHIFT;
        vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                if (new_len > old_len)
                        mlock_vma_pages_range(new_vma, new_addr + old_len,
                                                       new_addr + new_len);
        }

        return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (!vma || vma->vm_start > addr)
                goto Efault;

        if (is_vm_hugetlb_page(vma))
                goto Einval;

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto Efault;

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
                if (new_len > old_len)
                        goto Efault;
        }

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        goto Eagain;
        }

        if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
                goto Enomem;

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory(charged))
                        goto Efault;
                *p = charged;
        }

        return vma;

Efault: /* very odd choice for most of the cases, but... */
        return ERR_PTR(-EFAULT);
Einval:
        return ERR_PTR(-EINVAL);
Enomem:
        return ERR_PTR(-ENOMEM);
Eagain:
        return ERR_PTR(-EAGAIN);
}

static unsigned long mremap_to(unsigned long addr,
        unsigned long old_len, unsigned long new_addr,
        unsigned long new_len)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (new_addr & ~PAGE_MASK)
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Check if the location we're moving into overlaps the
         * old location at all, and fail if it does.
         */
        if ((new_addr <= addr) && (new_addr+new_len) > addr)
                goto out;

        if ((addr <= new_addr) && (addr+old_len) > new_addr)
                goto out;

        ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
        if (ret)
                goto out;

        ret = do_munmap(mm, new_addr, new_len);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (ret & ~PAGE_MASK)
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr);
        if (!(ret & ~PAGE_MASK))
                goto out;
out1:
        vm_unacct_memory(charged);

out:
        return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;
        if (end < vma->vm_end) /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                goto out;

        if (flags & MREMAP_FIXED) {
                if (flags & MREMAP_MAYMOVE)
                        ret = mremap_to(addr, old_len, new_addr, new_len);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* old_len exactly to the end of the area..
         */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        if (vma_adjust(vma, vma->vm_start, addr + new_len,
                                       vma->vm_pgoff, NULL)) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        mm->total_vm += pages;
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                mlock_vma_pages_range(vma, addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
                                        map_flags);
                if (new_addr & ~PAGE_MASK) {
                        ret = new_addr;
                        goto out;
                }

                ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                if (ret)
                        goto out;
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
        return ret;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
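The file above implements the mremap(2) system call; a minimal userspace sketch of how it is typically invoked follows. It is illustrative only and not part of the kernel source; it assumes a Linux/glibc environment where defining _GNU_SOURCE exposes the mremap() prototype.

/* Grow an anonymous mapping, allowing the kernel to move it if needed. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* MREMAP_MAYMOVE lets do_mremap() relocate the VMA when it
         * cannot be expanded in place (the move_vma() path above). */
        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;

        printf("remapped %p -> %p\n", p, q);
        munmap(q, new_len);
        return 0;
}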