Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
(synced 2024-12-26 18:15:09 +07:00), commit 4cf5892495

Patch series "Add support for fast mremap".

This series speeds up the mremap(2) syscall by copying page tables at
the PMD level even for non-THP systems.

There is concern that the extra 'address' argument that mremap passes to
pte_alloc may do something subtle, architecture-related in the future
that may make the scheme not work. Also we find that there is no point
in passing the 'address' to pte_alloc since it is unused.

This patch therefore removes this argument tree-wide, resulting in a
nice negative diff as well, while ensuring along the way that the
enabled architectures do not do anything funky with the 'address'
argument that goes unnoticed by the optimization.

Build and boot tested on x86-64. Build tested on arm64. The config
enablement patch for arm64 will be posted in the future after more
testing.

The changes were obtained by applying the following Coccinelle script
(thanks Julia for answering all Coccinelle questions!). The following
fix-ups were done manually:

* Removal of the address argument from pte_fragment_alloc
* Removal of the pte_alloc_one_fast definitions from m68k and microblaze

// Options: --include-headers --no-includes
// Note: I split the 'identifier fn' line, so if you are manually
// running it, please unsplit it so it runs for you.

virtual patch

@pte_alloc_func_def depends on patch exists@
identifier E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
type T2;
@@

 fn(...
- , T2 E2
 )
 { ... }

@pte_alloc_func_proto_noarg depends on patch exists@
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1, T2);
+ T3 fn(T1);
|
- T3 fn(T1, T2, T4);
+ T3 fn(T1, T2);
)

@pte_alloc_func_proto depends on patch exists@
identifier E1, E2, E4;
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1 E1, T2 E2);
+ T3 fn(T1 E1);
|
- T3 fn(T1 E1, T2 E2, T4 E4);
+ T3 fn(T1 E1, T2 E2);
)

@pte_alloc_func_call depends on patch exists@
expression E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

 fn(...
-, E2
 )

@pte_alloc_macro depends on patch exists@
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
identifier a, b, c;
expression e;
position p;
@@

(
- #define fn(a, b, c) e
+ #define fn(a, b) e
|
- #define fn(a, b) e
+ #define fn(a) e
)

Link: http://lkml.kernel.org/r/20181108181201.88826-2-joelaf@google.com
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Julia Lawall <Julia.Lawall@lip6.fr>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
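
For illustration only (reconstructed from the description above, not part
of the commit text): on one of the affected allocators the change has this
shape, with call sites updated to match:

-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm);

-pte = pte_alloc_one_kernel(mm, addr);
+pte = pte_alloc_one_kernel(mm);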

/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
		(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
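
/*
 * Illustrative sketch, not part of the original file: typical driver-style
 * use of the ioremap()/iounmap() pair defined above. The register window
 * base, size, and offset below are hypothetical.
 */
static int __maybe_unused example_mmio_read(void)
{
	void __iomem *regs = ioremap(0x84000000, 0x1000);
	u32 id;

	if (!regs)
		return -ENOMEM;
	id = readl(regs + 0x04);	/* read one 32-bit device register */
	pr_info("example device id: 0x%08x\n", id);
	iounmap(regs);
	return 0;
}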

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
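
/*
 * Worked example, added for illustration: with 4 KiB pages and a 32-bit
 * virtual address split 10/10/12 as the comments in map_page() describe,
 * va = 0xc0001000 selects first-level entry va >> 22 = 0x300 and
 * second-level entry (va >> 12) & 0x3ff = 0x001.
 */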

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
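
/*
 * Added note: (x) & ((x) - 1) clears the lowest set bit, so it is zero
 * only when x has exactly one bit set, i.e. is a power of two. For
 * example, is_power_of_2(4096) is 1, is_power_of_2(12) is 0, and the
 * (x) != 0 guard keeps is_power_of_2(0) at 0.
 */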

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
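
/*
 * Illustrative sketch, not part of the original file: iopa() walks the
 * page tables via get_pteptr() and returns 0 when nothing maps the
 * address, so callers can use it as a simple virt-to-phys lookup.
 */
static int __maybe_unused example_virt_to_phys(void *buf)
{
	unsigned long pa = iopa((unsigned long)buf);

	if (!pa)
		return -EFAULT;	/* no PTE maps this address */
	pr_info("0x%p maps to physical 0x%08lx\n", buf, pa);
	return 0;
}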

__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}
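
/*
 * Added note: this is one of the allocators covered by the tree-wide
 * cleanup in the commit message above; before that series it also took
 * an (unused) unsigned long address argument. The mem_init_done check
 * matters because the page allocator is not up during early boot, so
 * the early path pulls a page from the boot allocator instead. A caller
 * now simply does:
 *
 *	pte_t *pte = pte_alloc_one_kernel(&init_mm);
 */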

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}
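
/*
 * Illustrative sketch, not part of the original file: a fixmap slot is
 * selected by index and mapped at its fixed virtual address. The index
 * name and physical address below are hypothetical.
 *
 *	__set_fixmap(FIX_EXAMPLE, 0x84000000, PAGE_KERNEL);
 */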