mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-19 15:07:35 +07:00
commit e31cf2f4ca
Patch series "mm: consolidate definitions of page table accessors", v2.

The low-level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures and sometimes more than once.  For
instance, we have 31 definitions of pgd_offset() for 25 supported
architectures.

Most of these definitions are actually identical and typically boil
down to, e.g.:

static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

These definitions can be shared among 90% of the arches provided
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version there is always the
possibility to override the generic version with the usual ifdef magic.

These patches introduce include/linux/pgtable.h, which replaces
include/asm-generic/pgtable.h, and add the definitions of the page table
accessors to the new header.

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the
functions involving page table manipulations, e.g. pte_alloc() and
pmd_alloc().  So there is no point in explicitly including
<asm/pgtable.h> in files that already include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
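As a minimal sketch of the override mechanism the cover letter describes
(illustrative only, in the spirit of the resulting include/linux/pgtable.h,
not the literal patch contents), the shared accessors can be wrapped in
ifndef guards so that an architecture providing its own definition simply
pre-empts the generic one:

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

An architecture that needs a custom pmd_offset() then only has to define
its own version (and the matching macro) before these generic fallbacks
are reached.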
137 lines
3.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

static DEFINE_RAW_SPINLOCK(v6_lock);
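
/*
 * Editorial note: v6_lock serialises the aliasing copy/clear routines
 * below, which share a single set of per-colour COPYPAGE_V6_FROM/TO
 * mapping windows; only one CPU may own those mappings at a time.
 */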

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_atomic(from);
        kto = kmap_atomic(to);
        copy_page(kto, kfrom);
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
        __asm__("mcrr   p15, 0, %1, %0, c6      @ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - 1)
           : "cc");
}
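
/*
 * Editorial note: MCRR p15, 0, <end>, <start>, c6 is the ARMv6
 * "invalidate data cache range" operation; the range above covers
 * exactly one page, so stale lines for the destination are dropped
 * without being written back.
 */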

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;

        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping_file(from), from);
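
        /*
         * Editorial note: the PG_dcache_clean test above ensures any
         * dirty cache lines of the source page are written back before
         * it is read through the coloured alias set up below.
         */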

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(to));

        /*
         * Now copy the page using the same cache colour as the
         * page's ultimate destination.
         */
        raw_spin_lock(&v6_lock);
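
        /*
         * Editorial note: COPYPAGE_V6_FROM/TO are dedicated kernel
         * virtual windows, one page per possible cache colour; indexing
         * them by CACHE_COLOUR(vaddr) yields kernel mappings congruent
         * with the user mapping, so the VIPT cache sees a single alias.
         */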

        kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
        kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

        set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
        set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

        copy_page((void *)kto, (void *)kfrom);

        raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
        unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(page));

        /*
         * Now clear the page using the same cache colour as
         * the page's ultimate destination.
         */
        raw_spin_lock(&v6_lock);

        set_top_pte(to, mk_pte(page, PAGE_KERNEL));
        clear_page((void *)to);

        raw_spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
        .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
        .cpu_copy_user_highpage  = v6_copy_user_highpage_nonaliasing,
};
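
/*
 * Editorial note: the non-aliasing fast paths above are the defaults;
 * the initcall below swaps in the colour-aware variants at boot when
 * the D-cache is detected as VIPT aliasing.
 */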

static int __init v6_userpage_init(void)
{
        if (cache_is_vipt_aliasing()) {
                cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
                cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
        }

        return 0;
}

core_initcall(v6_userpage_init);