mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 07:01:33 +07:00
e31cf2f4ca
Patch series "mm: consolidate definitions of page table accessors", v2. The low level page table accessors (pXY_index(), pXY_offset()) are duplicated across all architectures and sometimes more than once. For instance, we have 31 definition of pgd_offset() for 25 supported architectures. Most of these definitions are actually identical and typically it boils down to, e.g. static inline unsigned long pmd_index(unsigned long address) { return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); } These definitions can be shared among 90% of the arches provided XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined. For architectures that really need a custom version there is always possibility to override the generic version with the usual ifdefs magic. These patches introduce include/linux/pgtable.h that replaces include/asm-generic/pgtable.h and add the definitions of the page table accessors to the new header. This patch (of 12): The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the functions involving page table manipulations, e.g. pte_alloc() and pmd_alloc(). So, there is no point to explicitly include <asm/pgtable.h> in the files that include <linux/mm.h>. The include statements in such cases are remove with a simple loop: for f in $(git grep -l "include <linux/mm.h>") ; do sed -i -e '/include <asm\/pgtable.h>/ d' $f done Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Cain <bcain@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: "David S. 
Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Ungerer <gerg@linux-m68k.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ley Foon Tan <ley.foon.tan@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nick Hu <nickhu@andestech.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vincent Chen <deanbo422@gmail.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will@kernel.org> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
154 lines
4.0 KiB
C
154 lines
4.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/*
|
|
* Performance counter callchain support - powerpc architecture code
|
|
*
|
|
* Copyright © 2009 Paul Mackerras, IBM Corporation.
|
|
*/
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/perf_event.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/mm.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/sigcontext.h>
|
|
#include <asm/ucontext.h>
|
|
#include <asm/vdso.h>
|
|
#include <asm/pte-walk.h>
|
|
|
|
#include "callchain.h"
|
|
|
|
/*
|
|
* On 64-bit we don't want to invoke hash_page on user addresses from
|
|
* interrupt context, so if the access faults, we read the page tables
|
|
* to find which page (if any) is mapped and access it directly.
|
|
*/
|
|
int read_user_stack_slow(void __user *ptr, void *buf, int nb)
|
|
{
|
|
|
|
unsigned long addr = (unsigned long) ptr;
|
|
unsigned long offset;
|
|
struct page *page;
|
|
void *kaddr;
|
|
|
|
if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
|
|
kaddr = page_address(page);
|
|
|
|
/* align address to page boundary */
|
|
offset = addr & ~PAGE_MASK;
|
|
|
|
memcpy(buf, kaddr + offset, nb);
|
|
put_page(page);
|
|
return 0;
|
|
}
|
|
return -EFAULT;
|
|
}
|
|
|
|
/*
 * Read one 64-bit word from the user stack, returning 0 on success
 * or -EFAULT if the address is invalid or the read fails.
 */
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	unsigned long uaddr = (unsigned long) ptr;

	/* Reject addresses beyond the user range or not 8-byte aligned. */
	if (uaddr > TASK_SIZE - sizeof(unsigned long) || (uaddr & 7))
		return -EFAULT;

	/* Fast path: a plain non-faulting user-space read. */
	if (probe_user_read(ret, ptr, sizeof(*ret)) == 0)
		return 0;

	/* Slow path: walk the page tables and copy directly. */
	return read_user_stack_slow(ptr, ret, 8);
}
|
|
|
|
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];	/* caller's stack frame area */
	struct ucontext	uc;				/* saved user context */
	unsigned long	unused[2];
	unsigned int	tramp[6];			/* on-stack sigreturn trampoline */
	struct siginfo	*pinfo;				/* points at 'info' below */
	void		*puc;				/* points at 'uc' above */
	struct siginfo	info;
	char		abigap[288];			/* ABI red-zone / gap below sp */
};
|
|
|
|
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
|
|
{
|
|
if (nip == fp + offsetof(struct signal_frame_64, tramp))
|
|
return 1;
|
|
if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
|
|
nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
|
|
return 1;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Do some sanity checking on the signal frame pointed to by sp.
|
|
* We check the pinfo and puc pointers in the frame.
|
|
*/
|
|
static int sane_signal_64_frame(unsigned long sp)
|
|
{
|
|
struct signal_frame_64 __user *sf;
|
|
unsigned long pinfo, puc;
|
|
|
|
sf = (struct signal_frame_64 __user *) sp;
|
|
if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
|
|
read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
|
|
return 0;
|
|
return pinfo == (unsigned long) &sf->info &&
|
|
puc == (unsigned long) &sf->uc;
|
|
}
|
|
|
|
/*
 * Capture a 64-bit user-space callchain for a perf sample by walking
 * the user stack's back-chain, recognising and unwinding through
 * signal frames along the way.
 */
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;		/* frames walked since last signal frame */
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];	/* r1 is the stack pointer on powerpc */
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		/* fp[0] holds the caller's back-chain pointer. */
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		/* fp[2] is the saved LR slot (skip for the newest frame,
		 * whose return address is still live in LR). */
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like an signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			/* Resume unwinding from the interrupted context's
			 * saved NIP, LR and stack pointer. */
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			/* Mark the boundary so tools can tell the pre-signal
			 * frames apart from the handler's frames. */
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;	/* leaf frame: return address is in LR */
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
|