commit 7230ced492
A change to __ioremap() broke reading /dev/oldmem because we're no
longer able to ioremap pfn 0 (d177c207, "[PATCH] powerpc: IOMMU:
don't ioremap null addresses").
We actually don't need to ioremap for anything that's part of the linear
mapping, so just read it directly.
Also make sure we're only reading one page or less at a time.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Sachin Sant <sachinp@in.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
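
For context, /dev/oldmem exposes the previous (crashed) kernel's memory by
physical offset, and dump tools read it page by page. Below is a minimal
userspace sketch of that access pattern (a hypothetical tool, not part of
this commit; it assumes 4 KiB pages and that pread() works on the device):

/* Hypothetical dump-tool sketch: read one page of the crashed kernel's
 * memory through /dev/oldmem. pfn 0 is the interesting case here:
 * before this fix, the kernel's ioremap() of it failed and the read
 * returned an error.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define PAGE_SZ 4096UL	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long pfn = 0;	/* page frame number to dump */
	char buf[PAGE_SZ];
	int fd = open("/dev/oldmem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/oldmem");
		return 1;
	}
	/* The kernel copies at most one page per copy_oldmem_page()
	 * call, so page-sized reads at page-aligned offsets map onto
	 * it one-to-one. */
	if (pread(fd, buf, PAGE_SZ, pfn * PAGE_SZ) != (ssize_t)PAGE_SZ) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("read page %lu from the old kernel\n", pfn);
	close(fd);
	return 0;
}

The file as of this commit follows.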
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

void __init reserve_kdump_trampoline(void)
{
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single instruction branch, is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch to (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, PPC_NOP_INSTR);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}

#ifdef CONFIG_PROC_VMCORE
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif

static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	csize = min(csize, PAGE_SIZE);

	if (pfn < max_pfn) {
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
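
For reference, copy_oldmem_page() above is driven by the generic /dev/oldmem
read loop in drivers/char/mem.c. A simplified sketch of that caller,
reconstructed from kernels of this era (names and details approximate, shown
only to illustrate why csize never spans a page boundary):

static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	ssize_t read = 0, csize;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		/* Clamp each chunk to the end of the current page, so
		 * copy_oldmem_page() always sees csize <= PAGE_SIZE. */
		offset = (unsigned long)(*ppos % PAGE_SIZE);
		csize = min_t(size_t, count, PAGE_SIZE - offset);

		/* userbuf=1: buf points into user address space */
		csize = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (csize < 0)
			return csize;

		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}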