mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-04 19:26:45 +07:00
3ab8352137
This patch provides an enhancement to kexec/kdump. It implements the following features: - Backup/restore memory used by the original kernel before/after kexec. - Save/restore CPU state before/after kexec. The features of this patch can be used as a general method to call a program in physical mode (with paging turned off). This can be used to call BIOS code under Linux. kexec-tools needs to be patched to support kexec jump. The patches and the precompiled kexec can be downloaded from the following URLs: source: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-src_git_kh10.tar.bz2 patches: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-patches_git_kh10.tar.bz2 binary: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec_git_kh10 Usage example of calling some physical mode code and returning: 1. Compile and install the patched kernel with the following options selected: CONFIG_X86_32=y CONFIG_KEXEC=y CONFIG_PM=y CONFIG_KEXEC_JUMP=y 2. Build the patched kexec-tools or download the pre-built one. 3. Build some physical mode executable, named e.g. "phy_mode" 4. Boot the kernel compiled in step 1. 5. Load the physical mode executable with /sbin/kexec. The shell command line can be as follows: /sbin/kexec --load-preserve-context --args-none phy_mode 6. Call the physical mode executable with the following shell command line: /sbin/kexec -e Implementation point: To support jumping without reserving memory, one shadow backup page (source page) is allocated for each page used by the kexeced code image (destination page). When kexec_load is performed, the image of the kexeced code is loaded into the source pages, and before executing, the destination pages and the source pages are swapped, so the contents of the destination pages are backed up. Before jumping to the kexeced code image and after jumping back to the original kernel, the destination pages and the source pages are swapped too. 
C ABI (calling convention) is used as communication protocol between kernel and called code. A flag named KEXEC_PRESERVE_CONTEXT for sys_kexec_load is added to indicate that the loaded kernel image is used for jumping back. Now, only the i386 architecture is supported. Signed-off-by: Huang Ying <ying.huang@intel.com> Acked-by: Vivek Goyal <vgoyal@redhat.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Pavel Machek <pavel@ucw.cz> Cc: Nigel Cunningham <nigel@nigel.suspend2.net> Cc: "Rafael J. Wysocki" <rjw@sisk.pl> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
379 lines
7.8 KiB
ArmAsm
379 lines
7.8 KiB
ArmAsm
/*
|
|
* relocate_kernel.S - put the kernel image in place to boot
|
|
* Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
|
|
*
|
|
* This source code is licensed under the GNU General Public License,
|
|
* Version 2. See the file COPYING for more details.
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/page.h>
|
|
#include <asm/kexec.h>
|
|
#include <asm/processor-flags.h>
|
|
#include <asm/pgtable.h>
|
|
|
|
/*
|
|
* Must be relocatable PIC code callable as a C function
|
|
*/
|
|
|
|
#define PTR(x) (x << 2)
|
|
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
|
|
#define PAE_PGD_ATTR (_PAGE_PRESENT)
|
|
|
|
/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are
|
|
* used to save some data for jumping back
|
|
*/
|
|
#define DATA(offset) (PAGE_SIZE/2+(offset))
|
|
|
|
/* Minimal CPU state */
|
|
#define ESP DATA(0x0)
|
|
#define CR0 DATA(0x4)
|
|
#define CR3 DATA(0x8)
|
|
#define CR4 DATA(0xc)
|
|
|
|
/* other data */
|
|
#define CP_VA_CONTROL_PAGE DATA(0x10)
|
|
#define CP_PA_PGD DATA(0x14)
|
|
#define CP_PA_SWAP_PAGE DATA(0x18)
|
|
#define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
|
|
|
|
.text
|
|
.align PAGE_SIZE
|
|
.globl relocate_kernel
|
|
relocate_kernel:
|
|
/* Save the CPU context, used for jumping back */
|
|
|
|
pushl %ebx
|
|
pushl %esi
|
|
pushl %edi
|
|
pushl %ebp
|
|
pushf
|
|
|
|
movl 20+8(%esp), %ebp /* list of pages */
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
|
|
movl %esp, ESP(%edi)
|
|
movl %cr0, %eax
|
|
movl %eax, CR0(%edi)
|
|
movl %cr3, %eax
|
|
movl %eax, CR3(%edi)
|
|
movl %cr4, %eax
|
|
movl %eax, CR4(%edi)
|
|
|
|
#ifdef CONFIG_X86_PAE
|
|
/* map the control page at its virtual address */
|
|
|
|
movl PTR(VA_PGD)(%ebp), %edi
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0xc0000000, %eax
|
|
shrl $27, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PMD_0)(%ebp), %edx
|
|
orl $PAE_PGD_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PMD_0)(%ebp), %edi
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x3fe00000, %eax
|
|
shrl $18, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PTE_0)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PTE_0)(%ebp), %edi
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x001ff000, %eax
|
|
shrl $9, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
/* identity map the control page at its physical address */
|
|
|
|
movl PTR(VA_PGD)(%ebp), %edi
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0xc0000000, %eax
|
|
shrl $27, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PMD_1)(%ebp), %edx
|
|
orl $PAE_PGD_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PMD_1)(%ebp), %edi
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x3fe00000, %eax
|
|
shrl $18, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PTE_1)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PTE_1)(%ebp), %edi
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x001ff000, %eax
|
|
shrl $9, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
#else
|
|
/* map the control page at its virtual address */
|
|
|
|
movl PTR(VA_PGD)(%ebp), %edi
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0xffc00000, %eax
|
|
shrl $20, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PTE_0)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PTE_0)(%ebp), %edi
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x003ff000, %eax
|
|
shrl $10, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
/* identity map the control page at its physical address */
|
|
|
|
movl PTR(VA_PGD)(%ebp), %edi
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0xffc00000, %eax
|
|
shrl $20, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_PTE_1)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
|
|
movl PTR(VA_PTE_1)(%ebp), %edi
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
|
|
andl $0x003ff000, %eax
|
|
shrl $10, %eax
|
|
addl %edi, %eax
|
|
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
|
|
orl $PAGE_ATTR, %edx
|
|
movl %edx, (%eax)
|
|
#endif
|
|
|
|
relocate_new_kernel:
|
|
/* read the arguments and say goodbye to the stack */
|
|
movl 20+4(%esp), %ebx /* page_list */
|
|
movl 20+8(%esp), %ebp /* list of pages */
|
|
movl 20+12(%esp), %edx /* start address */
|
|
movl 20+16(%esp), %ecx /* cpu_has_pae */
|
|
movl 20+20(%esp), %esi /* preserve_context */
|
|
|
|
/* zero out flags, and disable interrupts */
|
|
pushl $0
|
|
popfl
|
|
|
|
/* save some information for jumping back */
|
|
movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
|
|
movl %edi, CP_VA_CONTROL_PAGE(%edi)
|
|
movl PTR(PA_PGD)(%ebp), %eax
|
|
movl %eax, CP_PA_PGD(%edi)
|
|
movl PTR(PA_SWAP_PAGE)(%ebp), %eax
|
|
movl %eax, CP_PA_SWAP_PAGE(%edi)
|
|
movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
|
|
|
|
/* get physical address of control page now */
|
|
/* this is impossible after page table switch */
|
|
movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
|
|
|
|
/* switch to new set of page tables */
|
|
movl PTR(PA_PGD)(%ebp), %eax
|
|
movl %eax, %cr3
|
|
|
|
/* setup a new stack at the end of the physical control page */
|
|
lea PAGE_SIZE(%edi), %esp
|
|
|
|
/* jump to identity mapped page */
|
|
movl %edi, %eax
|
|
addl $(identity_mapped - relocate_kernel), %eax
|
|
pushl %eax
|
|
ret
|
|
|
|
identity_mapped:
|
|
/* store the start address on the stack */
|
|
pushl %edx
|
|
|
|
/* Set cr0 to a known state:
|
|
* - Paging disabled
|
|
* - Alignment check disabled
|
|
* - Write protect disabled
|
|
* - No task switch
|
|
* - Don't do FP software emulation.
|
|
* - Proctected mode enabled
|
|
*/
|
|
movl %cr0, %eax
|
|
andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
|
|
orl $(X86_CR0_PE), %eax
|
|
movl %eax, %cr0
|
|
|
|
/* clear cr4 if applicable */
|
|
testl %ecx, %ecx
|
|
jz 1f
|
|
/* Set cr4 to a known state:
|
|
* Setting everything to zero seems safe.
|
|
*/
|
|
xorl %eax, %eax
|
|
movl %eax, %cr4
|
|
|
|
jmp 1f
|
|
1:
|
|
|
|
/* Flush the TLB (needed?) */
|
|
xorl %eax, %eax
|
|
movl %eax, %cr3
|
|
|
|
movl CP_PA_SWAP_PAGE(%edi), %eax
|
|
pushl %eax
|
|
pushl %ebx
|
|
call swap_pages
|
|
addl $8, %esp
|
|
|
|
/* To be certain of avoiding problems with self-modifying code
|
|
* I need to execute a serializing instruction here.
|
|
* So I flush the TLB, it's handy, and not processor dependent.
|
|
*/
|
|
xorl %eax, %eax
|
|
movl %eax, %cr3
|
|
|
|
/* set all of the registers to known values */
|
|
/* leave %esp alone */
|
|
|
|
testl %esi, %esi
|
|
jnz 1f
|
|
xorl %edi, %edi
|
|
xorl %eax, %eax
|
|
xorl %ebx, %ebx
|
|
xorl %ecx, %ecx
|
|
xorl %edx, %edx
|
|
xorl %esi, %esi
|
|
xorl %ebp, %ebp
|
|
ret
|
|
1:
|
|
popl %edx
|
|
movl CP_PA_SWAP_PAGE(%edi), %esp
|
|
addl $PAGE_SIZE, %esp
|
|
2:
|
|
call *%edx
|
|
|
|
/* get the re-entry point of the peer system */
|
|
movl 0(%esp), %ebp
|
|
call 1f
|
|
1:
|
|
popl %ebx
|
|
subl $(1b - relocate_kernel), %ebx
|
|
movl CP_VA_CONTROL_PAGE(%ebx), %edi
|
|
lea PAGE_SIZE(%ebx), %esp
|
|
movl CP_PA_SWAP_PAGE(%ebx), %eax
|
|
movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
|
|
pushl %eax
|
|
pushl %edx
|
|
call swap_pages
|
|
addl $8, %esp
|
|
movl CP_PA_PGD(%ebx), %eax
|
|
movl %eax, %cr3
|
|
movl %cr0, %eax
|
|
orl $(1<<31), %eax
|
|
movl %eax, %cr0
|
|
lea PAGE_SIZE(%edi), %esp
|
|
movl %edi, %eax
|
|
addl $(virtual_mapped - relocate_kernel), %eax
|
|
pushl %eax
|
|
ret
|
|
|
|
virtual_mapped:
|
|
movl CR4(%edi), %eax
|
|
movl %eax, %cr4
|
|
movl CR3(%edi), %eax
|
|
movl %eax, %cr3
|
|
movl CR0(%edi), %eax
|
|
movl %eax, %cr0
|
|
movl ESP(%edi), %esp
|
|
movl %ebp, %eax
|
|
|
|
popf
|
|
popl %ebp
|
|
popl %edi
|
|
popl %esi
|
|
popl %ebx
|
|
ret
|
|
|
|
/* Do the copies */
|
|
swap_pages:
|
|
movl 8(%esp), %edx
|
|
movl 4(%esp), %ecx
|
|
pushl %ebp
|
|
pushl %ebx
|
|
pushl %edi
|
|
pushl %esi
|
|
movl %ecx, %ebx
|
|
jmp 1f
|
|
|
|
0: /* top, read another word from the indirection page */
|
|
movl (%ebx), %ecx
|
|
addl $4, %ebx
|
|
1:
|
|
testl $0x1, %ecx /* is it a destination page */
|
|
jz 2f
|
|
movl %ecx, %edi
|
|
andl $0xfffff000, %edi
|
|
jmp 0b
|
|
2:
|
|
testl $0x2, %ecx /* is it an indirection page */
|
|
jz 2f
|
|
movl %ecx, %ebx
|
|
andl $0xfffff000, %ebx
|
|
jmp 0b
|
|
2:
|
|
testl $0x4, %ecx /* is it the done indicator */
|
|
jz 2f
|
|
jmp 3f
|
|
2:
|
|
testl $0x8, %ecx /* is it the source indicator */
|
|
jz 0b /* Ignore it otherwise */
|
|
movl %ecx, %esi /* For every source page do a copy */
|
|
andl $0xfffff000, %esi
|
|
|
|
movl %edi, %eax
|
|
movl %esi, %ebp
|
|
|
|
movl %edx, %edi
|
|
movl $1024, %ecx
|
|
rep ; movsl
|
|
|
|
movl %ebp, %edi
|
|
movl %eax, %esi
|
|
movl $1024, %ecx
|
|
rep ; movsl
|
|
|
|
movl %eax, %edi
|
|
movl %edx, %esi
|
|
movl $1024, %ecx
|
|
rep ; movsl
|
|
|
|
lea PAGE_SIZE(%ebp), %esi
|
|
jmp 0b
|
|
3:
|
|
popl %esi
|
|
popl %edi
|
|
popl %ebx
|
|
popl %ebp
|
|
ret
|