mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 20:03:05 +07:00
65fddcfca8
The replacement of <asm/pgtable.h> with <linux/pgtable.h> made the include
of the latter land in the middle of the asm includes. Fix this up with the
aid of the below script and manual adjustments here and there.

	import sys
	import re

	if len(sys.argv) != 3:
		print "USAGE: %s <file> <header>" % (sys.argv[0])
		sys.exit(1)

	hdr_to_move="#include <linux/%s>" % sys.argv[2]
	moved = False
	in_hdrs = False

	with open(sys.argv[1], "r") as f:
		lines = f.readlines()
		for _line in lines:
			line = _line.rstrip('\n')
			if line == hdr_to_move:
				continue
			if line.startswith("#include <linux/"):
				in_hdrs = True
			elif not moved and in_hdrs:
				moved = True
				print hdr_to_move
			print line

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-4-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
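A hypothetical invocation of the script above (the file name
reorder_headers.py is assumed; the script writes the reordered source to
stdout):

	python2 reorder_headers.py arch/x86/power/hibernate_32.c pgtable.h > hibernate_32.c.new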
199 lines
4.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
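	/*
	 * With PAE the top level is the four-entry page directory pointer
	 * table, so the "middle" page directory has to be allocated from a
	 * resume-safe page and hooked up below.
	 */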
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
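	/*
	 * Without PAE the p4d, pud and pmd levels are folded, so the
	 * "middle" table resolves back to the pgd entry itself.
	 */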
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
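		/*
		 * No page table here yet: allocate one from a resume-safe
		 * page and hook it into the pmd entry.
		 */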
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
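				/*
				 * One large page covers PTRS_PER_PTE small
				 * pages: 4MB without PAE, 2MB with PAE.
				 */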
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}
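
/*
 * PAE only: start with every entry of the page directory pointer table
 * pointing at empty_zero_page; the entries that are actually used are
 * overwritten later by resume_one_md_table_init().
 */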
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
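
/*
 * Map the page containing restore_jump_address to jump_address_phys, so
 * that the image kernel's entry point can be executed as soon as the
 * temporary page tables are switched to.
 */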
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;

		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
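
/*
 * Build the temporary page tables, hand their physical address to the
 * assembly restore code via temp_pgt, relocate the restore code onto a
 * resume-safe page and jump into the image kernel.  restore_image()
 * does not return here.
 */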
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;
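
	/*
	 * The assembly restore code (hibernate_asm_32.S) loads this
	 * physical address into %cr3 to switch to the temporary page
	 * tables.
	 */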
	temp_pgt = __pa(resume_pg_dir);

	error = relocate_restore_code();
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image();
	return 0;
}