mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
ca15ca406f
Patch series "mm: cleanup usage of <asm/pgalloc.h>" Most architectures have very similar versions of pXd_alloc_one() and pXd_free_one() for intermediate levels of page table. These patches add generic versions of these functions in <asm-generic/pgalloc.h> and enable use of the generic functions where appropriate. In addition, functions declared and defined in <asm/pgalloc.h> headers are used mostly by core mm and early mm initialization in arch and there is no actual reason to have the <asm/pgalloc.h> included all over the place. The first patch in this series removes unneeded includes of <asm/pgalloc.h> In the end it didn't work out as neatly as I hoped and moving pXd_alloc_track() definitions to <asm-generic/pgalloc.h> would require unnecessary changes to arches that have custom page table allocations, so I've decided to move lib/ioremap.c to mm/ and make pgalloc-track.h local to mm/. This patch (of 8): In most cases <asm/pgalloc.h> header is required only for allocations of page table memory. Most of the .c files that include that header do not use symbols declared in <asm/pgalloc.h> and do not require that header. As for the other header files that used to include <asm/pgalloc.h>, it is possible to move that include into the .c file that actually uses symbols from <asm/pgalloc.h> and drop the include from the header file. The process was somewhat automated using sed -i -E '/[<"]asm\/pgalloc\.h/d' \ $(grep -L -w -f /tmp/xx \ $(git grep -E -l '[<"]asm/pgalloc\.h')) where /tmp/xx contains all the symbols defined in arch/*/include/asm/pgalloc.h. 
[rppt@linux.ibm.com: fix powerpc warning] Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Pekka Enberg <penberg@kernel.org> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k] Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Joerg Roedel <joro@8bytes.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com> Cc: Stafford Horne <shorne@gmail.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Joerg Roedel <jroedel@suse.de> Cc: Matthew Wilcox <willy@infradead.org> Link: http://lkml.kernel.org/r/20200627143453.31835-1-rppt@kernel.org Link: http://lkml.kernel.org/r/20200627143453.31835-2-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
57 lines
1.5 KiB
C
57 lines
1.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#define DISABLE_BRANCH_PROFILING
|
|
|
|
#include <linux/kasan.h>
|
|
#include <linux/memblock.h>
|
|
#include <mm/mmu_decl.h>
|
|
|
|
/*
 * kasan_init_region() - allocate and map KASAN shadow memory for the
 * region [start, start + size).
 *
 * The shadow is allocated as one physically contiguous block.  When the
 * block is suitably sized and aligned, up to two BAT (Block Address
 * Translation) entries are used to map the bulk of it without consuming
 * page-table space; the remainder (or everything, when BATs cannot be
 * used) is mapped with ordinary PAGE_SIZE PTEs.
 *
 * Returns 0 on success, -ENOMEM when no shadow block could be
 * allocated, or the error from kasan_init_shadow_page_tables().
 */
int __init kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur = k_start;
	int k_size = k_end - k_start;
	/*
	 * ffs() isolates the lowest set bit of k_size, so k_size_base is
	 * the smallest power-of-two component of the shadow size.  It is
	 * used both as the allocation alignment and as the size of the
	 * first BAT mapping.
	 */
	int k_size_base = 1 << (ffs(k_size) - 1);
	int ret;
	void *block;

	/* Try to get the whole shadow as one block, aligned for BAT use. */
	block = memblock_alloc(k_size, k_size_base);

	/*
	 * BATs are only used for power-of-two blocks of at least 128K
	 * whose virtual address is aligned to the block size.
	 */
	if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
		/* Next power-of-two chunk after the first BAT's coverage. */
		int k_size_more = 1 << (ffs(k_size - k_size_base) - 1);

		setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
		if (k_size_more >= SZ_128K)
			setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
			       k_size_more, PAGE_KERNEL);
		/*
		 * Advance k_cur past whatever was actually block-mapped.
		 * NOTE(review): setbat(-1, ...) appears to pick a free BAT
		 * slot and may silently do nothing when none is available,
		 * hence the v_block_mapped() re-checks — confirm against
		 * setbat()'s definition.
		 */
		if (v_block_mapped(k_start))
			k_cur = k_start + k_size_base;
		if (v_block_mapped(k_start + k_size_base))
			k_cur = k_start + k_size_base + k_size_more;

		update_bats();
	}

	/*
	 * Aligned allocation failed: retry with plain page alignment; the
	 * whole region will then be covered by the PTE loop below.
	 */
	if (!block)
		block = memblock_alloc(k_size, PAGE_SIZE);
	if (!block)
		return -ENOMEM;

	/* Make sure page tables exist for the whole shadow range. */
	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	/*
	 * Clear early shadow PTEs over [k_start, k_cur) — presumably so
	 * the BAT-covered portion is no longer shadowed by the early
	 * page-table mappings; verify against kasan_update_early_region().
	 */
	kasan_update_early_region(k_start, k_cur, __pte(0));

	/* PTE-map the part of the shadow not already covered by BATs. */
	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		void *va = block + k_cur - k_start;
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}
|