commit aa0ab02ba9
On the 8xx, the page size is set in the PMD entry and applies to
all pages of the page table pointed to by that PMD entry.

When an app has some regular pages allocated (e.g. see below) and tries
to mmap() a huge page at a hint address covered by the same PMD entry,
the kernel accepts the hint although the 8xx cannot handle different
page sizes in the same PMD entry.

10000000-10001000 r-xp 00000000 00:0f 2597 /root/malloc
10010000-10011000 rwxp 00000000 00:0f 2597 /root/malloc
mmap(0x10080000, 524288, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS|0x40000, -1, 0) = 0x10080000
This results in the app remaining forever in do_page_fault()/hugetlb_fault(),
and when that app is interrupted we get the following warning:
[162980.035629] WARNING: CPU: 0 PID: 2777 at arch/powerpc/mm/hugetlbpage.c:354 hugetlb_free_pgd_range+0xc8/0x1e4
[162980.035699] CPU: 0 PID: 2777 Comm: malloc Tainted: G W 4.14.6 #85
[162980.035744] task: c67e2c00 task.stack: c668e000
[162980.035783] NIP: c000fe18 LR: c00e1eec CTR: c00f90c0
[162980.035830] REGS: c668fc20 TRAP: 0700 Tainted: G W (4.14.6)
[162980.035854] MSR: 00029032 <EE,ME,IR,DR,RI> CR: 24044224 XER: 20000000
[162980.036003]
[162980.036003] GPR00: c00e1eec c668fcd0 c67e2c00 00000010 c6869410 10080000 00000000 77fb4000
[162980.036003] GPR08: ffff0001 0683c001 00000000 ffffff80 44028228 10018a34 00004008 418004fc
[162980.036003] GPR16: c668e000 00040100 c668e000 c06c0000 c668fe78 c668e000 c6835ba0 c668fd48
[162980.036003] GPR24: 00000000 73ffffff 74000000 00000001 77fb4000 100fffff 10100000 10100000
[162980.036743] NIP [c000fe18] hugetlb_free_pgd_range+0xc8/0x1e4
[162980.036839] LR [c00e1eec] free_pgtables+0x12c/0x150
[162980.036861] Call Trace:
[162980.036939] [c668fcd0] [c00f0774] unlink_anon_vmas+0x1c4/0x214 (unreliable)
[162980.037040] [c668fd10] [c00e1eec] free_pgtables+0x12c/0x150
[162980.037118] [c668fd40] [c00eabac] exit_mmap+0xe8/0x1b4
[162980.037210] [c668fda0] [c0019710] mmput.part.9+0x20/0xd8
[162980.037301] [c668fdb0] [c001ecb0] do_exit+0x1f0/0x93c
[162980.037386] [c668fe00] [c001f478] do_group_exit+0x40/0xcc
[162980.037479] [c668fe10] [c002a76c] get_signal+0x47c/0x614
[162980.037570] [c668fe70] [c0007840] do_signal+0x54/0x244
[162980.037654] [c668ff30] [c0007ae8] do_notify_resume+0x34/0x88
[162980.037744] [c668ff40] [c000dae8] do_user_signal+0x74/0xc4
[162980.037781] Instruction dump:
[162980.037821] 7fdff378 81370000 54a3463a 80890020 7d24182e 7c841a14 712a0004 4082ff94
[162980.038014] 2f890000 419e0010 712a0ff0 408200e0 <0fe00000> 54a9000a 7f984840 419d0094
[162980.038216] ---[ end trace c0ceeca8e7a5800a ]---
[162980.038754] BUG: non-zero nr_ptes on freeing mm: 1
[162985.363322] BUG: non-zero nr_ptes on freeing mm: -1
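For reference, a minimal userspace reproducer along the lines of the trace
above might look roughly like the sketch below. It is hypothetical (not taken
from the original report): the raw 0x40000 flag in the strace output is
assumed to be MAP_HUGETLB, and a hugepage pool is assumed to be configured.

	/* Hypothetical reproducer sketched from the trace above */
	#define _GNU_SOURCE
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p, *h;

		/* populate some regular pages on the heap first */
		p = malloc(4096);
		if (!p)
			return 1;
		memset(p, 0, 4096);

		/* hint address covered by the same PMD entry as the heap pages */
		h = mmap((void *)0x10080000, 524288, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (h == MAP_FAILED)
			return 1;

		/* per the description above, touching the mapping never
		 * completes the fault on an unfixed kernel */
		memset(h, 0, 524288);
		return 0;
	}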
In order to fix this, this patch uses the address space "slices"
implemented for BOOK3S/64 and enhanced to support PPC32 by the
preceding patch.

This patch modifies context.id on the 8xx to be in the range
[1:16] instead of [0:15] in order to identify context.id == 0 as
an uninitialised context, as is done on BOOK3S.

This patch activates CONFIG_PPC_MM_SLICES when CONFIG_HUGETLB_PAGE is
selected for the 8xx.

Although we could in theory have as many slices as there are PMD entries,
the current implementation of slices limits the number of low slices to 16.
This limitation does not prevent us from fixing the initial issue, although
it is suboptimal. It will be cured in a subsequent patch.
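As a side note on the context.id change above, a minimal sketch of the
resulting id-to-CASID mapping, using a hypothetical helper name (the real
code simply writes id - 1 into SPRN_M_CASID in set_context() below):

	/* Sketch only: Linux keeps context.id in [1:16] so that 0 can mean
	 * "not initialised", while the hardware CASID register still takes
	 * a value in [0:15].
	 */
	static inline unsigned long mm_context_to_casid(unsigned long context_id)
	{
		return context_id - 1;	/* mirrors mtspr(SPRN_M_CASID, id - 1) */
	}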
Fixes: 4b91428699 ("powerpc/8xx: Implement support of hugepages")
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
206 lines · 5.1 KiB · C
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

#define LARGE_PAGE_SIZE_8M	(1<<23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
#ifdef CONFIG_PIN_TLB_DATA
	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY;
#ifdef CONFIG_PIN_TLB_IMMR
	int i = 29;
#else
	int i = 28;
#endif
	unsigned long addr = 0;
	unsigned long mem = total_lowmem;

	for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
		mtspr(SPRN_MD_CTR, ctr | (i << 8));
		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
		addr += LARGE_PAGE_SIZE_8M;
		mem -= LARGE_PAGE_SIZE_8M;
	}
#endif
}

static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, f);
}

/* Address of instructions to patch */
#ifndef CONFIG_PIN_TLB_IMMR
extern unsigned int DTLBMiss_jmp;
#endif
extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
#ifndef CONFIG_PIN_TLB_TEXT
extern unsigned int ITLBMiss_cmp;
#endif

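/*
 * Patch the 16-bit immediate (low halfword) of the compare instruction at
 * *addr with the upper 16 bits of the virtual address limit, leaving the
 * opcode and register fields (high halfword) unchanged.
 */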
static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
{
	unsigned int instr = *addr;

	instr &= 0xffff0000;
	instr |= (unsigned long)__va(mapped) >> 16;
	patch_instruction(addr, instr);
}

unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
#ifndef CONFIG_PIN_TLB_IMMR
		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
#ifndef CONFIG_PIN_TLB_TEXT
		mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
#endif
	} else {
		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
	}

	mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
	mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 8 MiB
	 * pages. Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 24MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

#ifdef CONFIG_BDI_SWITCH
	pgd_t	**ptr = *(pgd_t ***)(KERNELBASE + 0xf0);

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	*(ptr + 1) = pgd;
#endif

	/* Register M_TW will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TW, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}