b6bdb7517c mm/vmalloc: add interfaces to free unmapped page table
On architectures with CONFIG_HAVE_ARCH_HUGE_VMAP set, ioremap() may
create pud/pmd mappings. A kernel panic was observed on arm64 systems
with Cortex-A75 via the following steps, as described by Hanjun Guo
(see the sketch after the list):
1. ioremap a 4K size; a valid page table is built,
2. iounmap it; pte0 is set to 0,
3. ioremap the same address with a 2M size; the existing pgd/pmd
   tables are reused, and a new (huge) value is then written to the
   pmd entry,
4. pte0 is leaked,
5. the CPU may hit an exception because the old pmd is still in the
   TLB, which leads to a kernel panic.
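Concretely, the sequence looks like this (a minimal sketch; the
address, sizes, and 2M alignment are illustrative assumptions):

    void __iomem *p;

    p = ioremap(0x40000000, SZ_4K);  /* builds pud -> pmd -> pte0 */
    iounmap(p);                      /* clears pte0 only; the pmd still
                                        points at the pte page */
    p = ioremap(0x40000000, SZ_2M);  /* pmd_set_huge() overwrites the
                                        pmd entry: the pte page is
                                        leaked, and the old pmd may
                                        still be in the TLB */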
This panic is not reproducible on x86, where INVLPG, called from
iounmap(), purges all levels of entries associated with the purged
address. x86 still leaks the pte page, however.
The patch changes the ioremap path to free unmapped page table(s),
since doing so in the unmap path has the following issues:
- The iounmap() path is shared with vunmap(). Since vmap() only
  supports pte mappings, making vunmap() free a pte page is overhead
  for regular vmap users, as they do not need a pte page freed up.
- Checking whether all entries in a pte page are cleared in the unmap
  path is racy, and serializing this check is expensive.
- The unmap path calls free_vmap_area_noflush() to do lazy TLB purges.
  Clearing a pud/pmd entry before the lazy TLB purges would require an
  extra TLB purge.
Add two interfaces, pud_free_pmd_page() and pmd_free_pte_page(), which
clear a given pud/pmd entry and free up the page holding the
lower-level entries. This patch implements stub functions for them on
x86 and arm64, which work as a workaround.
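A stub that simply refuses when a lower-level table is still present
is already enough to avoid the panic, because ioremap then falls back
to pte mappings. A minimal sketch, assuming pud_none()/pmd_none()
express "nothing to free" (the actual arch code may differ):

    int pud_free_pmd_page(pud_t *pud)
    {
            /* 1 if the entry is clear (no pmd page to free), else 0 */
            return pud_none(*pud);
    }

    int pmd_free_pte_page(pmd_t *pmd)
    {
            /* 1 if the entry is clear (no pte page to free), else 0 */
            return pmd_none(*pmd);
    }

Returning 0 makes ioremap_pmd_range()/ioremap_pud_range() below skip
the huge mapping for that range.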
[akpm@linux-foundation.org: fix typo in pmd_free_pte_page() stub]
Link: http://lkml.kernel.org/r/20180314180155.19492-2-toshi.kani@hpe.com
Fixes: e61ce6ade4 ("mm: change ioremap to set up huge I/O mappings")
Reported-by: Lei Li <lious.lilei@hisilicon.com>
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Wang Xuefeng <wxf.wang@hisilicon.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Chintan Pandya <cpandya@codeaurora.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
184 lines
4.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

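/*
 * Passing "nohugeiomap" on the kernel command line keeps the
 * *_capable flags at 0, so ioremap() sticks to 4K pte mappings.
 */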
static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
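                /* The range must have been unmapped: never overwrite a live pte. */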
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        phys_addr -= addr;
        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

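                /*
                 * Map a 2M huge page only if the virtual range and the
                 * physical address are PMD-aligned, and only if
                 * pmd_free_pte_page() manages to clear the pmd entry of a
                 * leftover pte page from an earlier 4K mapping (freeing
                 * that page). If it returns 0, fall back to ptes below.
                 */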
                if (ioremap_pmd_enabled() &&
                    ((next - addr) == PMD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
                    pmd_free_pte_page(pmd)) {
                        if (pmd_set_huge(pmd, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        phys_addr -= addr;
        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

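                /*
                 * Same idea one level up: pud_free_pmd_page() must first
                 * clear the pud entry and free any leftover pmd page.
                 */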
                if (ioremap_pud_enabled() &&
                    ((next - addr) == PUD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
                    pud_free_pmd_page(pud)) {
                        if (pud_set_huge(pud, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        phys_addr -= addr;
        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_p4d_enabled() &&
                    ((next - addr) == P4D_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
                        if (p4d_set_huge(p4d, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (p4d++, addr = next, addr != end);
        return 0;
}

int ioremap_page_range(unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        phys_addr -= addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        flush_cache_vmap(start, end);

        return err;
}
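For context, a rough sketch (not part of this file) of how an
architecture's ioremap() typically drives ioremap_page_range():
reserve a kernel virtual range, then populate its page tables. The
function name ioremap_sketch is hypothetical, and PAGE_KERNEL stands
in for the arch's real device memory attributes:

    /* Hypothetical caller, for illustration only. */
    void __iomem *ioremap_sketch(phys_addr_t phys_addr, size_t size)
    {
            struct vm_struct *area;
            unsigned long vaddr;

            area = get_vm_area(size, VM_IOREMAP);  /* reserve vaddrs */
            if (!area)
                    return NULL;
            vaddr = (unsigned long)area->addr;

            /* Build the page tables; may use pud/pmd huge entries. */
            if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                                   PAGE_KERNEL)) {
                    free_vm_area(area);  /* tears the mapping down */
                    return NULL;
            }
            return (void __iomem *)vaddr;
    }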