mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 14:56:47 +07:00
ae94da8981
Patch series "Clean up hugetlb boot command line processing", v4.

Longpeng(Mike) reported a weird message from hugetlb command line processing and proposed a solution [1]. While the proposed patch does address the specific issue, there are other related issues in command line processing. As hugetlbfs evolved, updates to command line processing have been made to meet immediate needs and not necessarily in a coordinated manner. The result is that some processing is done in arch specific code, some is done in arch independent code and coordination is problematic. Semantics can vary between architectures.

The patch series does the following:

- Define arch specific arch_hugetlb_valid_size routine used to validate passed huge page sizes.
- Move hugepagesz= command line parsing out of arch specific code and into an arch independent routine.
- Clean up command line processing to follow desired semantics and document those semantics.

[1] https://lore.kernel.org/linux-mm/20200305033014.1152-1-longpeng2@huawei.com

This patch (of 3):

The architecture independent routine hugetlb_default_setup sets up the default huge pages size. It has no way to verify if the passed value is valid, so it accepts it and attempts to validate at a later time. This requires undocumented cooperation between the arch specific and arch independent code.

For architectures that support more than one huge page size, provide a routine arch_hugetlb_valid_size to validate a huge page size. hugetlb_default_setup can use this to validate passed values.

arch_hugetlb_valid_size will also be used in a subsequent patch to move processing of the "hugepagesz=" in arch specific code to a common routine in arch independent code.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Longpeng <longpeng2@huawei.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Nitesh Narayan Lal <nitesh@redhat.com>
Cc: Anders Roxell <anders.roxell@linaro.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200428205614.246260-1-mike.kravetz@oracle.com
Link: http://lkml.kernel.org/r/20200428205614.246260-2-mike.kravetz@oracle.com
Link: http://lkml.kernel.org/r/20200417185049.275845-1-mike.kravetz@oracle.com
Link: http://lkml.kernel.org/r/20200417185049.275845-2-mike.kravetz@oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
372 lines
10 KiB
C
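The end state the series describes can be pictured with a short sketch: a single arch independent handler that parses "hugepagesz=" once and defers the validity check to arch_hugetlb_valid_size(). This is an illustration of the intended flow only, not the actual patch; the handler name hugepagesz_setup is assumed here, while memparse(), hugetlb_add_hstate(), pr_err() and __setup() are the same kernel helpers already used by the s390 code below.

static int __init hugepagesz_setup(char *s)
{
	unsigned long size;

	/* parse "1M", "2G", ... into a size in bytes */
	size = (unsigned long)memparse(s, NULL);
	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return 0;
	}
	/* register an hstate for the validated size */
	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);

With a handler of this shape in generic code, the per-arch setup_hugepagesz() further down in this file reduces to the arch_hugetlb_valid_size() check alone.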
// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
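/*
 * Example: move_set_bit(x, 0x10, 0x400) evaluates to
 * ((x & 0x10) >> 4) << 10, i.e. a set bit 4 in "x" reappears as bit 10,
 * and a clear bit 4 yields 0.
 */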

static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		  pte bits	   pmd / pud bits
	 *				lIR.uswrdy.p	   dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
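	/*
	 * Illustration: for the "read-only, clean, young" row above,
	 * _PAGE_READ and _PAGE_YOUNG are set while _PAGE_WRITE and
	 * _PAGE_DIRTY are clear, so of the moves below only the READ,
	 * YOUNG and PROTECT bits end up set in the rste: 01..1...0...01.
	 */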
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	      pte bits
	 *				dy..R...I...wr	   lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
					     _PAGE_READ);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
					     _PAGE_WRITE);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
					     _PAGE_INVALID);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
					     _PAGE_PROTECT);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
					     _PAGE_DIRTY);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
					     _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
					     _PAGE_SOFT_DIRTY);
#endif
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
					     _PAGE_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

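	/*
	 * PG_arch_1 records whether the storage keys of this page have
	 * been initialized; test_and_set_bit() below ensures that
	 * __storage_key_init_range() runs at most once per huge page.
	 */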
	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	pte_val(*ptep) = rste;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

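/*
 * On s390, PMD_SIZE (1 MB) huge pages require the EDAT1 facility and
 * PUD_SIZE (2 GB) huge pages require EDAT2, hence the two checks below.
 */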
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}

static __init int setup_hugepagesz(char *opt)
{
	unsigned long size;
	char *string = opt;

	size = memparse(opt, &opt);
	if (arch_hugetlb_valid_size(size)) {
		hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz= specifies an unsupported page size %s\n",
			string);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
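/*
 * Example: booting with "hugepagesz=1M hugepages=8" on a machine with
 * EDAT1 passes the PMD_SIZE check above and registers an hstate of
 * order ilog2(1M) - PAGE_SHIFT = 20 - 12 = 8.
 */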

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
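/*
 * Note: in both helpers above, PAGE_MASK & ~huge_page_mask(h) sets
 * exactly the bits between PAGE_SHIFT and the huge page shift, so
 * vm_unmapped_area() returns addresses aligned to the huge page size.
 */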

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}