mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-26 18:15:09 +07:00)
7868a2087e
A poisoned or migrated hugepage is stored as a swap entry in the page
tables.  On architectures that support hugepages consisting of
contiguous page table entries (such as on arm64) this leads to ambiguity
in determining the page table entry to return in huge_pte_offset() when
a poisoned entry is encountered.

Let's remove the ambiguity by adding a size parameter to convey
additional information about the requested address.  Also fixup the
definition/usage of huge_pte_offset() throughout the tree.

Link: http://lkml.kernel.org/r/20170522133604.11392-4-punit.agrawal@arm.com
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Acked-by: Steve Capper <steve.capper@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com> (odd fixer:METAG ARCHITECTURE)
Cc: Ralf Baechle <ralf@linux-mips.org> (supporter:MIPS)
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
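For context, the interface change this commit makes can be sketched as
follows (hedged: the call site is illustrative only, with vma and addr
assumed to be in scope; hstate_vma() and huge_page_size() are the usual
hugetlb helpers):

	/* Old prototype: the level had to be inferred from the entry itself. */
	pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);

	/* New prototype: the expected huge page size travels with the lookup,
	 * removing the ambiguity for swap entries on architectures with
	 * contiguous hugepage entries, such as arm64.
	 */
	pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
			       unsigned long sz);

	/* Illustrative call site: */
	ptep = huge_pte_offset(vma->vm_mm, addr,
			       huge_page_size(hstate_vma(vma)));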
245 lines
6.9 KiB
C
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2016
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
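
/*
 * Worked example (the masks here are illustrative, not taken from this
 * file): for a == 0x0004 and b == 0x0100,
 *
 *	move_set_bit(0x0006, 0x0004, 0x0100)
 *
 * isolates bit 2 via (x & a) == 0x0004, shifts it down by
 * ilog2(0x0004) == 2 and back up by ilog2(0x0100) == 8, yielding 0x0100.
 * If the selected bit is clear, the result is 0, so the macro's result
 * can be OR'ed into the target unconditionally.
 */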

static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding           pte bits      pmd / pud bits
	 *                            lIR.uswrdy.p  dy..R...I...wr
	 * empty                      010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old      111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young    111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old      111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young    111.000011.1 -> 11..1...1...00
	 * read-only, clean, old      111.000100.1 -> 00..1...1...01
	 * read-only, clean, young    101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old      111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young    101.000111.1 -> 11..1...0...01
	 * read-write, clean, old     111.001100.1 -> 00..1...1...11
	 * read-write, clean, young   101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old     110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young   100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *          u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding           pmd / pud bits    pte bits
	 *                            dy..R...I...wr    lIR.uswrdy.p
	 * empty                      00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old      00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young    01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old      10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young    11..1...1...00 -> 111.000011.1
	 * read-only, clean, old      00..1...1...01 -> 111.000100.1
	 * read-only, clean, young    01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old      10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young    11..1...0...01 -> 101.000111.1
	 * read-write, clean, old     00..1...1...11 -> 111.001100.1
	 * read-write, clean, young   01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old     10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young   11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *          u unused, l large
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
					     _PAGE_READ);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
					     _PAGE_WRITE);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
					     _PAGE_INVALID);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
					     _PAGE_PROTECT);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
					     _PAGE_DIRTY);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
					     _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		/* Mirror __pte_to_rste(): the segment soft-dirty bit maps to
		 * the pte soft-dirty bit, not the hardware dirty bit.
		 */
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
					     _PAGE_SOFT_DIRTY);
#endif
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
					     _PAGE_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
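
/*
 * Store a hugetlb entry: the architected pte layout is converted to the
 * hardware layout of the backing table entry. Whether the target is a
 * segment (PMD) or region-3 (PUD) entry is derived from the table type
 * bits of the entry that is already present, see below.
 */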
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
	else
		rste |= _SEGMENT_ENTRY_LARGE;
	pte_val(*ptep) = rste;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}
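
/*
 * Allocate the page table levels needed for a hugetlb mapping of size
 * "sz" at "addr": the pud itself is returned for PUD_SIZE pages, while a
 * pmd is allocated for PMD_SIZE pages. Either pointer is handed back to
 * generic code cast to a pte_t *.
 */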
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}
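
/*
 * Look up the table entry backing a hugetlb mapping. The "sz" hint was
 * added for architectures with contiguous hugepage entries (see the
 * commit message above); the s390 walk can determine the level from the
 * page tables themselves, so "sz" is unused here.
 */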
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

static __init int setup_hugepagesz(char *opt)
{
	unsigned long size;
	char *string = opt;

	size = memparse(opt, &opt);
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz= specifies an unsupported page size %s\n",
		       string);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
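
/*
 * Usage sketch for the boot parameter parsed above (assuming a machine
 * with EDAT1/EDAT2; the sizes follow from s390's PMD_SHIFT and PUD_SHIFT,
 * i.e. 1M segments and 2G region-3 entries):
 *
 *	hugepagesz=1M hugepages=64	# 1M hugepages, needs EDAT1
 *	hugepagesz=2G hugepages=2	# 2G hugepages, needs EDAT2
 *
 * Any other size is rejected with the pr_err() above.
 */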