Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-18 11:27:20 +07:00
Commit 20a004e7b0
In many cases, page tables can be accessed concurrently by either another CPU (due to things like fast gup) or by the hardware page table walker itself, which may set access/dirty bits. In such cases, it is important to use READ_ONCE/WRITE_ONCE when accessing page table entries so that entries cannot be torn, merged or subject to apparent loss of coherence due to compiler transformations.

Whilst there are some scenarios where this cannot happen (e.g. pinned kernel mappings for the linear region), the overhead of using READ_ONCE/WRITE_ONCE everywhere is minimal and makes the code an awful lot easier to reason about. This patch consistently uses these macros in the arch code, as well as explicitly namespacing pointers to page table entries from the entries themselves by adopting a 'p' suffix for the former (as is sometimes used elsewhere in the kernel source).

Tested-by: Yury Norov <ynorov@caviumnetworks.com>
Tested-by: Richard Ruigrok <rruigrok@codeaurora.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
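As a rough illustration of the concern the commit message describes, the following is a minimal, self-contained sketch, not taken from the kernel: the READ_ONCE/WRITE_ONCE definitions are simplified stand-ins for the real macros, pte_t is a fake type, and the PTE_DIRTY bit position is made up for the demo. It only shows why entries are accessed through single volatile loads and stores (GNU C, because of typeof).

/*
 * Illustrative sketch only -- not part of pgalloc.h. Simplified stand-in
 * macros and a fake pte_t so it compiles in user space.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pte; } pte_t;	/* stand-in for the arm64 type */

/* Simplified versions of the kernel macros: force one volatile access. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

#define PTE_VALID		(UINT64_C(1) << 0)
#define PTE_DIRTY		(UINT64_C(1) << 55)	/* made-up position for the demo */

/*
 * Read the entry exactly once; a plain "ptep->pte" could legally be
 * re-loaded or torn by the compiler while hardware updates the entry.
 */
static int pte_is_dirty(pte_t *ptep)
{
	uint64_t val = READ_ONCE(ptep->pte);

	return (val & PTE_VALID) && (val & PTE_DIRTY);
}

static void set_pte_sketch(pte_t *ptep, uint64_t val)
{
	WRITE_ONCE(ptep->pte, val);	/* single, untorn store */
}

int main(void)
{
	pte_t entry = { 0 };

	set_pte_sketch(&entry, PTE_VALID | PTE_DIRTY);
	printf("dirty: %d\n", pte_is_dirty(&entry));
	return 0;
}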
156 lines · 3.8 KiB · C
/*
 * Based on arch/arm/include/asm/pgalloc.h
 *
 * Copyright (C) 2000-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGALLOC_H
#define __ASM_PGALLOC_H

#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define check_pgt_cache()	do { } while (0)

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

#if CONFIG_PGTABLE_LEVELS > 2

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
	free_page((unsigned long)pmdp);
}

static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
}
#else
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
	BUILD_BUG();
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)__get_free_page(PGALLOC_GFP);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
{
	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
	free_page((unsigned long)pudp);
}

static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
{
	__pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
}
#else
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
	BUILD_BUG();
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(PGALLOC_GFP, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/*
 * Free a PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
{
	if (ptep)
		free_page((unsigned long)ptep);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
				  pmdval_t prot)
{
	set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
}

/*
 * Populate the pmdp entry with a pointer to the pte. This pmd is part
 * of the mm address space.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	/*
	 * The pmd must be loaded with the physical address of the PTE table
	 */
	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
}
#define pmd_pgtable(pmd)	pmd_page(pmd)

#endif
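For context, here is a rough, hypothetical sketch of how the allocation and populate helpers above chain together when a new user PTE table is needed. The function name sketch_alloc_pte_table is invented for illustration; real callers such as __pte_alloc() in mm/memory.c also take the page table lock, re-check the pmd entry before populating it, and free the page on the race path, all of which is omitted here.

/*
 * Illustrative only -- not part of pgalloc.h and not the real mm/ code path.
 * Locking, the pmd_none() re-check and accounting are deliberately left out.
 */
static int sketch_alloc_pte_table(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t new = pte_alloc_one(mm, 0);	/* zeroed page + page-table ctor */

	if (!new)
		return -ENOMEM;

	/* Point the pmd entry at the physical address of the new PTE table. */
	pmd_populate(mm, pmdp, new);
	return 0;
}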