mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 04:56:43 +07:00
a0668cdc15
Currently we have a fair bit of rather fiddly code to manage the various kmem_caches used to store page tables of various levels. We generally have two caches holding some combination of PGD, PUD and PMD tables, plus several more for the special hugepage pagetables. This patch cleans this all up by taking a different approach. Rather than the caches being designated as for PUDs or for hugeptes for 16M pages, the caches are simply allocated to be a specific size. Thus sharing of caches between different types/levels of pagetables happens naturally. The pagetable size, where needed, is passed around encoded in the same way as {PGD,PUD,PMD}_INDEX_SIZE; that is n where the pagetable contains 2^n pointers. Signed-off-by: David Gibson <dwg@au1.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
54 lines · 1.3 KiB · C
#ifndef _ASM_POWERPC_PGALLOC_H
|
|
#define _ASM_POWERPC_PGALLOC_H
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#ifdef CONFIG_PPC_BOOK3E
/*
 * Book3E gets a real out-of-line implementation: a page-table page being
 * freed needs coordination with the TLB on these cores (presumably because
 * the hardware/software walker may cache intermediate levels — confirm
 * against the arch TLB code).
 */
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
#else /* CONFIG_PPC_BOOK3E */
/*
 * All other platforms: freeing a page-table page requires no extra TLB
 * work here, so this is a deliberate no-op stub kept inline so callers
 * compile identically on both configurations.
 */
static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
				     unsigned long address)
{
}
#endif /* !CONFIG_PPC_BOOK3E */
|
|
|
|
/*
 * Free a kernel-space PTE page.  Kernel page tables carry no struct-page
 * bookkeeping (no pgtable_page_dtor), so the backing page is simply
 * returned to the page allocator.  @mm is unused but kept for API symmetry
 * with pte_free().
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	unsigned long page_addr = (unsigned long)pte;

	free_page(page_addr);
}
|
|
|
|
/*
 * Free a user-space PTE page.  Unlike kernel PTE pages, user page-table
 * pages were set up with pgtable_page_ctor(), so the destructor must run
 * before the page goes back to the allocator — do not reorder these calls.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#include <asm/pgalloc-64.h>
|
|
#else
|
|
#include <asm/pgalloc-32.h>
|
|
#endif
|
|
|
|
#ifdef CONFIG_SMP
/*
 * SMP: freeing must be deferred via the mmu_gather batch so other CPUs
 * cannot still be walking the table; real implementations live elsewhere.
 * @shift encodes the table size as in {PGD,PUD,PMD}_INDEX_SIZE — the table
 * holds 2^shift pointers (0 selects the PTE-page case).
 */
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
extern void pte_free_finish(void);
#else /* CONFIG_SMP */
/*
 * UP: no other CPU can hold a reference, so the table can be freed
 * immediately with no batching.
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
	pgtable_free(table, shift);
}
/* Nothing deferred on UP, so finishing the batch is a no-op. */
static inline void pte_free_finish(void) { }
#endif /* !CONFIG_SMP */
|
|
|
|
/*
 * Hook called by the generic mmu_gather code to release a user PTE page.
 * Order matters: flush any page-table caching first (no-op outside
 * Book3E), run the struct-page destructor, then hand the page to
 * pgtable_free_tlb() with shift 0 (the PTE-page size class).
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(ptepage);
	pgtable_free_tlb(tlb, page_address(ptepage), 0);
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_PGALLOC_H */
|