mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
13224794cb
Patch series "mm: remove quicklist page table caches". A while ago Nicholas proposed to remove quicklist page table caches [1]. I've rebased his patch on the current upstream and switched ia64 and sh to use generic versions of PTE allocation. [1] https://lore.kernel.org/linux-mm/20190711030339.20892-1-npiggin@gmail.com This patch (of 3): Remove page table allocator "quicklists". These have been around for a long time, but have not got much traction in the last decade and are only used on ia64 and sh architectures. The numbers in the initial commit look interesting but probably don't apply anymore. If anybody wants to resurrect this it's in the git history, but it's unhelpful to have this code and divergent allocator behaviour for minor archs. Also it might be better to instead make more general improvements to the page allocator if this is still so slow. Link: http://lkml.kernel.org/r/1565250728-21721-2-git-send-email-rppt@linux.ibm.com Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
116 lines
2.8 KiB
C
116 lines
2.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _SPARC64_PGALLOC_H
|
|
#define _SPARC64_PGALLOC_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/slab.h>
|
|
|
|
#include <asm/spitfire.h>
|
|
#include <asm/cpudata.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/page.h>
|
|
|
|
/* Page table allocation/freeing. */
|
|
|
|
extern struct kmem_cache *pgtable_cache;
|
|
|
|
/* Install a PUD table into the given PGD entry. */
static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, pud);
}

/* MM is unused on sparc64; forward straight to __pgd_populate(). */
#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
|
|
|
|
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
|
|
}
|
|
|
|
/* Return a PGD obtained from pgd_alloc() to the pgtable_cache slab. */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}
|
|
|
|
/* Install a PMD table into the given PUD entry. */
static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, pmd);
}

/* MM is unused on sparc64; forward straight to __pud_populate(). */
#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
|
|
|
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
|
|
}
|
|
|
|
/* Return a PUD obtained from pud_alloc_one() to the pgtable_cache slab. */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(pgtable_cache, pud);
}
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
|
|
}
|
|
|
|
/* Return a PMD obtained from pmd_alloc_one() to the pgtable_cache slab. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache, pmd);
}
|
|
|
|
pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
|
|
pgtable_t pte_alloc_one(struct mm_struct *mm);
|
|
void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
|
|
void pte_free(struct mm_struct *mm, pgtable_t ptepage);
|
|
|
|
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
|
|
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
|
|
#define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD))
|
|
|
|
void pgtable_free(void *table, bool is_page);
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
struct mmu_gather;
|
|
void tlb_remove_table(struct mmu_gather *, void *);
|
|
|
|
/*
 * Queue a page table for deferred freeing via the mmu_gather.
 * The is_page flag is smuggled in bit 0 of the table pointer so that
 * __tlb_remove_table() can recover it later; presumably the tables are
 * at least 2-byte aligned so bit 0 is free — verify against allocator.
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	unsigned long tagged;

	tagged = (unsigned long)table | (is_page ? 0x1UL : 0UL);
	tlb_remove_table(tlb, (void *)tagged);
}
|
|
|
|
/*
 * mmu_gather callback: undo the bit-0 tagging applied by
 * pgtable_free_tlb() and hand the clean pointer plus the recovered
 * is_page flag to pgtable_free().
 */
static inline void __tlb_remove_table(void *_table)
{
	unsigned long v = (unsigned long)_table;

	pgtable_free((void *)(v & ~0x1UL), (v & 0x1UL) != 0);
}
|
|
#else /* CONFIG_SMP */
|
|
/* !CONFIG_SMP: no deferred/RCU-style teardown needed, free immediately. */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	pgtable_free(table, is_page);
}
|
|
#endif /* !CONFIG_SMP */
|
|
|
|
/* Free a PTE page via the gather path; true marks it as page-backed. */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pte, true);
}
|
|
|
|
#define __pmd_free_tlb(tlb, pmd, addr) \
|
|
pgtable_free_tlb(tlb, pmd, false)
|
|
|
|
#define __pud_free_tlb(tlb, pud, addr) \
|
|
pgtable_free_tlb(tlb, pud, false)
|
|
|
|
#endif /* _SPARC64_PGALLOC_H */
|