mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-26 18:55:26 +07:00
13224794cb
Patch series "mm: remove quicklist page table caches". A while ago Nicholas proposed to remove quicklist page table caches [1]. I've rebased his patch on the current upstream and switched ia64 and sh to use generic versions of PTE allocation. [1] https://lore.kernel.org/linux-mm/20190711030339.20892-1-npiggin@gmail.com This patch (of 3): Remove page table allocator "quicklists". These have been around for a long time, but have not got much traction in the last decade and are only used on ia64 and sh architectures. The numbers in the initial commit look interesting but probably don't apply anymore. If anybody wants to resurrect this it's in the git history, but it's unhelpful to have this code and divergent allocator behaviour for minor archs. Also it might be better to instead make more general improvements to the page allocator if this is still so slow. Link: http://lkml.kernel.org/r/1565250728-21721-2-git-send-email-rppt@linux.ibm.com Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
78 lines
1.8 KiB
C
78 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _SPARC_PGALLOC_H
|
|
#define _SPARC_PGALLOC_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
|
|
#include <asm/pgtsrmmu.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/vaddrs.h>
|
|
#include <asm/page.h>
|
|
|
|
struct page;
|
|
|
|
void *srmmu_get_nocache(int size, int align);
|
|
void srmmu_free_nocache(void *addr, int size);
|
|
|
|
extern struct resource sparc_iomap;
|
|
|
|
pgd_t *get_pgd_fast(void);
|
|
/*
 * Return a page directory to the SRMMU nocache pool.
 * Counterpart of get_pgd_fast() / pgd_alloc().
 */
static inline void free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
}
|
|
|
|
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
|
|
#define pgd_alloc(mm) get_pgd_fast()
|
|
|
|
/*
 * Install a pmd table into a pgd entry.  The SRMMU encodes a table
 * pointer as its nocache physical address shifted right by 4 bits,
 * tagged with the page-table-descriptor entry type.
 */
static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	unsigned long paddr;

	paddr = __nocache_pa(pmdp);
	set_pte((pte_t *)pgdp, __pte(SRMMU_ET_PTD | (paddr >> 4)));
}
|
|
|
|
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
|
|
unsigned long address)
|
|
{
|
|
return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
|
|
SRMMU_PMD_TABLE_SIZE);
|
|
}
|
|
|
|
/* Release a pmd table back to the nocache pool. */
static inline void free_pmd_fast(pmd_t * pmd)
{
	srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
}
|
|
|
|
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
|
|
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
|
|
|
|
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
|
|
#define pmd_pgtable(pmd) pmd_page(pmd)
|
|
|
|
void pmd_set(pmd_t *pmdp, pte_t *ptep);
|
|
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
|
|
|
|
pgtable_t pte_alloc_one(struct mm_struct *mm);
|
|
|
|
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
|
|
{
|
|
return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
|
|
}
|
|
|
|
|
|
/* Return a kernel pte table to the nocache pool. */
static inline void free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache(pte, PTE_SIZE);
}
|
|
|
|
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
|
|
|
|
void pte_free(struct mm_struct * mm, pgtable_t pte);
|
|
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
|
|
|
|
#endif /* _SPARC_PGALLOC_H */
|