mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-04-15 08:07:33 +07:00

Patch series "mm: remove quicklist page table caches". A while ago Nicholas proposed to remove quicklist page table caches [1]. I've rebased his patch on the curren upstream and switched ia64 and sh to use generic versions of PTE allocation. [1] https://lore.kernel.org/linux-mm/20190711030339.20892-1-npiggin@gmail.com This patch (of 3): Remove page table allocator "quicklists". These have been around for a long time, but have not got much traction in the last decade and are only used on ia64 and sh architectures. The numbers in the initial commit look interesting but probably don't apply anymore. If anybody wants to resurrect this it's in the git history, but it's unhelpful to have this code and divergent allocator behaviour for minor archs. Also it might be better to instead make more general improvements to page allocator if this is still so slow. Link: http://lkml.kernel.org/r/1565250728-21721-2-git-send-email-rppt@linux.ibm.com Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
86 lines
2.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
|
|
* Copyright (C) 2012 Regents of the University of California
|
|
*/
|
|
|
|
#ifndef _ASM_RISCV_PGALLOC_H
|
|
#define _ASM_RISCV_PGALLOC_H
|
|
|
|
#include <linux/mm.h>
|
|
#include <asm/tlb.h>
|
|
|
|
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
|
|
|
|
static inline void pmd_populate_kernel(struct mm_struct *mm,
|
|
pmd_t *pmd, pte_t *pte)
|
|
{
|
|
unsigned long pfn = virt_to_pfn(pte);
|
|
|
|
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
|
|
}
|
|
|
|
static inline void pmd_populate(struct mm_struct *mm,
|
|
pmd_t *pmd, pgtable_t pte)
|
|
{
|
|
unsigned long pfn = virt_to_pfn(page_address(pte));
|
|
|
|
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
|
|
}
|
|
|
|
#ifndef __PAGETABLE_PMD_FOLDED
|
|
/*
 * Install a PMD table into @pud: the entry carries the table's PFN and
 * the _PAGE_TABLE attribute bits.
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((virt_to_pfn(pmd) << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
|
|
#endif /* __PAGETABLE_PMD_FOLDED */
|
|
|
|
/* Return the struct page backing the PTE table that @pmd points at. */
#define pmd_pgtable(pmd) pmd_page(pmd)
|
|
|
|
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
pgd_t *pgd;
|
|
|
|
pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
|
|
if (likely(pgd != NULL)) {
|
|
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
|
|
/* Copy kernel mappings */
|
|
memcpy(pgd + USER_PTRS_PER_PGD,
|
|
init_mm.pgd + USER_PTRS_PER_PGD,
|
|
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
|
|
}
|
|
return pgd;
|
|
}
|
|
|
|
/* Release the single page backing @pgd, previously from pgd_alloc(). */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
|
|
|
|
#ifndef __PAGETABLE_PMD_FOLDED
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return (pmd_t *)__get_free_page(
|
|
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
|
|
}
|
|
|
|
/* Release the single page backing @pmd, previously from pmd_alloc_one(). */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}
|
|
|
|
/* Free a PMD table during an mmu_gather teardown. */
#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)
|
|
|
|
#endif /* __PAGETABLE_PMD_FOLDED */
|
|
|
|
/*
 * Tear down a PTE page during an mmu_gather: run the page-table page
 * destructor first, then hand the page to the gather machinery so it is
 * freed only after the relevant TLB entries are flushed.
 */
#define __pte_free_tlb(tlb, pte, buf)	\
do {					\
	pgtable_page_dtor(pte);		\
	tlb_remove_page((tlb), pte);	\
} while (0)
|
|
|
|
#endif /* _ASM_RISCV_PGALLOC_H */
|