powerpc/mm/book(e)(3s)/64: Add page table accounting
Introduce a helper pgtable_gfp_flags() which returns the current gfp flags with __GFP_ACCOUNT added, so that page table allocations are accounted. The generic helper is added to include/asm/pgalloc.h and has two variants (warning: ugly bits ahead):
1. If the header is included from a module, no check for mm == &init_mm is done, since init_mm is not exported.
2. For kernel includes, the check is done and is required; see commit 3e79ec7 ("arch: x86: charge page tables to kmemcg").
The fundamental assumption is that no module should be doing pgd/pud/pmd or pte allocations on behalf of init_mm directly.
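For reference, the helper as it is added to include/asm/pgalloc.h further down in this diff, reproduced here in one piece:

/* from the include/asm/pgalloc.h hunk below */
#ifndef MODULE
static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
{
	/* init_mm page tables are never charged to a memory cgroup */
	if (unlikely(mm == &init_mm))
		return gfp;
	return gfp | __GFP_ACCOUNT;
}
#else /* !MODULE */
/* init_mm is not exported, so the check cannot be done from modules */
static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
{
	return gfp | __GFP_ACCOUNT;
}
#endif /* MODULE */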
NOTE: This adds overhead to pgd/pud/pmd allocations, similar to x86. The other alternative was to implement pmd_alloc_kernel/pud_alloc_kernel and pgd_alloc_kernel along with their offset variants.
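A rough, hypothetical sketch of that rejected alternative (names are illustrative only, not part of this patch): dedicated kernel-only allocators would skip accounting entirely, while the user-facing allocators always add __GFP_ACCOUNT, removing the need for a runtime mm check.

/* Hypothetical: a kernel-only variant that never charges a memcg ... */
static inline pud_t *pud_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}

/* ... while the user-facing variant always adds __GFP_ACCOUNT,
 * with no mm == &init_mm check needed at runtime.
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				GFP_KERNEL | __GFP_ACCOUNT);
}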
For 4K page size, pte_alloc_one() no longer calls pte_alloc_one_kernel().
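Concretely, in the two 4K pte_alloc_one() hunks below, the user PTE page is now allocated directly with accounting instead of going through the kernel helper:

	/* before: user PTE pages took the kernel allocation path */
	pte = pte_alloc_one_kernel(mm, address);

	/* after: allocate directly, charged to the mm's memory cgroup */
	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);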
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent c5cee6421c
commit de3b87611d
@@ -31,7 +31,8 @@ extern struct kmem_cache *pgtable_cache[];
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -53,10 +53,11 @@ extern void __tlb_remove_table(void *_table);
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 {
 #ifdef CONFIG_PPC_64K_PAGES
-	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
+	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
+				4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -76,7 +77,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	if (radix_enabled())
 		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,7 +95,8 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -119,7 +122,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -168,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	pte_t *pte;
 
-	pte = pte_alloc_one_kernel(mm, address);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
@@ -43,7 +43,8 @@ extern struct kmem_cache *pgtable_cache[];
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -57,7 +58,8 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -96,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	pte_t *pte;
 
-	pte = pte_alloc_one_kernel(mm, address);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
@@ -189,7 +191,8 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -3,6 +3,20 @@
 
 #include <linux/mm.h>
 
+#ifndef MODULE
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	if (unlikely(mm == &init_mm))
+		return gfp;
+	return gfp | __GFP_ACCOUNT;
+}
+#else /* !MODULE */
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+	return gfp | __GFP_ACCOUNT;
+}
+#endif /* MODULE */
+
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else
@@ -351,12 +351,20 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
 	void *ret = NULL;
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
-	if (!page)
-		return NULL;
-	if (!kernel && !pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
+	struct page *page;
+
+	if (!kernel) {
+		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+		if (!page)
+			return NULL;
+		if (!pgtable_page_ctor(page)) {
+			__free_page(page);
+			return NULL;
+		}
+	} else {
+		page = alloc_page(PGALLOC_GFP);
+		if (!page)
+			return NULL;
 	}
 
 	ret = page_address(page);