Patch series "mm: remove quicklist page table caches".

A while ago Nicholas proposed to remove quicklist page table caches [1].

I've rebased his patch on the current upstream and switched ia64 and sh to
use generic versions of PTE allocation.

[1] https://lore.kernel.org/linux-mm/20190711030339.20892-1-npiggin@gmail.com

This patch (of 3):

Remove page table allocator "quicklists".  These have been around for a
long time, but have not got much traction in the last decade and are only
used on the ia64 and sh architectures.

The numbers in the initial commit look interesting but probably don't
apply anymore.  If anybody wants to resurrect this it's in the git
history, but it's unhelpful to have this code and divergent allocator
behaviour for minor archs.

Also it might be better to instead make more general improvements to the
page allocator if this is still so slow.

Link: http://lkml.kernel.org/r/1565250728-21721-2-git-send-email-rppt@linux.ibm.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
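For reference, the generic PTE allocation that ia64 and sh are switched to looks roughly like the sketch below. This is a simplified reading of the include/asm-generic/pgalloc.h helpers of this era, not the exact upstream code:

	/* Allocate one zeroed, constructed page to back a user PTE table. */
	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
	{
		struct page *pte;

		pte = alloc_page(GFP_PGTABLE_USER);	/* GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT */
		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {		/* split ptlock / accounting setup */
			__free_page(pte);
			return NULL;
		}
		return pte;
	}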
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}
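
/*
 * Illustrative sketch (not part of this file): callers normally go through
 * the tlb_remove_page()/tlb_remove_page_size() wrappers in
 * include/asm-generic/tlb.h, which turn the "batch is full" return value of
 * __tlb_remove_page_size() into an immediate flush, roughly:
 *
 *	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 *						struct page *page, int page_size)
 *	{
 *		if (__tlb_remove_page_size(tlb, page, page_size))
 *			tlb_flush_mmu(tlb);
 *	}
 */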

#endif /* HAVE_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
	/*
	 * Invalidate page-table caches used by hardware walkers. Then we still
	 * need to RCU-sched wait while freeing the pages because software
	 * walkers can still be in-flight.
	 */
	tlb_flush_mmu_tlbonly(tlb);
#endif
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}
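
/*
 * Illustrative sketch (not part of this file): the software walkers that the
 * IRQ-disabling trick above protects are lockless walkers in the
 * get_user_pages_fast() style, which keep interrupts off for the duration of
 * the walk, roughly:
 *
 *	local_irq_save(flags);
 *	gup_pgd_range(addr, end, gup_flags, pages, &nr);
 *	local_irq_restore(flags);
 *
 * The smp_call_function() in tlb_remove_table_one() cannot return until every
 * such walker has re-enabled interrupts, so the table is not freed while a
 * walker may still dereference it.
 */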

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
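
/*
 * Illustrative sketch (hypothetical arch glue, not part of this file): an
 * architecture that selects HAVE_RCU_TABLE_FREE routes its page-table
 * freeing hooks through tlb_remove_table(), roughly along the lines of:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *					  pgtable_t pte, unsigned long addr)
 *	{
 *		pgtable_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 *
 * so the page backing the table is only released via __tlb_remove_table()
 * from tlb_remove_table_rcu() or tlb_remove_table_one(), never directly.
 */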

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}
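
/*
 * Illustrative sketch (not part of this file): a typical caller pairs
 * tlb_gather_mmu() with tlb_finish_mmu() around the actual unmapping, so a
 * munmap()-style teardown looks roughly like:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);
 *	free_pgtables(&tlb, vma, floor, ceiling);
 *	tlb_finish_mmu(&tlb, start, end);
 */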

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and stale TLB entries. So flush the TLB forcefully if we detect
	 * parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * requires a forced flush of everything in the given range. Otherwise
	 * we may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can limit the flush to a specific page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * On aarch64, fullmm yields better performance by avoiding
		 * multiple CPUs spamming TLBI messages at the same time.
		 *
		 * On x86, non-fullmm makes no significant difference
		 * compared to fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}