[S390] use generic RCU page-table freeing code

Replace the s390-specific rcu page-table freeing code with the
generic variant. This requires duplicating the definition of
struct mmu_table_batch, as s390 does not use the generic TLB
flush code.

While we are at it, remove the restriction that page table fragments
cannot be reused once a single fragment has been freed with rcu,
and split out the allocation and freeing of page tables with pgstes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky, 2011-06-06 14:14:41 +02:00
commit 36409f6353, parent 3ec90878ba
4 changed files with 172 additions and 235 deletions
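For readers who have not seen the generic side (it lives in mm/memory.c behind CONFIG_HAVE_RCU_TABLE_FREE), the idea is to collect the pages backing the tables in a page-sized batch and free the whole batch from an RCU callback, so that lockless walkers such as gup_fast never see a table disappear under them. The user-space sketch below only illustrates that batching idea; the names, the batch size of 8 and the simulated grace period are made up and are not the kernel implementation.

/*
 * Minimal user-space sketch of the batching idea behind the generic
 * RCU page-table freeing code.  NOT the kernel implementation: the real
 * code frees the batch from an RCU callback after a grace period; here
 * the grace period is only simulated and the tables are malloc'ed blocks.
 */
#include <stdio.h>
#include <stdlib.h>

struct table_batch {
	unsigned int nr;	/* tables currently queued */
	void *tables[8];	/* the kernel uses a full page here */
};

/* stand-in for the RCU callback that runs after a grace period */
static void batch_free_after_grace_period(struct table_batch *batch)
{
	for (unsigned int i = 0; i < batch->nr; i++)
		free(batch->tables[i]);
	batch->nr = 0;
}

/* queue one table for deferred freeing, flush when the batch is full */
static void remove_table(struct table_batch *batch, void *table)
{
	batch->tables[batch->nr++] = table;
	if (batch->nr == sizeof(batch->tables) / sizeof(batch->tables[0]))
		batch_free_after_grace_period(batch);
}

int main(void)
{
	struct table_batch batch = { .nr = 0 };

	for (int i = 0; i < 20; i++)
		remove_table(&batch, malloc(2048));	/* a 2K pte fragment */
	batch_free_after_grace_period(&batch);		/* final flush */
	return 0;
}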

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig

@@ -89,6 +89,7 @@ config S390
 	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK

diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h

@@ -17,15 +17,15 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
 #define check_pgt_cache()	do {} while (0)
 
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
-void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mm_struct *, unsigned long *);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void __tlb_remove_table(void *_table);
+#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h

@@ -26,67 +26,60 @@
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
-#include <asm/smp.h>
 #include <asm/tlbflush.h>
 
 struct mmu_gather {
 	struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch *batch;
+#endif
 	unsigned int fullmm;
-	unsigned int nr_ptes;
-	unsigned int nr_pxds;
-	unsigned int max;
-	void **array;
-	void *local[8];
+	unsigned int need_flush;
 };
 
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-	if (addr) {
-		tlb->array = (void *) addr;
-		tlb->max = PAGE_SIZE / sizeof(void *);
-	}
-}
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
 				  unsigned int full_mm_flush)
 {
 	tlb->mm = mm;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->array = tlb->local;
 	tlb->fullmm = full_mm_flush;
+	tlb->need_flush = 0;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
-	else
-		__tlb_alloc_page(tlb);
-	tlb->nr_ptes = 0;
-	tlb->nr_pxds = tlb->max;
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
-		__tlb_flush_mm(tlb->mm);
-	while (tlb->nr_ptes > 0)
-		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pxds < tlb->max)
-		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
+	if (!tlb->need_flush)
+		return;
+	tlb->need_flush = 0;
+	__tlb_flush_mm(tlb->mm);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
-	rcu_table_freelist_finish();
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->array != tlb->local)
-		free_pages((unsigned long) tlb->array, 0);
 }
 
 /*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm) {
-		tlb->array[tlb->nr_ptes++] = pte;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		page_table_free(tlb->mm, (unsigned long *) pte);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return page_table_free_rcu(tlb, (unsigned long *) pte);
+#endif
+	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pmd;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pmd);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pmd);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pud;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pud);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pud);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
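A side note on the new MAX_TABLE_BATCH: with 4 KB pages and 8-byte pointers one batch page holds roughly 500 table pointers; the exact number depends on the size and padding of struct mmu_table_batch. A stand-alone way to check the arithmetic (the mock struct below merely mimics the layout of an rcu_head plus counter, it is not the kernel definition):

/* Quick check of the MAX_TABLE_BATCH arithmetic with a mock struct.
 * Assumes 4 KB pages and 64-bit pointers; struct rcu_head is two
 * pointers, which is what the mock below mimics. */
#include <stdio.h>

struct mock_rcu_head { void *next; void (*func)(void *); };

struct mock_table_batch {
	struct mock_rcu_head rcu;
	unsigned int nr;
	void *tables[];
};

int main(void)
{
	unsigned long page_size = 4096;

	/* prints 509 with the layout assumed above */
	printf("tables per batch page: %lu\n",
	       (unsigned long)((page_size - sizeof(struct mock_table_batch))
			       / sizeof(void *)));
	return 0;
}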

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c

@@ -24,94 +24,12 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-struct rcu_table_freelist {
-	struct rcu_head rcu;
-	struct mm_struct *mm;
-	unsigned int pgt_index;
-	unsigned int crst_index;
-	unsigned long *table[0];
-};
-
-#define RCU_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
-	  / sizeof(unsigned long))
-
-static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
-
-static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-
-static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
-{
-	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (batch)
-		return batch;
-	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
-	if (batch) {
-		batch->mm = mm;
-		batch->pgt_index = 0;
-		batch->crst_index = RCU_FREELIST_SIZE;
-		*batchp = batch;
-	}
-	return batch;
-}
-
-static void rcu_table_freelist_callback(struct rcu_head *head)
-{
-	struct rcu_table_freelist *batch =
-		container_of(head, struct rcu_table_freelist, rcu);
-
-	while (batch->pgt_index > 0)
-		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
-	while (batch->crst_index < RCU_FREELIST_SIZE)
-		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
-	free_page((unsigned long) batch);
-}
-
-void rcu_table_freelist_finish(void)
-{
-	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (!batch)
-		goto out;
-	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	*batchp = NULL;
-out:
-	put_cpu_var(rcu_table_freelist);
-}
-
-static void smp_sync(void *arg)
-{
-}
-
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
-#define TABLES_PER_PAGE	4
-#define FRAG_MASK	15UL
-#define SECOND_HALVES	10UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 256, 0, PAGE_SIZE/4);
-	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 768, 0, PAGE_SIZE/4);
-}
-
+#define FRAG_MASK	0x0f
 #else
 #define ALLOC_ORDER	2
-#define TABLES_PER_PAGE	2
-#define FRAG_MASK	3UL
-#define SECOND_HALVES	2UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-	memset(table + 256, 0, PAGE_SIZE/2);
-}
-
+#define FRAG_MASK	0x03
 #endif
 
 unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
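The new FRAG_MASK values encode one bit per page-table fragment of a 4 KB page: four 1 KB tables on 31-bit kernels (0x0f), two 2 KB tables on 64-bit (0x03). The sketch below only illustrates that arithmetic; the table geometry assumed here (256 PTEs per table, 4- respectively 8-byte PTEs) is my reading of the s390 layout and is not stated in the diff.

/* Stand-alone illustration of where FRAG_MASK comes from: one bit per
 * page-table fragment in a 4 KB page.  The geometry below is an
 * assumption about s390, not something taken from the diff. */
#include <stdio.h>

static void show(const char *model, unsigned long pte_size)
{
	unsigned long page_size = 4096;
	unsigned long ptrs_per_pte = 256;
	unsigned long frag_size = ptrs_per_pte * pte_size;
	unsigned long frags = page_size / frag_size;

	printf("%s: %lu-byte fragments, %lu per page, FRAG_MASK 0x%02lx\n",
	       model, frag_size, frags, (1UL << frags) - 1);
}

int main(void)
{
	show("31-bit", 4);	/* -> 1K fragments, 4 per page, 0x0f */
	show("64-bit", 8);	/* -> 2K fragments, 2 per page, 0x03 */
	return 0;
}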
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
-{
-	struct rcu_table_freelist *batch;
-
-	preempt_disable();
-	if (atomic_read(&mm->mm_users) < 2 &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch = rcu_table_freelist_get(mm);
-	if (!batch) {
-		smp_call_function(smp_sync, NULL, 1);
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch->table[--batch->crst_index] = table;
-	if (batch->pgt_index >= batch->crst_index)
-		rcu_table_freelist_finish();
-out:
-	preempt_enable();
-}
-
 #ifdef CONFIG_64BIT
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 {
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 }
 #endif
 
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+	unsigned int old, new;
+
+	do {
+		old = atomic_read(v);
+		new = old ^ bits;
+	} while (atomic_cmpxchg(v, old, new) != old);
+	return new;
+}
+
 /*
  * page table entry allocation/free routines.
  */
+#ifdef CONFIG_PGSTE
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+{
+	struct page *page;
+	unsigned long *table;
+
+	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!page)
+		return NULL;
+	pgtable_page_ctor(page);
+	atomic_set(&page->_mapcount, 3);
+	table = (unsigned long *) page_to_phys(page);
+	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+	struct page *page;
+
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	pgtable_page_ctor(page);
+	atomic_set(&page->_mapcount, -1);
+	__free_page(page);
+}
+#endif
+
 unsigned long *page_table_alloc(struct mm_struct *mm)
 {
 	struct page *page;
 	unsigned long *table;
-	unsigned long bits;
+	unsigned int mask, bit;
 
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm))
+		return page_table_alloc_pgste(mm);
+#endif
+	/* Allocate fragments of a 4K page as 1K/2K page table */
 	spin_lock_bh(&mm->context.list_lock);
-	page = NULL;
+	mask = FRAG_MASK;
 	if (!list_empty(&mm->context.pgtable_list)) {
 		page = list_first_entry(&mm->context.pgtable_list,
 					struct page, lru);
-		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-			page = NULL;
+		table = (unsigned long *) page_to_phys(page);
+		mask = atomic_read(&page->_mapcount);
+		mask = mask | (mask >> 4);
 	}
-	if (!page) {
+	if ((mask & FRAG_MASK) == FRAG_MASK) {
 		spin_unlock_bh(&mm->context.list_lock);
 		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 		if (!page)
 			return NULL;
 		pgtable_page_ctor(page);
-		page->flags &= ~FRAG_MASK;
+		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
-		if (mm->context.has_pgste)
-			clear_table_pgstes(table);
-		else
-			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
+	} else {
+		for (bit = 1; mask & bit; bit <<= 1)
+			table += PTRS_PER_PTE;
+		mask = atomic_xor_bits(&page->_mapcount, bit);
+		if ((mask & FRAG_MASK) == FRAG_MASK)
+			list_del(&page->lru);
 	}
-	table = (unsigned long *) page_to_phys(page);
-	while (page->flags & bits) {
-		table += 256;
-		bits <<= 1;
-	}
-	page->flags |= bits;
-	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-		list_move_tail(&page->lru, &mm->context.pgtable_list);
 	spin_unlock_bh(&mm->context.list_lock);
 	return table;
 }
 
-static void __page_table_free(struct mm_struct *mm, unsigned long *table)
-{
-	struct page *page;
-	unsigned long bits;
-
-	bits = ((unsigned long) table) & 15;
-	table = (unsigned long *)(((unsigned long) table) ^ bits);
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	page->flags ^= bits;
-	if (!(page->flags & FRAG_MASK)) {
-		pgtable_page_dtor(page);
-		__free_page(page);
-	}
-}
-
 void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
 	struct page *page;
-	unsigned long bits;
+	unsigned int bit, mask;
 
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
-	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm))
+		return page_table_free_pgste(table);
+#endif
+	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
 	spin_lock_bh(&mm->context.list_lock);
-	page->flags ^= bits;
-	if (page->flags & FRAG_MASK) {
-		/* Page now has some free pgtable fragments. */
-		if (!list_empty(&page->lru))
-			list_move(&page->lru, &mm->context.pgtable_list);
-		page = NULL;
-	} else
-		/* All fragments of the 4K page have been freed. */
+	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
 		list_del(&page->lru);
+	mask = atomic_xor_bits(&page->_mapcount, bit);
+	if (mask & FRAG_MASK)
+		list_add(&page->lru, &mm->context.pgtable_list);
 	spin_unlock_bh(&mm->context.list_lock);
-	if (page) {
+	if (mask == 0) {
 		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
 		__free_page(page);
 	}
 }
 
-void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
-{
-	struct rcu_table_freelist *batch;
-	struct page *page;
-	unsigned long bits;
-
-	preempt_disable();
-	if (atomic_read(&mm->mm_users) < 2 &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		page_table_free(mm, table);
-		goto out;
-	}
-	batch = rcu_table_freelist_get(mm);
-	if (!batch) {
-		smp_call_function(smp_sync, NULL, 1);
-		page_table_free(mm, table);
-		goto out;
-	}
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
-	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+static void __page_table_free_rcu(void *table, unsigned bit)
+{
+	struct page *page;
+
+#ifdef CONFIG_PGSTE
+	if (bit == FRAG_MASK)
+		return page_table_free_pgste(table);
+#endif
+	/* Free 1K/2K page table fragment of a 4K page */
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
+		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
+		__free_page(page);
+	}
+}
+
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
+{
+	struct mm_struct *mm;
+	struct page *page;
+	unsigned int bit, mask;
+
+	mm = tlb->mm;
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm)) {
+		table = (unsigned long *) (__pa(table) | FRAG_MASK);
+		tlb_remove_table(tlb, table);
+		return;
+	}
+#endif
+	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
-	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
-	list_del_init(&page->lru);
+	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+		list_del(&page->lru);
+	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
+	if (mask & FRAG_MASK)
+		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	spin_unlock_bh(&mm->context.list_lock);
-	table = (unsigned long *)(((unsigned long) table) | bits);
-	batch->table[batch->pgt_index++] = table;
-	if (batch->pgt_index >= batch->crst_index)
-		rcu_table_freelist_finish();
-out:
-	preempt_enable();
+	table = (unsigned long *) (__pa(table) | (bit << 4));
+	tlb_remove_table(tlb, table);
 }
 
+void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long) _table & PAGE_MASK);
+	unsigned type = (unsigned long) _table & ~PAGE_MASK;
+
+	if (type)
+		__page_table_free_rcu(table, type);
+	else
+		free_pages((unsigned long) table, ALLOC_ORDER);
+}
+
+#endif
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
-	if (tsk->mm->context.has_pgste)
+	if (mm_has_pgste(tsk->mm))
 		return 0;
 
 	/* lets check if we are allowed to replace the mm */
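As the commit message says, a page-table fragment can now be reused while other fragments of the same page are still queued for RCU freeing: the allocation bits and the bits for fragments pending their RCU free sit in separate nibbles of page->_mapcount, and page_table_free_rcu() tags the address it hands to tlb_remove_table() so that __tlb_remove_table() can later tell a crst page (untagged) from a pte fragment (tagged). Below is a small user-space sketch of that tagging convention as I read the diff; the addresses and the 2 KB fragment size are illustrative assumptions.

/* User-space sketch of the pointer tagging __tlb_remove_table() relies
 * on, as I read the diff: page_table_free_rcu() folds the fragment bit
 * (shifted by 4) into the low bits of the queued address, while full
 * crst pages are queued untagged.  Not the kernel code; addresses and
 * the 2 KB fragment size are made-up illustrations. */
#include <stdio.h>

#define PAGE_MASK	(~4095UL)
#define ALLOC_ORDER	2

static void sketch_remove_table(unsigned long _table)
{
	unsigned long table = _table & PAGE_MASK;
	unsigned long type = _table & ~PAGE_MASK;

	if (type)
		printf("pte fragment: page 0x%lx, tag 0x%lx\n", table, type);
	else
		printf("crst table: free 2^%d pages at 0x%lx\n",
		       ALLOC_ORDER, table);
}

int main(void)
{
	unsigned long frag = 0x20000;	/* 2K fragment at page offset 0 */
	unsigned int bit = 1 << ((frag & ~PAGE_MASK) / 2048);

	sketch_remove_table(frag | (bit << 4));	/* tagged: freed as fragment */
	sketch_remove_table(0x40000UL);		/* untagged: freed as crst page */
	return 0;
}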