Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 20:00:53 +07:00)
[PATCH] mm: tlb_gather_mmu get_cpu_var
tlb_gather_mmu dates from before kernel preemption was allowed, and uses smp_processor_id or __get_cpu_var to find its per-cpu mmu_gather. That works because it's currently only called after getting page_table_lock, which is not dropped until after the matching tlb_finish_mmu. But don't rely on that, it will soon change: now disable preemption internally by proper get_cpu_var in tlb_gather_mmu, put_cpu_var in tlb_finish_mmu.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7be7a54699
commit 15a23ffa2f
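The fix hinges on what the get_cpu_var/put_cpu_var pair does. As a rough sketch of the 2.6-era helpers (simplified, not the exact macro text), the pairing amounts to:

	/* get_cpu_var(): disable preemption, then hand back this CPU's copy of the
	 * per-cpu variable; the task cannot be migrated until put_cpu_var(). */
	#define get_cpu_var(var)	(*({ preempt_disable(); &__get_cpu_var(var); }))

	/* put_cpu_var(): the per-cpu reference is done with, re-enable preemption. */
	#define put_cpu_var(var)	preempt_enable()

So after this patch tlb_gather_mmu() pins the gather to the CPU it runs on by itself, instead of relying on the caller holding page_table_lock (and thus staying put on one CPU) from tlb_gather_mmu until tlb_finish_mmu.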
@@ -39,8 +39,7 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	int cpu = smp_processor_id();
-	struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	tlb->freed = 0;
@@ -65,6 +64,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
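Across all the architectures touched here the change is the same and only moves where preemption is controlled; the caller-side shape does not change. A simplified, hypothetical caller (not any specific call site in mm/) looks like this:

	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);		/* now also disables preemption via get_cpu_var() */
	/* ... unmap ptes, feeding the freed pages into the gather ... */
	tlb_finish_mmu(tlb, start, end);	/* flush, then put_cpu_var() re-enables preemption */
	spin_unlock(&mm->page_table_lock);

As the commit message says, today page_table_lock is held across the whole sequence anyway; the point of the patch is that tlb_gather_mmu()/tlb_finish_mmu() stop depending on that.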
@@ -17,13 +17,12 @@ struct mmu_gather {
 	unsigned int avoided_flushes;
 };
 
-extern struct mmu_gather mmu_gathers[NR_CPUS];
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	int cpu = smp_processor_id();
-	struct mmu_gather *tlb = &mmu_gathers[cpu];
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	tlb->freed = 0;
@@ -52,6 +51,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
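The arm26 hunk also converts the old NR_CPUS-sized array into a real per-cpu variable. DECLARE_PER_CPU() in the header only declares it; a matching definition has to exist in exactly one .c file. The general pattern, sketched here without claiming where that definition lives for this architecture:

	/* storage for one mmu_gather per possible CPU, defined once in a .c file */
	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

Once declared this way, per_cpu(mmu_gathers, cpu), __get_cpu_var(mmu_gathers) and get_cpu_var(mmu_gathers) all name the same per-cpu object; they differ only in how the CPU is picked and whether preemption is handled for you.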
@@ -35,9 +35,7 @@
 #endif
 
 /* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.  This structure
- * can be per-CPU or per-MM as the page table lock is held for the duration of
- * TLB shootdown.
+ * any data needed by arch specific code for tlb_remove_page.
  */
 struct mmu_gather {
 	struct mm_struct *mm;
@@ -57,7 +55,7 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
@@ -85,7 +83,7 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
- *	that were required.  The page table lock is still held at this point.
+ *	that were required.
  */
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
@@ -101,6 +99,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int
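Putting the asm-generic hunks together, the generic helpers now read roughly as below (abridged to the lines this patch touches; the flush logic in between is unchanged):

	static inline struct mmu_gather *
	tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
	{
		struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);	/* preemption off */

		tlb->mm = mm;
		/* ... set up the gather ... */
		return tlb;
	}

	static inline void
	tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
	{
		/* ... final flush ... */

		/* keep the page table cache within bounds */
		check_pgt_cache();

		put_cpu_var(mmu_gathers);	/* preemption back on */
	}

Each get_cpu_var() is balanced by exactly one put_cpu_var(), which is why the put sits at the very end of tlb_finish_mmu().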
@@ -129,7 +129,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline struct mmu_gather *
 tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	/*
@@ -154,7 +154,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 
 /*
  * Called at the end of the shootdown operation to free up any resources that were
- * collected.  The page table lock is still held at this point.
+ * collected.
  */
 static inline void
 tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
@@ -174,6 +174,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int
@@ -44,7 +44,7 @@ extern void flush_tlb_pending(void);
 
 static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
 
 	BUG_ON(mp->tlb_nr);
 
@@ -97,6 +97,8 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
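The ia64 and sparc64 variants above previously used __get_cpu_var(), which simply takes this CPU's copy and assumes the caller has already made migration impossible. A minimal sketch of the distinction (not the exact macro definitions):

	/* __get_cpu_var(var): this CPU's copy, no preemption handling; only safe
	 * while something else (here, page_table_lock) keeps the task in place. */
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	/* get_cpu_var(var): disables preemption itself and must be balanced by
	 * put_cpu_var(), which is exactly the pairing this patch introduces. */
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
	/* ... */
	put_cpu_var(mmu_gathers);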