x86: merge mmu_context.h
Impact: cleanup

tj: * changed cpu to unsigned as was done on mmu_context_64.h as
      cpu id is officially unsigned int
    * added missing ';' to 32bit version of deactivate_mm()

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
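Aside on the tj note: the missing ';' was a statement terminator inside the do { } while (0) body of the 32-bit deactivate_mm(). As a hedged, minimal userspace sketch of why macros like the ones in this patch are wrapped in do { } while (0) — DO_WORK/DO_WORK_BAD and their bodies are invented for illustration and are not kernel code:

#include <stdio.h>

/* BAD: a bare brace block. The caller's ';' after DO_WORK_BAD()
 * becomes an empty statement that terminates the if-branch, so a
 * following "else" no longer parses. */
#define DO_WORK_BAD()	{ puts("step 1"); puts("step 2"); }

/* GOOD: do { ... } while (0) consumes exactly one trailing ';' and
 * parses as a single statement; note that every statement inside
 * the body still needs its own terminating ';'. */
#define DO_WORK(flag)				\
do {						\
	printf("working, flag=%d\n", (flag));	\
} while (0)

int main(void)
{
	int ready = 1;

	if (ready)
		DO_WORK(ready);		/* expands to one clean statement */
	else
		puts("idle");		/* would fail to compile with DO_WORK_BAD(); */

	return 0;
}

The merged deactivate_mm() variants in the diff below follow exactly this do { } while (0) pattern.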
This commit is contained in:
parent 0dd76d736e
commit 6826c8ff07
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
-#endif
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		percpu_write(cpu_tlbstate.active_mm, next);
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+
+		/* Re-load page tables */
+		load_cr3(next->pgd);
+
+		/*
+		 * load the LDT, if the LDT is different:
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_LDT_nolock(&next->context);
+	}
+#ifdef CONFIG_SMP
+	else {
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload CR3
+			 * to make sure to use no freed page tables.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context);
+		}
+	}
+#endif
+}
 
 #define activate_mm(prev, next)			\
 do {						\
@@ -33,5 +76,17 @@ do {						\
 	switch_mm((prev), (next), NULL);	\
 } while (0);
 
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	loadsegment(gs, 0);			\
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	load_gs_index(0);			\
+	loadsegment(fs, 0);			\
+} while (0)
+#endif
+
 #endif /* _ASM_X86_MMU_CONTEXT_H */
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-			     struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-
-	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		percpu_write(cpu_tlbstate.active_mm, next);
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-
-		/* Re-load page tables */
-		load_cr3(next->pgd);
-
-		/*
-		 * load the LDT, if the LDT is different:
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
-	}
-#ifdef CONFIG_SMP
-	else {
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload %cr3.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm)			\
-	asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	unsigned cpu = smp_processor_id();
-	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		percpu_write(cpu_tlbstate.active_mm, next);
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-		load_cr3(next->pgd);
-
-		if (unlikely(next->context.ldt != prev->context.ldt))
-			load_LDT_nolock(&next->context);
-	}
-#ifdef CONFIG_SMP
-	else {
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload CR3
-			 * to make sure to use no freed page tables.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm)			\
-do {						\
-	load_gs_index(0);			\
-	asm volatile("movl %0,%%fs"::"r"(0));	\
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */