x86-64: Move irqcount from PDA to per-cpu.

tj: s/irqcount/irq_count/

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 3d1e42a7cf
commit 5689553076
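In short, the IRQ nesting counter stops being a field of the per-CPU struct x8664_pda and becomes a standalone per-cpu variable: assembly references change from %gs:pda_irqcount to PER_CPU_VAR(irq_count), and the -1 initial value moves from pda_init() into the variable's initializer. A rough user-space sketch of that before/after shape (the struct layout, names and MAX_CPUS below are illustrative, not the real kernel definitions):

#include <stdio.h>

#define MAX_CPUS 4	/* illustrative; the kernel sizes this per machine */

/* Before: the counter lives at a fixed slot inside a per-CPU "PDA" struct. */
struct pda_like {
	unsigned long unused2, unused3, unused4;
	int irqcount;			/* starts at -1, set in an init routine */
};
static struct pda_like cpu_pda[MAX_CPUS];

/* After: the counter is its own per-CPU object with a static initializer,
 * analogous to DEFINE_PER_CPU(unsigned int, irq_count) = -1; in the patch. */
static int irq_count[MAX_CPUS] = { -1, -1, -1, -1 };

int main(void)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		cpu_pda[cpu].irqcount = -1;	/* old scheme: explicit init in pda_init() */

	printf("cpu0: old=%d new=%d\n", cpu_pda[0].irqcount, irq_count[0]);
	return 0;
}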
@@ -15,7 +15,7 @@ struct x8664_pda {
 	unsigned long unused2;
 	unsigned long unused3;
 	unsigned long unused4;
-	int irqcount;		/* 32 Irq nesting counter. Starts -1 */
+	int unused5;
 	unsigned int unused6;	/* 36 was cpunumber */
 #ifdef CONFIG_CC_STACKPROTECTOR
 	unsigned long stack_canary;	/* 40 stack canary value */
@@ -49,7 +49,6 @@ int main(void)
 	BLANK();
 #undef ENTRY
 #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-	ENTRY(irqcount);
 	DEFINE(pda_size, sizeof(struct x8664_pda));
 	BLANK();
 #undef ENTRY
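The dropped ENTRY(irqcount) line is what used to emit the pda_irqcount offset constant for assembly via the DEFINE(..., offsetof(...)) macro shown above; with the field gone from struct x8664_pda there is no offset to emit, and the assembly sites switch to PER_CPU_VAR(irq_count) instead. A rough user-space illustration of what that offsetof()-based machinery computes (the struct copy below is illustrative, not the real PDA layout):

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the old struct x8664_pda layout. */
struct pda_like {
	unsigned long unused1, unused2, unused3, unused4;
	int irqcount;
};

int main(void)
{
	/* Roughly what DEFINE(pda_irqcount, offsetof(...)) exposed to
	 * assembly as a compile-time constant. */
	printf("#define pda_irqcount %zu\n",
	       offsetof(struct pda_like, irqcount));
	return 0;
}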
@@ -893,6 +893,8 @@ DEFINE_PER_CPU(unsigned long, kernel_stack) =
 	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+
 void __cpuinit pda_init(int cpu)
 {
 	struct x8664_pda *pda = cpu_pda(cpu);
@@ -903,8 +905,6 @@ void __cpuinit pda_init(int cpu)
 
 	load_pda_offset(cpu);
 
-	pda->irqcount = -1;
-
 	if (cpu != 0) {
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
@@ -337,12 +337,12 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
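The comment above describes the whole trick: irq_count starts at -1, so the first incl on interrupt entry produces zero (the following jne is not taken and the code switches %rsp to the per-CPU IRQ stack), while any nested entry produces a non-zero result and stays on the current stack; the matching decl on exit unwinds the nesting. The call_softirq and Xen callback hunks below use the same zero result to drive a cmove/cmovzq of irq_stack_ptr. A minimal C sketch of that counter logic (a single counter instead of a per-CPU one; names are illustrative):

#include <stdio.h>
#include <stdbool.h>

static int irq_count = -1;	/* per-CPU in the kernel; one copy here */

/* Returns true on the outermost entry, i.e. the point where the real
 * code switches %rsp to the per-CPU IRQ stack. */
static bool irq_enter_counter(void)
{
	return ++irq_count == 0;	/* incl ...; jne 2f / cmove ... */
}

static void irq_exit_counter(void)
{
	--irq_count;			/* decl PER_CPU_VAR(irq_count) */
}

int main(void)
{
	printf("outer entry, switch stack: %d\n", irq_enter_counter());	/* 1 */
	printf("nested entry, stay put:    %d\n", irq_enter_counter());	/* 0 */
	irq_exit_counter();
	irq_exit_counter();
	return 0;
}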
@@ -837,7 +837,7 @@ common_interrupt:
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
||||||
@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
|
|||||||
CFI_REL_OFFSET rbp,0
|
CFI_REL_OFFSET rbp,0
|
||||||
mov %rsp,%rbp
|
mov %rsp,%rbp
|
||||||
CFI_DEF_CFA_REGISTER rbp
|
CFI_DEF_CFA_REGISTER rbp
|
||||||
incl %gs:pda_irqcount
|
incl PER_CPU_VAR(irq_count)
|
||||||
cmove PER_CPU_VAR(irq_stack_ptr),%rsp
|
cmove PER_CPU_VAR(irq_stack_ptr),%rsp
|
||||||
push %rbp # backlink for old unwinder
|
push %rbp # backlink for old unwinder
|
||||||
call __do_softirq
|
call __do_softirq
|
||||||
leaveq
|
leaveq
|
||||||
CFI_DEF_CFA_REGISTER rsp
|
CFI_DEF_CFA_REGISTER rsp
|
||||||
CFI_ADJUST_CFA_OFFSET -8
|
CFI_ADJUST_CFA_OFFSET -8
|
||||||
decl %gs:pda_irqcount
|
decl PER_CPU_VAR(irq_count)
|
||||||
ret
|
ret
|
||||||
CFI_ENDPROC
|
CFI_ENDPROC
|
||||||
END(call_softirq)
|
END(call_softirq)
|
||||||
@@ -1297,7 +1297,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
||||||
@ -1305,7 +1305,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
|
|||||||
call xen_evtchn_do_upcall
|
call xen_evtchn_do_upcall
|
||||||
popq %rsp
|
popq %rsp
|
||||||
CFI_DEF_CFA_REGISTER rsp
|
CFI_DEF_CFA_REGISTER rsp
|
||||||
decl %gs:pda_irqcount
|
decl PER_CPU_VAR(irq_count)
|
||||||
jmp error_exit
|
jmp error_exit
|
||||||
CFI_ENDPROC
|
CFI_ENDPROC
|
||||||
END(do_hypervisor_callback)
|
END(do_hypervisor_callback)
|
||||||
|