x86: Use {push,pop}{l,q}_cfi in more places

... plus additionally introduce {push,pop}f{l,q}_cfi. All in the
hope that the code becomes more readable this way (it gets
quite a bit smaller in any case).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Alexander van Heukelum <heukelum@fastmail.fm>
LKML-Reference: <4C7FBDA40200007800013FAF@vpn.id2.novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Jan Beulich 2010-09-02 14:07:16 +01:00 committed by Ingo Molnar
parent a34107b557
commit df5d1874ce
3 changed files with 141 additions and 238 deletions

View File

@ -89,6 +89,16 @@
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
.endm .endm
/*
 * pushfq_cfi: push RFLAGS and tell the DWARF unwinder the CFA moved
 * down by 8 bytes, keeping call-frame info accurate across the push.
 */
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
/*
 * popfq_cfi: pop RFLAGS and undo the 8-byte CFA adjustment recorded
 * by the matching pushfq_cfi.
 */
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0 .macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp) movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset CFI_REL_OFFSET \reg, \offset
@ -109,6 +119,16 @@
CFI_ADJUST_CFA_OFFSET -4 CFI_ADJUST_CFA_OFFSET -4
.endm .endm
/*
 * pushfl_cfi: 32-bit variant — push EFLAGS and record the 4-byte
 * CFA adjustment for the DWARF unwinder.
 */
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
/*
 * popfl_cfi: pop EFLAGS and undo the 4-byte CFA adjustment recorded
 * by the matching pushfl_cfi.
 */
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0 .macro movl_cfi reg offset=0
movl %\reg, \offset(%esp) movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset CFI_REL_OFFSET \reg, \offset

View File

@ -115,8 +115,7 @@
/* unfortunately push/pop can't be no-op */ /* unfortunately push/pop can't be no-op */
.macro PUSH_GS .macro PUSH_GS
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4
.endm .endm
.macro POP_GS pop=0 .macro POP_GS pop=0
addl $(4 + \pop), %esp addl $(4 + \pop), %esp
@ -140,14 +139,12 @@
#else /* CONFIG_X86_32_LAZY_GS */ #else /* CONFIG_X86_32_LAZY_GS */
.macro PUSH_GS .macro PUSH_GS
pushl %gs pushl_cfi %gs
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET gs, 0*/ /*CFI_REL_OFFSET gs, 0*/
.endm .endm
.macro POP_GS pop=0 .macro POP_GS pop=0
98: popl %gs 98: popl_cfi %gs
CFI_ADJUST_CFA_OFFSET -4
/*CFI_RESTORE gs*/ /*CFI_RESTORE gs*/
.if \pop <> 0 .if \pop <> 0
add $\pop, %esp add $\pop, %esp
@ -195,35 +192,25 @@
.macro SAVE_ALL .macro SAVE_ALL
cld cld
PUSH_GS PUSH_GS
pushl %fs pushl_cfi %fs
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET fs, 0;*/ /*CFI_REL_OFFSET fs, 0;*/
pushl %es pushl_cfi %es
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET es, 0;*/ /*CFI_REL_OFFSET es, 0;*/
pushl %ds pushl_cfi %ds
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ds, 0;*/ /*CFI_REL_OFFSET ds, 0;*/
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eax, 0 CFI_REL_OFFSET eax, 0
pushl %ebp pushl_cfi %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp, 0 CFI_REL_OFFSET ebp, 0
pushl %edi pushl_cfi %edi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edi, 0 CFI_REL_OFFSET edi, 0
pushl %esi pushl_cfi %esi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET esi, 0 CFI_REL_OFFSET esi, 0
pushl %edx pushl_cfi %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx, 0 CFI_REL_OFFSET edx, 0
pushl %ecx pushl_cfi %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx, 0 CFI_REL_OFFSET ecx, 0
pushl %ebx pushl_cfi %ebx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0 CFI_REL_OFFSET ebx, 0
movl $(__USER_DS), %edx movl $(__USER_DS), %edx
movl %edx, %ds movl %edx, %ds
@ -234,39 +221,29 @@
.endm .endm
.macro RESTORE_INT_REGS .macro RESTORE_INT_REGS
popl %ebx popl_cfi %ebx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ebx CFI_RESTORE ebx
popl %ecx popl_cfi %ecx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ecx CFI_RESTORE ecx
popl %edx popl_cfi %edx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edx CFI_RESTORE edx
popl %esi popl_cfi %esi
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE esi CFI_RESTORE esi
popl %edi popl_cfi %edi
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edi CFI_RESTORE edi
popl %ebp popl_cfi %ebp
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ebp CFI_RESTORE ebp
popl %eax popl_cfi %eax
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE eax CFI_RESTORE eax
.endm .endm
.macro RESTORE_REGS pop=0 .macro RESTORE_REGS pop=0
RESTORE_INT_REGS RESTORE_INT_REGS
1: popl %ds 1: popl_cfi %ds
CFI_ADJUST_CFA_OFFSET -4
/*CFI_RESTORE ds;*/ /*CFI_RESTORE ds;*/
2: popl %es 2: popl_cfi %es
CFI_ADJUST_CFA_OFFSET -4
/*CFI_RESTORE es;*/ /*CFI_RESTORE es;*/
3: popl %fs 3: popl_cfi %fs
CFI_ADJUST_CFA_OFFSET -4
/*CFI_RESTORE fs;*/ /*CFI_RESTORE fs;*/
POP_GS \pop POP_GS \pop
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
@ -320,16 +297,12 @@
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
CFI_STARTPROC CFI_STARTPROC
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
call schedule_tail call schedule_tail
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
popl %eax popl_cfi %eax
CFI_ADJUST_CFA_OFFSET -4 pushl_cfi $0x0202 # Reset kernel eflags
pushl $0x0202 # Reset kernel eflags popfl_cfi
CFI_ADJUST_CFA_OFFSET 4
popfl
CFI_ADJUST_CFA_OFFSET -4
jmp syscall_exit jmp syscall_exit
CFI_ENDPROC CFI_ENDPROC
END(ret_from_fork) END(ret_from_fork)
@ -409,29 +382,23 @@ sysenter_past_esp:
* enough kernel state to call TRACE_IRQS_OFF can be called - but * enough kernel state to call TRACE_IRQS_OFF can be called - but
* we immediately enable interrupts at that point anyway. * we immediately enable interrupts at that point anyway.
*/ */
pushl $(__USER_DS) pushl_cfi $(__USER_DS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ss, 0*/ /*CFI_REL_OFFSET ss, 0*/
pushl %ebp pushl_cfi %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET esp, 0 CFI_REL_OFFSET esp, 0
pushfl pushfl_cfi
orl $X86_EFLAGS_IF, (%esp) orl $X86_EFLAGS_IF, (%esp)
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $(__USER_CS)
pushl $(__USER_CS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET cs, 0*/ /*CFI_REL_OFFSET cs, 0*/
/* /*
* Push current_thread_info()->sysenter_return to the stack. * Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
* pushed above; +8 corresponds to copy_thread's esp0 setting. * pushed above; +8 corresponds to copy_thread's esp0 setting.
*/ */
pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eip, 0 CFI_REL_OFFSET eip, 0
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
@ -486,8 +453,7 @@ sysenter_audit:
movl %eax,%edx /* 2nd arg: syscall number */ movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call audit_syscall_entry call audit_syscall_entry
pushl %ebx pushl_cfi %ebx
CFI_ADJUST_CFA_OFFSET 4
movl PT_EAX(%esp),%eax /* reload syscall number */ movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call jmp sysenter_do_call
@ -529,8 +495,7 @@ ENDPROC(ia32_sysenter_target)
# system call handler stub # system call handler stub
ENTRY(system_call) ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway RING0_INT_FRAME # can't unwind into user space anyway
pushl %eax # save orig_eax pushl_cfi %eax # save orig_eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation # system call tracing in operation / emulation
@ -566,7 +531,6 @@ restore_all_notrace:
je ldt_ss # returning to user-space with LDT SS je ldt_ss # returning to user-space with LDT SS
restore_nocheck: restore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code RESTORE_REGS 4 # skip orig_eax/error_code
CFI_ADJUST_CFA_OFFSET -4
irq_return: irq_return:
INTERRUPT_RETURN INTERRUPT_RETURN
.section .fixup,"ax" .section .fixup,"ax"
@ -619,10 +583,8 @@ ldt_ss:
shr $16, %edx shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
pushl $__ESPFIX_SS pushl_cfi $__ESPFIX_SS
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi %eax /* new kernel esp */
push %eax /* new kernel esp */
CFI_ADJUST_CFA_OFFSET 4
/* Disable interrupts, but do not irqtrace this section: we /* Disable interrupts, but do not irqtrace this section: we
* will soon execute iret and the tracer was already set to * will soon execute iret and the tracer was already set to
* the irqstate after the iret */ * the irqstate after the iret */
@ -666,11 +628,9 @@ work_notifysig: # deal with pending signals and
ALIGN ALIGN
work_notifysig_v86: work_notifysig_v86:
pushl %ecx # save ti_flags for do_notify_resume pushl_cfi %ecx # save ti_flags for do_notify_resume
CFI_ADJUST_CFA_OFFSET 4
call save_v86_state # %eax contains pt_regs pointer call save_v86_state # %eax contains pt_regs pointer
popl %ecx popl_cfi %ecx
CFI_ADJUST_CFA_OFFSET -4
movl %eax, %esp movl %eax, %esp
#else #else
movl %esp, %eax movl %esp, %eax
@ -803,10 +763,8 @@ ENDPROC(ptregs_clone)
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
shl $16, %eax shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */ addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS pushl_cfi $__KERNEL_DS
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi %eax
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
lss (%esp), %esp /* switch to the normal stack segment */ lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
.endm .endm
@ -843,8 +801,7 @@ vector=FIRST_EXTERNAL_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR .if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4 CFI_ADJUST_CFA_OFFSET -4
.endif .endif
1: pushl $(~vector+0x80) /* Note: always in signed byte range */ 1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
CFI_ADJUST_CFA_OFFSET 4
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f jmp 2f
.endif .endif
@ -884,8 +841,7 @@ ENDPROC(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn) \ #define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \ ENTRY(name) \
RING0_INT_FRAME; \ RING0_INT_FRAME; \
pushl $~(nr); \ pushl_cfi $~(nr); \
CFI_ADJUST_CFA_OFFSET 4; \
SAVE_ALL; \ SAVE_ALL; \
TRACE_IRQS_OFF \ TRACE_IRQS_OFF \
movl %esp,%eax; \ movl %esp,%eax; \
@ -901,21 +857,18 @@ ENDPROC(name)
ENTRY(coprocessor_error) ENTRY(coprocessor_error)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_coprocessor_error
pushl $do_coprocessor_error
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(coprocessor_error) END(coprocessor_error)
ENTRY(simd_coprocessor_error) ENTRY(simd_coprocessor_error)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4
#ifdef CONFIG_X86_INVD_BUG #ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661: pushl $do_general_protection 661: pushl_cfi $do_general_protection
662: 662:
.section .altinstructions,"a" .section .altinstructions,"a"
.balign 4 .balign 4
@ -930,19 +883,16 @@ ENTRY(simd_coprocessor_error)
664: 664:
.previous .previous
#else #else
pushl $do_simd_coprocessor_error pushl_cfi $do_simd_coprocessor_error
#endif #endif
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(simd_coprocessor_error) END(simd_coprocessor_error)
ENTRY(device_not_available) ENTRY(device_not_available)
RING0_INT_FRAME RING0_INT_FRAME
pushl $-1 # mark this as an int pushl_cfi $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_device_not_available
pushl $do_device_not_available
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(device_not_available) END(device_not_available)
@ -964,82 +914,68 @@ END(native_irq_enable_sysexit)
ENTRY(overflow) ENTRY(overflow)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_overflow
pushl $do_overflow
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(overflow) END(overflow)
ENTRY(bounds) ENTRY(bounds)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_bounds
pushl $do_bounds
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(bounds) END(bounds)
ENTRY(invalid_op) ENTRY(invalid_op)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_invalid_op
pushl $do_invalid_op
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(invalid_op) END(invalid_op)
ENTRY(coprocessor_segment_overrun) ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_coprocessor_segment_overrun
pushl $do_coprocessor_segment_overrun
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(coprocessor_segment_overrun) END(coprocessor_segment_overrun)
ENTRY(invalid_TSS) ENTRY(invalid_TSS)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_invalid_TSS pushl_cfi $do_invalid_TSS
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(invalid_TSS) END(invalid_TSS)
ENTRY(segment_not_present) ENTRY(segment_not_present)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_segment_not_present pushl_cfi $do_segment_not_present
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(segment_not_present) END(segment_not_present)
ENTRY(stack_segment) ENTRY(stack_segment)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_stack_segment pushl_cfi $do_stack_segment
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(stack_segment) END(stack_segment)
ENTRY(alignment_check) ENTRY(alignment_check)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_alignment_check pushl_cfi $do_alignment_check
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(alignment_check) END(alignment_check)
ENTRY(divide_error) ENTRY(divide_error)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 # no error code pushl_cfi $0 # no error code
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_divide_error
pushl $do_divide_error
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(divide_error) END(divide_error)
@ -1047,10 +983,8 @@ END(divide_error)
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
ENTRY(machine_check) ENTRY(machine_check)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi machine_check_vector
pushl machine_check_vector
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(machine_check) END(machine_check)
@ -1058,10 +992,8 @@ END(machine_check)
ENTRY(spurious_interrupt_bug) ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME RING0_INT_FRAME
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $do_spurious_interrupt_bug
pushl $do_spurious_interrupt_bug
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(spurious_interrupt_bug) END(spurious_interrupt_bug)
@ -1092,8 +1024,7 @@ ENTRY(xen_sysenter_target)
ENTRY(xen_hypervisor_callback) ENTRY(xen_hypervisor_callback)
CFI_STARTPROC CFI_STARTPROC
pushl $0 pushl_cfi $0
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
@ -1129,23 +1060,20 @@ ENDPROC(xen_hypervisor_callback)
# We distinguish between categories by maintaining a status value in EAX. # We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback) ENTRY(xen_failsafe_callback)
CFI_STARTPROC CFI_STARTPROC
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
movl $1,%eax movl $1,%eax
1: mov 4(%esp),%ds 1: mov 4(%esp),%ds
2: mov 8(%esp),%es 2: mov 8(%esp),%es
3: mov 12(%esp),%fs 3: mov 12(%esp),%fs
4: mov 16(%esp),%gs 4: mov 16(%esp),%gs
testl %eax,%eax testl %eax,%eax
popl %eax popl_cfi %eax
CFI_ADJUST_CFA_OFFSET -4
lea 16(%esp),%esp lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16 CFI_ADJUST_CFA_OFFSET -16
jz 5f jz 5f
addl $16,%esp addl $16,%esp
jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
5: pushl $0 # EAX == 0 => Category 1 (Bad segment) 5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC CFI_ENDPROC
@ -1295,40 +1223,29 @@ syscall_table_size=(.-sys_call_table)
ENTRY(page_fault) ENTRY(page_fault)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_page_fault pushl_cfi $do_page_fault
CFI_ADJUST_CFA_OFFSET 4
ALIGN ALIGN
error_code: error_code:
/* the function address is in %gs's slot on the stack */ /* the function address is in %gs's slot on the stack */
pushl %fs pushl_cfi %fs
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET fs, 0*/ /*CFI_REL_OFFSET fs, 0*/
pushl %es pushl_cfi %es
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET es, 0*/ /*CFI_REL_OFFSET es, 0*/
pushl %ds pushl_cfi %ds
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ds, 0*/ /*CFI_REL_OFFSET ds, 0*/
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eax, 0 CFI_REL_OFFSET eax, 0
pushl %ebp pushl_cfi %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp, 0 CFI_REL_OFFSET ebp, 0
pushl %edi pushl_cfi %edi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edi, 0 CFI_REL_OFFSET edi, 0
pushl %esi pushl_cfi %esi
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET esi, 0 CFI_REL_OFFSET esi, 0
pushl %edx pushl_cfi %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx, 0 CFI_REL_OFFSET edx, 0
pushl %ecx pushl_cfi %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx, 0 CFI_REL_OFFSET ecx, 0
pushl %ebx pushl_cfi %ebx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0 CFI_REL_OFFSET ebx, 0
cld cld
movl $(__KERNEL_PERCPU), %ecx movl $(__KERNEL_PERCPU), %ecx
@ -1370,12 +1287,9 @@ END(page_fault)
movl TSS_sysenter_sp0 + \offset(%esp), %esp movl TSS_sysenter_sp0 + \offset(%esp), %esp
CFI_DEF_CFA esp, 0 CFI_DEF_CFA esp, 0
CFI_UNDEFINED eip CFI_UNDEFINED eip
pushfl pushfl_cfi
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi $__KERNEL_CS
pushl $__KERNEL_CS pushl_cfi $sysenter_past_esp
CFI_ADJUST_CFA_OFFSET 4
pushl $sysenter_past_esp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eip, 0 CFI_REL_OFFSET eip, 0
.endm .endm
@ -1385,8 +1299,7 @@ ENTRY(debug)
jne debug_stack_correct jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct: debug_stack_correct:
pushl $-1 # mark this as an int pushl_cfi $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
xorl %edx,%edx # error code 0 xorl %edx,%edx # error code 0
@ -1406,32 +1319,27 @@ END(debug)
*/ */
ENTRY(nmi) ENTRY(nmi)
RING0_INT_FRAME RING0_INT_FRAME
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
movl %ss, %eax movl %ss, %eax
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
popl %eax popl_cfi %eax
CFI_ADJUST_CFA_OFFSET -4
je nmi_espfix_stack je nmi_espfix_stack
cmpl $ia32_sysenter_target,(%esp) cmpl $ia32_sysenter_target,(%esp)
je nmi_stack_fixup je nmi_stack_fixup
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
movl %esp,%eax movl %esp,%eax
/* Do not access memory above the end of our stack page, /* Do not access memory above the end of our stack page,
* it might not exist. * it might not exist.
*/ */
andl $(THREAD_SIZE-1),%eax andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax cmpl $(THREAD_SIZE-20),%eax
popl %eax popl_cfi %eax
CFI_ADJUST_CFA_OFFSET -4
jae nmi_stack_correct jae nmi_stack_correct
cmpl $ia32_sysenter_target,12(%esp) cmpl $ia32_sysenter_target,12(%esp)
je nmi_debug_stack_check je nmi_debug_stack_check
nmi_stack_correct: nmi_stack_correct:
/* We have a RING0_INT_FRAME here */ /* We have a RING0_INT_FRAME here */
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer movl %esp,%eax # pt_regs pointer
@ -1460,18 +1368,14 @@ nmi_espfix_stack:
* *
* create the pointer to lss back * create the pointer to lss back
*/ */
pushl %ss pushl_cfi %ss
CFI_ADJUST_CFA_OFFSET 4 pushl_cfi %esp
pushl %esp
CFI_ADJUST_CFA_OFFSET 4
addl $4, (%esp) addl $4, (%esp)
/* copy the iret frame of 12 bytes */ /* copy the iret frame of 12 bytes */
.rept 3 .rept 3
pushl 16(%esp) pushl_cfi 16(%esp)
CFI_ADJUST_CFA_OFFSET 4
.endr .endr
pushl %eax pushl_cfi %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
@ -1485,8 +1389,7 @@ END(nmi)
ENTRY(int3) ENTRY(int3)
RING0_INT_FRAME RING0_INT_FRAME
pushl $-1 # mark this as an int pushl_cfi $-1 # mark this as an int
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
xorl %edx,%edx # zero error code xorl %edx,%edx # zero error code
@ -1498,8 +1401,7 @@ END(int3)
ENTRY(general_protection) ENTRY(general_protection)
RING0_EC_FRAME RING0_EC_FRAME
pushl $do_general_protection pushl_cfi $do_general_protection
CFI_ADJUST_CFA_OFFSET 4
jmp error_code jmp error_code
CFI_ENDPROC CFI_ENDPROC
END(general_protection) END(general_protection)

View File

@ -213,23 +213,17 @@ ENDPROC(native_usergs_sysret64)
.macro FAKE_STACK_FRAME child_rip .macro FAKE_STACK_FRAME child_rip
/* push in order ss, rsp, eflags, cs, rip */ /* push in order ss, rsp, eflags, cs, rip */
xorl %eax, %eax xorl %eax, %eax
pushq $__KERNEL_DS /* ss */ pushq_cfi $__KERNEL_DS /* ss */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET ss,0*/ /*CFI_REL_OFFSET ss,0*/
pushq %rax /* rsp */ pushq_cfi %rax /* rsp */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rsp,0 CFI_REL_OFFSET rsp,0
pushq $X86_EFLAGS_IF /* eflags - interrupts on */ pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET rflags,0*/ /*CFI_REL_OFFSET rflags,0*/
pushq $__KERNEL_CS /* cs */ pushq_cfi $__KERNEL_CS /* cs */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET cs,0*/ /*CFI_REL_OFFSET cs,0*/
pushq \child_rip /* rip */ pushq_cfi \child_rip /* rip */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rip,0 CFI_REL_OFFSET rip,0
pushq %rax /* orig rax */ pushq_cfi %rax /* orig rax */
CFI_ADJUST_CFA_OFFSET 8
.endm .endm
.macro UNFAKE_STACK_FRAME .macro UNFAKE_STACK_FRAME
@ -398,10 +392,8 @@ ENTRY(ret_from_fork)
LOCK ; btr $TIF_FORK,TI_flags(%r8) LOCK ; btr $TIF_FORK,TI_flags(%r8)
push kernel_eflags(%rip) pushq_cfi kernel_eflags(%rip)
CFI_ADJUST_CFA_OFFSET 8 popfq_cfi # reset kernel eflags
popf # reset kernel eflags
CFI_ADJUST_CFA_OFFSET -8
call schedule_tail # rdi: 'prev' task parameter call schedule_tail # rdi: 'prev' task parameter
@ -521,11 +513,9 @@ sysret_careful:
jnc sysret_signal jnc sysret_signal
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi pushq_cfi %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule call schedule
popq %rdi popq_cfi %rdi
CFI_ADJUST_CFA_OFFSET -8
jmp sysret_check jmp sysret_check
/* Handle a signal */ /* Handle a signal */
@ -634,11 +624,9 @@ int_careful:
jnc int_very_careful jnc int_very_careful
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi pushq_cfi %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule call schedule
popq %rdi popq_cfi %rdi
CFI_ADJUST_CFA_OFFSET -8
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
jmp int_with_check jmp int_with_check
@ -652,12 +640,10 @@ int_check_syscall_exit_work:
/* Check for syscall exit trace */ /* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal jz int_signal
pushq %rdi pushq_cfi %rdi
CFI_ADJUST_CFA_OFFSET 8
leaq 8(%rsp),%rdi # &ptregs -> arg1 leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave call syscall_trace_leave
popq %rdi popq_cfi %rdi
CFI_ADJUST_CFA_OFFSET -8
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest jmp int_restore_rest
@ -765,8 +751,7 @@ vector=FIRST_EXTERNAL_VECTOR
.if vector <> FIRST_EXTERNAL_VECTOR .if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
.endif .endif
1: pushq $(~vector+0x80) /* Note: always in signed byte range */ 1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
CFI_ADJUST_CFA_OFFSET 8
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f jmp 2f
.endif .endif
@ -821,6 +806,7 @@ ret_from_intr:
TRACE_IRQS_OFF TRACE_IRQS_OFF
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
leaveq leaveq
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
exit_intr: exit_intr:
@ -902,11 +888,9 @@ retint_careful:
jnc retint_signal jnc retint_signal
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi pushq_cfi %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule call schedule
popq %rdi popq_cfi %rdi
CFI_ADJUST_CFA_OFFSET -8
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
@ -955,8 +939,7 @@ END(common_interrupt)
.macro apicinterrupt num sym do_sym .macro apicinterrupt num sym do_sym
ENTRY(\sym) ENTRY(\sym)
INTR_FRAME INTR_FRAME
pushq $~(\num) pushq_cfi $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \do_sym interrupt \do_sym
jmp ret_from_intr jmp ret_from_intr
CFI_ENDPROC CFI_ENDPROC
@ -1138,16 +1121,14 @@ zeroentry simd_coprocessor_error do_simd_coprocessor_error
/* edi: new selector */ /* edi: new selector */
ENTRY(native_load_gs_index) ENTRY(native_load_gs_index)
CFI_STARTPROC CFI_STARTPROC
pushf pushfq_cfi
CFI_ADJUST_CFA_OFFSET 8
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS SWAPGS
gs_change: gs_change:
movl %edi,%gs movl %edi,%gs
2: mfence /* workaround */ 2: mfence /* workaround */
SWAPGS SWAPGS
popf popfq_cfi
CFI_ADJUST_CFA_OFFSET -8
ret ret
CFI_ENDPROC CFI_ENDPROC
END(native_load_gs_index) END(native_load_gs_index)
@ -1214,8 +1195,7 @@ END(kernel_execve)
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq) ENTRY(call_softirq)
CFI_STARTPROC CFI_STARTPROC
push %rbp pushq_cfi %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp,0 CFI_REL_OFFSET rbp,0
mov %rsp,%rbp mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp CFI_DEF_CFA_REGISTER rbp
@ -1224,6 +1204,7 @@ ENTRY(call_softirq)
push %rbp # backlink for old unwinder push %rbp # backlink for old unwinder
call __do_softirq call __do_softirq
leaveq leaveq
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)