xen: jump to iret fixup
Use jmp rather than call for the iret fixup, so it's consistent with the
sysexit fixup, and it simplifies the stack (which is already complex).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 0f2c876952
parent dbe9e994c9
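To make the call-versus-jmp point concrete, here is a minimal standalone sketch (the labels with_call, with_jmp, fixup_ret, fixup_tail and continue_path are illustrative only, not from the patch): a called fixup has to look past the return address it was given and end with ret, while a tail-jumped fixup sees the frame directly at %esp and hands control on itself.

	/* Hypothetical sketch, not the kernel code: a call pushes a return
	   slot that every stack offset in the fixup must step over; a tail
	   jmp pushes nothing, so those offsets shrink by 4. */
	.text

with_call:
	call	fixup_ret		/* return address now sits above the frame */
	jmp	continue_path		/* extra branch back in the caller */

with_jmp:
	jmp	fixup_tail		/* nothing pushed; fixup sees the frame at %esp */

fixup_ret:
	movl	4(%esp), %eax		/* frame word is at +4, past the return slot */
	ret

fixup_tail:
	movl	(%esp), %eax		/* the same frame word, now at +0 */
	jmp	continue_path		/* the fixup continues the flow itself */

continue_path:
	ret

In the patch below, that same shift is what removes the "+4" offsets in xen_iret_crit_fixup and replaces its final ret with a jmp to xen_do_upcall.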
@@ -1042,8 +1042,7 @@ ENTRY(xen_hypervisor_callback)
 	cmpl $xen_iret_end_crit,%eax
 	jae 1f
 
-	call xen_iret_crit_fixup
-	jmp 2f
+	jmp xen_iret_crit_fixup
 
 1:	cmpl $xen_sysexit_start_crit,%eax
 	jb 2f

@@ -223,9 +223,7 @@ hyper_iret:
 	 ds		}  SAVE_ALL state
 	 eax		}
 	  :		:
-	 ebx		}
-	----------------
-	return addr	 <- esp
+	 ebx		}<- esp
 	----------------
 
    In order to deliver the nested exception properly, we need to shift
@@ -240,10 +238,8 @@ hyper_iret:
    it's usermode state which we eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
-	/* offsets +4 for return address */
-
 	/*
-	   Paranoia: Make sure we're really coming from userspace.
+	   Paranoia: Make sure we're really coming from kernel space.
 	   One could imagine a case where userspace jumps into the
 	   critical range address, but just before the CPU delivers a GP,
 	   it decides to deliver an interrupt instead.  Unlikely?
@@ -252,32 +248,32 @@ ENTRY(xen_iret_crit_fixup)
 	   jump instruction itself, not the destination, but some virtual
 	   environments get this wrong.
 	 */
-	movl PT_CS+4(%esp), %ecx
+	movl PT_CS(%esp), %ecx
 	andl $SEGMENT_RPL_MASK, %ecx
 	cmpl $USER_RPL, %ecx
 	je 2f
 
-	lea PT_ORIG_EAX+4(%esp), %esi
-	lea PT_EFLAGS+4(%esp), %edi
+	lea PT_ORIG_EAX(%esp), %esi
+	lea PT_EFLAGS(%esp), %edi
 
 	/* If eip is before iret_restore_end then stack
 	   hasn't been restored yet. */
 	cmp $iret_restore_end, %eax
 	jae 1f
 
-	movl 0+4(%edi),%eax		/* copy EAX */
-	movl %eax, PT_EAX+4(%esp)
+	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
+	movl %eax, PT_EAX(%esp)
 
 	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
 
 	/* set up the copy */
 1:	std
-	mov $(PT_EIP+4) / 4, %ecx	/* copy ret+saved regs up to orig_eax */
+	mov $PT_EIP / 4, %ecx		/* saved regs up to orig_eax */
 	rep movsl
 	cld
 
 	lea 4(%edi),%esp		/* point esp to new frame */
-2:	ret
+2:	jmp xen_do_upcall
 
 
 ENTRY(xen_sysexit)
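The last hunk leaves the std / rep movsl / cld sequence untouched; for readers unfamiliar with it, here is a hedged standalone sketch (the label copy_words_down and the register convention are illustrative, not from the patch) of that backward word copy, which is what lets the fixup slide the saved registers up the stack even though source and destination overlap.

	/* Hypothetical helper, not from the patch: copy %ecx 32-bit words,
	   the last of which is at (%esi), into a destination whose last
	   word is at (%edi). Copying high-to-low is safe when the
	   destination overlaps the source from above. */
	.text
	.globl	copy_words_down
copy_words_down:
	std				/* direction flag set: movsl decrements %esi/%edi */
	rep movsl			/* move %ecx longs, highest word first */
	cld				/* restore the normal incrementing direction */
	ret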