KVM: SVM: Split svm_vcpu_run inline assembly to separate file
The compiler (GCC) does not like the situation, where there is inline
assembly block that clobbers all available machine registers in the
middle of the function. This situation can be found in function
svm_vcpu_run in file kvm/svm.c and results in many register spills and
fills to/from stack frame.
This patch fixes the issue with the same approach as was done for
VMX some time ago. The big inline assembly is moved to a separate
assembly .S file, taking into account all ABI requirements.
There are two main benefits of the above approach:
* elimination of several register spills and fills to/from stack
frame, and consequently smaller function .text size. The binary size
of svm_vcpu_run is lowered from 2019 to 1626 bytes.
* more efficient access to a register save array. Currently, register
save array is accessed as:
7b00: 48 8b 98 28 02 00 00 mov 0x228(%rax),%rbx
7b07: 48 8b 88 18 02 00 00 mov 0x218(%rax),%rcx
7b0e: 48 8b 90 20 02 00 00 mov 0x220(%rax),%rdx
and passing a pointer to a register array as an argument to a function one gets:
12: 48 8b 48 08 mov 0x8(%rax),%rcx
16: 48 8b 50 10 mov 0x10(%rax),%rdx
1a: 48 8b 58 18 mov 0x18(%rax),%rbx
As a result, the total size, considering that the new function size is 229
bytes, gets lowered by 164 bytes.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-03-30 20:02:13 +07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <asm/asm.h>
|
|
|
|
#include <asm/bitsperlong.h>
|
|
|
|
#include <asm/kvm_vcpu_regs.h>
|
2020-04-13 14:17:58 +07:00
|
|
|
#include <asm/nospec-branch.h>
|
KVM: SVM: Split svm_vcpu_run inline assembly to separate file
The compiler (GCC) does not like the situation, where there is inline
assembly block that clobbers all available machine registers in the
middle of the function. This situation can be found in function
svm_vcpu_run in file kvm/svm.c and results in many register spills and
fills to/from stack frame.
This patch fixes the issue with the same approach as was done for
VMX some time ago. The big inline assembly is moved to a separate
assembly .S file, taking into account all ABI requirements.
There are two main benefits of the above approach:
* elimination of several register spills and fills to/from stack
frame, and consequently smaller function .text size. The binary size
of svm_vcpu_run is lowered from 2019 to 1626 bytes.
* more efficient access to a register save array. Currently, register
save array is accessed as:
7b00: 48 8b 98 28 02 00 00 mov 0x228(%rax),%rbx
7b07: 48 8b 88 18 02 00 00 mov 0x218(%rax),%rcx
7b0e: 48 8b 90 20 02 00 00 mov 0x220(%rax),%rdx
and passing a pointer to a register array as an argument to a function one gets:
12: 48 8b 48 08 mov 0x8(%rax),%rcx
16: 48 8b 50 10 mov 0x10(%rax),%rdx
1a: 48 8b 58 18 mov 0x18(%rax),%rbx
As a result, the total size, considering that the new function size is 229
bytes, gets lowered by 164 bytes.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-03-30 20:02:13 +07:00
|
|
|
|
|
|
|
/* Size, in bytes, of one slot in the guest register save array. */
#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
/*
 * Byte offsets of each guest GPR within the register array passed to
 * __svm_vcpu_run, derived from the indices in <asm/kvm_vcpu_regs.h>.
 */
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE

/* R8-R15 exist only on 64-bit kernels. */
#ifdef CONFIG_X86_64
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
|
|
|
|
|
2020-07-09 02:51:58 +07:00
|
|
|
/* Placed in .noinstr.text: no instrumentation allowed across VM-entry/exit. */
.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa: unsigned long
 * @regs: unsigned long * (to guest registers)
 */
SYM_FUNC_START(__svm_vcpu_run)
	/* Save the callee-saved host registers required by the C ABI. */
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. RAX/RSP are handled by hardware (see above). */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	/* Enter guest mode */
	sti

	/*
	 * For each of VMLOAD/VMRUN/VMSAVE below: if the instruction faults,
	 * the exception-table fixup jumps to the matching numeric label,
	 * which checks kvm_rebooting.  A fault is tolerated (skip ahead)
	 * only if KVM is rebooting; otherwise ud2 triggers a BUG.
	 */
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
	/* Back in host mode; mask interrupts again. */
	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8, VCPU_R8 (%_ASM_AX)
	mov %r9, VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d, %r8d
	xor %r9d, %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* Restore the callee-saved host registers (reverse of prologue). */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)
|