x86/paravirt: Remove unused _paravirt_ident_32
There is no user of _paravirt_ident_32 left in the tree. Remove it
together with the related paravirt_patch_ident_32().

paravirt_patch_ident_64() can be moved inside CONFIG_PARAVIRT_XXL=y.

Signed-off-by: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akataria@vmware.com
Cc: boris.ostrovsky@oracle.com
Cc: rusty@rustcorp.com.au
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/20181030063301.15054-1-jgross@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent f77084d963
commit 7847c7be04
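Background for the diff below: pv_ops is a table of function pointers that the kernel fills in differently depending on the host it boots on. On bare metal, "conversion" ops such as pte_val have nothing to convert, so their slots hold an identity function; _paravirt_ident_32 and _paravirt_ident_64 were exactly that, and only the 64-bit one still has users. A minimal user-space sketch of the pattern (a toy model; every name in it is hypothetical, not the kernel's):

#include <stdio.h>

typedef unsigned long long u64;

/* Stand-in for the kernel's _paravirt_ident_64: on bare metal a
 * pte "conversion" has nothing to do, so it just returns x. */
static u64 ident_64(u64 x)
{
	return x;
}

/* Hypothetical hypervisor hook that actually transforms the value. */
static u64 hv_pte_val(u64 pte)
{
	return pte & 0x000ffffffffff000ULL;	/* e.g. keep only the PFN bits */
}

/* Toy op table in the spirit of struct paravirt_patch_template. */
struct mmu_ops {
	u64 (*pte_val)(u64 pte);
};

int main(void)
{
	struct mmu_ops native = { .pte_val = ident_64 };
	struct mmu_ops hyperv = { .pte_val = hv_pte_val };

	/* Call sites stay uniform; only the installed callback differs. */
	printf("native: %llx\n", native.pte_val(0x1234007ULL));
	printf("hv:     %llx\n", hyperv.pte_val(0x1234007ULL));
	return 0;
}

The point of keeping the identity variant as a real function is that the patcher can recognize it by address and replace the indirect call with an inline register move, as the hunks below show.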
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops;
 	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
 	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
 				unsigned long addr, unsigned len);
@@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void);
 void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
 u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".type _paravirt_nop, @function\n\t"
      ".popsection");
 
-/* identity function, which can be inlined */
-u32 notrace _paravirt_ident_32(u32 x)
-{
-	return x;
-}
-
-u64 notrace _paravirt_ident_64(u64 x)
-{
-	return x;
-}
-
 void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
 }
 
 #ifdef CONFIG_PARAVIRT_XXL
+/* identity function, which can be inlined */
+u64 notrace _paravirt_ident_64(u64 x)
+{
+	return x;
+}
+
 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 				   unsigned long addr, unsigned len)
 {
@@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
 	else if (opfunc == _paravirt_nop)
 		ret = 0;
 
+#ifdef CONFIG_PARAVIRT_XXL
 	/* identity functions just return their single argument */
-	else if (opfunc == _paravirt_ident_32)
-		ret = paravirt_patch_ident_32(insnbuf, len);
 	else if (opfunc == _paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);
 
-#ifdef CONFIG_PARAVIRT_XXL
 	else if (type == PARAVIRT_PATCH(cpu.iret) ||
 		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
@@ -309,13 +302,8 @@ struct pv_info pv_info = {
 #endif
 };
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-/* 32-bit pagetable entries */
-#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
-#else
 /* 64-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
-#endif
 
 struct paravirt_patch_template pv_ops = {
 	/* Init ops. */
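The paravirt.c hunks above also show the shape of the patching decision: paravirt_patch_default() compares the installed callback against known functions (_paravirt_nop, _paravirt_ident_64) by pointer and picks a strategy. A stand-alone model of that comparison chain, with hypothetical stand-in names:

#include <stdio.h>

typedef unsigned long long u64;

static void pv_nop(void) { }			/* stand-in for _paravirt_nop */
static u64 pv_ident_64(u64 x) { return x; }	/* stand-in for _paravirt_ident_64 */

/* Models the if/else chain in paravirt_patch_default(): the patch
 * strategy depends only on which callback the op slot points to. */
static const char *pick_patch(void *opfunc)
{
	if (!opfunc)
		return "unimplemented op: patch in a trap";
	else if (opfunc == (void *)pv_nop)
		return "nop: emit nothing (ret = 0)";
	else if (opfunc == (void *)pv_ident_64)
		return "identity: inline a register move";
	else
		return "anything else: emit a direct call to opfunc";
}

int main(void)
{
	printf("%s\n", pick_patch((void *)pv_ident_64));
	printf("%s\n", pick_patch((void *)pv_nop));
	printf("%s\n", pick_patch(NULL));
	return 0;
}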
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret");
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
-#endif
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-	/* arg in %eax, return in %eax */
-	return 0;
-}
 
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
 	/* arg in %edx:%eax, return in %edx:%eax */
 	return 0;
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif
 
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd");
 
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
-#endif
-
-DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-	return paravirt_patch_insns(insnbuf, len,
-				    start__mov32, end__mov32);
-}
 
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
 	return paravirt_patch_insns(insnbuf, len,
 				    start__mov64, end__mov64);
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif
 
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
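The paravirt_patch_64.c hunk shows where the identity patch bytes come from: DEF_NATIVE(, mov64, "mov %rdi, %rax") assembles the instruction between start__mov64/end__mov64 labels, and paravirt_patch_insns() copies those bytes over the call site. A user-space sketch of the same mechanism, assuming x86-64 GCC/Clang (a simplified toy; names and the fits-or-nothing policy are illustrative, not the kernel's exact behavior):

#include <stdio.h>
#include <string.h>

/* Like DEF_NATIVE(, mov64, "mov %rdi, %rax"): assemble the native
 * instruction between two global labels so its bytes can be read. */
extern const char start_mov64[], end_mov64[];
asm (".pushsection .text\n"
     ".globl start_mov64\n"
     "start_mov64: mov %rdi, %rax\n"
     ".globl end_mov64\n"
     "end_mov64:\n"
     ".popsection");

/* In the spirit of paravirt_patch_insns(): copy the template into the
 * call-site buffer only if it fits; here, 0 means nothing was copied. */
static unsigned patch_insns(void *insnbuf, unsigned len,
			    const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len)
		return 0;
	memcpy(insnbuf, start, insn_len);
	return insn_len;
}

int main(void)
{
	unsigned char buf[8];
	unsigned n = patch_insns(buf, sizeof(buf), start_mov64, end_mov64);

	/* "mov %rdi, %rax" encodes as 48 89 f8: three bytes. */
	printf("patched %u byte(s):", n);
	for (unsigned i = 0; i < n; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}

Note the asymmetry the 32-bit hunk makes visible: there, a 32-bit value already sits in %eax (and a 64-bit one in %edx:%eax), so the patched-in identity is zero instructions and paravirt_patch_ident_64() simply returns 0; on 64-bit the argument arrives in %rdi and must be moved to %rax, hence the mov64 template.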