x86: paravirt spinlocks, !CONFIG_SMP build fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>

Author: Ingo Molnar <mingo@elte.hu>, 2008-07-09 14:33:33 +02:00
commit 4bb689eee1
parent 2d9e1e2f58
2 changed files with 8 additions and 0 deletions

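The build fix: the __byte_spin_*() and __ticket_spin_*() helpers come from the SMP spinlock code and are not available on !CONFIG_SMP builds, so the references to them in paravirt_use_bytelocks(), in the pv_lock_ops default initializer and in the __raw_spin_*() wrappers are wrapped in #ifdef CONFIG_SMP. A minimal, self-contained sketch of the pattern, with made-up demo_* names (illustration only, not kernel code):

/* Illustration only -- not part of the patch; all demo_* names are
 * hypothetical. The SMP-only helpers and every reference to them
 * (including the ops-table initializer) sit under the same #ifdef,
 * so a !CONFIG_SMP build never sees an undefined symbol.
 */
struct demo_spinlock {
	unsigned int slock;
};

struct demo_lock_ops {
	int  (*spin_is_locked)(struct demo_spinlock *lock);
	void (*spin_lock)(struct demo_spinlock *lock);
	void (*spin_unlock)(struct demo_spinlock *lock);
};

#ifdef CONFIG_SMP
/* SMP-only default implementations, analogous to __ticket_spin_*() */
static int __demo_ticket_spin_is_locked(struct demo_spinlock *lock)
{
	return lock->slock != 0;
}
static void __demo_ticket_spin_lock(struct demo_spinlock *lock)
{
	lock->slock = 1;	/* placeholder for the real ticket-lock code */
}
static void __demo_ticket_spin_unlock(struct demo_spinlock *lock)
{
	lock->slock = 0;
}
#endif

struct demo_lock_ops demo_lock_ops = {
#ifdef CONFIG_SMP
	.spin_is_locked	= __demo_ticket_spin_is_locked,
	.spin_lock	= __demo_ticket_spin_lock,
	.spin_unlock	= __demo_ticket_spin_unlock,
#endif
};

With CONFIG_SMP unset, the helpers and the initializer body simply disappear, which is what the hunks below do for pv_lock_ops.
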
arch/x86/kernel/paravirt.c

@@ -270,11 +270,13 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 
 void __init paravirt_use_bytelocks(void)
 {
+#ifdef CONFIG_SMP
 	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
 	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
 	pv_lock_ops.spin_lock = __byte_spin_lock;
 	pv_lock_ops.spin_trylock = __byte_spin_trylock;
 	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
 }
 
 struct pv_info pv_info = {
@@ -461,12 +463,14 @@ struct pv_mmu_ops pv_mmu_ops = {
 };
 
 struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
 	.spin_is_locked = __ticket_spin_is_locked,
 	.spin_is_contended = __ticket_spin_is_contended,
 
 	.spin_lock = __ticket_spin_lock,
 	.spin_trylock = __ticket_spin_trylock,
 	.spin_unlock = __ticket_spin_unlock,
+#endif
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);

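paravirt_use_bytelocks() exists so a hypervisor backend (Xen, in the original series) can replace the default ticket-lock ops with byte locks at init time; with the new guard it compiles to an empty function on UP. Continuing the hypothetical demo_* sketch above:

/* Illustration only, continuing the demo_* sketch: an init-time
 * override in the spirit of paravirt_use_bytelocks(). On !CONFIG_SMP
 * the body compiles away, matching the patch.
 */
#ifdef CONFIG_SMP
static void __demo_byte_spin_lock(struct demo_spinlock *lock)
{
	lock->slock = 1;	/* placeholder for the real byte-lock code */
}
static void __demo_byte_spin_unlock(struct demo_spinlock *lock)
{
	lock->slock = 0;
}
#endif

void demo_use_bytelocks(void)
{
#ifdef CONFIG_SMP
	demo_lock_ops.spin_lock   = __demo_byte_spin_lock;
	demo_lock_ops.spin_unlock = __demo_byte_spin_unlock;
#endif
}
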
include/asm-x86/paravirt.h

@@ -1387,6 +1387,8 @@ void _paravirt_nop(void);
 
 void paravirt_use_bytelocks(void);
 
+#ifdef CONFIG_SMP
+
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
@@ -1412,6 +1414,8 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 	return PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
 
+#endif
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr;		/* original instructions */
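
On the header side, the guarded __raw_spin_*() wrappers dispatch through pv_lock_ops via PVOP_CALL1()/PVOP_VCALL1(), which also record the call sites in .parainstructions for later patching. A plain-C approximation of that dispatch, minus the patching machinery and again with the hypothetical demo_* names from the sketch above:

/* Illustration only: the wrapper side that the header change guards.
 * A real PVOP_CALL goes through patchable call sites; here it is just
 * an ordinary indirect call through the ops table.
 */
#ifdef CONFIG_SMP
static inline int demo_raw_spin_is_locked(struct demo_spinlock *lock)
{
	return demo_lock_ops.spin_is_locked(lock);
}

static inline void demo_raw_spin_lock(struct demo_spinlock *lock)
{
	demo_lock_ops.spin_lock(lock);
}

static inline void demo_raw_spin_unlock(struct demo_spinlock *lock)
{
	demo_lock_ops.spin_unlock(lock);
}
#endif	/* CONFIG_SMP */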