Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-25 09:26:36 +07:00
Commit ae03900105

On PVH and PVHVM, when the VCPUOP_register_vcpu_info hypercall fails we limit the number of CPUs to MAX_VIRT_CPUS. However, if this failure occurred for a CPU beyond MAX_VIRT_CPUS, we continue to run with more than MAX_VIRT_CPUS CPUs. This leads to problems at the next save/restore cycle, when more than MAX_VIRT_CPUS threads go into stop_machine() but only the first MAX_VIRT_CPUS have valid state on the way back up. This patch pulls the excess CPUs down via cpu_down().

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
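The idea of the fix, in code form: after SMP bring-up, walk the online CPUs and offline, via cpu_down(), every CPU whose Xen vCPU number is at or beyond MAX_VIRT_CPUS, since no vcpu_info could be registered for it. The snippet below is only a minimal sketch of that approach, not the actual patch hunk: the helper name xen_offline_excess_vcpus() is made up for illustration, and the exact include locations (in particular xen_vcpu_nr() in xen/xen-ops.h) are assumptions based on the Xen code of that era; cpu_down() was the exported CPU-hotplug call at the time.

/*
 * Illustrative sketch only (assumed helper name, not the verbatim patch):
 * offline every CPU whose Xen vCPU number is at or beyond MAX_VIRT_CPUS,
 * so a later save/restore never sees CPUs without registered vcpu_info.
 */
#include <linux/cpu.h>		/* cpu_down() */
#include <linux/cpumask.h>	/* for_each_online_cpu() */
#include <linux/printk.h>	/* pr_warn() */
#include <xen/interface/xen.h>	/* MAX_VIRT_CPUS */
#include <xen/xen-ops.h>	/* xen_vcpu_nr() (assumed location) */

static void xen_offline_excess_vcpus(void)
{
	unsigned int cpu;
	int rc;

	for_each_online_cpu(cpu) {
		/* CPUs below MAX_VIRT_CPUS have registered vcpu_info; keep them. */
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		/* Pull the excess CPU down, as the commit message describes. */
		rc = cpu_down(cpu);
		if (rc)
			pr_warn("xen: failed to offline CPU %u: %d\n", cpu, rc);
	}
}

Offlining the excess CPUs, rather than leaving them running, ensures that the next stop_machine()-based save/restore cycle only ever sees CPUs that have registered vcpu_info.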
43 lines · 1.1 KiB · C
#ifndef _XEN_SMP_H

#ifdef CONFIG_SMP
extern void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector);
extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector);
extern void xen_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);

extern int xen_smp_intr_init(unsigned int cpu);
extern void xen_smp_intr_free(unsigned int cpu);
int xen_smp_intr_init_pv(unsigned int cpu);
void xen_smp_intr_free_pv(unsigned int cpu);

void xen_smp_cpus_done(unsigned int max_cpus);

void xen_smp_send_reschedule(int cpu);
void xen_smp_send_call_function_ipi(const struct cpumask *mask);
void xen_smp_send_call_function_single_ipi(int cpu);

struct xen_common_irq {
	int irq;
	char *name;
};
#else /* CONFIG_SMP */

static inline int xen_smp_intr_init(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free(unsigned int cpu) {}

static inline int xen_smp_intr_init_pv(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
#endif /* CONFIG_SMP */

#endif