mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:05:18 +07:00
9a9594efe5
Pull SMP hotplug updates from Thomas Gleixner: "This update is primarily a cleanup of the CPU hotplug locking code. The hotplug locking mechanism is an open coded RWSEM, which allows recursive locking. The main problem with that is the recursive nature as it evades the full lockdep coverage and hides potential deadlocks. The rework replaces the open coded RWSEM with a percpu RWSEM and establishes full lockdep coverage that way. The bulk of the changes fix up recursive locking issues and address the now fully reported potential deadlocks all over the place. Some of these deadlocks have been observed in the RT tree, but on mainline the probability was low enough to hide them away." * 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits) cpu/hotplug: Constify attribute_group structures powerpc: Only obtain cpu_hotplug_lock if called by rtasd ARM/hw_breakpoint: Fix possible recursive locking for arch_hw_breakpoint_init cpu/hotplug: Remove unused check_for_tasks() function perf/core: Don't release cred_guard_mutex if not taken cpuhotplug: Link lock stacks for hotplug callbacks acpi/processor: Prevent cpu hotplug deadlock sched: Provide is_percpu_thread() helper cpu/hotplug: Convert hotplug locking to percpu rwsem s390: Prevent hotplug rwsem recursion arm: Prevent hotplug rwsem recursion arm64: Prevent cpu hotplug rwsem recursion kprobes: Cure hotplug lock ordering issues jump_label: Reorder hotplug lock and jump_label_lock perf/tracing/cpuhotplug: Fix locking order ACPI/processor: Use cpu_hotplug_disable() instead of get_online_cpus() PCI: Replace the racy recursion prevention PCI: Use cpu_hotplug_disable() instead of get_online_cpus() perf/x86/intel: Drop get_online_cpus() in intel_snb_check_microcode() x86/perf: Drop EXPORT of perf_check_microcode ...
118 lines
2.6 KiB
C
#ifndef _ASM_POWERPC_TOPOLOGY_H
|
|
#define _ASM_POWERPC_TOPOLOGY_H
|
|
#ifdef __KERNEL__
|
|
|
|
|
|
struct device;
|
|
struct device_node;
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
/*
 * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
 * all zones on all nodes will be eligible for zone_reclaim().
 * (10 is the generic LOCAL_DISTANCE, so any remote node qualifies.)
 */
#define RECLAIM_DISTANCE 10
|
|
|
|
#include <asm/mmzone.h>
|
|
|
|
/* powerpc has a flat node hierarchy: every node is its own parent. */
#define parent_node(node) (node)

/*
 * Map a node id to the cpumask of CPUs on that node.  A node id of -1
 * means "no specific node" and yields the mask of all possible CPUs.
 */
#define cpumask_of_node(node) ((node) == -1 ? \
		cpu_all_mask : \
		node_to_cpumask_map[node])
|
|
|
|
struct pci_bus;
|
|
#ifdef CONFIG_PCI
|
|
extern int pcibus_to_node(struct pci_bus *bus);
|
|
#else
|
|
/* Without CONFIG_PCI there is no bus->node mapping: report "no node". */
static inline int pcibus_to_node(struct pci_bus *bus) { return -1; }
|
|
#endif
|
|
|
|
/*
 * cpumask of CPUs local to a PCI bus; falls back to all CPUs when the
 * bus has no node assigned (pcibus_to_node() returned -1).
 */
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
		cpu_all_mask : \
		cpumask_of_node(pcibus_to_node(bus)))
|
|
|
|
/* NUMA distance between two node ids, backed by the platform tables. */
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
|
|
|
|
/* Print the CPU-to-node layout discovered at boot. */
extern void __init dump_numa_cpu_topology(void);

/* Add/remove the sysfs linkage between a device and its NUMA node. */
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
/* cpus_locked: true when the caller already holds the CPU hotplug lock. */
extern int numa_update_cpu_topology(bool cpus_locked);
|
|
|
|
static inline int early_cpu_to_node(int cpu)
|
|
{
|
|
int nid;
|
|
|
|
nid = numa_cpu_lookup_table[cpu];
|
|
|
|
/*
|
|
* Fall back to node 0 if nid is unset (it should be, except bugs).
|
|
* This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
|
|
*/
|
|
return (nid < 0) ? 0 : nid;
|
|
}
|
|
#else
|
|
|
|
static inline int early_cpu_to_node(int cpu) { return 0; }
|
|
|
|
static inline void dump_numa_cpu_topology(void) {}
|
|
|
|
/* Stub: without NUMA there are no per-node sysfs directories; succeed. */
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
{
	return 0;
}
|
|
|
|
/* Stub: nothing was added, so there is nothing to remove. */
static inline void sysfs_remove_device_from_node(struct device *dev, int nid) { }
|
|
|
|
/* Stub: without NUMA there is no topology to update; report success. */
static inline int numa_update_cpu_topology(bool cpus_locked) { return 0; }
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
|
|
/* Start/stop PRRN/VPHN topology-change notifications (PPC_SPLPAR only). */
extern int start_topology_update(void);
extern int stop_topology_update(void);
/* Non-zero when PRRN event handling is enabled on this platform. */
extern int prrn_is_enabled(void);
|
|
#else
|
|
/* Stub: no PRRN/VPHN support configured, so nothing to start. */
static inline int start_topology_update(void) { return 0; }
|
|
/* Stub: no PRRN/VPHN support configured, so nothing to stop. */
static inline int stop_topology_update(void) { return 0; }
|
|
/* Stub: PRRN events are never enabled without PPC_SPLPAR support. */
static inline int prrn_is_enabled(void) { return 0; }
|
|
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
|
|
|
|
#include <asm-generic/topology.h>
|
|
|
|
#ifdef CONFIG_SMP
|
|
#include <asm/cputable.h>
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#include <asm/smp.h>
|
|
|
|
/*
 * CPU topology accessors consumed by the generic scheduler and
 * drivers/base/topology.c sysfs code (PPC64 + SMP only).
 */
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
|
|
#endif
|
|
#endif
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_TOPOLOGY_H */
|