Merge branch 'cpus4096' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-x86 into cpus4096
commit 238a5b4bff
@@ -3,8 +3,6 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_X86_64
-
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-static inline void setup_cpu_local_masks(void) { }
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
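With the 32-bit cpumask_t fallbacks gone, both word sizes declare these masks as cpumask_var_t, which must be explicitly allocated before use. A minimal sketch of the allocate/use/free discipline this implies — the mask name and init/exit hooks below are invented for illustration, not part of this commit:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

static cpumask_var_t demo_mask;	/* hypothetical mask, mirrors cpu_callin_mask et al. */

static int __init demo_init(void)
{
	/* With CONFIG_CPUMASK_OFFSTACK this allocates the bitmap;
	 * otherwise it is a no-op that always succeeds. */
	if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(demo_mask);	/* alloc_cpumask_var() does not zero */
	cpumask_set_cpu(smp_processor_id(), demo_mask);
	return 0;
}

static void __exit demo_exit(void)
{
	free_cpumask_var(demo_mask);	/* no-op in the !OFFSTACK case */
}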
@@ -109,11 +109,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
 	return sd->node;
 }
 
-static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
-{
-	return node_to_cpumask(__pcibus_to_node(bus));
-}
-
 static inline const struct cpumask *
 cpumask_of_pcibus(const struct pci_bus *bus)
 {
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
 	/* cpus sharing the last level cache: */
-	cpumask_t		llc_shared_map;
+	cpumask_var_t		llc_shared_map;
 #endif
 	/* cpuid returned max cores value: */
 	u16			 x86_max_cores;
@@ -21,19 +21,19 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
-	return &per_cpu(cpu_sibling_map, cpu);
+	return per_cpu(cpu_sibling_map, cpu);
 }
 
 static inline struct cpumask *cpu_core_mask(int cpu)
 {
-	return &per_cpu(cpu_core_map, cpu);
+	return per_cpu(cpu_core_map, cpu);
 }
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-static inline void arch_send_call_function_ipi(cpumask_t mask)
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_ops.send_call_func_ipi(&mask);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void cpu_disable_common(void);
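The dropped address-of in cpu_sibling_mask()/cpu_core_mask() is the crux of this hunk: once the per-cpu maps are cpumask_var_t, per_cpu() already evaluates to something usable as struct cpumask *. A paraphrase of the two definitions in <linux/cpumask.h> (not a verbatim quote):

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* bitmap allocated separately */
#else
typedef struct cpumask cpumask_var_t[1];	/* bitmap embedded; decays to a pointer */
#endif

Either way, per_cpu(cpu_sibling_map, cpu) yields a struct cpumask * without taking an address, which is why the old &per_cpu(...) form had to go.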
@@ -44,9 +44,6 @@
 
 #ifdef CONFIG_X86_32
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t node_to_cpumask_map[];
-
 /* Mappings between logical cpu number and node number */
 extern int cpu_to_node_map[];
 
@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * cpumask_of_node function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
-
-static inline void setup_node_to_cpumask_map(void) { }
-
 #else /* CONFIG_X86_64 */
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t *node_to_cpumask_map;
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *cpumask_of_node(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
@@ -108,34 +81,25 @@ static inline int early_cpu_to_node(int cpu)
 	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
+#endif /* CONFIG_X86_64 */
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
 {
 	return node_to_cpumask_map[node];
 }
-
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+#endif
 
 extern void setup_node_to_cpumask_map(void);
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-	const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-	v = cpumask_of_node(node)
-
-#endif /* CONFIG_X86_64 */
-
 /*
  * Returns the number of the node containing Node 'node'.  This
  * architecture is flat, so it is a pretty simple function!
@@ -143,7 +107,6 @@ extern void setup_node_to_cpumask_map(void);
 #define parent_node(node) (node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
-#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
 #ifdef CONFIG_X86_32
 extern unsigned long node_start_pfn[];
@@ -209,30 +172,17 @@ static inline int early_cpu_to_node(int cpu)
 	return 0;
 }
 
-static inline const cpumask_t *cpumask_of_node(int node)
+static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return &cpu_online_map;
-}
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 static inline int node_to_first_cpu(int node)
 {
-	return first_cpu(cpu_online_map);
+	return cpumask_first(cpu_online_mask);
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-	const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-	v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -245,16 +195,13 @@ static inline int node_to_first_cpu(int node)
 }
 #endif
 
-extern cpumask_t cpu_coregroup_map(int cpu);
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
-#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes
@@ -268,7 +215,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
 #define smt_capable()			(smp_num_siblings > 1)
 #endif
 
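The caller-visible effect of the topology.h rework: instead of copying an entire cpumask onto the stack through node_to_cpumask(), code borrows a const pointer from cpumask_of_node(). A small hypothetical caller showing the pointer-based idiom (the function itself is not from this commit):

#include <linux/topology.h>
#include <linux/cpumask.h>

/* Count online CPUs on a node without a stack-sized cpumask copy. */
static int demo_online_cpus_on_node(int node)
{
	int cpu, count = 0;

	for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
		count++;
	return count;
}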
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void)
 		WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 
@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *target_cpus_cluster(void)
+static const struct cpumask *target_cpus_cluster(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void)
 		"Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	unsigned int round = 0;
 	int cpu, uninitialized_var(apicid);
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
+static cpumask_var_t backtrace_mask;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpu_isset(cpu, backtrace_mask)) {
+	if (cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		dump_stack();
 		spin_unlock(&lock);
-		cpu_clear(cpu, backtrace_mask);
+		cpumask_clear_cpu(cpu, backtrace_mask);
 	}
 
 	/* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
 {
 	int i;
 
-	backtrace_mask = cpu_online_map;
+	cpumask_copy(backtrace_mask, cpu_online_mask);
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
-		if (cpus_empty(backtrace_mask))
+		if (cpumask_empty(backtrace_mask))
 			break;
 		mdelay(1);
 	}
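The shape of the nmi.c change is worth spelling out: the mask is now allocated once at init time (check_nmi_watchdog() runs early, where GFP_KERNEL is fine) and the NMI tick only performs bit operations on it, since allocation is impossible in NMI context. A condensed sketch of that split, with invented names:

static cpumask_var_t trace_mask;	/* stand-in for backtrace_mask */

static int __init trace_init(void)
{
	/* a sleeping allocation is only legal here, not in the NMI path */
	if (!alloc_cpumask_var(&trace_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(trace_mask);
	return 0;
}

/* NMI/interrupt context: atomic bit ops only, no allocation */
static void trace_tick(int cpu)
{
	if (cpumask_test_cpu(cpu, trace_mask))
		cpumask_clear_cpu(cpu, trace_mask);
}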
@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void)
 	clear_local_APIC();
 }
 
-static inline const cpumask_t *numaq_target_cpus(void)
+static inline const struct cpumask *numaq_target_cpus(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
 static inline unsigned long
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	return 0x0F;
 }
@@ -462,7 +462,7 @@ static int probe_numaq(void)
 	return found_numaq;
 }
 
-static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 static void numaq_setup_portio_remap(void)
@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /* should be called last. */
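This same two-line replacement for *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS } } appears in the es7000, numaq, and summit variants as well. It works because cpumask_bits() exposes the mask's underlying unsigned long array, so clearing the whole (possibly heap-allocated) bitmap and then seeding word 0 reproduces what the old compound-literal assignment did without needing a cpumask_t rvalue. A self-contained sketch of the idiom, with an invented constant:

#include <linux/cpumask.h>

#define DEMO_ALL_CPUS 0xFFul	/* stand-in for APIC_ALL_CPUS */

static void demo_fill_low_word(struct cpumask *retmask)
{
	cpumask_clear(retmask);				/* zero every word of the bitmap */
	cpumask_bits(retmask)[0] = DEMO_ALL_CPUS;	/* then set only the low word */
}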
@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x)
 	return (x >> 24) & 0xFF;
 }
 
-static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
 static void summit_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		summit_send_IPI_mask(&mask, vector);
+	default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
 }
 
 static void summit_send_IPI_all(int vector)
 {
-	summit_send_IPI_mask(&cpu_online_map, vector);
+	summit_send_IPI_mask(cpu_online_mask, vector);
 }
 
 #include <asm/tsc.h>
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){
 
 #define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static const cpumask_t *summit_target_cpus(void)
+static const struct cpumask *summit_target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	unsigned int round = 0;
 	int cpu, apicid = 0;
@@ -346,7 +342,7 @@ static int probe_summit(void)
 	return 0;
 }
 
-static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 #ifdef CONFIG_X86_SUMMIT_NUMA
@@ -41,8 +41,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
 cpumask_var_t cpu_callin_mask;
 cpumask_var_t cpu_callout_mask;
@@ -60,16 +58,6 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
-
-#endif /* CONFIG_X86_32 */
-
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	/* Errata workaround */
@@ -56,7 +56,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+	return cpumask_of(0);
+}
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -654,7 +657,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
 	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
 	data->powernow_table = powernow_table;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 
 	for (j = 0; j < data->numps; j++)
@@ -808,7 +811,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
 	/* fill in data */
 	data->numps = data->acpi_data.state_count;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
 
@@ -1224,7 +1227,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (cpu_family == CPU_HW_PSTATE)
 		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
 	if (cpu_family == CPU_HW_PSTATE)
@@ -1286,7 +1289,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	unsigned int khz = 0;
 	unsigned int first;
 
-	first = first_cpu(per_cpu(cpu_core_map, cpu));
+	first = cpumask_first(cpu_core_mask(cpu));
 	data = per_cpu(powernow_data, first);
 
 	if (!data)
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
@@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = {
 	NULL
 };
 
-static cpumask_t mce_device_initialized = CPU_MASK_NONE;
+static cpumask_var_t mce_device_initialized;
 
 /* Per cpu sysdev init.  All of the cpus still share the same ctl bank */
 static __cpuinit int mce_create_device(unsigned int cpu)
@@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 		if (err)
 			goto error2;
 	}
-	cpu_set(cpu, mce_device_initialized);
+	cpumask_set_cpu(cpu, mce_device_initialized);
 
 	return 0;
 error2:
@@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 {
 	int i;
 
-	if (!cpu_isset(cpu, mce_device_initialized))
+	if (!cpumask_test_cpu(cpu, mce_device_initialized))
 		return;
 
 	for (i = 0; mce_attributes[i]; i++)
@@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 		sysdev_remove_file(&per_cpu(device_mce, cpu),
 			&bank_attrs[i]);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
-	cpu_clear(cpu, mce_device_initialized);
+	cpumask_clear_cpu(cpu, mce_device_initialized);
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
@@ -1162,6 +1162,8 @@ static __init int mce_init_device(void)
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
+	alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+
 	err = mce_init_banks();
 	if (err)
 		return err;
@@ -477,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(cpu_core_mask(cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -497,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+		cpumask_copy(b->cpus, cpu_core_mask(cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -521,7 +521,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	cpumask_setall(b->cpus);
 #else
-	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+	cpumask_copy(b->cpus, cpu_core_mask(cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
 	for_each_online_cpu (cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 			continue;
 		/* Recheck banks in case CPUs don't all have the same */
 		if (cmci_supported(&banks))
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
-			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+			   cpumask_weight(cpu_sibling_mask(cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_online_map);
+		*pos = cpumask_first(cpu_online_mask);
 	else
-		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
 	if ((*pos) < nr_cpu_ids)
 		return &cpu_data(*pos);
 	return NULL;
@@ -324,7 +324,7 @@ void stop_this_cpu(void *dummy)
 	/*
	 * Remove this CPU:
	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
 	for (;;) {
@@ -474,12 +474,12 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static cpumask_t c1e_mask = CPU_MASK_NONE;
+static cpumask_var_t c1e_mask;
 static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, c1e_mask);
+	cpumask_clear_cpu(cpu, c1e_mask);
 }
 
 /*
@@ -508,8 +508,8 @@ static void c1e_idle(void)
 	if (c1e_detected) {
 		int cpu = smp_processor_id();
 
-		if (!cpu_isset(cpu, c1e_mask)) {
-			cpu_set(cpu, c1e_mask);
+		if (!cpumask_test_cpu(cpu, c1e_mask)) {
+			cpumask_set_cpu(cpu, c1e_mask);
 			/*
			 * Force broadcast so ACPI can not interfere. Needs
			 * to run with interrupts enabled as it uses
@@ -556,6 +556,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = mwait_idle;
 	} else if (check_c1e_idle(c)) {
 		printk(KERN_INFO "using C1E aware idle routine\n");
+		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+		cpumask_clear(c1e_mask);
 		pm_idle = c1e_idle;
 	} else
 		pm_idle = default_idle;
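stop_this_cpu() now goes through set_cpu_online() rather than poking cpu_online_map directly; writes to the global CPU maps are funneled through a handful of accessors so the maps themselves can be treated as read-only nearly everywhere. A rough paraphrase of the accessor's shape, based on kernel/cpu.c of this era rather than quoted from it:

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}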
@@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings);
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
@@ -115,11 +115,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 atomic_t init_deasserted;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
-				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
 int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);
@@ -128,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = node;
 }
 
@@ -139,7 +134,7 @@ static void unmap_cpu_to_node(int cpu)
 
 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NUMNODES; node++)
-		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -301,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	__flush_tlb_all();
 #endif
 
-	/* This must be done before setting cpu_online_map */
+	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
 
@@ -334,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
 	cpu_idle();
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	struct cpumask *llc = dst->llc_shared_map;
+	*dst = *src;
+	dst->llc_shared_map = llc;
+}
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+				    const struct cpuinfo_x86 *src)
+{
+	*dst = *src;
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 /*
  * The bootstrap kernel entry code has set these up.  Save them for
  * a given CPU
@@ -343,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
-	*c = boot_cpu_data;
+	copy_cpuinfo_x86(c, &boot_cpu_data);
 	c->cpu_index = id;
 	if (id != 0)
 		identify_secondary_cpu(c);
@@ -367,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			cpumask_set_cpu(cpu, cpu_sibling_mask(i));
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
 			cpumask_set_cpu(cpu, cpu_core_mask(i));
-			cpumask_set_cpu(i, &c->llc_shared_map);
-			cpumask_set_cpu(cpu, &o->llc_shared_map);
+			cpumask_set_cpu(i, c->llc_shared_map);
+			cpumask_set_cpu(cpu, o->llc_shared_map);
 			}
 		}
 	} else {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpumask_set_cpu(cpu, &c->llc_shared_map);
+	cpumask_set_cpu(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -386,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, &c->llc_shared_map);
-			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, c->llc_shared_map);
+			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -425,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	if (sched_mc_power_savings || sched_smt_power_savings)
 		return cpu_core_mask(cpu);
 	else
-		return &c->llc_shared_map;
-}
-
-cpumask_t cpu_coregroup_map(int cpu)
-{
-	return *cpu_coregroup_mask(cpu);
+		return c->llc_shared_map;
 }
 
 static void impress_friends(void)
@@ -897,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 */
 static __init void disable_smp(void)
 {
-	/* use the read/write pointers to the present and possible maps */
-	cpumask_copy(&cpu_present_map, cpumask_of(0));
-	cpumask_copy(&cpu_possible_map, cpumask_of(0));
+	init_cpu_present(cpumask_of(0));
+	init_cpu_possible(cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1031,6 +1037,8 @@ static void __init smp_cpu_index_default(void)
 */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	unsigned int i;
+
 	preempt_disable();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
@@ -1044,6 +1052,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	boot_cpu_logical_apicid = logical_smp_processor_id();
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
+	for_each_possible_cpu(i) {
+		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+		cpumask_clear(per_cpu(cpu_core_map, i));
+		cpumask_clear(per_cpu(cpu_sibling_map, i));
+		cpumask_clear(cpu_data(i).llc_shared_map);
+	}
 	set_cpu_sibling_map(0);
 
 	enable_IR_x2apic();
@@ -1132,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and dont expect to
 * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
 * In case when cpu_hotplug is not compiled, then we resort to current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
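copy_cpuinfo_x86() deserves a note: a plain *dst = *src struct assignment copies every member, including the llc_shared_map pointer, so each CPU would end up aliasing boot_cpu_data's mask and leaking its own allocation. Saving and restoring the destination's pointer around the bulk copy is the general pattern for any struct that owns a pointer member; a minimal generic illustration (the struct here is invented):

struct holder {
	int stats;
	struct cpumask *owned;	/* allocated per instance, must survive copies */
};

static void copy_holder(struct holder *dst, const struct holder *src)
{
	struct cpumask *saved = dst->owned;	/* keep dst's own allocation */

	*dst = *src;		/* bulk-copy all other members */
	dst->owned = saved;	/* undo the pointer clobber */
}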
@@ -275,6 +275,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	return NULL;
 }
 
+static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+
 /**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
@@ -304,8 +306,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 			  struct mm_struct *mm,
 			  unsigned long va, unsigned int cpu)
 {
-	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
-	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
+	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
 	int blade;
@@ -755,6 +756,10 @@ static int __init uv_bau_init(void)
 	if (!is_uv_system())
 		return 0;
 
+	for_each_possible_cpu(cur_cpu)
+		alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+				       GFP_KERNEL, cpu_to_node(cur_cpu));
+
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
@@ -14,7 +14,7 @@ obj-$(CONFIG_MMIOTRACE)	+= mmiotrace.o
 mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
-obj-$(CONFIG_NUMA)		+= numa_$(BITS).o
+obj-$(CONFIG_NUMA)		+= numa.o numa_$(BITS).o
 obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
 obj-$(CONFIG_ACPI_NUMA)	+= srat_$(BITS).o
 
arch/x86/mm/numa.c (new file, +67 lines)
@@ -0,0 +1,67 @@
+/* Common code for 32 and 64-bit NUMA */
+#include <linux/topology.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
+/*
+ * Which logical CPUs are on which nodes
+ */
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+void __init setup_node_to_cpumask_map(void)
+{
+	unsigned int node, num = 0;
+
+	/* setup nr_node_ids if not done yet */
+	if (nr_node_ids == MAX_NUMNODES) {
+		for_each_node_mask(node, node_possible_map)
+			num = node;
+		nr_node_ids = num + 1;
+	}
+
+	/* allocate the map */
+	for (node = 0; node < nr_node_ids; node++)
+		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+	/* cpumask_of_node() will now work */
+	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const struct cpumask *cpumask_of_node(int node)
+{
+	if (node >= nr_node_ids) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+			node, nr_node_ids);
+		dump_stack();
+		return cpu_none_mask;
+	}
+	if (node_to_cpumask_map[node] == NULL) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
+			node);
+		dump_stack();
+		return cpu_online_mask;
+	}
+	return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+#endif
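One detail of the new file: setup_node_to_cpumask_map() can run before the slab allocator exists, hence alloc_bootmem_cpumask_var() rather than alloc_cpumask_var(); like the runtime variant, it is effectively a no-op when cpumasks are not configured off-stack. Once the map is populated, a consumer typically just borrows the pointer, e.g. (illustrative line, not from the patch):

	set_cpus_allowed_ptr(task, cpumask_of_node(node));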
@@ -20,12 +20,6 @@
 #include <asm/acpi.h>
 #include <asm/k8.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
-#else
-# define DBG(x...)
-#endif
-
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -48,12 +42,6 @@ EXPORT_PER_CPU_SYMBOL(node_number);
 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 
-/*
- * Which logical CPUs are on which nodes
- */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
 /*
 * Given a shift value, try to populate memnodemap[]
 * Returns :
@@ -661,36 +649,6 @@ void __init init_cpu_to_node(void)
 #endif
 
 
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
- */
-void __init setup_node_to_cpumask_map(void)
-{
-	unsigned int node, num = 0;
-	cpumask_t *map;
-
-	/* setup nr_node_ids if not done yet */
-	if (nr_node_ids == MAX_NUMNODES) {
-		for_each_node_mask(node, node_possible_map)
-			num = node;
-		nr_node_ids = num + 1;
-	}
-
-	/* allocate the map */
-	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
-
-	pr_debug("Node to cpumask map at %p for %d nodes\n",
-		 map, nr_node_ids);
-
-	/* node_to_cpumask() will now work */
-	node_to_cpumask_map = map;
-}
-
 void __cpuinit numa_set_node(int cpu, int node)
 {
 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
@@ -723,12 +681,12 @@ void __cpuinit numa_clear_node(int cpu)
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -739,20 +697,20 @@ void __cpuinit numa_remove_cpu(int cpu)
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
 	int node = early_cpu_to_node(cpu);
-	cpumask_t *mask;
+	struct cpumask *mask;
 	char buf[64];
 
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map NULL\n");
+	mask = node_to_cpumask_map[node];
+	if (mask == NULL) {
+		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
 		dump_stack();
 		return;
 	}
 
-	mask = &node_to_cpumask_map[node];
 	if (enable)
-		cpu_set(cpu, *mask);
+		cpumask_set_cpu(cpu, mask);
 	else
-		cpu_clear(cpu, *mask);
+		cpumask_clear_cpu(cpu, mask);
 
 	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
@@ -799,59 +757,6 @@ int early_cpu_to_node(int cpu)
 	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
-			node);
-		dump_stack();
-		return (const cpumask_t *)&cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return &cpu_mask_none;
-	}
-	return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return cpu_mask_none;
-	}
-	return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
 /*
 * --------- end of debug versions of the numa functions ---------
 */
@@ -380,7 +380,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
 	int cpu = smp_processor_id();
-	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
+	return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
 #endif
 	return 0;
 }
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 		}
 	}
 }
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
-		cpu_clear(cpu, cpu_possible_map);
+		set_cpu_possible(cpu, false);
 	}
 
 	for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		if (IS_ERR(idle))
 			panic("failed fork for CPU %d", cpu);
 
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 	}
 }
 
@@ -102,7 +102,7 @@ static inline int blk_cpu_to_group(int cpu)
 	const struct cpumask *mask = cpu_coregroup_mask(cpu);
 	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return first_cpu(per_cpu(cpu_sibling_map, cpu));
+	return cpumask_first(topology_thread_cpumask(cpu));
 #else
 	return cpu;
 #endif
@@ -24,7 +24,7 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
 {
 	struct node *node_dev = to_node(dev);
-	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
+	const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
 	int len;
 
 	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -212,10 +212,9 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 	node = dev_to_node(&dev->dev);
 	if (node >= 0) {
 		int cpu;
-		node_to_cpumask_ptr(nodecpumask, node);
 
 		get_online_cpus();
-		cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
 		if (cpu < nr_cpu_ids)
 			error = work_on_cpu(cpu, local_pci_probe, &ddi);
 		else
@@ -10,7 +10,7 @@ static void enable_hotplug_cpu(int cpu)
 	if (!cpu_present(cpu))
 		arch_register_cpu(cpu);
 
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_present(cpu, true);
 }
 
 static void disable_hotplug_cpu(int cpu)
@@ -18,7 +18,7 @@ static void disable_hotplug_cpu(int cpu)
 	if (cpu_present(cpu))
 		arch_unregister_cpu(cpu);
 
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 }
 
 static void vcpu_hotplug(unsigned int cpu)
@@ -38,11 +38,7 @@
 #endif
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node)				\
-	({						\
-		node_to_cpumask_ptr(__tmp__, node);	\
-		cpus_weight(*__tmp__);			\
-	})
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
 #endif
 
 #define for_each_node_with_cpus(node)			\
@@ -7249,7 +7249,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7278,7 +7278,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -7621,7 +7621,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7632,7 +7632,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
 
@@ -2134,7 +2134,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2155,8 +2155,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
+	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
+	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
@@ -1963,7 +1963,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
@@ -2198,7 +2198,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
 
 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
@@ -317,8 +317,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	}
 	case SVC_POOL_PERNODE:
 	{
-		node_to_cpumask_ptr(nodecpumask, node);
-		set_cpus_allowed_ptr(task, nodecpumask);
+		set_cpus_allowed_ptr(task, cpumask_of_node(node));
 		break;
 	}
 	}