/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/reboot.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/nmi.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
static u64 gru_dist_lmask, gru_dist_umask;
static union uvh_apicid uvh_apicid;

/* info derived from CPUID */
static struct {
	unsigned int apicid_shift;
	unsigned int apicid_mask;
	unsigned int socketid_shift;	/* aka pnode_shift for UV1/2/3 */
	unsigned int pnode_mask;
	unsigned int gpa_shift;
} uv_cpuid;

int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);

static struct apic apic_x2apic_uv_x;
static struct uv_hub_info_s uv_hub_info_node0;

/* Set this to use hardware error handler instead of kernel panic */
static int disable_uv_undefined_panic = 1;
unsigned long uv_undefined(char *str)
{
	if (likely(!disable_uv_undefined_panic))
		panic("UV: error: undefined MMR: %s\n", str);
	else
		pr_crit("UV: error: undefined MMR: %s\n", str);
	return ~0ul;	/* cause a machine fault */
}
EXPORT_SYMBOL(uv_undefined);

static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}

static inline bool is_GRU_range(u64 start, u64 end)
{
	if (gru_dist_base) {
		u64 su = start & gru_dist_umask; /* upper (incl pnode) bits */
		u64 sl = start & gru_dist_lmask; /* base offset bits */
		u64 eu = end & gru_dist_umask;
		u64 el = end & gru_dist_lmask;

		/* Must reside completely within a single GRU range */
		return (sl == gru_dist_base && el == gru_dist_base &&
			su >= gru_first_node_paddr &&
			su <= gru_last_node_paddr &&
			eu == su);
	} else {
		return start >= gru_start_paddr && end <= gru_end_paddr;
	}
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}

static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	/* Currently, all blades have same revision number */
	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	switch (node_id.s.part_number) {
	case UV2_HUB_PART_NUMBER:
	case UV2_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
		break;
	case UV3_HUB_PART_NUMBER:
	case UV3_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
		break;
	case UV4_HUB_PART_NUMBER:
		uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
		break;
	}

	uv_hub_info->hub_revision = uv_min_hub_revision_id;
	uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
	pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
	uv_cpuid.gpa_shift = 46;	/* default unless changed */

	pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
		node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
		m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode);
	return pnode;
}

/* [copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
#define SMT_LEVEL	0	/* leaf 0xb SMT level */
#define INVALID_TYPE	0	/* leaf 0xb sub-leaf types */
#define SMT_TYPE	1
#define CORE_TYPE	2
#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)

static void set_x2apic_bits(void)
{
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int sid_shift;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	if (eax < 0xb) {
		pr_info("UV: CPU does not have CPUID.11\n");
		return;
	}
	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) {
		pr_info("UV: CPUID.11 not implemented\n");
		return;
	}
	sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}
		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
	uv_cpuid.apicid_shift = 0;
	uv_cpuid.apicid_mask = (~(-1 << sid_shift));
	uv_cpuid.socketid_shift = sid_shift;
}

static void __init early_get_apic_socketid_shift(void)
{
	if (is_uv2_hub() || is_uv3_hub())
		uvh_apicid.v = uv_early_read_mmr(UVH_APICID);

	set_x2apic_bits();

	pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n",
		uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
	pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n",
		uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
}

/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB. This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
	union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

	if (is_uv1_hub()) {
		apicid_mask.v =
			uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
		uv_apicid_hibits =
			apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
	}
}

static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid;
	int uv_apic;

	if (strncmp(oem_id, "SGI", 3) != 0)
		return 0;

	/* Setup early hub type field in uv_hub_info for Node 0 */
	uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;

	/*
	 * Determine UV arch type.
	 *   SGI: UV100/1000
	 *   SGI2: UV2000/3000
	 *   SGI3: UV300 (truncated to 4 chars because of different varieties)
	 *   SGI4: UV400 (truncated to 4 chars because of different varieties)
	 */
	uv_hub_info->hub_revision =
		!strncmp(oem_id, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
		!strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
		!strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
		!strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;

	if (uv_hub_info->hub_revision == 0)
		goto badbios;

	pnodeid = early_get_pnodeid();
	early_get_apic_socketid_shift();
	x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
	x86_platform.nmi_init = uv_nmi_init;

	if (!strcmp(oem_table_id, "UVX")) {		/* most common */
		uv_system_type = UV_X2APIC;
		uv_apic = 0;

	} else if (!strcmp(oem_table_id, "UVH")) {	/* only UV1 systems */
		uv_system_type = UV_NON_UNIQUE_APIC;
		__this_cpu_write(x2apic_extra_bits,
			pnodeid << uvh_apicid.s.pnode_shift);
		uv_set_apicid_hibit();
		uv_apic = 1;

	} else if (!strcmp(oem_table_id, "UVL")) {	/* only used for */
		uv_system_type = UV_LEGACY_APIC;	/* very small systems */
		uv_apic = 0;

	} else {
		goto badbios;
	}

	pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n",
		oem_id, oem_table_id, uv_system_type,
		uv_min_hub_revision_id, uv_apic);

	return uv_apic;

badbios:
	pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
	pr_err("Current BIOS not supported, update kernel and/or BIOS\n");
	BUG();
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

void **__uv_hub_info_list;
EXPORT_SYMBOL_GPL(__uv_hub_info_list);

DEFINE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_info);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* the following values are used for the per node hub info struct */
static __initdata unsigned short *_node_to_pnode;
static __initdata unsigned short _min_socket, _max_socket;
static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len;
static __initdata struct uv_gam_range_entry *uv_gre_table;
static __initdata struct uv_gam_parameters *uv_gp_table;
static __initdata unsigned short *_socket_to_node;
static __initdata unsigned short *_socket_to_pnode;
static __initdata unsigned short *_pnode_to_socket;
static __initdata struct uv_gam_range_s *_gr_table;
#define	SOCK_EMPTY	((unsigned short)~0)

extern int uv_hub_info_version(void)
{
	return UV_HUB_INFO_VERSION;
}
EXPORT_SYMBOL(uv_hub_info_version);

/* Build GAM range lookup table */
static __init void build_uv_gr_table(void)
{
	struct uv_gam_range_entry *gre = uv_gre_table;
	struct uv_gam_range_s *grt;
	unsigned long last_limit = 0, ram_limit = 0;
	int bytes, i, sid, lsid = -1;

	if (!gre)
		return;

	bytes = _gr_table_len * sizeof(struct uv_gam_range_s);
	grt = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!grt);
	_gr_table = grt;

	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
		if (gre->type == UV_GAM_RANGE_TYPE_HOLE) {
			if (!ram_limit) {   /* mark hole between ram/non-ram */
				ram_limit = last_limit;
				last_limit = gre->limit;
				lsid++;
				continue;
			}
			last_limit = gre->limit;
			pr_info("UV: extra hole in GAM RE table @%d\n",
				(int)(gre - uv_gre_table));
			continue;
		}
		if (_max_socket < gre->sockid) {
			pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n",
				gre->sockid, _max_socket,
				(int)(gre - uv_gre_table));
			continue;
		}
		sid = gre->sockid - _min_socket;
		if (lsid < sid) {		/* new range */
			grt = &_gr_table[sid];
			grt->base = lsid;
			grt->nasid = gre->nasid;
			grt->limit = last_limit = gre->limit;
			lsid = sid;
			continue;
		}
		if (lsid == sid && !ram_limit) {	/* update range */
			if (grt->limit == last_limit) {	/* .. if contiguous */
				grt->limit = last_limit = gre->limit;
				continue;
			}
		}
		if (!ram_limit) {		/* non-contiguous ram range */
			grt++;
			grt->base = sid - 1;
			grt->nasid = gre->nasid;
			grt->limit = last_limit = gre->limit;
			continue;
		}
		grt++;				/* non-contiguous/non-ram */
		grt->base = grt - _gr_table;	/* base is this entry */
		grt->nasid = gre->nasid;
		grt->limit = last_limit = gre->limit;
		lsid++;
	}

	/* shorten table if possible */
	grt++;
	i = grt - _gr_table;
	if (i < _gr_table_len) {
		void *ret;

		bytes = i * sizeof(struct uv_gam_range_s);
		ret = krealloc(_gr_table, bytes, GFP_KERNEL);
		if (ret) {
			_gr_table = ret;
			_gr_table_len = i;
		}
	}

	/* display resultant gam range table */
	for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) {
		int gb = grt->base;
		unsigned long start = gb < 0 ? 0 :
			(unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
		unsigned long end =
			(unsigned long)grt->limit << UV_GAM_RANGE_SHFT;

		pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n",
			i, grt->nasid, start, end, gb);
	}
}

static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	phys_apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_valid(int apicid)
{
	return 1;
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask,
			  unsigned int *apicid)
{
	int unsigned cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	if (likely(cpu < nr_cpu_ids)) {
		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
		return 0;
	}

	return -EINVAL;
}

static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}

static struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_valid			= uv_apic_id_valid,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= online_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= uv_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,

	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI			= uv_send_IPI_one,
	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

static void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

#define	UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH	3
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	unsigned long m_redirect;
	unsigned long m_overlay;
	int i;

	for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
		switch (i) {
		case 0:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
			break;
		case 1:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
			break;
		case 2:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
			break;
		}
		alias.v = uv_read_local_mmr(m_overlay);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(m_redirect);
			*base = (unsigned long)redirect.s.dest_base
							<< DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	if (!paddr) {
		pr_info("UV: Map %s_HI base address NULL\n", id);
		return;
	}
	pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_distributed(unsigned long c)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	u64 paddr;
	unsigned long bytes;
	int nid;

	gru.v = c;
	/* only base bits 42:28 relevant in dist mode */
	gru_dist_base = gru.v & 0x000007fff0000000UL;
	if (!gru_dist_base) {
		pr_info("UV: Map GRU_DIST base address NULL\n");
		return;
	}
	bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
	gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
	gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
	gru_dist_base &= gru_dist_lmask; /* Clear bits above M */
	for_each_online_node(nid) {
		paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
				gru_dist_base;
		init_extra_mapping_wb(paddr, bytes);
		gru_first_node_paddr = min(paddr, gru_first_node_paddr);
		gru_last_node_paddr = max(paddr, gru_last_node_paddr);
	}
	/* Save upper (63:M) bits of address only for is_GRU_range */
	gru_first_node_paddr &= gru_dist_umask;
	gru_last_node_paddr &= gru_dist_umask;
	pr_debug("UV: Map GRU_DIST base 0x%016llx  0x%016llx - 0x%016llx\n",
		gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
}

static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
	unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
	unsigned long base;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (!gru.s.enable) {
		pr_info("UV: GRU disabled\n");
		return;
	}

	if (is_uv3_hub() && gru.s3.mode) {
		map_gru_distributed(gru.v);
		return;
	}
	base = (gru.v & mask) >> shift;
	map_high("GRU", base, shift, shift, max_pnode, map_wb);
	gru_start_paddr = ((u64)base << shift);
	gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
	else
		pr_info("UV: MMR disabled\n");
}

/*
 * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
 * and REDIRECT MMR regs are exactly the same on UV3.
 */
struct mmioh_config {
	unsigned long overlay;
	unsigned long redirect;
	char *id;
};

static __initdata struct mmioh_config mmiohs[] = {
	{
		UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
		UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
		"MMIOH0"
	},
	{
		UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
		UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
		"MMIOH1"
	},
};

/* UV3 & UV4 have identical MMIOH overlay configs */
static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
{
	union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
	unsigned long mmr;
	unsigned long base;
	int i, n, shift, m_io, max_io;
	int nasid, lnasid, fi, li;
	char *id;

	id = mmiohs[index].id;
	overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
	pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
		id, overlay.v, overlay.s3.base, overlay.s3.m_io);
	if (!overlay.s3.enable) {
		pr_info("UV: %s disabled\n", id);
		return;
	}

	shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
	base = (unsigned long)overlay.s3.base;
	m_io = overlay.s3.m_io;
	mmr = mmiohs[index].redirect;
	n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
	min_pnode *= 2;				/* convert to NASID */
	max_pnode *= 2;
	max_io = lnasid = fi = li = -1;

	for (i = 0; i < n; i++) {
		union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;

		redirect.v = uv_read_local_mmr(mmr + i * 8);
		nasid = redirect.s3.nasid;
		if (nasid < min_pnode || max_pnode < nasid)
			nasid = -1;		/* invalid NASID */

		if (nasid == lnasid) {
			li = i;
			if (i != n-1)		/* last entry check */
				continue;
		}

		/* check if we have a cached (or last) redirect to print */
		if (lnasid != -1 || (i == n-1 && nasid != -1))  {
			unsigned long addr1, addr2;
			int f, l;

			if (lnasid == -1) {
				f = l = i;
				lnasid = nasid;
			} else {
				f = fi;
				l = li;
			}
			addr1 = (base << shift) +
				f * (unsigned long)(1 << m_io);
			addr2 = (base << shift) +
				(l + 1) * (unsigned long)(1 << m_io);
			pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
				id, fi, li, lnasid, addr1, addr2);
			if (max_io < l)
				max_io = l;
		}
		fi = li = i;
		lnasid = nasid;
	}

	pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
		id, base, shift, m_io, max_io);

	if (max_io >= 0)
		map_high(id, base, shift, m_io, max_io, map_uc);
}

static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	unsigned long mmr, base;
	int shift, enable, m_io, n_io;

	if (is_uv3_hub() || is_uv4_hub()) {
		/* Map both MMIOH Regions */
		map_mmioh_high_uv3(0, min_pnode, max_pnode);
		map_mmioh_high_uv3(1, min_pnode, max_pnode);
		return;
	}

	if (is_uv1_hub()) {
		mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s1.enable;
		base = mmioh.s1.base;
		m_io = mmioh.s1.m_io;
		n_io = mmioh.s1.n_io;
	} else if (is_uv2_hub()) {
		mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s2.enable;
		base = mmioh.s2.base;
		m_io = mmioh.s2.m_io;
		n_io = mmioh.s2.n_io;
	} else
		return;

	if (enable) {
		max_pnode &= (1 << n_io) - 1;
		pr_info(
		    "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
			base, shift, m_io, n_io, max_pnode);
		map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
	} else {
		pr_info("UV: MMIOH disabled\n");
	}
}

static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_scir_info->timer;
	unsigned char bits = uv_scir_info->state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_scir_info(cpu)->enabled) {
		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_pinned_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_scir_info(cpu)->enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_scir_info(cpu)->enabled) {
		uv_cpu_scir_info(cpu)->enabled = 0;
		del_timer(&uv_cpu_scir_info(cpu)->timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
			pdev->devfn, decode, command_bits, flags);

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}
2008-09-23 20:42:05 +07:00
|
|
|
/*
|
|
|
|
* Called on each cpu to initialize the per_cpu UV data area.
|
2009-02-26 20:10:10 +07:00
|
|
|
* FIXME: hotplug not supported yet
|
2008-09-23 20:42:05 +07:00
|
|
|
*/
|
2013-06-19 05:23:59 +07:00
|
|
|
void uv_cpu_init(void)
|
2008-09-23 20:42:05 +07:00
|
|
|
{
|
2016-02-24 06:34:30 +07:00
|
|
|
/* CPU 0 initialization will be done via uv_system_init. */
|
2016-04-30 04:54:16 +07:00
|
|
|
if (smp_processor_id() == 0)
|
2008-09-23 20:42:05 +07:00
|
|
|
return;
|
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
uv_hub_info->nr_online_cpus++;
|
2008-09-23 20:42:05 +07:00
|
|
|
|
|
|
|
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
|
|
|
|
set_x2apic_extra_bits(uv_hub_info->pnode);
|
|
|
|
}
|
|
|
|
|
2016-04-30 04:54:07 +07:00
|
|
|
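/* M/N address bit configuration read from the hub MMRs */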
struct mn {
|
|
|
|
unsigned char m_val;
|
|
|
|
unsigned char n_val;
|
|
|
|
unsigned char m_shift;
|
|
|
|
unsigned char n_lshift;
|
|
|
|
};
|
|
|
|
|
|
|
|
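/* Read the M/N config from the hub; MMR field layout differs per UV hub revision */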
static void get_mn(struct mn *mnp)
|
2008-03-29 02:12:16 +07:00
|
|
|
{
|
2016-04-30 04:54:07 +07:00
|
|
|
union uvh_rh_gam_config_mmr_u m_n_config;
|
|
|
|
union uv3h_gr0_gam_gr_config_u m_gr_config;
|
|
|
|
|
|
|
|
m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
|
|
|
|
mnp->n_val = m_n_config.s.n_skt;
|
|
|
|
if (is_uv4_hub()) {
|
|
|
|
mnp->m_val = 0;
|
|
|
|
mnp->n_lshift = 0;
|
|
|
|
} else if (is_uv3_hub()) {
|
|
|
|
mnp->m_val = m_n_config.s3.m_skt;
|
|
|
|
m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
|
|
|
|
mnp->n_lshift = m_gr_config.s3.m_skt;
|
|
|
|
} else if (is_uv2_hub()) {
|
|
|
|
mnp->m_val = m_n_config.s2.m_skt;
|
|
|
|
mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
|
|
|
|
} else if (is_uv1_hub()) {
|
|
|
|
mnp->m_val = m_n_config.s1.m_skt;
|
|
|
|
mnp->n_lshift = mnp->m_val;
|
|
|
|
}
|
|
|
|
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
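/* Fill in the boot-time hub_info template from hub MMRs and the decoded UVsystab/GAM data */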
void __init uv_init_hub_info(struct uv_hub_info_s *hub_info)
|
|
|
|
{
|
|
|
|
struct mn mn = {0}; /* avoid uninitialized warnings */
|
2008-05-28 21:51:18 +07:00
|
|
|
union uvh_node_id_u node_id;
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
get_mn(&mn);
|
|
|
|
hub_info->m_val = mn.m_val;
|
|
|
|
hub_info->n_val = mn.n_val;
|
|
|
|
hub_info->m_shift = mn.m_shift;
|
2016-04-30 04:54:17 +07:00
|
|
|
hub_info->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
hub_info->hub_revision = uv_hub_info->hub_revision;
|
2016-04-30 04:54:17 +07:00
|
|
|
hub_info->pnode_mask = uv_cpuid.pnode_mask;
|
2016-04-30 04:54:19 +07:00
|
|
|
hub_info->min_pnode = _min_pnode;
|
2016-04-30 04:54:20 +07:00
|
|
|
hub_info->min_socket = _min_socket;
|
|
|
|
hub_info->pnode_to_socket = _pnode_to_socket;
|
|
|
|
hub_info->socket_to_node = _socket_to_node;
|
|
|
|
hub_info->socket_to_pnode = _socket_to_pnode;
|
2016-04-30 04:54:21 +07:00
|
|
|
hub_info->gr_table_len = _gr_table_len;
|
|
|
|
hub_info->gr_table = _gr_table;
|
2016-04-30 04:54:17 +07:00
|
|
|
hub_info->gpa_mask = mn.m_val ?
|
|
|
|
(1UL << (mn.m_val + mn.n_val)) - 1 :
|
|
|
|
(1UL << uv_cpuid.gpa_shift) - 1;
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
node_id.v = uv_read_local_mmr(UVH_NODE_ID);
|
|
|
|
hub_info->gnode_extra =
|
|
|
|
(node_id.s.node_id & ~((1 << mn.n_val) - 1)) >> 1;
|
|
|
|
|
|
|
|
hub_info->gnode_upper =
|
|
|
|
((unsigned long)hub_info->gnode_extra << mn.m_val);
|
|
|
|
|
2016-04-30 04:54:19 +07:00
|
|
|
if (uv_gp_table) {
|
|
|
|
hub_info->global_mmr_base = uv_gp_table->mmr_base;
|
|
|
|
hub_info->global_mmr_shift = uv_gp_table->mmr_shift;
|
|
|
|
hub_info->global_gru_base = uv_gp_table->gru_base;
|
|
|
|
hub_info->global_gru_shift = uv_gp_table->gru_shift;
|
|
|
|
hub_info->gpa_shift = uv_gp_table->gpa_shift;
|
|
|
|
hub_info->gpa_mask = (1UL << hub_info->gpa_shift) - 1;
|
|
|
|
} else {
|
|
|
|
hub_info->global_mmr_base =
|
|
|
|
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
|
|
|
|
~UV_MMR_ENABLE;
|
|
|
|
hub_info->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
|
|
|
|
}
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
get_lowmem_redirect(
|
|
|
|
&hub_info->lowmem_remap_base, &hub_info->lowmem_remap_top);
|
|
|
|
|
2016-04-30 04:54:17 +07:00
|
|
|
hub_info->apic_pnode_shift = uv_cpuid.socketid_shift;
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
/* show system specific info */
|
|
|
|
pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n",
|
|
|
|
hub_info->n_val, hub_info->m_val,
|
|
|
|
hub_info->m_shift, hub_info->n_lshift);
|
|
|
|
|
2016-04-30 04:54:19 +07:00
|
|
|
pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n",
|
|
|
|
hub_info->gpa_mask, hub_info->gpa_shift,
|
|
|
|
hub_info->pnode_mask, hub_info->apic_pnode_shift);
|
|
|
|
|
|
|
|
pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n",
|
|
|
|
hub_info->global_mmr_base, hub_info->global_mmr_shift,
|
|
|
|
hub_info->global_gru_base, hub_info->global_gru_shift);
|
2016-04-30 04:54:07 +07:00
|
|
|
|
|
|
|
pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n",
|
|
|
|
hub_info->gnode_upper, hub_info->gnode_extra);
|
2016-04-30 04:54:19 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
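/* Record the UVsystab GAM parameters (MMR/GRU base addresses and shifts) */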
static void __init decode_gam_params(unsigned long ptr)
|
|
|
|
{
|
|
|
|
uv_gp_table = (struct uv_gam_parameters *)ptr;
|
|
|
|
|
|
|
|
pr_info("UV: GAM Params...\n");
|
|
|
|
pr_info("UV: mmr_base/shift:0x%llx/%d gru_base/shift:0x%llx/%d gpa_shift:%d\n",
|
|
|
|
uv_gp_table->mmr_base, uv_gp_table->mmr_shift,
|
|
|
|
uv_gp_table->gru_base, uv_gp_table->gru_shift,
|
|
|
|
uv_gp_table->gpa_shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
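/* Walk the GAM address range table: print each range and record socket/pnode min/max */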
static void __init decode_gam_rng_tbl(unsigned long ptr)
|
|
|
|
{
|
|
|
|
struct uv_gam_range_entry *gre = (struct uv_gam_range_entry *)ptr;
|
|
|
|
unsigned long lgre = 0;
|
|
|
|
int index = 0;
|
|
|
|
int sock_min = 999999, pnode_min = 99999;
|
|
|
|
int sock_max = -1, pnode_max = -1;
|
|
|
|
|
|
|
|
uv_gre_table = gre;
|
|
|
|
for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
|
|
|
|
if (!index) {
|
|
|
|
pr_info("UV: GAM Range Table...\n");
|
|
|
|
pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s %3s\n",
|
|
|
|
"Range", "", "Size", "Type", "NASID",
|
|
|
|
"SID", "PN", "PXM");
|
|
|
|
}
|
|
|
|
pr_info(
|
|
|
|
"UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x %3d\n",
|
|
|
|
index++,
|
|
|
|
(unsigned long)lgre << UV_GAM_RANGE_SHFT,
|
|
|
|
(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
|
|
|
|
((unsigned long)(gre->limit - lgre)) >>
|
|
|
|
(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
|
|
|
|
gre->type, gre->nasid, gre->sockid,
|
|
|
|
gre->pnode, gre->pxm);
|
|
|
|
|
|
|
|
lgre = gre->limit;
|
|
|
|
if (sock_min > gre->sockid)
|
|
|
|
sock_min = gre->sockid;
|
|
|
|
if (sock_max < gre->sockid)
|
|
|
|
sock_max = gre->sockid;
|
|
|
|
if (pnode_min > gre->pnode)
|
|
|
|
pnode_min = gre->pnode;
|
|
|
|
if (pnode_max < gre->pnode)
|
|
|
|
pnode_max = gre->pnode;
|
|
|
|
}
|
|
|
|
_min_socket = sock_min;
|
|
|
|
_max_socket = sock_max;
|
|
|
|
_min_pnode = pnode_min;
|
|
|
|
_max_pnode = pnode_max;
|
|
|
|
_gr_table_len = index;
|
|
|
|
pr_info(
|
|
|
|
"UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n",
|
|
|
|
index, _min_socket, _max_socket, _min_pnode, _max_pnode);
|
|
|
|
}
|
|
|
|
|
|
|
|
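/* Decode the BIOS UVsystab, dispatching GAM parameter and range table entries */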
static void __init decode_uv_systab(void)
|
|
|
|
{
|
|
|
|
struct uv_systab *st;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
st = uv_systab;
|
|
|
|
if ((!st || st->revision < UV_SYSTAB_VERSION_UV4) && !is_uv4_hub())
|
|
|
|
return;
|
|
|
|
if (st->revision != UV_SYSTAB_VERSION_UV4_LATEST) {
|
|
|
|
pr_crit(
|
|
|
|
"UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n",
|
|
|
|
st->revision, UV_SYSTAB_VERSION_UV4_LATEST);
|
|
|
|
BUG();
|
|
|
|
}
|
2016-04-30 04:54:07 +07:00
|
|
|
|
2016-04-30 04:54:19 +07:00
|
|
|
for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
|
|
|
|
unsigned long ptr = st->entry[i].offset;
|
2016-04-30 04:54:07 +07:00
|
|
|
|
2016-04-30 04:54:19 +07:00
|
|
|
if (!ptr)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ptr = ptr + (unsigned long)st;
|
|
|
|
|
|
|
|
switch (st->entry[i].type) {
|
|
|
|
case UV_SYSTAB_TYPE_GAM_PARAMS:
|
|
|
|
decode_gam_params(ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case UV_SYSTAB_TYPE_GAM_RNG_TBL:
|
|
|
|
decode_gam_rng_tbl(ptr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-04-30 04:54:07 +07:00
|
|
|
}
|
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
/*
|
|
|
|
* Setup physical blade translations from UVH_NODE_PRESENT_TABLE
|
|
|
|
* .. NB: UVH_NODE_PRESENT_TABLE is going away,
|
|
|
|
* .. being replaced by GAM Range Table
|
|
|
|
*/
|
|
|
|
static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info)
|
|
|
|
{
|
2016-04-30 04:54:23 +07:00
|
|
|
int i, uv_pb = 0;
|
2016-04-30 04:54:16 +07:00
|
|
|
|
|
|
|
pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
|
|
|
|
for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
|
|
|
|
unsigned long np;
|
|
|
|
|
|
|
|
np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
|
|
|
|
if (np)
|
|
|
|
pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
|
|
|
|
|
|
|
|
uv_pb += hweight64(np);
|
|
|
|
}
|
|
|
|
if (uv_possible_blades != uv_pb)
|
|
|
|
uv_possible_blades = uv_pb;
|
|
|
|
}
|
|
|
|
|
2016-04-30 04:54:20 +07:00
|
|
|
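/* Build socket <-> node/pnode conversion tables from the GAM range table */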
static void __init build_socket_tables(void)
|
|
|
|
{
|
|
|
|
struct uv_gam_range_entry *gre = uv_gre_table;
|
|
|
|
int num, nump;
|
|
|
|
int cpu, i, lnid;
|
|
|
|
int minsock = _min_socket;
|
|
|
|
int maxsock = _max_socket;
|
|
|
|
int minpnode = _min_pnode;
|
|
|
|
int maxpnode = _max_pnode;
|
|
|
|
size_t bytes;
|
|
|
|
|
|
|
|
if (!gre) {
|
|
|
|
if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
|
|
|
|
pr_info("UV: No UVsystab socket table, ignoring\n");
|
|
|
|
return; /* not required */
|
|
|
|
}
|
|
|
|
pr_crit(
|
|
|
|
"UV: Error: UVsystab address translations not available!\n");
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
/* build socket id -> node id, pnode */
|
|
|
|
num = maxsock - minsock + 1;
|
|
|
|
bytes = num * sizeof(_socket_to_node[0]);
|
|
|
|
_socket_to_node = kmalloc(bytes, GFP_KERNEL);
|
|
|
|
_socket_to_pnode = kmalloc(bytes, GFP_KERNEL);
|
|
|
|
|
|
|
|
nump = maxpnode - minpnode + 1;
|
|
|
|
bytes = nump * sizeof(_pnode_to_socket[0]);
|
|
|
|
_pnode_to_socket = kmalloc(bytes, GFP_KERNEL);
|
|
|
|
BUG_ON(!_socket_to_node || !_socket_to_pnode || !_pnode_to_socket);
|
|
|
|
|
|
|
|
for (i = 0; i < num; i++)
|
|
|
|
_socket_to_node[i] = _socket_to_pnode[i] = SOCK_EMPTY;
|
|
|
|
|
|
|
|
for (i = 0; i < nump; i++)
|
|
|
|
_pnode_to_socket[i] = SOCK_EMPTY;
|
|
|
|
|
|
|
|
/* fill in pnode/node/addr conversion list values */
|
|
|
|
pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n");
|
|
|
|
for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
|
|
|
|
if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
|
|
|
|
continue;
|
|
|
|
i = gre->sockid - minsock;
|
|
|
|
if (_socket_to_pnode[i] != SOCK_EMPTY)
|
|
|
|
continue; /* duplicate */
|
|
|
|
_socket_to_pnode[i] = gre->pnode;
|
|
|
|
_socket_to_node[i] = gre->pxm;
|
|
|
|
|
|
|
|
i = gre->pnode - minpnode;
|
|
|
|
_pnode_to_socket[i] = gre->sockid;
|
|
|
|
|
|
|
|
pr_info(
|
|
|
|
"UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n",
|
|
|
|
gre->sockid, gre->type, gre->nasid,
|
|
|
|
_socket_to_pnode[gre->sockid - minsock],
|
|
|
|
_socket_to_node[gre->sockid - minsock],
|
|
|
|
_pnode_to_socket[gre->pnode - minpnode]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check socket -> node values */
|
|
|
|
lnid = -1;
|
|
|
|
for_each_present_cpu(cpu) {
|
|
|
|
int nid = cpu_to_node(cpu);
|
|
|
|
int apicid, sockid;
|
|
|
|
|
|
|
|
if (lnid == nid)
|
|
|
|
continue;
|
|
|
|
lnid = nid;
|
|
|
|
apicid = per_cpu(x86_cpu_to_apicid, cpu);
|
|
|
|
sockid = apicid >> uv_cpuid.socketid_shift;
|
|
|
|
i = sockid - minsock;
|
|
|
|
|
|
|
|
if (nid != _socket_to_node[i]) {
|
|
|
|
pr_warn(
|
|
|
|
"UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
|
|
|
|
i, gre->type, sockid, _socket_to_node[i], nid);
|
|
|
|
_socket_to_node[i] = nid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Setup physical blade to pnode translation from GAM Range Table */
|
|
|
|
bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]);
|
|
|
|
_node_to_pnode = kmalloc(bytes, GFP_KERNEL);
|
|
|
|
BUG_ON(!_node_to_pnode);
|
|
|
|
|
|
|
|
for (lnid = 0; lnid < num_possible_nodes(); lnid++) {
|
|
|
|
unsigned short sockid;
|
|
|
|
|
|
|
|
for (sockid = minsock; sockid <= maxsock; sockid++) {
|
|
|
|
if (lnid == _socket_to_node[sockid - minsock]) {
|
|
|
|
_node_to_pnode[lnid] =
|
|
|
|
_socket_to_pnode[sockid - minsock];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (sockid > maxsock) {
|
|
|
|
pr_err("UV: socket for node %d not found!\n", lnid);
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If socket id == pnode or socket id == node for all nodes,
|
|
|
|
* the system runs faster by removing the corresponding conversion table.
|
|
|
|
*/
|
|
|
|
pr_info("UV: Checking socket->node/pnode for identity maps\n");
|
|
|
|
if (minsock == 0) {
|
|
|
|
for (i = 0; i < num; i++)
|
|
|
|
if (_socket_to_node[i] == SOCK_EMPTY ||
|
|
|
|
i != _socket_to_node[i])
|
|
|
|
break;
|
|
|
|
if (i >= num) {
|
|
|
|
kfree(_socket_to_node);
|
|
|
|
_socket_to_node = NULL;
|
|
|
|
pr_info("UV: 1:1 socket_to_node table removed\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (minsock == minpnode) {
|
|
|
|
for (i = 0; i < num; i++)
|
|
|
|
if (_socket_to_pnode[i] != SOCK_EMPTY &&
|
|
|
|
_socket_to_pnode[i] != i + minpnode)
|
|
|
|
break;
|
|
|
|
if (i >= num) {
|
|
|
|
kfree(_socket_to_pnode);
|
|
|
|
_socket_to_pnode = NULL;
|
|
|
|
pr_info("UV: 1:1 socket_to_pnode table removed\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-30 04:54:07 +07:00
|
|
|
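/* Top level UV initialization: decode BIOS tables, then set up per-node hub and per-cpu data */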
void __init uv_system_init(void)
|
|
|
|
{
|
|
|
|
struct uv_hub_info_s hub_info = {0};
|
2016-04-30 04:54:16 +07:00
|
|
|
int bytes, cpu, nodeid;
|
|
|
|
unsigned short min_pnode = 9999, max_pnode = 0;
|
2016-04-30 04:54:05 +07:00
|
|
|
char *hub = is_uv4_hub() ? "UV400" :
|
|
|
|
is_uv3_hub() ? "UV300" :
|
|
|
|
is_uv2_hub() ? "UV2000/3000" :
|
|
|
|
is_uv1_hub() ? "UV100/1000" : NULL;
|
2008-03-29 02:12:16 +07:00
|
|
|
|
2015-04-10 01:26:31 +07:00
|
|
|
if (!hub) {
|
|
|
|
pr_err("UV: Unknown/unsupported UV hub\n");
|
|
|
|
return;
|
|
|
|
}
|
2013-02-12 02:45:12 +07:00
|
|
|
pr_info("UV: Found %s hub\n", hub);
|
2015-12-12 03:59:45 +07:00
|
|
|
|
2016-05-05 14:56:10 +07:00
|
|
|
map_low_mmrs();
|
2009-11-25 23:20:19 +07:00
|
|
|
|
2016-04-30 04:54:19 +07:00
|
|
|
uv_bios_init(); /* get uv_systab for decoding */
|
|
|
|
decode_uv_systab();
|
2016-04-30 04:54:20 +07:00
|
|
|
build_socket_tables();
|
2016-04-30 04:54:21 +07:00
|
|
|
build_uv_gr_table();
|
2016-04-30 04:54:07 +07:00
|
|
|
uv_init_hub_info(&hub_info);
|
2016-04-30 04:54:16 +07:00
|
|
|
uv_possible_blades = num_possible_nodes();
|
|
|
|
if (!_node_to_pnode)
|
|
|
|
boot_init_possible_blades(&hub_info);
|
2012-01-07 02:19:00 +07:00
|
|
|
|
|
|
|
/* uv_num_possible_blades() is really the hub count */
|
2016-04-30 04:54:12 +07:00
|
|
|
pr_info("UV: Found %d hubs, %d nodes, %d cpus\n",
|
|
|
|
uv_num_possible_blades(),
|
|
|
|
num_possible_nodes(),
|
|
|
|
num_possible_cpus());
|
2008-03-29 02:12:16 +07:00
|
|
|
|
2009-12-17 23:53:25 +07:00
|
|
|
uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
|
|
|
|
&sn_region_size, &system_serial_number);
|
2016-04-30 04:54:07 +07:00
|
|
|
hub_info.coherency_domain_number = sn_coherency_id;
|
2008-07-10 03:27:19 +07:00
|
|
|
uv_rtc_init();
|
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
bytes = sizeof(void *) * uv_num_possible_blades();
|
|
|
|
__uv_hub_info_list = kzalloc(bytes, GFP_KERNEL);
|
|
|
|
BUG_ON(!__uv_hub_info_list);
|
2009-12-29 04:28:25 +07:00
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
bytes = sizeof(struct uv_hub_info_s);
|
|
|
|
for_each_node(nodeid) {
|
|
|
|
struct uv_hub_info_s *new_hub;
|
|
|
|
|
|
|
|
if (__uv_hub_info_list[nodeid]) {
|
|
|
|
pr_err("UV: Node %d UV HUB already initialized!?\n",
|
|
|
|
nodeid);
|
|
|
|
BUG();
|
2016-04-30 04:54:15 +07:00
|
|
|
}
|
2008-05-28 21:51:18 +07:00
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
/* Allocate new per hub info list */
|
|
|
|
new_hub = (nodeid == 0) ?
|
|
|
|
&uv_hub_info_node0 :
|
|
|
|
kzalloc_node(bytes, GFP_KERNEL, nodeid);
|
|
|
|
BUG_ON(!new_hub);
|
|
|
|
__uv_hub_info_list[nodeid] = new_hub;
|
|
|
|
new_hub = uv_hub_info_list(nodeid);
|
|
|
|
BUG_ON(!new_hub);
|
|
|
|
*new_hub = hub_info;
|
|
|
|
|
2016-04-30 04:54:23 +07:00
|
|
|
/* Use information from GAM table if available */
|
|
|
|
if (_node_to_pnode)
|
|
|
|
new_hub->pnode = _node_to_pnode[nodeid];
|
|
|
|
else /* Fill in during cpu loop */
|
|
|
|
new_hub->pnode = 0xffff;
|
2016-04-30 04:54:16 +07:00
|
|
|
new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
|
|
|
|
new_hub->memory_nid = -1;
|
|
|
|
new_hub->nr_possible_cpus = 0;
|
|
|
|
new_hub->nr_online_cpus = 0;
|
|
|
|
}
|
2009-07-27 21:35:07 +07:00
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
/* Initialize per cpu info */
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
|
2016-04-30 04:54:23 +07:00
|
|
|
int numa_node_id;
|
|
|
|
unsigned short pnode;
|
2016-04-30 04:54:12 +07:00
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
nodeid = cpu_to_node(cpu);
|
2016-04-30 04:54:23 +07:00
|
|
|
numa_node_id = numa_cpu_node(cpu);
|
|
|
|
pnode = uv_apicid_to_pnode(apicid);
|
|
|
|
|
2016-04-30 04:54:15 +07:00
|
|
|
uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
|
|
|
|
uv_cpu_info_per(cpu)->blade_cpu_id =
|
2016-04-30 04:54:16 +07:00
|
|
|
uv_cpu_hub_info(cpu)->nr_possible_cpus++;
|
|
|
|
if (uv_cpu_hub_info(cpu)->memory_nid == -1)
|
|
|
|
uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
|
2016-04-30 04:54:23 +07:00
|
|
|
if (nodeid != numa_node_id && /* init memoryless node */
|
|
|
|
uv_hub_info_list(numa_node_id)->pnode == 0xffff)
|
|
|
|
uv_hub_info_list(numa_node_id)->pnode = pnode;
|
|
|
|
else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
|
|
|
|
uv_cpu_hub_info(cpu)->pnode = pnode;
|
2016-04-30 04:54:16 +07:00
|
|
|
uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
|
2008-03-29 02:12:16 +07:00
|
|
|
}
|
2008-07-02 02:45:38 +07:00
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
for_each_node(nodeid) {
|
2016-04-30 04:54:23 +07:00
|
|
|
unsigned short pnode = uv_hub_info_list(nodeid)->pnode;
|
|
|
|
|
|
|
|
/* Add pnode info for pre-GAM list nodes without cpus */
|
|
|
|
if (pnode == 0xffff) {
|
|
|
|
unsigned long paddr;
|
|
|
|
|
|
|
|
paddr = node_start_pfn(nodeid) << PAGE_SHIFT;
|
|
|
|
pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
|
|
|
|
uv_hub_info_list(nodeid)->pnode = pnode;
|
|
|
|
}
|
|
|
|
min_pnode = min(pnode, min_pnode);
|
|
|
|
max_pnode = max(pnode, max_pnode);
|
2016-04-30 04:54:16 +07:00
|
|
|
pr_info("UV: UVHUB node:%2d pn:%02x nrcpus:%d\n",
|
|
|
|
nodeid,
|
|
|
|
uv_hub_info_list(nodeid)->pnode,
|
|
|
|
uv_hub_info_list(nodeid)->nr_possible_cpus);
|
2009-03-30 21:01:11 +07:00
|
|
|
}
|
|
|
|
|
2016-04-30 04:54:16 +07:00
|
|
|
pr_info("UV: min_pnode:%02x max_pnode:%02x\n", min_pnode, max_pnode);
|
2008-07-02 02:45:38 +07:00
|
|
|
map_gru_high(max_pnode);
|
2009-09-09 22:43:39 +07:00
|
|
|
map_mmr_high(max_pnode);
|
2013-02-12 02:45:12 +07:00
|
|
|
map_mmioh_high(min_pnode, max_pnode);
|
2008-03-29 02:12:16 +07:00
|
|
|
|
2013-09-24 04:25:01 +07:00
|
|
|
uv_nmi_setup();
|
2008-09-23 20:42:05 +07:00
|
|
|
uv_cpu_init();
|
2008-10-25 05:24:29 +07:00
|
|
|
uv_scir_register_cpu_notifier();
|
2008-11-11 05:16:31 +07:00
|
|
|
proc_mkdir("sgi_uv", NULL);
|
2010-02-03 05:38:14 +07:00
|
|
|
|
|
|
|
/* register Legacy VGA I/O redirection handler */
|
|
|
|
pci_register_set_vga_state(uv_set_vga_state);
|
2011-03-31 21:32:02 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
|
|
|
|
* EFI is not enabled in the kdump kernel.
|
|
|
|
*/
|
|
|
|
if (is_kdump_kernel())
|
|
|
|
reboot_type = BOOT_ACPI;
|
2008-03-29 02:12:16 +07:00
|
|
|
}
|
2011-05-21 07:51:17 +07:00
|
|
|
|
|
|
|
apic_driver(apic_x2apic_uv_x);
|