Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add support for the 'Dhyana' x86 CPUs by Hygon: these are licensed
     based on the AMD Zen architecture, and are built and sold in China,
     for domestic datacenter use. The code is pretty close to AMD
     support, mostly with a few quirks and enumeration differences. (Pu Wen)

   - Enable CPUID support on Cyrix 6x86/6x86L processors"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/cpupower: Add Hygon Dhyana support
  cpufreq: Add Hygon Dhyana support
  ACPI: Add Hygon Dhyana support
  x86/xen: Add Hygon Dhyana support to Xen
  x86/kvm: Add Hygon Dhyana support to KVM
  x86/mce: Add Hygon Dhyana support to the MCA infrastructure
  x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
  x86/apic: Add Hygon Dhyana support
  x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
  x86/amd_nb: Check vendor in AMD-only functions
  x86/alternative: Init ideal_nops for Hygon Dhyana
  x86/events: Add Hygon Dhyana support to PMU infrastructure
  x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
  x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
  x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
  x86/cpu: Create Hygon Dhyana architecture support file
  x86/CPU: Change query logic so CPUID is enabled before testing
  x86/CPU: Use correct macros for Cyrix calls
commit fec98069fb
@@ -6787,6 +6787,12 @@ S:	Maintained
 F:	mm/memory-failure.c
 F:	mm/hwpoison-inject.c
 
+HYGON PROCESSOR SUPPORT
+M:	Pu Wen <puwen@hygon.cn>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	arch/x86/kernel/cpu/hygon.c
+
 Hyper-V CORE AND DRIVERS
 M:	"K. Y. Srinivasan" <kys@microsoft.com>
 M:	Haiyang Zhang <haiyangz@microsoft.com>
@@ -426,6 +426,20 @@ config CPU_SUP_AMD
 
 	  If unsure, say N.
 
+config CPU_SUP_HYGON
+	default y
+	bool "Support Hygon processors" if PROCESSOR_SELECT
+	select CPU_SUP_AMD
+	help
+	  This enables detection, tunings and quirks for Hygon processors
+
+	  You need this enabled if you want your kernel to run on an
+	  Hygon CPU. Disabling this option on other types of CPUs
+	  makes the kernel a tiny bit smaller. Disabling it on an Hygon
+	  CPU might render the kernel unbootable.
+
+	  If unsure, say N.
+
 config CPU_SUP_CENTAUR
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
@@ -669,6 +669,10 @@ static int __init amd_core_pmu_init(void)
 		 * We fallback to using default amd_get_event_constraints.
 		 */
 		break;
+	case 0x18:
+		pr_cont("Fam18h ");
+		/* Using default amd_get_event_constraints. */
+		break;
 	default:
 		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
@@ -515,17 +515,19 @@ static int __init amd_uncore_init(void)
 {
 	int ret = -ENODEV;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		return -ENODEV;
 
-	if (boot_cpu_data.x86 == 0x17) {
+	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
 		/*
-		 * For F17h, the Northbridge counters are repurposed as Data
-		 * Fabric counters. Also, L3 counters are supported too. The PMUs
-		 * are exported based on family as either L2 or L3 and NB or DF.
+		 * For F17h or F18h, the Northbridge counters are
+		 * repurposed as Data Fabric counters. Also, L3
+		 * counters are supported too. The PMUs are exported
+		 * based on family as either L2 or L3 and NB or DF.
 		 */
 		num_counters_nb = NUM_COUNTERS_NB;
 		num_counters_llc = NUM_COUNTERS_L3;
@@ -557,7 +559,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_nb;
 
-		pr_info("AMD NB counters detected\n");
+		pr_info("%s NB counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+				"HYGON" : "AMD");
 		ret = 0;
 	}
 
@@ -571,7 +575,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_llc;
 
-		pr_info("AMD LLC counters detected\n");
+		pr_info("%s LLC counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+				"HYGON" : "AMD");
 		ret = 0;
 	}
 
@@ -1797,6 +1797,10 @@ static int __init init_hw_perf_events(void)
 	case X86_VENDOR_AMD:
 		err = amd_pmu_init();
 		break;
+	case X86_VENDOR_HYGON:
+		err = amd_pmu_init();
+		x86_pmu.name = "HYGON";
+		break;
 	default:
 		err = -ENOTSUPP;
 	}
@@ -103,6 +103,9 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 
 static inline bool amd_gart_present(void)
 {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return false;
+
 	/* GART present only on Fam15h, upto model 0fh */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
@@ -3,5 +3,6 @@
 #define _ASM_X86_CACHEINFO_H
 
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
 
 #endif /* _ASM_X86_CACHEINFO_H */
@@ -364,6 +364,10 @@ struct x86_emulate_ctxt {
 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
 
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e
+
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
 #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
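The three HygonGenuine constants added above are nothing more than the ASCII bytes of the CPUID vendor string packed little-endian into EBX, EDX and ECX, the register order CPUID leaf 0 uses. A standalone sketch that reproduces them (assumes a little-endian host; illustration only, not part of the commit):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const char *vendor = "HygonGenuine";
	uint32_t ebx, edx, ecx;

	/* CPUID leaf 0 returns bytes 0-3 in EBX, 4-7 in EDX, 8-11 in ECX */
	memcpy(&ebx, vendor + 0, 4);
	memcpy(&edx, vendor + 4, 4);
	memcpy(&ecx, vendor + 8, 4);

	/* prints ebx=0x6f677948 edx=0x6e65476e ecx=0x656e6975 */
	printf("ebx=%#x edx=%#x ecx=%#x\n", ebx, edx, ecx);
	return 0;
}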
@@ -217,6 +217,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
 #endif
 
+static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
 
@@ -155,7 +155,8 @@ enum cpuid_regs_idx {
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
-#define X86_VENDOR_NUM		9
+#define X86_VENDOR_HYGON	9
+#define X86_VENDOR_NUM		10
 
 #define X86_VENDOR_UNKNOWN	0xff
 
@@ -83,9 +83,10 @@ static inline void cpu_emergency_vmxoff(void)
  */
 static inline int cpu_has_svm(const char **msg)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
 		if (msg)
-			*msg = "not amd";
+			*msg = "not amd or hygon";
 		return 0;
 	}
 
@@ -222,6 +222,10 @@ void __init arch_init_ideal_nops(void)
 		}
 		break;
 
+	case X86_VENDOR_HYGON:
+		ideal_nops = p6_nops;
+		return;
+
 	case X86_VENDOR_AMD:
 		if (boot_cpu_data.x86 > 0xf) {
 			ideal_nops = p6_nops;
@@ -61,6 +61,21 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{}
 };
 
+static const struct pci_device_id hygon_root_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
+	{}
+};
+
+const struct pci_device_id hygon_nb_misc_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+	{}
+};
+
+static const struct pci_device_id hygon_nb_link_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+	{}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 	{ 0x00, 0x18, 0x20 },
 	{ 0xff, 0x00, 0x20 },
@@ -194,15 +209,24 @@ EXPORT_SYMBOL_GPL(amd_df_indirect_read);
 
 int amd_cache_northbridges(void)
 {
-	u16 i = 0;
-	struct amd_northbridge *nb;
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
+	const struct pci_device_id *link_ids = amd_nb_link_ids;
+	const struct pci_device_id *root_ids = amd_root_ids;
 	struct pci_dev *root, *misc, *link;
+	struct amd_northbridge *nb;
+	u16 i = 0;
 
 	if (amd_northbridges.num)
 		return 0;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		root_ids = hygon_root_ids;
+		misc_ids = hygon_nb_misc_ids;
+		link_ids = hygon_nb_link_ids;
+	}
+
 	misc = NULL;
-	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
 		i++;
 
 	if (!i)
@@ -218,11 +242,11 @@ int amd_cache_northbridges(void)
 	link = misc = root = NULL;
 	for (i = 0; i != amd_northbridges.num; i++) {
 		node_to_amd_nb(i)->root = root =
-			next_northbridge(root, amd_root_ids);
+			next_northbridge(root, root_ids);
 		node_to_amd_nb(i)->misc = misc =
-			next_northbridge(misc, amd_nb_misc_ids);
+			next_northbridge(misc, misc_ids);
 		node_to_amd_nb(i)->link = link =
-			next_northbridge(link, amd_nb_link_ids);
+			next_northbridge(link, link_ids);
 	}
 
 	if (amd_gart_present())
@@ -261,11 +285,19 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
  */
 bool __init early_is_amd_nb(u32 device)
 {
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
 	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		misc_ids = hygon_nb_misc_ids;
+
 	device >>= 16;
-	for (id = amd_nb_misc_ids; id->vendor; id++)
+	for (id = misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return true;
 	return false;
@@ -277,7 +309,8 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 	u64 base, msr;
 	unsigned int segn_busn_bits;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return NULL;
 
 	/* assume all cpus from fam10h have mmconfig */
@@ -224,6 +224,11 @@ static int modern_apic(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 	    boot_cpu_data.x86 >= 0xf)
 		return 1;
+
+	/* Hygon systems use modern APIC */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return 1;
+
 	return lapic_get_version() >= 0x14;
 }
 
@@ -1912,6 +1917,8 @@ static int __init detect_init_APIC(void)
 		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
+	case X86_VENDOR_HYGON:
+		break;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
 		    (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
@@ -185,6 +185,7 @@ void __init default_setup_apic_routing(void)
 			break;
 		}
 		/* If P4 and above fall through */
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		def_to_bigsmp = 1;
 	}
@@ -30,6 +30,7 @@ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o intel_pconfig.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
+obj-$(CONFIG_CPU_SUP_HYGON)		+= hygon.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
@@ -312,6 +312,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	}
 
 	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
 	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
 		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
 		return SPECTRE_V2_CMD_AUTO;
@@ -371,7 +372,8 @@ static void __init spectre_v2_select_mitigation(void)
 		return;
 
 retpoline_auto:
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 	retpoline_amd:
 		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
 			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
@@ -602,6 +602,10 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 		else
 			amd_cpuid4(index, &eax, &ebx, &ecx);
 		amd_init_l3_cache(this_leaf, index);
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		cpuid_count(0x8000001d, index, &eax.full,
+			    &ebx.full, &ecx.full, &edx);
+		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
@@ -625,7 +629,8 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 	union _cpuid4_leaf_eax	cache_eax;
 	int			i = -1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD)
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON)
 		op = 0x8000001d;
 	else
 		op = 4;
@@ -678,6 +683,22 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
 	}
 }
 
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+{
+	/*
+	 * We may have multiple LLCs if L3 caches exist, so check if we
+	 * have an L3 cache by looking at the L3 cache CPUID leaf.
+	 */
+	if (!cpuid_edx(0x80000006))
+		return;
+
+	/*
+	 * LLC is at the core complex level.
+	 * Core complex ID is ApicId[3] for these processors.
+	 */
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+}
+
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
@@ -691,6 +712,11 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
+void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
+{
+	num_cache_leaves = find_num_cache_leaves(c);
+}
+
 void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
@@ -913,7 +939,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 	int index_msb, i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON) {
 		if (__cache_amd_cpumap_setup(cpu, index, base))
 			return;
 	}
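For reference, the "Core complex ID is ApicId[3]" comment in cacheinfo_hygon_init_llc_id() means the core complex number starts at bit 3 of the APIC ID, so shifting right by three maps every eight APIC IDs onto one last-level cache. A small illustration with hypothetical APIC IDs (not kernel code):

#include <stdio.h>

int main(void)
{
	/* eight CPUs share a core complex, hence one LLC per (apicid >> 3) */
	for (unsigned int apicid = 0; apicid < 16; apicid++)
		printf("apicid %2u -> llc_id %u\n", apicid, apicid >> 3);
	return 0;
}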
@@ -963,6 +963,7 @@ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
 
 static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
 	{ X86_VENDOR_AMD },
+	{ X86_VENDOR_HYGON },
 	{}
 };
 
@@ -1076,6 +1077,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 	c->extended_cpuid_level = 0;
 
+	if (!have_cpuid_p())
+		identify_cpu_without_cpuid(c);
+
 	/* cyrix could have cpuid enabled via c_identify()*/
 	if (have_cpuid_p()) {
 		cpu_detect(c);
@@ -1093,7 +1097,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		if (this_cpu->c_bsp_init)
 			this_cpu->c_bsp_init(c);
 	} else {
-		identify_cpu_without_cpuid(c);
 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
 	}
 
@@ -54,6 +54,7 @@ extern u32 get_scattered_cpuid_leaf(unsigned int level,
 				    enum cpuid_regs_idx reg);
 extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
 
 extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
 extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
@@ -437,7 +437,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
 		/* enable MAPEN  */
 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 		/* enable cpuid  */
-		setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
 		/* disable MAPEN */
 		setCx86(CX86_CCR3, ccr3);
 		local_irq_restore(flags);
arch/x86/kernel/cpu/hygon.c (new file, 408 lines)
@@ -0,0 +1,408 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
#ifdef CONFIG_X86_64
# include <asm/set_memory.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) Hygon multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id = ecx & 0xff;

		c->cpu_core_id = ebx & 0xff;

		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_hygon_init_llc_id(c, cpu, node_id);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	unsigned long long tseg;

	/*
	 * Split up direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init	= early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
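hygon_detect_cmp() above splits the initial APIC ID at x86_coreid_bits: the low bits index the core within the socket, the high bits give the socket. A worked example with hypothetical values (bits = 3 would come from CPUID 0x80000008 ECX[15:12]); illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int bits = 3;		/* hypothetical x86_coreid_bits */
	unsigned int apicid = 0x1a;	/* hypothetical initial APIC ID */

	unsigned int core_id = apicid & ((1u << bits) - 1);	/* 0x1a & 7 = 2 */
	unsigned int pkg_id  = apicid >> bits;			/* 0x1a >> 3 = 3 */

	printf("core %u in socket %u\n", core_id, pkg_id);
	return 0;
}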
@@ -336,7 +336,8 @@ int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
 
 void __init mcheck_vendor_init_severity(void)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		mce_severity = mce_severity_amd;
 }
 
@@ -270,7 +270,7 @@ static void print_mce(struct mce *m)
 {
 	__print_mce(m);
 
-	if (m->cpuvendor != X86_VENDOR_AMD)
+	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
@@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)
 
 bool mce_is_memory_error(struct mce *m)
 {
-	if (m->cpuvendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD ||
+	    m->cpuvendor == X86_VENDOR_HYGON) {
 		return amd_mce_is_memory_error(m);
 
 	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
 
+	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
+		return false;
+
 	if (m->status & MCI_STATUS_UC)
 		return false;
 
@@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
  */
 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
@@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_amd_feature_init(c);
 		break;
 		}
+
+	case X86_VENDOR_HYGON:
+		mce_hygon_feature_init(c);
+		break;
+
 	case X86_VENDOR_CENTAUR:
 		mce_centaur_feature_init(c);
 		break;
@@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)
 static void vendor_disable_error_reporting(void)
 {
 	/*
-	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
+	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
+	 * are socket-wide.
 	 * Disabling them for just a single offlined CPU is bad, since it will
 	 * inhibit reporting for all shared resources on the socket like the
 	 * last level cache (LLC), the integrated memory controller (iMC), etc.
	 */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return;
 
@@ -831,7 +831,8 @@ int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf)
 		return 0;
@@ -127,7 +127,7 @@ static void __init set_num_var_ranges(void)
 
 	if (use_intel())
 		rdmsr(MSR_MTRRcap, config, dummy);
-	else if (is_cpu(AMD))
+	else if (is_cpu(AMD) || is_cpu(HYGON))
 		config = 2;
 	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 		config = 8;
@@ -46,6 +46,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the performance counter register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTR)
 			return (msr - MSR_F15H_PERF_CTR) >> 1;
@@ -74,6 +75,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the event selection register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTL)
 			return (msr - MSR_F15H_PERF_CTL) >> 1;
@@ -676,6 +676,7 @@ static void __init smp_quirk_init_udelay(void)
 
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
 	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
 		return;
@@ -1592,7 +1593,8 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	int i;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
@@ -2711,7 +2711,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
 		return true;
 
-	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
+	/* Hygon ("HygonGenuine") */
+	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
+		return true;
+
+	/*
+	 * default: (not Intel, not AMD, not Hygon), apply Intel's
+	 * stricter rules...
+	 */
 	return false;
 }
 
@@ -93,7 +93,8 @@ static int __init early_root_info_init(void)
 		vendor = id & 0xffff;
 		device = (id>>16) & 0xffff;
 
-		if (vendor != PCI_VENDOR_ID_AMD)
+		if (vendor != PCI_VENDOR_ID_AMD &&
+		    vendor != PCI_VENDOR_ID_HYGON)
 			continue;
 
 		if (hb_probes[i].device == device) {
@@ -390,7 +391,8 @@ static int __init pci_io_ecs_init(void)
 
 static int __init amd_postcore_init(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 
 	early_root_info_init();
@@ -91,6 +91,12 @@ static void xen_pmu_arch_init(void)
 			k7_counters_mirrored = 0;
 			break;
 		}
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		amd_num_counters = F10H_NUM_COUNTERS;
+		amd_counters_base = MSR_K7_PERFCTR0;
+		amd_ctrls_base = MSR_K7_EVNTSEL0;
+		amd_msr_step = 1;
+		k7_counters_mirrored = 0;
 	} else {
 		uint32_t eax, ebx, ecx, edx;
 
@@ -286,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, val, 1))
 				*val = native_read_msr_safe(msr, err);
@@ -309,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
 	uint64_t val = ((uint64_t)high << 32) | low;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, &val, 0))
 				*err = native_write_msr_safe(msr, low, high);
@@ -380,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 
 unsigned long long xen_read_pmc(int counter)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return xen_amd_read_pmc(counter);
 	else
 		return xen_intel_read_pmc(counter);
@@ -70,6 +70,7 @@ static void power_saving_mwait_init(void)
 
 #if defined(CONFIG_X86)
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 		/*
@@ -205,6 +205,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 static void tsc_check_state(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 	case X86_VENDOR_CENTAUR:
@@ -61,6 +61,7 @@ enum {
 
 #define INTEL_MSR_RANGE		(0xffff)
 #define AMD_MSR_RANGE		(0x7)
+#define HYGON_MSR_RANGE		(0x7)
 
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
@@ -95,6 +96,7 @@ static bool boost_state(unsigned int cpu)
 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
@@ -113,6 +115,7 @@ static int boost_set_msr(bool enable)
 		msr_addr = MSR_IA32_MISC_ENABLE;
 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
 		break;
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		msr_addr = MSR_K7_HWCR;
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -225,6 +228,8 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		msr &= AMD_MSR_RANGE;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		msr &= HYGON_MSR_RANGE;
 	else
 		msr &= INTEL_MSR_RANGE;
 
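boost_state() decides whether Core Performance Boost is active by reading MSR_K7_HWCR (0xc0010015) and testing the CPB-disable bit (bit 25), now on both AMD and Hygon parts. The same bit test can be sketched from user space through the msr driver; needs root and /dev/cpu/0/msr, shown only as an illustration, not driver code:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_K7_HWCR		0xc0010015
#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* the msr driver maps the MSR number to the file offset */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_K7_HWCR) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("boost %s\n", (val & MSR_K7_HWCR_CPB_DIS) ? "disabled" : "enabled");
	close(fd);
	return 0;
}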
@@ -111,11 +111,16 @@ static int __init amd_freq_sensitivity_init(void)
 {
 	u64 val;
 	struct pci_dev *pcidev;
+	unsigned int pci_vendor;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		pci_vendor = PCI_VENDOR_ID_AMD;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		pci_vendor = PCI_VENDOR_ID_HYGON;
+	else
 		return -ENODEV;
 
-	pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
+	pcidev = pci_get_device(pci_vendor,
 				PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
 	if (!pcidev) {
@@ -2565,6 +2565,8 @@
 
 #define PCI_VENDOR_ID_AMAZON		0x1d0f
 
+#define PCI_VENDOR_ID_HYGON		0x1d94
+
 #define PCI_VENDOR_ID_TEKRAM		0x1de1
 #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
 
@@ -170,6 +170,7 @@ static int get_boost_mode(unsigned int cpu)
 	unsigned long pstates[MAX_HW_PSTATES] = {0,};
 
 	if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
+	    cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
 	    cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
 		return 0;
 
@@ -190,8 +191,9 @@ static int get_boost_mode(unsigned int cpu)
 	printf(_("    Supported: %s\n"), support ? _("yes") : _("no"));
 	printf(_("    Active: %s\n"), active ? _("yes") : _("no"));
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
-	    cpupower_cpu_info.family >= 0x10) {
+	if ((cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
+	     cpupower_cpu_info.family >= 0x10) ||
+	     cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
 				     pstates, &pstate_no);
 		if (ret)
@@ -45,7 +45,7 @@ static int get_did(int family, union msr_pstate pstate)
 
 	if (family == 0x12)
 		t = pstate.val & 0xf;
-	else if (family == 0x17)
+	else if (family == 0x17 || family == 0x18)
 		t = pstate.fam17h_bits.did;
 	else
 		t = pstate.bits.did;
@@ -59,7 +59,7 @@ static int get_cof(int family, union msr_pstate pstate)
 	int fid, did, cof;
 
 	did = get_did(family, pstate);
-	if (family == 0x17) {
+	if (family == 0x17 || family == 0x18) {
 		fid = pstate.fam17h_bits.fid;
 		cof = 200 * fid / did;
 	} else {
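On family 0x17, and now 0x18, the core operating frequency comes out of the P-state MSR as 200 MHz * FID / DID, which is what get_cof() computes. A worked example with made-up register fields; illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int fid = 0x70;	/* hypothetical frequency multiplier (112) */
	unsigned int did = 0x8;		/* hypothetical frequency divisor */

	printf("COF = %u MHz\n", 200 * fid / did);	/* 2800 MHz */
	return 0;
}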
@@ -8,7 +8,7 @@
 #include "helpers/helpers.h"
 
 static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
-	"Unknown", "GenuineIntel", "AuthenticAMD",
+	"Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
 };
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -109,6 +109,7 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	fclose(fp);
 	/* Get some useful CPU capabilities from cpuid */
 	if (cpu_info->vendor != X86_VENDOR_AMD &&
+	    cpu_info->vendor != X86_VENDOR_HYGON &&
 	    cpu_info->vendor != X86_VENDOR_INTEL)
 		return ret;
 
@@ -124,8 +125,9 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
 		cpu_info->caps |= CPUPOWER_CAP_APERF;
 
-	/* AMD Boost state enable/disable register */
-	if (cpu_info->vendor == X86_VENDOR_AMD) {
+	/* AMD or Hygon Boost state enable/disable register */
+	if (cpu_info->vendor == X86_VENDOR_AMD ||
+	    cpu_info->vendor == X86_VENDOR_HYGON) {
 		if (ext_cpuid_level >= 0x80000007 &&
 		    (cpuid_edx(0x80000007) & (1 << 9)))
 			cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
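The user-space detection that get_cpu_info() relies on boils down to two CPUID queries: the vendor string from leaf 0 (matched against cpu_vendor_table) and the CPB boost bit, EDX bit 9 of leaf 0x80000007. A self-contained sketch using GCC's <cpuid.h>; illustration only, not cpupower code:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char vendor[13] = { 0 };

	__get_cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* leaf 0 order: EBX, EDX, ECX */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);

	/* __get_cpuid() returns 0 if the extended leaf is unsupported */
	int has_cpb = __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx) &&
		      (edx & (1 << 9));

	printf("vendor %s, boost (CPB) %s\n", vendor,
	       has_cpb ? "supported" : "not reported");
	return 0;
}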
@@ -61,7 +61,7 @@ extern int be_verbose;
 
 /* cpuid and cpuinfo helpers  **************************/
 enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
-			  X86_VENDOR_AMD, X86_VENDOR_MAX};
+			  X86_VENDOR_AMD, X86_VENDOR_HYGON, X86_VENDOR_MAX};
 
 #define CPUPOWER_CAP_INV_TSC		0x00000001
 #define CPUPOWER_CAP_APERF		0x00000002
@@ -26,7 +26,7 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 	 * has Hardware determined variable increments instead.
 	 */
 
-	if (cpu_info.family == 0x17) {
+	if (cpu_info.family == 0x17 || cpu_info.family == 0x18) {
 		if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
 			if (!(val & CPUPOWER_AMD_CPBDIS))
 				*active = 1;
@@ -241,7 +241,8 @@ static int init_maxfreq_mode(void)
 	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
 		goto use_sysfs;
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
+	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
 		 * freq.
 		 * A test whether hwcr is accessable/available would be: