f9300eaaac Merge tag 'pm+acpi-3.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management updates from Rafael J Wysocki:

 - New power capping framework and the Intel Running Average Power
   Limit (RAPL) driver using it from Srinivas Pandruvada and Jacob Pan.
 - Addition of the in-kernel switching feature to the arm_big_little
   cpufreq driver from Viresh Kumar and Nicolas Pitre.
 - cpufreq support for iMac G5 from Aaro Koskinen.
 - Baytrail processors support for intel_pstate from Dirk Brandewie.
 - cpufreq support for Midway/ECX-2000 from Mark Langsdorf.
 - ARM vexpress/TC2 cpufreq support from Sudeep KarkadaNagesha.
 - ACPI power management support for the I2C and SPI bus types from
   Mika Westerberg and Lv Zheng.
 - cpufreq core fixes and cleanups from Viresh Kumar, Srivatsa S Bhat,
   Stratos Karafotis, Xiaoguang Chen, Lan Tianyu.
 - cpufreq drivers updates (mostly fixes and cleanups) from Viresh
   Kumar, Aaro Koskinen, Jungseok Lee, Sudeep KarkadaNagesha, Lukasz
   Majewski, Manish Badarkhe, Hans-Christian Egtvedt, Evgeny Kapaev.
 - intel_pstate updates from Dirk Brandewie and Adrian Huang.
 - ACPICA update to version 20130927, including fixes and cleanups and
   some reduction of divergences between the ACPICA code in the kernel
   and ACPICA upstream in order to improve the automatic ACPICA patch
   generation process. From Bob Moore, Lv Zheng, Tomasz Nowicki,
   Naresh Bhat, Bjorn Helgaas, David E Box.
 - ACPI IPMI driver fixes and cleanups from Lv Zheng.
 - ACPI hotplug fixes and cleanups from Bjorn Helgaas, Toshi Kani,
   Zhang Yanfei, Rafael J Wysocki.
 - Conversion of the ACPI AC driver to the platform bus type and
   multiple driver fixes and cleanups related to ACPI from Zhang Rui.
 - ACPI processor driver fixes and cleanups from Hanjun Guo, Jiang Liu,
   Bartlomiej Zolnierkiewicz, Mathieu Rhéaume, Rafael J Wysocki.
 - Fixes, cleanups and new blacklist entries related to the ACPI video
   support from Aaron Lu, Felipe Contreras, Lennart Poettering,
   Kirill Tkhai.
 - cpuidle core cleanups from Viresh Kumar and Lorenzo Pieralisi.
 - cpuidle drivers fixes and cleanups from Daniel Lezcano, Jingoo Han,
   Bartlomiej Zolnierkiewicz, Prarit Bhargava.
 - devfreq updates from Sachin Kamat, Dan Carpenter, Manish Badarkhe.
 - Operating Performance Points (OPP) core updates from Nishanth Menon.
 - Runtime power management core fix from Rafael J Wysocki and update
   from Ulf Hansson.
 - Hibernation fixes from Aaron Lu and Rafael J Wysocki.
 - Device suspend/resume lockup detection mechanism from Benoit Goby.
 - Removal of unused proc directories created for various ACPI drivers
   from Lan Tianyu.
 - ACPI LPSS driver fix and new device IDs for the ACPI platform scan
   handler from Heikki Krogerus and Jarkko Nikula.
 - New ACPI _OSI blacklist entry for Toshiba NB100 from Levente Kurusa.
 - Assorted fixes and cleanups related to ACPI from Andy Shevchenko,
   Al Stone, Bartlomiej Zolnierkiewicz, Colin Ian King, Dan Carpenter,
   Felipe Contreras, Jianguo Wu, Lan Tianyu, Yinghai Lu, Mathias Krause,
   Liu Chuansheng.
 - Assorted PM fixes and cleanups from Andy Shevchenko, Thierry Reding,
   Jean-Christophe Plagniol-Villard.

* tag 'pm+acpi-3.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (386 commits)
  cpufreq: conservative: fix requested_freq reduction issue
  ACPI / hotplug: Consolidate deferred execution of ACPI hotplug routines
  PM / runtime: Use pm_runtime_put_sync() in __device_release_driver()
  ACPI / event: remove unneeded NULL pointer check
  Revert "ACPI / video: Ignore BIOS initial backlight value for HP 250 G1"
  ACPI / video: Quirk initial backlight level 0
  ACPI / video: Fix initial level validity test
  intel_pstate: skip the driver if ACPI has power mgmt option
  PM / hibernate: Avoid overflow in hibernate_preallocate_memory()
  ACPI / hotplug: Do not execute "insert in progress" _OST
  ACPI / hotplug: Carry out PCI root eject directly
  ACPI / hotplug: Merge device hot-removal routines
  ACPI / hotplug: Make acpi_bus_hot_remove_device() internal
  ACPI / hotplug: Simplify device ejection routines
  ACPI / hotplug: Fix handle_root_bridge_removal()
  ACPI / hotplug: Refuse to hot-remove all objects with disabled hotplug
  ACPI / scan: Start matching drivers after trying scan handlers
  ACPI: Remove acpi_pci_slot_init() headers from internal.h
  ACPI / blacklist: fix name of ThinkPad Edge E530
  PowerCap: Fix build error with option -Werror=format-security
  ...

Conflicts:
	arch/arm/mach-omap2/opp.c
	drivers/Kconfig
	drivers/spi/spi.c
/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by: Nicolas Pitre, October 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:  (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define A15_CONF		0x400
#define A7_CONF			0x500
#define SYS_INFO		0x700
#define SPC_BASE		0xb00
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])

static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}
static void tc2_pm_down(u64 residency)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF, to prevent wfi
	 * from completing execution behind the power controller's back.
	 */
	if (!skip_wfi)
		gic_cpu_if_down();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		v7_exit_coherency_flush(all);

		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}
static void tc2_pm_power_down(void)
{
	tc2_pm_down(0);
}

static void tc2_pm_suspend(u64 residency)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
	tc2_pm_down(residency);
}

static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up	= tc2_pm_power_up,
	.power_down	= tc2_pm_power_down,
	.suspend	= tc2_pm_suspend,
	.powered_up	= tc2_pm_powered_up,
};
static bool __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bound!\n", __func__);
		return false;
	}
	tc2_pm_use_count[cpu][cluster] = 1;
	return true;
}

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}
static int __init tc2_pm_init(void)
{
	int ret, irq;
	void __iomem *scc;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
			"arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	if (!tc2_pm_usage_count_init())
		return -EINVAL;

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);