2012-12-19 00:53:14 +07:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2013 ARM Limited
|
|
|
|
*
|
|
|
|
* Author: Will Deacon <will.deacon@arm.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) "psci: " fmt
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/of.h>
|
2013-10-25 02:30:14 +07:00
|
|
|
#include <linux/smp.h>
|
2014-05-07 21:18:36 +07:00
|
|
|
#include <linux/delay.h>
|
2015-07-31 21:46:16 +07:00
|
|
|
#include <linux/psci.h>
|
2013-09-27 16:25:02 +07:00
|
|
|
#include <linux/slab.h>
|
2015-07-31 21:46:16 +07:00
|
|
|
|
2014-04-18 01:38:41 +07:00
|
|
|
#include <uapi/linux/psci.h>
|
2012-12-19 00:53:14 +07:00
|
|
|
|
|
|
|
#include <asm/compiler.h>
|
2013-10-25 02:30:15 +07:00
|
|
|
#include <asm/cpu_ops.h>
|
2012-12-19 00:53:14 +07:00
|
|
|
#include <asm/errno.h>
|
2013-10-25 02:30:14 +07:00
|
|
|
#include <asm/smp_plat.h>
|
2013-09-27 16:25:02 +07:00
|
|
|
#include <asm/suspend.h>
|
2013-10-25 02:30:14 +07:00
|
|
|
|
2015-04-30 23:59:03 +07:00
|
|
|
/*
 * A power state with the TYPE bit set requests powerdown, which loses
 * CPU context; with the bit clear it is a standby state that retains
 * context.
 */
static bool psci_power_state_loses_context(u32 state)
{
	u32 type = state & PSCI_0_2_POWER_STATE_TYPE_MASK;

	return type != 0;
}
|
|
|
|
|
|
|
|
/*
 * A power_state value is valid iff it sets no bits outside the
 * ID, TYPE and AFFINITY_LEVEL fields defined by PSCI 0.2.
 */
static bool psci_power_state_is_valid(u32 state)
{
	u32 reserved_bits = ~(PSCI_0_2_POWER_STATE_ID_MASK |
			      PSCI_0_2_POWER_STATE_TYPE_MASK |
			      PSCI_0_2_POWER_STATE_AFFL_MASK);

	return (state & reserved_bits) == 0;
}
|
2013-10-25 02:30:14 +07:00
|
|
|
|
2015-04-30 23:59:03 +07:00
|
|
|
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
|
2013-09-27 16:25:02 +07:00
|
|
|
|
2015-05-13 20:12:46 +07:00
|
|
|
/*
 * Parse the "cpu-idle-states" phandle list of @cpu's device tree node,
 * validate each state's "arm,psci-suspend-param" value, and publish the
 * resulting array through the per-cpu psci_power_state pointer.
 *
 * Returns 0 on success, -ENODEV if the cpu node or idle states are
 * missing, -EOPNOTSUPP if PSCI CPU_SUSPEND is unavailable, -ENOMEM on
 * allocation failure, or a DT parsing/validation error code.
 *
 * Fix: of_get_cpu_node() returns @cpu_node with an elevated refcount;
 * the original code never dropped it, leaking a device_node reference
 * on every path. All exits now go through of_node_put(cpu_node).
 */
static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{
	int i, ret, count = 0;
	u32 *psci_states;
	struct device_node *state_node, *cpu_node;

	cpu_node = of_get_cpu_node(cpu, NULL);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * If the PSCI cpu_suspend function hook has not been initialized
	 * idle states must not be enabled, so bail out
	 */
	if (!psci_ops.cpu_suspend) {
		ret = -EOPNOTSUPP;
		goto out_put_cpu;
	}

	/* Count idle states */
	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
					      count))) {
		count++;
		of_node_put(state_node);
	}

	if (!count) {
		ret = -ENODEV;
		goto out_put_cpu;
	}

	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
	if (!psci_states) {
		ret = -ENOMEM;
		goto out_put_cpu;
	}

	for (i = 0; i < count; i++) {
		u32 state;

		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);

		ret = of_property_read_u32(state_node,
					   "arm,psci-suspend-param",
					   &state);
		if (ret) {
			pr_warn(" * %s missing arm,psci-suspend-param property\n",
				state_node->full_name);
			of_node_put(state_node);
			goto free_mem;
		}

		of_node_put(state_node);
		pr_debug("psci-power-state %#x index %d\n", state, i);
		if (!psci_power_state_is_valid(state)) {
			pr_warn("Invalid PSCI power state %#x\n", state);
			ret = -EINVAL;
			goto free_mem;
		}
		psci_states[i] = state;
	}

	/* Idle states parsed correctly, initialize per-cpu pointer */
	per_cpu(psci_power_state, cpu) = psci_states;
	of_node_put(cpu_node);
	return 0;

free_mem:
	kfree(psci_states);
out_put_cpu:
	of_node_put(cpu_node);
	return ret;
}
|
|
|
|
|
2015-05-13 20:12:46 +07:00
|
|
|
/*
 * cpu_init method for the "psci" enable method: no per-cpu setup is
 * required at this stage, so report success unconditionally.
 */
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
	return 0;
}
|
|
|
|
|
2013-10-25 02:30:15 +07:00
|
|
|
/*
 * Verify that the firmware provides CPU_ON before we commit to booting
 * @cpu through the PSCI enable method.
 */
static int __init cpu_psci_cpu_prepare(unsigned int cpu)
{
	if (psci_ops.cpu_on)
		return 0;

	pr_err("no cpu_on method, not booting CPU%d\n", cpu);
	return -ENODEV;
}
|
|
|
|
|
arm64: factor out spin-table boot method
The arm64 kernel has an internal holding pen, which is necessary for
some systems where we can't bring CPUs online individually and must hold
multiple CPUs in a safe area until the kernel is able to handle them.
The current SMP infrastructure for arm64 is closely coupled to this
holding pen, and alternative boot methods must launch CPUs into the pen,
where they sit before they are launched into the kernel proper.
With PSCI (and possibly other future boot methods), we can bring CPUs
online individually, and need not perform the secondary_holding_pen
dance. Instead, this patch factors the holding pen management code out
to the spin-table boot method code, as it is the only boot method
requiring the pen.
A new entry point for secondaries, secondary_entry is added for other
boot methods to use, which bypasses the holding pen and its associated
overhead when bringing CPUs online. The smp.pen.text section is also
removed, as the pen can live in head.text without problem.
The cpu_operations structure is extended with two new functions,
cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
performing any post-boot cleanup required by a bootmethod (e.g.
resetting the secondary_holding_pen_release to INVALID_HWID).
Documentation is added for cpu_operations.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2013-10-25 02:30:16 +07:00
|
|
|
static int cpu_psci_cpu_boot(unsigned int cpu)
|
|
|
|
{
|
|
|
|
int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
|
|
|
|
if (err)
|
2014-02-28 16:57:47 +07:00
|
|
|
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
|
arm64: factor out spin-table boot method
The arm64 kernel has an internal holding pen, which is necessary for
some systems where we can't bring CPUs online individually and must hold
multiple CPUs in a safe area until the kernel is able to handle them.
The current SMP infrastructure for arm64 is closely coupled to this
holding pen, and alternative boot methods must launch CPUs into the pen,
where they sit before they are launched into the kernel proper.
With PSCI (and possibly other future boot methods), we can bring CPUs
online individually, and need not perform the secondary_holding_pen
dance. Instead, this patch factors the holding pen management code out
to the spin-table boot method code, as it is the only boot method
requiring the pen.
A new entry point for secondaries, secondary_entry is added for other
boot methods to use, which bypasses the holding pen and its associated
overhead when bringing CPUs online. The smp.pen.text section is also
removed, as the pen can live in head.text without problem.
The cpu_operations structure is extended with two new functions,
cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
performing any post-boot cleanup required by a bootmethod (e.g.
resetting the secondary_holding_pen_release to INVALID_HWID).
Documentation is added for cpu_operations.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2013-10-25 02:30:16 +07:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-10-25 02:30:19 +07:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
static int cpu_psci_cpu_disable(unsigned int cpu)
|
|
|
|
{
|
|
|
|
/* Fail early if we don't have CPU_OFF support */
|
|
|
|
if (!psci_ops.cpu_off)
|
|
|
|
return -EOPNOTSUPP;
|
2015-04-23 00:10:26 +07:00
|
|
|
|
|
|
|
/* Trusted OS will deny CPU_OFF */
|
|
|
|
if (psci_tos_resident_on(cpu))
|
|
|
|
return -EPERM;
|
|
|
|
|
2013-10-25 02:30:19 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cpu_psci_cpu_die(unsigned int cpu)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
/*
|
|
|
|
* There are no known implementations of PSCI actually using the
|
|
|
|
* power state field, pass a sensible default for now.
|
|
|
|
*/
|
2015-04-30 23:59:03 +07:00
|
|
|
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
|
|
|
|
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
|
2013-10-25 02:30:19 +07:00
|
|
|
|
|
|
|
ret = psci_ops.cpu_off(state);
|
|
|
|
|
2014-02-28 16:57:47 +07:00
|
|
|
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
|
2013-10-25 02:30:19 +07:00
|
|
|
}
|
2014-05-07 21:18:36 +07:00
|
|
|
|
|
|
|
static int cpu_psci_cpu_kill(unsigned int cpu)
|
|
|
|
{
|
|
|
|
int err, i;
|
|
|
|
|
|
|
|
if (!psci_ops.affinity_info)
|
2015-04-20 23:55:30 +07:00
|
|
|
return 0;
|
2014-05-07 21:18:36 +07:00
|
|
|
/*
|
|
|
|
* cpu_kill could race with cpu_die and we can
|
|
|
|
* potentially end up declaring this cpu undead
|
|
|
|
* while it is dying. So, try again a few times.
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
|
|
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
|
|
|
|
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
|
|
|
|
pr_info("CPU%d killed.\n", cpu);
|
2015-04-20 23:55:30 +07:00
|
|
|
return 0;
|
2014-05-07 21:18:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
msleep(10);
|
|
|
|
pr_info("Retrying again to check for CPU kill\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
|
|
|
|
cpu, err);
|
2015-04-20 23:55:30 +07:00
|
|
|
return -ETIMEDOUT;
|
2014-05-07 21:18:36 +07:00
|
|
|
}
|
2013-10-25 02:30:19 +07:00
|
|
|
#endif
|
|
|
|
|
2013-09-27 16:25:02 +07:00
|
|
|
static int psci_suspend_finisher(unsigned long index)
|
|
|
|
{
|
2015-04-30 23:59:03 +07:00
|
|
|
u32 *state = __this_cpu_read(psci_power_state);
|
2013-09-27 16:25:02 +07:00
|
|
|
|
|
|
|
return psci_ops.cpu_suspend(state[index - 1],
|
|
|
|
virt_to_phys(cpu_resume));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enter the idle state selected by @index via PSCI CPU_SUSPEND.
 */
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
	u32 *states = __this_cpu_read(psci_power_state);
	u32 state;

	/*
	 * idle state index 0 corresponds to wfi, should never be called
	 * from the cpu_suspend operations
	 */
	if (WARN_ON_ONCE(!index))
		return -EINVAL;

	state = states[index - 1];

	/*
	 * Context-losing (powerdown) states go through cpu_suspend() so
	 * CPU context is saved/restored around the firmware call; states
	 * that retain context can call the firmware directly.
	 */
	if (psci_power_state_loses_context(state))
		return cpu_suspend(index, psci_suspend_finisher);

	return psci_ops.cpu_suspend(state, 0);
}
|
|
|
|
|
2013-10-25 02:30:15 +07:00
|
|
|
const struct cpu_operations cpu_psci_ops = {
|
2013-10-25 02:30:14 +07:00
|
|
|
.name = "psci",
|
2013-09-27 16:25:02 +07:00
|
|
|
#ifdef CONFIG_CPU_IDLE
|
|
|
|
.cpu_init_idle = cpu_psci_cpu_init_idle,
|
|
|
|
.cpu_suspend = cpu_psci_cpu_suspend,
|
|
|
|
#endif
|
2013-10-25 02:30:15 +07:00
|
|
|
.cpu_init = cpu_psci_cpu_init,
|
|
|
|
.cpu_prepare = cpu_psci_cpu_prepare,
|
arm64: factor out spin-table boot method
The arm64 kernel has an internal holding pen, which is necessary for
some systems where we can't bring CPUs online individually and must hold
multiple CPUs in a safe area until the kernel is able to handle them.
The current SMP infrastructure for arm64 is closely coupled to this
holding pen, and alternative boot methods must launch CPUs into the pen,
where they sit before they are launched into the kernel proper.
With PSCI (and possibly other future boot methods), we can bring CPUs
online individually, and need not perform the secondary_holding_pen
dance. Instead, this patch factors the holding pen management code out
to the spin-table boot method code, as it is the only boot method
requiring the pen.
A new entry point for secondaries, secondary_entry is added for other
boot methods to use, which bypasses the holding pen and its associated
overhead when bringing CPUs online. The smp.pen.text section is also
removed, as the pen can live in head.text without problem.
The cpu_operations structure is extended with two new functions,
cpu_boot and cpu_postboot, for bringing a cpu into the kernel and
performing any post-boot cleanup required by a bootmethod (e.g.
resetting the secondary_holding_pen_release to INVALID_HWID).
Documentation is added for cpu_operations.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2013-10-25 02:30:16 +07:00
|
|
|
.cpu_boot = cpu_psci_cpu_boot,
|
2013-10-25 02:30:19 +07:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
.cpu_disable = cpu_psci_cpu_disable,
|
|
|
|
.cpu_die = cpu_psci_cpu_die,
|
2014-05-07 21:18:36 +07:00
|
|
|
.cpu_kill = cpu_psci_cpu_kill,
|
2013-10-25 02:30:19 +07:00
|
|
|
#endif
|
2013-10-25 02:30:14 +07:00
|
|
|
};
|
|
|
|
|