mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 10:17:47 +07:00
d55c5f28af
The trusted OS may reject CPU_OFF calls to its resident CPU, so we must avoid issuing those. We never migrate a Trusted OS and we already take care to prevent the CPU_OFF PSCI call. However, this is not reflected explicitly to the userspace. Any user can attempt to hotplug the trusted OS resident CPU. The entire motion of going through the various state transitions in the CPU hotplug state machine gets executed and the PSCI layer finally refuses to make the CPU_OFF call. This results in unnecessary unwinding of the CPU hotplug state machine in the kernel. Instead we can mark the trusted OS resident CPU as not available for hotplug, so that the user attempt or request to do the same will get immediately rejected. Cc: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com> Signed-off-by: Will Deacon <will@kernel.org>
124 lines
2.5 KiB
C
124 lines
2.5 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
*
|
|
* Copyright (C) 2013 ARM Limited
|
|
*
|
|
* Author: Will Deacon <will.deacon@arm.com>
|
|
*/
|
|
|
|
#define pr_fmt(fmt) "psci: " fmt
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/of.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/psci.h>
|
|
#include <linux/mm.h>
|
|
|
|
#include <uapi/linux/psci.h>
|
|
|
|
#include <asm/cpu_ops.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/smp_plat.h>
|
|
|
|
/*
 * Per-CPU init hook for the "psci" enable-method: PSCI requires no
 * per-CPU setup before boot, so this always succeeds.
 */
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
	return 0;
}
|
|
|
|
/*
 * Check that firmware advertises CPU_ON before we commit to bringing up
 * a secondary CPU; without it the CPU can never be booted via PSCI.
 *
 * Returns 0 when CPU_ON is available, -ENODEV otherwise.
 */
static int __init cpu_psci_cpu_prepare(unsigned int cpu)
{
	if (psci_ops.cpu_on)
		return 0;

	pr_err("no cpu_on method, not booting CPU%d\n", cpu);
	return -ENODEV;
}
|
|
|
|
static int cpu_psci_cpu_boot(unsigned int cpu)
|
|
{
|
|
int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
|
|
if (err)
|
|
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
|
|
|
|
return err;
|
|
}
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
static bool cpu_psci_cpu_can_disable(unsigned int cpu)
|
|
{
|
|
return !psci_tos_resident_on(cpu);
|
|
}
|
|
|
|
static int cpu_psci_cpu_disable(unsigned int cpu)
|
|
{
|
|
/* Fail early if we don't have CPU_OFF support */
|
|
if (!psci_ops.cpu_off)
|
|
return -EOPNOTSUPP;
|
|
|
|
/* Trusted OS will deny CPU_OFF */
|
|
if (psci_tos_resident_on(cpu))
|
|
return -EPERM;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void cpu_psci_cpu_die(unsigned int cpu)
|
|
{
|
|
int ret;
|
|
/*
|
|
* There are no known implementations of PSCI actually using the
|
|
* power state field, pass a sensible default for now.
|
|
*/
|
|
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
|
|
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
|
|
|
|
ret = psci_ops.cpu_off(state);
|
|
|
|
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
|
|
}
|
|
|
|
static int cpu_psci_cpu_kill(unsigned int cpu)
|
|
{
|
|
int err, i;
|
|
|
|
if (!psci_ops.affinity_info)
|
|
return 0;
|
|
/*
|
|
* cpu_kill could race with cpu_die and we can
|
|
* potentially end up declaring this cpu undead
|
|
* while it is dying. So, try again a few times.
|
|
*/
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
|
|
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
|
|
pr_info("CPU%d killed.\n", cpu);
|
|
return 0;
|
|
}
|
|
|
|
msleep(10);
|
|
pr_info("Retrying again to check for CPU kill\n");
|
|
}
|
|
|
|
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
|
|
cpu, err);
|
|
return -ETIMEDOUT;
|
|
}
|
|
#endif
|
|
|
|
/*
 * PSCI-backed implementation of the arm64 cpu_operations interface,
 * used when the enable-method for a CPU is "psci". The hotplug hooks
 * are only provided when CONFIG_HOTPLUG_CPU is enabled.
 */
const struct cpu_operations cpu_psci_ops = {
	.name = "psci",
	.cpu_init = cpu_psci_cpu_init,		/* no-op: no per-CPU setup needed */
	.cpu_prepare = cpu_psci_cpu_prepare,	/* verify firmware has CPU_ON */
	.cpu_boot = cpu_psci_cpu_boot,		/* CPU_ON into secondary_entry */
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_can_disable = cpu_psci_cpu_can_disable,	/* reject Trusted OS CPU early */
	.cpu_disable = cpu_psci_cpu_disable,	/* CPU_OFF support / Trusted OS checks */
	.cpu_die = cpu_psci_cpu_die,		/* CPU_OFF; should not return */
	.cpu_kill = cpu_psci_cpu_kill,		/* poll AFFINITY_INFO for OFF */
#endif
};
|
|
|