// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU complex suspend & resume functions for Tegra SoCs
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/suspend.h>

#include <linux/firmware/trusted_foundations.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>

#include <asm/cacheflush.h>
#include <asm/firmware.h>
#include <asm/idmap.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"

#ifdef CONFIG_PM_SLEEP
static DEFINE_SPINLOCK(tegra_lp2_lock);

static u32 iram_save_size;
static void *iram_save_addr;
struct tegra_lp1_iram tegra_lp1_iram;

void (*tegra_tear_down_cpu)(void);
void (*tegra_sleep_core_finish)(unsigned long v2p);
static int (*tegra_sleep_func)(unsigned long v2p);

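/*
 * tegra_tear_down_cpu_init - pick the CPU tear-down handler for this SoC
 *
 * Selects the chip-specific routine (Tegra20 vs. Tegra30/114/124) that the
 * sleep code uses to power down the CPU complex.
 */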
static void tegra_tear_down_cpu_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra_tear_down_cpu = tegra20_tear_down_cpu;
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra_tear_down_cpu = tegra30_tear_down_cpu;
		break;
	}
}

/*
 * restore_cpu_complex
 *
 * Restores the CPU clock settings and clears the flow controller state.
 *
 * Always called on CPU 0.
 */
static void restore_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Restore the CPU clock settings */
	tegra_cpu_clock_resume();

	flowctrl_cpu_suspend_exit(cpu);
}

/*
 * suspend_cpu_complex
 *
 * Saves the PLL state for use by restart_plls and prepares the flow
 * controller for the transition to the suspend state.
 *
 * Must always be called on CPU 0.
 */
static void suspend_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Save the CPU clock settings */
	tegra_cpu_clock_suspend();

	flowctrl_cpu_suspend_enter(cpu);
}

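/* Clear this CPU's bit in the "CPUs in LP2" mask, under the LP2 lock. */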
void tegra_pm_clear_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 &= ~BIT(phy_cpu_id);

	spin_unlock(&tegra_lp2_lock);
}

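/* Mark this CPU as having entered LP2 in the shared mask, under the LP2 lock. */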
void tegra_pm_set_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 |= BIT(phy_cpu_id);

	spin_unlock(&tegra_lp2_lock);
}

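/*
 * tegra_sleep_cpu - finish entry into LP2 with the MMU still on
 *
 * Checks that the CPU rail can really be turned off, disables the outer
 * cache when Trusted Foundations is in charge of cache maintenance and
 * then hands over to the low-level tegra_sleep_cpu_finish() code, which
 * does not return here.
 */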
static int tegra_sleep_cpu(unsigned long v2p)
{
	if (tegra_cpu_car_ops->rail_off_ready &&
	    WARN_ON(!tegra_cpu_rail_off_ready()))
		return -EBUSY;

	/*
	 * Disabling the L2 cache via the kernel API is only allowed when
	 * all secondary CPUs are offline. The cache has to be disabled
	 * with the MMU on if cache maintenance is done via the Trusted
	 * Foundations firmware. Note that CPUIDLE never enters powergate
	 * on Tegra30 if any secondary CPU is online, and this is the
	 * LP2-idle code path only for Tegra20/30.
	 */
#ifdef CONFIG_OUTER_CACHE
	if (trusted_foundations_registered() && outer_cache.disable)
		outer_cache.disable();
#endif
	/*
	 * Note that besides setting up the CPU reset vector, this firmware
	 * call may also do the following, depending on the firmware version:
	 * 1) Disable the L2 cache. This doesn't matter since we have
	 *    already disabled it above.
	 * 2) Disable the D-cache. This needs to be taken into account in
	 *    particular by tegra_disable_clean_inv_dcache(), which must
	 *    avoid disabling it again.
	 */
	call_firmware_op(prepare_idle, TF_PM_MODE_LP2);

	setup_mm_for_reboot();
	tegra_sleep_cpu_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

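/*
 * tegra_pm_set - program the PMC and flow controller for a suspend mode
 *
 * On chips newer than Tegra30 the CPU rail (CRAIL) is switched off via the
 * flow controller before the PMC is told which suspend mode to enter.
 */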
static void tegra_pm_set(enum tegra_suspend_mode mode)
{
	u32 value;

	switch (tegra_get_chip_id()) {
	case TEGRA20:
	case TEGRA30:
		break;
	default:
		/* Turn off CRAIL */
		value = flowctrl_read_cpu_csr(0);
		value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
		value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		flowctrl_write_cpu_csr(0, value);
		break;
	}

	tegra_pmc_enter_suspend_mode(mode);
}

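/*
 * tegra_pm_enter_lp2 - suspend the CPU complex into the LP2 power state
 *
 * Returns the result of cpu_suspend(), so a failed rail-off check in
 * tegra_sleep_cpu() propagates back to the caller.
 */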
int tegra_pm_enter_lp2(void)
{
	int err;

	tegra_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

	err = cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	/*
	 * Resume the L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30, where the cache has to be
	 * re-enabled via a firmware call. In other cases the cache is
	 * already enabled and hence re-enabling is a no-op. This is always
	 * a no-op on Tegra114+.
	 */
	outer_resume();

	restore_cpu_complex();
	cpu_cluster_pm_exit();

	return err;
}

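/* Clamp the requested suspend mode to the deepest state this code supports. */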
enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
				enum tegra_suspend_mode mode)
{
	/*
	 * The Tegra devices currently support suspending only to LP1 or a
	 * shallower state; LP0 is not supported, so cap the mode at LP1.
	 */
	if (mode > TEGRA_SUSPEND_LP1)
		return TEGRA_SUSPEND_LP1;

	return mode;
}

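/*
 * tegra_sleep_core - finish entry into LP0/LP1 with the MMU still on
 *
 * Disables the outer cache when Trusted Foundations handles cache
 * maintenance, notifies the firmware and jumps to the low-level
 * tegra_sleep_core_finish() code, which does not return here.
 */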
static int tegra_sleep_core(unsigned long v2p)
{
	/*
	 * The cache has to be disabled with the MMU on if cache maintenance
	 * is done via the Trusted Foundations firmware. This is a no-op on
	 * Tegra114+.
	 */
	if (trusted_foundations_registered())
		outer_disable();

	call_firmware_op(prepare_idle, TF_PM_MODE_LP1);

	setup_mm_for_reboot();
	tegra_sleep_core_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

/*
 * tegra_lp1_iram_hook
 *
 * Hooks up the addresses of the LP1 reset vector and the SDRAM
 * self-refresh code that live in SDRAM. The code is not copied to IRAM in
 * this function; it is copied to IRAM right before an LP0/LP1 suspend and
 * the IRAM contents are restored after resume.
 */
static bool tegra_lp1_iram_hook(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_lp1_iram_hook();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_lp1_iram_hook();
		break;
	default:
		break;
	}

	if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr)
		return false;

	iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr;
	iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL);
	if (!iram_save_addr)
		return false;

	return true;
}

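/*
 * tegra_sleep_core_init - set up the chip-specific LP0/LP1 sleep-core path
 *
 * Returns false when no tegra_sleep_core_finish() handler is available for
 * the current chip, in which case LP0/LP1 cannot be used.
 */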
static bool tegra_sleep_core_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_sleep_core_init();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_sleep_core_init();
		break;
	default:
		break;
	}

	if (!tegra_sleep_core_finish)
		return false;

	return true;
}

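/*
 * Copy the LP1 resume code into IRAM, saving the previous IRAM contents,
 * and flag that this CPU is entering LP1.
 */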
static void tegra_suspend_enter_lp1(void)
{
	/* copy the reset vector & SDRAM shutdown code into IRAM */
	memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       iram_save_size);
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       tegra_lp1_iram.start_addr, iram_save_size);

	*((u32 *)tegra_cpu_lp1_mask) = 1;
}

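/* Restore the saved IRAM contents and clear the LP1 flag on resume. */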
static void tegra_suspend_exit_lp1(void)
{
	/* restore IRAM */
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
	       iram_save_size);

	*(u32 *)tegra_cpu_lp1_mask = 0;
}

static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
	[TEGRA_SUSPEND_NONE] = "none",
	[TEGRA_SUSPEND_LP2] = "LP2",
	[TEGRA_SUSPEND_LP1] = "LP1",
	[TEGRA_SUSPEND_LP0] = "LP0",
};

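/*
 * tegra_suspend_enter - system-sleep entry point for the suspend framework
 *
 * Programs the PMC/flow controller for the configured mode, runs the
 * LP1/LP2-specific preparation, suspends via cpu_suspend() and undoes the
 * preparation on resume.
 */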
static int tegra_suspend_enter(suspend_state_t state)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (WARN_ON(mode < TEGRA_SUSPEND_NONE ||
		    mode >= TEGRA_MAX_SUSPEND_MODE))
		return -EINVAL;

	pr_info("Entering suspend state %s\n", lp_state[mode]);

	tegra_pm_set(mode);

	local_fiq_disable();

	suspend_cpu_complex();
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_enter_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_pm_set_cpu_in_lp2();
		break;
	default:
		break;
	}

	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);

	/*
	 * Resume the L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30, where the cache has to be
	 * re-enabled via a firmware call. In other cases the cache is
	 * already enabled and hence re-enabling is a no-op.
	 */
	outer_resume();

	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_exit_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_pm_clear_cpu_in_lp2();
		break;
	default:
		break;
	}
	restore_cpu_complex();

	local_fiq_enable();

	return 0;
}

static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = tegra_suspend_enter,
};

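/*
 * tegra_init_suspend - register Tegra system-suspend support
 *
 * Falls back to LP2 when the IRAM save buffer or the sleep-core handler
 * needed for LP0/LP1 is unavailable.
 */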
void __init tegra_init_suspend(void)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (mode == TEGRA_SUSPEND_NONE)
		return;

	tegra_tear_down_cpu_init();

	if (mode >= TEGRA_SUSPEND_LP1) {
		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
			pr_err("%s: unable to allocate memory for SDRAM self-refresh -- LP0/LP1 unavailable\n",
			       __func__);
			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
			mode = TEGRA_SUSPEND_LP2;
		}
	}

	/* set up sleep function for cpu_suspend */
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_sleep_func = tegra_sleep_core;
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_sleep_func = tegra_sleep_cpu;
		break;
	default:
		break;
	}

	suspend_set_ops(&tegra_suspend_ops);
}

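/*
 * tegra_pm_park_secondary_cpu - clean caches and park a secondary CPU
 *
 * Only returns (with -EINVAL) if called for CPU0 or if the chip-specific
 * shutdown routine unexpectedly falls through.
 */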
int tegra_pm_park_secondary_cpu(unsigned long cpu)
{
	if (cpu > 0) {
		tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

		if (tegra_get_chip_id() == TEGRA20)
			tegra20_hotplug_shutdown();
		else
			tegra30_hotplug_shutdown();
	}

	return -EINVAL;
}
#endif