mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-26 10:45:09 +07:00
97c8580e85
Annotate cpu_wait implementations using the __cpuidle macro which
places these functions in the .cpuidle.text section. This allows
cpu_in_idle() to return true for PC values which fall within these
functions, allowing nmi_backtrace() to produce cleaner output for CPUs
running idle functions. For example:

  # echo l >/proc/sysrq-trigger
  [   38.587170] sysrq: SysRq : Show backtrace of all active CPUs
  [   38.593657] NMI backtrace for cpu 1
  [   38.597611] CPU: 1 PID: 161 Comm: sh Not tainted 4.18.0-rc1+ #27
  [   38.604306] Stack : 00000000 00000004 00000006 80486724 00000000 00000000 00000000 00000000
  [   38.613647]         80e17eda 00000034 00000000 00000000 80d20000 80b67e98 8e559c90 0ffe1e88
  [   38.622986]         00000000 00000000 80e70000 00000000 8f61db18 38312e34 722d302e 202b3163
  [   38.632324]         8e559d3c 8e559adc 00000001 6b636162 80d20000 80000000 00000000 80d1cfa4
  [   38.641664]         00000001 80d20000 80d19520 00000000 00000003 80836724 00000004 80e10004
  [   38.650993]         ...
  [   38.653724] Call Trace:
  [   38.656499] [<8040cdd0>] show_stack+0xa0/0x144
  [   38.661475] [<80b67e98>] dump_stack+0xe8/0x120
  [   38.666455] [<80b6f6d4>] nmi_cpu_backtrace+0x1b4/0x1cc
  [   38.672189] [<80b6f81c>] nmi_trigger_cpumask_backtrace+0x130/0x1e4
  [   38.679081] [<808295d8>] __handle_sysrq+0xc0/0x180
  [   38.684421] [<80829b84>] write_sysrq_trigger+0x50/0x64
  [   38.690176] [<8061c984>] proc_reg_write+0xd0/0xfc
  [   38.695447] [<805aac1c>] __vfs_write+0x54/0x194
  [   38.700500] [<805aaf24>] vfs_write+0xe0/0x18c
  [   38.705360] [<805ab190>] ksys_write+0x7c/0xf0
  [   38.710238] [<80416018>] syscall_common+0x34/0x58
  [   38.715558] Sending NMI from CPU 1 to CPUs 0,2-3:
  [   38.720916] NMI backtrace for cpu 0 skipped: idling at r4k_wait_irqoff+0x2c/0x34
  [   38.729186] NMI backtrace for cpu 3 skipped: idling at r4k_wait_irqoff+0x2c/0x34
  [   38.737449] NMI backtrace for cpu 2 skipped: idling at r4k_wait_irqoff+0x2c/0x34

Without this we get register value & backtrace output from all CPUs,
which is generally useless for those running the idle function & serves
only to overwhelm & obfuscate the meaningful output from non-idle CPUs.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/19598/
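For reference, a minimal sketch of the mechanism the message describes, simplified from the kernel definitions of this era (include/linux/compiler.h and kernel/sched/idle.c): __cpuidle is a section attribute, and cpu_in_idle() tests a PC against the section bounds that the linker script emits around .cpuidle.text.

/* Minimal sketch, simplified from include/linux/compiler.h and
 * kernel/sched/idle.c. */
#define __cpuidle	__attribute__((__section__(".cpuidle.text")))

/* Section bounds provided by the linker script. */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/* True iff @pc lies within a function placed in .cpuidle.text. */
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
	       pc < (unsigned long)__cpuidle_text_end;
}

This is how nmi_backtrace() can report "idling at r4k_wait_irqoff+..." instead of dumping a full backtrace for an idle CPU.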
275 lines
6.1 KiB
C
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx  the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004  Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
 * the implementation of the "wait" feature differs between CPU families. This
 * points to the function that implements CPU specific wait.
 * The wait instruction stops the pipeline and reduces the power consumption of
 * the CPU very much.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);

static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	local_irq_enable();
}

static void __cpuidle r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	local_irq_enable();
}

void __cpuidle r4k_wait(void)
{
	local_irq_enable();
	__r4k_wait();
}

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically. Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	local_irq_enable();
}

/*
 * The RM7000 variant has to handle erratum 38. The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	local_irq_enable();
}

/*
 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
 * since coreclock (and the cp0 counter) stops upon executing it. Only an
 * interrupt can wake it, so they must be enabled before entering idle modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	arch=r4000			\n"
	"	cache	0x14, 0(%0)			\n"
	"	cache	0x14, 32(%0)			\n"
	"	sync					\n"
	"	mtc0	%1, $12				\n" /* wr c0status */
	"	wait					\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	.set	mips0				\n"
	: : "r" (au1k_wait), "r" (c0status));
}

static int __initdata nowait;
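
/*
 * Booting with "nowait" on the kernel command line disables use of the
 * WAIT instruction: check_wait() then leaves cpu_wait NULL, so the idle
 * loop below simply re-enables interrupts and spins.
 */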
static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_TX3927:
		cpu_wait = r39xx_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_JZRISC:
	case CPU_LOONGSON1:
	case CPU_XLR:
	case CPU_XLP:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON3:
		if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data is
		 * likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		/* fall through */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		cpu_wait = r4k_wait;
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 has E16.
		 * Rev3.1 WAIT is nop, why bother
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode. So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction. Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}

void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();
	else
		local_irq_enable();
}

#ifdef CONFIG_CPU_IDLE

int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}

#endif
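
For orientation, a rough, hypothetical condensation of the caller side: the generic idle loop (do_idle()/default_idle_call() in kernel/sched/idle.c) disables interrupts before calling arch_cpu_idle(), and each cpu_wait implementation above is responsible for re-enabling them. The helper name below is made up for illustration; the real loop is considerably more involved.

/* Hypothetical sketch of one pass through the generic idle loop. */
static void idle_step_sketch(void)
{
	local_irq_disable();
	if (!need_resched())
		arch_cpu_idle();	/* expected to return with IRQs enabled */
	else
		local_irq_enable();
}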