linux_dsm_epyc7002/arch/x86/kernel/reboot.c

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <linux/dmi.h>
#include <linux/sched.h>
#include <linux/tboot.h>
#include <linux/delay.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
#include <asm/pci_x86.h>
#include <asm/virtext.h>
#include <asm/cpu.h>
#include <asm/nmi.h>
#ifdef CONFIG_X86_32
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
# include <asm/realmode.h>
#else
# include <asm/x86_init.h>
#endif
/*
* Power off function, if any
*/
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
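/*
 * An empty (zero-limit) IDT, loaded by the BOOT_TRIPLE path below so that
 * the subsequent breakpoint escalates into a triple fault.
 */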
static const struct desc_ptr no_idt = {};
static int reboot_mode;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
/*
* This variable is used privately to keep track of whether or not
* reboot_type is still set to its default value (i.e., reboot= hasn't
* been set on the command line). This is needed so that we can
* determine whether to suppress DMI scanning for reboot quirks.
* Without it, it's
* impossible to override a faulty reboot quirk without recompiling.
*/
static int reboot_default = 1;
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
static int reboot_cpu = -1;
#endif
/*
* This is set if we need to go through the 'emergency' path.
* When machine_emergency_restart() is called, we may be in
* an inconsistent state and won't be able to do a clean cleanup.
*/
static int reboot_emergency;
/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
bool port_cf9_safe = false;
/*
* reboot=b[ios] | s[mp] | t[riple] | k[bd] | a[cpi] | e[fi] | p[ci] [, [w]arm | [c]old]
* warm Don't set the cold reboot flag
* cold Set the cold reboot flag
* bios Reboot by jumping through the BIOS (only for X86_32)
* smp Reboot by executing reset on BSP or other CPU (only for X86_32)
* triple Force a triple fault (init)
* kbd Use the keyboard controller. cold reset
* acpi Use the RESET_REG in the FADT (default)
* efi Use efi reset_system runtime service
* pci Use the so-called "PCI reset register", CF9
* force Avoid anything that could hang.
*/
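/*
 * For example (following the option list above): "reboot=acpi,warm" selects
 * the ACPI reset register and sets the warm-boot flag, while
 * "reboot=triple,force" forces a triple fault and skips the clean CPU/APIC
 * shutdown in native_machine_restart().
 */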
static int __init reboot_setup(char *str)
{
for (;;) {
/*
* Having anything passed on the command line via
* reboot= will cause us to disable DMI checking
* below.
*/
reboot_default = 0;
switch (*str) {
case 'w':
reboot_mode = 0x1234;
break;
case 'c':
reboot_mode = 0;
break;
#ifdef CONFIG_X86_32
#ifdef CONFIG_SMP
case 's':
if (isdigit(*(str+1))) {
reboot_cpu = (int) (*(str+1) - '0');
if (isdigit(*(str+2)))
reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
}
/*
* We will leave sorting out the final value
* when we are ready to reboot, since we might not
* have detected the BSP APIC ID or smp_num_cpu
*/
break;
#endif /* CONFIG_SMP */
case 'b':
#endif
case 'a':
case 'k':
case 't':
case 'e':
case 'p':
reboot_type = *str;
break;
case 'f':
reboot_force = 1;
break;
}
str = strchr(str, ',');
if (str)
str++;
else
break;
}
return 1;
}
__setup("reboot=", reboot_setup);
#ifdef CONFIG_X86_32
/*
* Reboot options and system auto-detection code provided by
* Dell Inc. so their systems "just work". :-)
*/
/*
* Some machines require the "reboot=b" or "reboot=k" command line options;
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_BIOS) {
reboot_type = BOOT_BIOS;
printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
}
return 0;
}
void machine_real_restart(unsigned int type)
{
void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
real_mode_header->machine_real_restart_asm;
local_irq_disable();
/*
* Write zero to CMOS register number 0x0f, which the BIOS POST
* routine will recognize as telling it to do a proper reboot. (Well
* that's what this book in front of me says -- it may only apply to
* the Phoenix BIOS though, it's not clear). At the same time,
* disable NMIs by setting the top bit in the CMOS address register,
* as we're about to do peculiar things to the CPU. I'm not sure if
* `outb_p' is needed instead of just `outb'. Use it to be on the
* safe side. (Yes, CMOS_WRITE does outb_p's. - Paul G.)
*/
spin_lock(&rtc_lock);
CMOS_WRITE(0x00, 0x8f);
spin_unlock(&rtc_lock);
/*
* Switch back to the initial page table.
*/
load_cr3(initial_page_table);
/*
* Write 0x1234 to absolute memory location 0x472. The BIOS reads
* this on booting to tell it to "Bypass memory test (also warm
* boot)". This seems like a fairly standard thing that gets set by
* REBOOT.COM programs, and the previous reset routine did this
* too. */
*((unsigned short *)0x472) = reboot_mode;
/* Jump to the identity-mapped low memory code */
restart_lowmem(type);
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif
#endif /* CONFIG_X86_32 */
/*
* Some Apple MacBook and MacBookPro models need reboot=p to be able to reboot.
*/
static int __init set_pci_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_CF9) {
reboot_type = BOOT_CF9;
printk(KERN_INFO "%s series board detected. "
"Selecting PCI-method for reboots.\n", d->ident);
}
return 0;
}
static int __init set_kbd_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_KBD) {
reboot_type = BOOT_KBD;
printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
}
return 0;
}
/*
* This is a single dmi_table handling all reboot quirks. Note that
* BOOT_BIOS is only available on 32-bit.
*/
static struct dmi_system_id __initdata reboot_dmi_table[] = {
#ifdef CONFIG_X86_32
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
.ident = "Dell E520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
},
},
{ /* Handle problems with rebooting on Dell 1300's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 1300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
},
},
{ /* Handle problems with rebooting on Dell 300's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745's SFF */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745's DFF */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 330",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
},
},
{ /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 760",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
},
},
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
},
},
{ /* Handle problems with rebooting on Dell T5400's */
.callback = set_bios_reboot,
.ident = "Dell Precision T5400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
},
},
{ /* Handle problems with rebooting on Dell T7400's */
.callback = set_bios_reboot,
.ident = "Dell Precision T7400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
},
},
{ /* Handle problems with rebooting on HP laptops */
.callback = set_bios_reboot,
.ident = "HP Compaq Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
},
},
{ /* Handle problems with rebooting on Dell XPS710 */
.callback = set_bios_reboot,
.ident = "Dell XPS710",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
},
},
{ /* Handle problems with rebooting on Dell DXP061 */
.callback = set_bios_reboot,
.ident = "Dell DXP061",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
},
},
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
.ident = "Sony VGN-Z540N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ /* Handle problems with rebooting on CompuLab SBC-FITPC2 */
.callback = set_bios_reboot,
.ident = "CompuLab SBC-FITPC2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
},
},
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,
.ident = "ASUS P4S800",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
#endif /* CONFIG_X86_32 */
{ /* Handle reboot issue on Acer Aspire one */
.callback = set_kbd_reboot,
.ident = "Acer Aspire One A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
},
},
{ /* Handle problems with rebooting on Apple MacBook5 */
.callback = set_pci_reboot,
.ident = "Apple MacBook5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
},
},
{ /* Handle problems with rebooting on Apple Macmini3,1 */
.callback = set_pci_reboot,
.ident = "Apple Macmini3,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
{ /* Handle problems with rebooting on the iMac9,1. */
.callback = set_pci_reboot,
.ident = "Apple iMac9,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
{ /* Handle problems with rebooting on the Latitude E6320. */
.callback = set_pci_reboot,
.ident = "Dell Latitude E6320",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
},
},
{ /* Handle problems with rebooting on the Latitude E5420. */
.callback = set_pci_reboot,
.ident = "Dell Latitude E5420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
},
},
{ /* Handle problems with rebooting on the Latitude E6420. */
.callback = set_pci_reboot,
.ident = "Dell Latitude E6420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
},
},
{ /* Handle problems with rebooting on the OptiPlex 990. */
.callback = set_pci_reboot,
.ident = "Dell OptiPlex 990",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
},
},
{ /* Handle problems with rebooting on the Precision M6600. */
.callback = set_pci_reboot,
.ident = "Dell Precision M6600",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
{ }
};
static int __init reboot_init(void)
{
/*
* Only do the DMI check if reboot_type hasn't been overridden
* on the command line
*/
if (reboot_default)
dmi_check_system(reboot_dmi_table);
return 0;
}
core_initcall(reboot_init);
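/*
 * Poll the 8042 keyboard controller status port (0x64) until the
 * input-buffer-full bit (bit 1) clears, so that a command byte such as
 * 0xfe ("pulse the CPU reset line", used below) can safely be written.
 */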
static inline void kb_wait(void)
{
int i;
for (i = 0; i < 0x10000; i++) {
if ((inb(0x64) & 0x02) == 0)
break;
udelay(2);
}
}
static void vmxoff_nmi(int cpu, struct pt_regs *regs)
{
cpu_emergency_vmxoff();
}
/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
static void emergency_vmx_disable_all(void)
{
/* Just make sure we won't change CPUs while doing this */
local_irq_disable();
/*
* We need to disable VMX on all CPUs before rebooting, otherwise
* we risk hanging up the machine, because the CPU ignores INIT
* signals when VMX is enabled.
*
* We can't take any locks and we may be in an inconsistent
* state, so we use NMIs as IPIs to tell the other CPUs to disable
* VMX and halt.
*
* For safety, we will avoid running the nmi_shootdown_cpus()
* stuff unnecessarily, but we don't have a way to check
* if other CPUs have VMX enabled. So we will call it only if the
* CPU we are running on has VMX enabled.
*
* We will miss cases where VMX is not enabled on all CPUs. This
* shouldn't do much harm because KVM always enables VMX on all
* CPUs anyway. But we can miss it in the small window where KVM
* is still enabling VMX.
*/
if (cpu_has_vmx() && cpu_vmx_enabled()) {
/* Disable VMX on this CPU. */
cpu_vmxoff();
/* Halt and disable VMX on the other CPUs */
nmi_shootdown_cpus(vmxoff_nmi);
}
}
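/*
 * Weak stub: boards that need chipset-specific pokes before the keyboard
 * controller reset override this (e.g. reboot_fixups_32.c when
 * CONFIG_X86_REBOOTFIXUPS is enabled).
 */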
void __attribute__((weak)) mach_reboot_fixups(void)
{
}
/*
* Windows compatible x86 hardware expects the following on reboot:
*
* 1) If the FADT has the ACPI reboot register flag set, try it
* 2) If still alive, write to the keyboard controller
* 3) If still alive, write to the ACPI reboot register again
* 4) If still alive, write to the keyboard controller again
*
* If the machine is still alive at this stage, it gives up. We default to
* following the same pattern, except that if we're still alive after (4)
* we'll try to force a triple fault and then keep cycling between the
* keyboard-controller reset and the triple fault.
*/
static void native_machine_emergency_restart(void)
{
int i;
int attempt = 0;
int orig_reboot_type = reboot_type;
if (reboot_emergency)
emergency_vmx_disable_all();
tboot_shutdown(TB_SHUTDOWN_REBOOT);
/* Tell the BIOS if we want cold or warm reboot */
*((unsigned short *)__va(0x472)) = reboot_mode;
for (;;) {
/* Could also try the reset bit in the Hammer NB */
switch (reboot_type) {
case BOOT_KBD:
mach_reboot_fixups(); /* For board specific fixups */
for (i = 0; i < 10; i++) {
kb_wait();
udelay(50);
outb(0xfe, 0x64); /* Pulse reset low */
udelay(50);
}
if (attempt == 0 && orig_reboot_type == BOOT_ACPI) {
attempt = 1;
reboot_type = BOOT_ACPI;
} else {
reboot_type = BOOT_TRIPLE;
}
break;
case BOOT_TRIPLE:
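/*
 * With the zero-limit IDT loaded, the int3 below cannot be
 * delivered; the resulting double fault cannot be delivered
 * either, so the CPU triple-faults and the platform resets.
 */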
load_idt(&no_idt);
__asm__ __volatile__("int3");
reboot_type = BOOT_KBD;
break;
#ifdef CONFIG_X86_32
case BOOT_BIOS:
machine_real_restart(MRR_BIOS);
reboot_type = BOOT_KBD;
break;
#endif
case BOOT_ACPI:
acpi_reboot();
reboot_type = BOOT_KBD;
break;
case BOOT_EFI:
if (efi_enabled)
efi.reset_system(reboot_mode ?
EFI_RESET_WARM :
EFI_RESET_COLD,
EFI_SUCCESS, 0, NULL);
reboot_type = BOOT_KBD;
break;
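/*
 * Port 0xcf9 is the chipset Reset Control register (RST_CNT on Intel
 * ICH-family chipsets): setting bit 1 selects a hard reset and the
 * 0->1 transition of bit 2 triggers it, matching the two writes below.
 */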
case BOOT_CF9:
port_cf9_safe = true;
/* Fall through */
case BOOT_CF9_COND:
if (port_cf9_safe) {
u8 cf9 = inb(0xcf9) & ~6;
outb(cf9|2, 0xcf9); /* Request hard reset */
udelay(50);
outb(cf9|6, 0xcf9); /* Actually do the reset */
udelay(50);
}
reboot_type = BOOT_KBD;
break;
}
}
}
void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
/* The boot cpu is always logical cpu 0 */
int reboot_cpu_id = 0;
#ifdef CONFIG_X86_32
/* See if a command line override has been given */
if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
cpu_online(reboot_cpu))
reboot_cpu_id = reboot_cpu;
#endif
/* Make certain the cpu I'm about to reboot on is online */
if (!cpu_online(reboot_cpu_id))
reboot_cpu_id = smp_processor_id();
/* Make certain I only run on the appropriate processor */
set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
/*
* O.K. Now that I'm on the appropriate processor, stop all of the
* others. Also disable local interrupts so that we don't receive the
* per-cpu timer interrupt, which may trigger the scheduler's load balancing.
*/
local_irq_disable();
stop_other_cpus();
#endif
lapic_shutdown();
#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
#ifdef CONFIG_X86_64
x86_platform.iommu_shutdown();
#endif
}
static void __machine_emergency_restart(int emergency)
{
reboot_emergency = emergency;
machine_ops.emergency_restart();
}
static void native_machine_restart(char *__unused)
{
printk("machine restart\n");
if (!reboot_force)
machine_shutdown();
__machine_emergency_restart(0);
}
static void native_machine_halt(void)
{
/* Stop other cpus and apics */
machine_shutdown();
tboot_shutdown(TB_SHUTDOWN_HALT);
stop_this_cpu(NULL);
}
static void native_machine_power_off(void)
{
if (pm_power_off) {
if (!reboot_force)
machine_shutdown();
pm_power_off();
}
/* A fallback in case there is no PM info available */
tboot_shutdown(TB_SHUTDOWN_HALT);
}
struct machine_ops machine_ops = {
.power_off = native_machine_power_off,
.shutdown = native_machine_shutdown,
.emergency_restart = native_machine_emergency_restart,
.restart = native_machine_restart,
.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
.crash_shutdown = native_machine_crash_shutdown,
#endif
};
void machine_power_off(void)
{
machine_ops.power_off();
}
void machine_shutdown(void)
{
machine_ops.shutdown();
}
void machine_emergency_restart(void)
{
__machine_emergency_restart(1);
}
void machine_restart(char *cmd)
{
machine_ops.restart(cmd);
}
void machine_halt(void)
{
machine_ops.halt();
}
#ifdef CONFIG_KEXEC
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);
}
#endif
#if defined(CONFIG_SMP)
/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;
static nmi_shootdown_cb shootdown_callback;
static atomic_t waiting_for_crash_ipi;
static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
{
int cpu;
cpu = raw_smp_processor_id();
/*
* Don't do anything if this handler is invoked on the crashing cpu.
* Otherwise, the system will completely hang. The crashing cpu can get
* an NMI if the system was initially booted with the nmi_watchdog parameter.
*/
if (cpu == crashing_cpu)
return NMI_HANDLED;
local_irq_disable();
shootdown_callback(cpu, regs);
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
halt();
for (;;)
cpu_relax();
return NMI_HANDLED;
}
static void smp_send_nmi_allbutself(void)
{
apic->send_IPI_allbutself(NMI_VECTOR);
}
/*
* Halt all other CPUs, calling the specified function on each of them
*
* This function can be used to halt all other CPUs at crash
* or emergency reboot time. The function passed as a parameter
* will be called inside an NMI handler on all CPUs.
*/
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
unsigned long msecs;
local_irq_disable();
/* Make a note of the crashing cpu. It will be used in the NMI callback. */
crashing_cpu = safe_smp_processor_id();
shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* Return what? */
/*
* Ensure the new callback function is set before sending
* out the NMI
*/
wmb();
smp_send_nmi_allbutself();
msecs = 1000; /* Wait at most a second for the other cpus to stop */
while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
mdelay(1);
msecs--;
}
/* Leave the nmi callback set */
}
#else /* !CONFIG_SMP */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
/* No other CPUs to shoot down */
}
#endif