#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H

#include <uapi/asm/mce.h>

/*
 * Machine Check support for x86
 */
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
/* Extract the extended-register count from an MCG_CAP value */
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
/* Mask for the MCA error code in the low bits of MCi_STATUS */
#define MCACOD 0xffff /* MCA Error Code */
/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xfff0 /* mask to match the whole scrub code range */
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) /* recoverable address lsb */
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) /* address mode, values below */
#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
#define MCI_MISC_ADDR_PHYS 2 /* physical address */
#define MCI_MISC_ADDR_MEM 3 /* memory address */
#define MCI_MISC_ADDR_GENERIC 7 /* generic */
/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN (1ULL << 30) /* enable CMCI for this bank */
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL /* CMCI error-count threshold */
/*
 * MCJ_* flags: error-injection context/options.
 * NOTE(review): presumably consumed by the mce-inject facility via
 * struct mce — confirm against the injector code.
 */
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */

#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */

/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
#define MCE_LOG_LEN 32
#define MCE_LOG_SIGNATURE "MACHINECHECK"

/*
 * This structure contains all data related to the MCE log. Also
 * carries a signature to make it easier to find from external
 * debugging tools. Each entry is only valid when its finished flag
 * is set.
 */
struct mce_log {
	char signature[12]; /* "MACHINECHECK" */
	unsigned len; /* = MCE_LOG_LEN */
	unsigned next; /* index of the next entry to be written */
	unsigned flags; /* bit 0: MCE_OVERFLOW — entries were lost */
	unsigned recordlen; /* length of struct mce */
	struct mce entry[MCE_LOG_LEN]; /* fixed-size ring of records */
};
/*
 * Global machine-check configuration, shared by the MCE core.
 * A single instance, mca_cfg, is declared below.
 */
struct mca_config {
	bool dont_log_ce; /* don't log corrected errors */
	bool cmci_disabled; /* CMCI notification disabled */
	bool ignore_ce; /* ignore corrected errors entirely */
	bool disabled; /* machine-check support disabled */
	bool ser; /* MCG_SER_P: software error recovery supported */
	bool bios_cmci_threshold; /* keep BIOS-set CMCI threshold */
	u8 banks; /* number of MCA banks on this CPU */
	s8 bootlog; /* log boot-time errors: <0 = default */
	int tolerant; /* error tolerance level */
	int monarch_timeout; /* us to wait for other CPUs on MCE */
	int panic_timeout; /* seconds before reboot on panic */
	u32 rip_msr; /* MSR holding the faulting RIP, if any */
};

extern struct mca_config mca_cfg;
/*
 * Register/unregister a notifier on the chain used to decode and
 * report logged machine-check records (e.g. by EDAC drivers).
 */
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/atomic.h>

/* Non-zero once P5-style machine checks have been enabled */
extern int mce_p5_enabled;
#ifdef CONFIG_X86_MCE
/* One-time MCE subsystem init; mcheck_cpu_init() runs per CPU */
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
#endif
/* Pre-P6 (P5 / WinChip) machine-check support */
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
#endif
/* Initialize a struct mce record before filling it in */
void mce_setup(struct mce *m);
/* Add a record to the MCE log */
void mce_log(struct mce *m);
/* Per-CPU sysfs device for the MCE subsystem */
DECLARE_PER_CPU(struct device *, mce_device);
/*
 * Maximum banks number.
 * This is the limit of the current register layout on
 * Intel CPUs.
 */
#define MAX_NR_BANKS 32
2007-10-17 23:04:40 +07:00
|
|
|
#ifdef CONFIG_X86_MCE_INTEL
|
|
|
|
void mce_intel_feature_init(struct cpuinfo_x86 *c);
|
2009-02-12 19:49:36 +07:00
|
|
|
void cmci_clear(void);
|
|
|
|
void cmci_reenable(void);
|
2013-03-20 17:01:29 +07:00
|
|
|
void cmci_rediscover(void);
|
2009-02-12 19:49:36 +07:00
|
|
|
void cmci_recheck(void);
|
2007-10-17 23:04:40 +07:00
|
|
|
#else
|
|
|
|
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
|
2009-02-12 19:49:36 +07:00
|
|
|
static inline void cmci_clear(void) {}
|
|
|
|
static inline void cmci_reenable(void) {}
|
2013-03-20 17:01:29 +07:00
|
|
|
static inline void cmci_rediscover(void) {}
|
2009-02-12 19:49:36 +07:00
|
|
|
static inline void cmci_recheck(void) {}
|
2007-10-17 23:04:40 +07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_MCE_AMD
|
|
|
|
void mce_amd_feature_init(struct cpuinfo_x86 *c);
|
|
|
|
#else
|
|
|
|
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
|
|
|
|
#endif
|
|
|
|
|
/* Non-zero if this CPU supports machine checks */
int mce_available(struct cpuinfo_x86 *c);

/* Per-CPU counters: MCEs taken via exception vs. found by polling */
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

/* Number of CPUs currently inside the MCE handler */
extern atomic_t mce_entry;

/* Bitmap type wide enough for one bit per possible bank */
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
/* Banks to be scanned by the periodic poller on this CPU */
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
/* Options for machine_check_poll() */
enum mcp_flags {
	MCP_TIMESTAMP = (1 << 0), /* log time stamp */
	MCP_UC = (1 << 1), /* log uncorrected errors */
	MCP_DONTLOG = (1 << 2), /* only clear, don't log */
};
/* Scan the banks in *b for pending events per the given flags */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

int mce_notify_irq(void);
x86, mce: support action-optional machine checks
Newer Intel CPUs support a new class of machine checks called recoverable
action optional.
Action Optional means that the CPU detected some form of corruption in
the background and tells the OS about it using a machine check
exception. The OS can then take appropriate action, like killing the
process with the corrupted data or logging the event properly to disk.
This is done by the new generic high level memory failure handler added
in an earlier patch. The high level handler takes the address with the
failed memory and does the appropriate action, like killing the process.
In this version of the patch the high level handler is stubbed out
with a weak function to not create a direct dependency on the hwpoison
branch.
The high level handler cannot be directly called from the machine check
exception though, because it has to run in a defined process context to
be able to sleep when taking VM locks (it is not expected to sleep for a
long time, just do so in some exceptional cases like lock contention)
Thus the MCE handler has to queue a work item for process context,
trigger process context and then call the high level handler from there.
This patch adds two path to process context: through a per thread kernel
exit notify_user() callback or through a high priority work item.
The first runs when the process exits back to user space, the other when
it goes to sleep and there is no higher priority process.
The machine check handler will schedule both, and whoever runs first
will grab the event. This is done because quick reaction to this
event is critical to avoid a potential more fatal machine check
when the corruption is consumed.
There is a simple lock less ring buffer to queue the corrupted
addresses between the exception handler and the process context handler.
Then in process context it just calls the high level VM code with
the corrupted PFNs.
The code adds the required code to extract the failed address from
the CPU's machine check registers. It doesn't try to handle all
possible cases -- the specification has 6 different ways to specify
memory address -- but only the linear address.
Most of the required checking has been already done earlier in the
mce_severity rule checking engine. Following the Intel
recommendations Action Optional errors are only enabled for known
situations (encoded in MCACODs). The errors are ignored otherwise,
because they are action optional.
v2: Improve comment, disable preemption while processing ring buffer
(reported by Ying Huang)
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
void mce_notify_process(void);

/* Per-CPU fake MCE record used for error injection */
DECLARE_PER_CPU(struct mce, injectm);

/*
 * Install a replacement write handler.
 * NOTE(review): presumably backs the /dev/mcelog write path used by
 * the injector — confirm against the chardev code.
 */
extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
				    const char __user *ubuf,
				    size_t usize, loff_t *off));
/*
 * Exception handler
 */

/* Call the installed machine check handler for this CPU setup. */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
/* The #MC exception handler on modern CPUs */
void do_machine_check(struct pt_regs *, long);
/*
 * Threshold handler
 */

extern void (*mce_threshold_vector)(void);
/* CPU hotplug callback for per-bank threshold sysfs state */
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/*
 * Thermal handler
 */

void intel_init_thermal(struct cpuinfo_x86 *c);

/* Log a thermal-throttling event from the given MSR status value */
void mce_log_therm_throt_event(__u64 status);

/* Interrupt Handler for core thermal thresholds */
extern int (*platform_thermal_notify)(__u64 msr_val);
/* Interrupt Handler for package thermal thresholds */
extern int (*platform_thermal_package_notify)(__u64 msr_val);

/* Callback support of rate control, return true, if
 * callback has rate control */
extern bool (*platform_thermal_package_rate_control)(void);

#ifdef CONFIG_X86_THERMAL_VECTOR
extern void mcheck_intel_therm_init(void);
#else
static inline void mcheck_intel_therm_init(void) { }
#endif
ACPI, APEI, Generic Hardware Error Source memory error support
Generic Hardware Error Source provides a way to report platform
hardware errors (such as that from chipset). It works in so called
"Firmware First" mode, that is, hardware errors are reported to
firmware firstly, then reported to Linux by firmware. This way, some
non-standard hardware error registers or non-standard hardware link
can be checked by firmware to produce more valuable hardware error
information for Linux.
Now, only SCI notification type and memory errors are supported. More
notification types and hardware error types will be added later. These
memory errors are reported to user space through /dev/mcelog via
faking a corrected Machine Check, so that the error memory page can be
offlined by /sbin/mcelog if the error count for one page is beyond the
threshold.
On some machines, Machine Check can not report physical address for
some corrected memory errors, but GHES can do that. So this simplified
GHES is implemented firstly.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
/*
 * Used by APEI to report memory error via /dev/mcelog
 */

struct cper_sec_mem_err;
/* Fake a corrected/uncorrected MCE record for a firmware-reported error */
extern void apei_mce_report_mem_error(int corrected,
				      struct cper_sec_mem_err *mem_err);

#endif /* _ASM_X86_MCE_H */