#include <linux/device.h>
#include <asm/mce.h>
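
/* Severity grades for machine check errors, roughly in order of increasing severity. */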
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};
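
/* Notifier chain on which logged MCE records are handed to decoders (e.g. EDAC). */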
extern struct blocking_notifier_head x86_mce_decoder_chain;
#define ATTR_LEN 16
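
/*
 * Default interval for the periodic machine check poll.  It is also roughly
 * how long we keep polling after a CMCI storm before backing out of storm
 * mode, provided no further errors are seen (see mce_intel.c).
 */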
#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */

/* One object for each MCE bank, shared by all CPUs */
struct mce_bank {
	u64			ctl;			/* subevents to enable */
	unsigned char		init;			/* initialise bank? */
	struct device_attribute	attr;			/* device attribute */
	char			attrname[ATTR_LEN];	/* attribute name */
};
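
/*
 * The lock-less list / gen_pool below queues MCE records collected in
 * machine check context so they can be processed and fed to the decoder
 * chain later, from a safer context.
 */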
struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);

struct dentry *mce_get_debugfs_dir(void);

extern struct mce_bank *mce_banks;
extern mce_banks_t mce_banks_ce_disabled;
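
/*
 * CMCI (Corrected Machine Check Interrupt) support is Intel-specific; the
 * stubs below keep the core MCE code buildable without CONFIG_X86_MCE_INTEL.
 */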
#ifdef CONFIG_X86_MCE_INTEL
|
x86/MCE/intel: Cleanup CMCI storm logic
Initially, this started with the yet another report about a race
condition in the CMCI storm adaptive period length thing. Yes, we have
to admit, it is fragile and error prone. So let's simplify it.
The simpler logic is: now, after we enter storm mode, we go straight to
polling with CMCI_STORM_INTERVAL, i.e. once a second. We remain in storm
mode as long as we see errors being logged while polling.
Theoretically, if we see an uninterrupted error stream, we will remain
in storm mode indefinitely and keep polling the MSRs.
However, when the storm is actually a burst of errors, once we have
logged them all, we back out of it after ~5 mins of polling and no more
errors logged.
If we encounter an error during those 5 minutes, we reset the polling
interval to 5 mins.
Making machine_check_poll() return a bool and denoting whether it has
seen an error or not lets us simplify a bunch of code and move the storm
handling private to mce_intel.c.
Some minor cleanups while at it.
Reported-by: Calvin Owens <calvinowens@fb.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/1417746575-23299-1-git-send-email-calvinowens@fb.com
Signed-off-by: Borislav Petkov <bp@suse.de>
2015-01-13 21:08:51 +07:00
|
|
|
unsigned long cmci_intel_adjust_timer(unsigned long interval);
|
|
|
|
bool mce_intel_cmci_poll(void);
|
2012-08-10 01:44:51 +07:00
|
|
|
void mce_intel_hcpu_update(unsigned long cpu);
|
2013-07-01 22:38:47 +07:00
|
|
|
void cmci_disable_bank(int bank);
|
2012-08-10 01:44:51 +07:00
|
|
|
#else
|
x86/MCE/intel: Cleanup CMCI storm logic
Initially, this started with the yet another report about a race
condition in the CMCI storm adaptive period length thing. Yes, we have
to admit, it is fragile and error prone. So let's simplify it.
The simpler logic is: now, after we enter storm mode, we go straight to
polling with CMCI_STORM_INTERVAL, i.e. once a second. We remain in storm
mode as long as we see errors being logged while polling.
Theoretically, if we see an uninterrupted error stream, we will remain
in storm mode indefinitely and keep polling the MSRs.
However, when the storm is actually a burst of errors, once we have
logged them all, we back out of it after ~5 mins of polling and no more
errors logged.
If we encounter an error during those 5 minutes, we reset the polling
interval to 5 mins.
Making machine_check_poll() return a bool and denoting whether it has
seen an error or not lets us simplify a bunch of code and move the storm
handling private to mce_intel.c.
Some minor cleanups while at it.
Reported-by: Calvin Owens <calvinowens@fb.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/1417746575-23299-1-git-send-email-calvinowens@fb.com
Signed-off-by: Borislav Petkov <bp@suse.de>
2015-01-13 21:08:51 +07:00
|
|
|
# define cmci_intel_adjust_timer mce_adjust_timer_default
|
|
|
|
static inline bool mce_intel_cmci_poll(void) { return false; }
|
2012-08-10 01:44:51 +07:00
|
|
|
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
|
2013-07-01 22:38:47 +07:00
|
|
|
static inline void cmci_disable_bank(int bank) { }
|
2012-08-10 01:44:51 +07:00
|
|
|
#endif
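
/*
 * Make sure the per-CPU machine check poll timer fires no later than the
 * given interval, e.g. to switch to fast polling when a CMCI storm starts.
 */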
void mce_timer_kick(unsigned long interval);
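
/*
 * ACPI/APEI: ERST provides persistent storage for MCE records, so a fatal
 * error saved just before panicking can be read back, reported and cleared
 * (e.g. by mcelog) on the next boot.
 */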
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif
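
/* Log an injected record through the regular MCE machinery (used by mce-inject). */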
void mce_inject_log(struct mce *m);

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	return m1->bank != m2->bank ||
		m1->status != m2->status ||
		m1->addr != m2->addr ||
		m1->misc != m2->misc;
}
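
/* sysfs "trigger" attribute: user space helper to run when an MCE is logged. */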
extern struct device_attribute dev_attr_trigger;
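
/* Legacy /dev/mcelog interface and the notifier chain used by MCE injectors. */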
#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif