powerpc/pseries: hwpoison the pages upon hitting UE

Add support to hwpoison pages upon hitting an uncorrectable memory
error (UE) machine check exception.

This patch queues the address where the UE is hit to a per-CPU array
and schedules work to plumb it into the memory poison infrastructure
via memory_failure() (a user-space sketch of that infrastructure
follows the tags below).

Reviewed-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com>
[mpe: Combine #ifdefs, drop PPC_BIT8(), use an empty inline stub]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
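
For context: memory_failure() is the same entry point into the kernel's
memory poison infrastructure that user space can reach via
madvise(MADV_HWPOISON). A minimal test sketch, assuming a kernel built
with CONFIG_MEMORY_FAILURE and a process with CAP_SYS_ADMIN (this
program is illustrative, not part of the patch):

#define _GNU_SOURCE		/* glibc guards MADV_HWPOISON behind this */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* fault the page in so it has a backing page frame */

	/* Poison the page; subsequent accesses to it raise SIGBUS */
	if (madvise(p, psize, MADV_HWPOISON) != 0) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}
	printf("page at %p is now hwpoisoned\n", (void *)p);
	return 0;
}

On success the kernel logs a "Memory failure" message and the page is
accounted under HardwareCorrupted in /proc/meminfo.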
commit 7f177f9810 (parent 4df2cb633b)
Author: Ganesh Goudar <ganeshgr@linux.ibm.com>
Date:   2019-04-15 15:35:44 +05:30
Committed by: Michael Ellerman <mpe@ellerman.id.au>

 3 files changed, 85 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h

@@ -210,6 +210,7 @@ extern void release_mce_event(void);
 extern void machine_check_queue_event(void);
 extern void machine_check_print_event_info(struct machine_check_event *evt,
 					       bool user_mode, bool in_guest);
+unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
 #ifdef CONFIG_PPC_BOOK3S_64
 void flush_and_reload_slb(void);
 #endif /* CONFIG_PPC_BOOK3S_64 */

diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c

@@ -36,7 +36,7 @@
  * Convert an address related to an mm to a PFN. NOTE: we are in real
  * mode, we could potentially race with page table updates.
  */
-static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
 	pte_t *ptep;
 	unsigned long flags;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c

@@ -707,6 +707,87 @@ static int mce_handle_error(struct rtas_error_log *errp)
 	return disposition;
 }
 
+#ifdef CONFIG_MEMORY_FAILURE
+
+static DEFINE_PER_CPU(int, rtas_ue_count);
+static DEFINE_PER_CPU(unsigned long, rtas_ue_paddr[MAX_MC_EVT]);
+
+#define UE_EFFECTIVE_ADDR_PROVIDED	0x40
+#define UE_LOGICAL_ADDR_PROVIDED	0x20
+
+static void pseries_hwpoison_work_fn(struct work_struct *work)
+{
+	unsigned long paddr;
+	int index;
+
+	while (__this_cpu_read(rtas_ue_count) > 0) {
+		index = __this_cpu_read(rtas_ue_count) - 1;
+		paddr = __this_cpu_read(rtas_ue_paddr[index]);
+		memory_failure(paddr >> PAGE_SHIFT, 0);
+		__this_cpu_dec(rtas_ue_count);
+	}
+}
+
+static DECLARE_WORK(hwpoison_work, pseries_hwpoison_work_fn);
+
+static void queue_ue_paddr(unsigned long paddr)
+{
+	int index;
+
+	index = __this_cpu_inc_return(rtas_ue_count) - 1;
+	if (index >= MAX_MC_EVT) {
+		__this_cpu_dec(rtas_ue_count);
+		return;
+	}
+	this_cpu_write(rtas_ue_paddr[index], paddr);
+	schedule_work(&hwpoison_work);
+}
+
+static void pseries_do_memory_failure(struct pt_regs *regs,
+				      struct pseries_mc_errorlog *mce_log)
+{
+	unsigned long paddr;
+
+	if (mce_log->sub_err_type & UE_LOGICAL_ADDR_PROVIDED) {
+		paddr = be64_to_cpu(mce_log->logical_address);
+	} else if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) {
+		unsigned long pfn;
+
+		pfn = addr_to_pfn(regs,
+				  be64_to_cpu(mce_log->effective_address));
+		if (pfn == ULONG_MAX)
+			return;
+		paddr = pfn << PAGE_SHIFT;
+	} else {
+		return;
+	}
+
+	queue_ue_paddr(paddr);
+}
+
+static void pseries_process_ue(struct pt_regs *regs,
+			       struct rtas_error_log *errp)
+{
+	struct pseries_errorlog *pseries_log;
+	struct pseries_mc_errorlog *mce_log;
+
+	if (!rtas_error_extended(errp))
+		return;
+
+	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+	if (!pseries_log)
+		return;
+
+	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+
+	if (mce_log->error_type == MC_ERROR_TYPE_UE)
+		pseries_do_memory_failure(regs, mce_log);
+}
+#else
+static inline void pseries_process_ue(struct pt_regs *regs,
+				      struct rtas_error_log *errp) { }
+#endif /*CONFIG_MEMORY_FAILURE */
+
 /*
  * Process MCE rtas errlog event.
  */
@@ -765,6 +846,8 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
 		recovered = 1;
 	}
 
+	pseries_process_ue(regs, err);
+
 	/* Queue irq work to log this rtas event later. */
 	irq_work_queue(&mce_errlog_process_work);
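
A note on why the poisoning is deferred rather than done in place:
memory_failure() may sleep (it can take page and mm locks), while this
machine check path runs in exception context, partly in real mode as
the addr_to_pfn() comment above notes, where sleeping is not allowed.
The patch therefore only records the physical address in a per-CPU
slot and kicks a work item; schedule_work() is safe here because it
merely marks the work pending. Stripped of the RTAS specifics, the
shape of the pattern is the following sketch (illustrative names, not
from the patch):

#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define MAX_PENDING	8	/* illustrative bound, like MAX_MC_EVT */

static DEFINE_PER_CPU(int, pending_count);
static DEFINE_PER_CPU(unsigned long, pending_addr[MAX_PENDING]);

/* Runs later in process context, where sleeping calls are allowed */
static void drain_fn(struct work_struct *work)
{
	while (__this_cpu_read(pending_count) > 0) {
		int i = __this_cpu_read(pending_count) - 1;

		pr_info("handling pending addr 0x%lx\n",
			__this_cpu_read(pending_addr[i]));
		__this_cpu_dec(pending_count);
	}
}
static DECLARE_WORK(drain_work, drain_fn);

/* Callable from atomic context: per-CPU stores plus a queue kick only */
static void record_addr(unsigned long addr)
{
	int i = __this_cpu_inc_return(pending_count) - 1;

	if (i >= MAX_PENDING) {		/* buffer full: drop the event */
		__this_cpu_dec(pending_count);
		return;
	}
	this_cpu_write(pending_addr[i], addr);
	schedule_work(&drain_work);
}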