diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index db6109a885a7..a9038c951619 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -136,6 +136,25 @@ extern void perf_events_lapic_init(void);
 
 #define PERF_EVENT_INDEX_OFFSET			0
 
+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT	(1UL << 3)
+
+#define perf_misc_flags(regs)				\
+({	int misc = 0;					\
+	if (user_mode(regs))				\
+		misc |= PERF_RECORD_MISC_USER;		\
+	else						\
+		misc |= PERF_RECORD_MISC_KERNEL;	\
+	if (regs->flags & PERF_EFLAGS_EXACT)		\
+		misc |= PERF_RECORD_MISC_EXACT;		\
+	misc; })
+
+#define perf_instruction_pointer(regs)	((regs)->ip)
+
 #else
 static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1badff6b6b28..5cb4e8dcee4b 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,41 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long offset, addr = (unsigned long)from;
+	int type = in_nmi() ? KM_NMI : KM_IRQ0;
+	unsigned long size, len = 0;
+	struct page *page;
+	void *map;
+	int ret;
+
+	do {
+		ret = __get_user_pages_fast(addr, 1, 0, &page);
+		if (!ret)
+			break;
+
+		offset = addr & (PAGE_SIZE - 1);
+		size = min(PAGE_SIZE - offset, n - len);
+
+		map = kmap_atomic(page, type);
+		memcpy(to, map+offset, size);
+		kunmap_atomic(map, type);
+		put_page(page);
+
+		len  += size;
+		to   += size;
+		addr += size;
+
+	} while (len < n);
+
+	return len;
+}
+
 static u64 perf_event_mask __read_mostly;
 
 struct event_constraint {
@@ -1550,41 +1585,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
-	unsigned long size, len = 0;
-	struct page *page;
-	void *map;
-	int ret;
-
-	do {
-		ret = __get_user_pages_fast(addr, 1, 0, &page);
-		if (!ret)
-			break;
-
-		offset = addr & (PAGE_SIZE - 1);
-		size = min(PAGE_SIZE - offset, n - len);
-
-		map = kmap_atomic(page, type);
-		memcpy(to, map+offset, size);
-		kunmap_atomic(map, type);
-		put_page(page);
-
-		len  += size;
-		to   += size;
-		addr += size;
-
-	} while (len < n);
-
-	return len;
-}
-
 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
 {
 	unsigned long bytes;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44f6ed42a934..7eb78be3b229 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -547,7 +547,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	x86_pmu_disable_event(event);
 
 	if (unlikely(event->attr.precise))
-		intel_pmu_pebs_disable(hwc);
+		intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
@@ -600,7 +600,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	}
 
 	if (unlikely(event->attr.precise))
-		intel_pmu_pebs_enable(hwc);
+		intel_pmu_pebs_enable(event);
 
 	__x86_pmu_enable_event(hwc);
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 0d994ef213b9..50e6ff3281fc 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -331,26 +331,32 @@ intel_pebs_constraints(struct perf_event *event)
 	return &emptyconstraint;
 }
 
-static void intel_pmu_pebs_enable(struct hw_perf_event *hwc)
+static void intel_pmu_pebs_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val = cpuc->pebs_enabled;
 
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
 	val |= 1ULL << hwc->idx;
 	wrmsrl(MSR_IA32_PEBS_ENABLE, val);
+
+	intel_pmu_lbr_enable(event);
 }
 
-static void intel_pmu_pebs_disable(struct hw_perf_event *hwc)
+static void intel_pmu_pebs_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val = cpuc->pebs_enabled;
 
 	val &= ~(1ULL << hwc->idx);
 	wrmsrl(MSR_IA32_PEBS_ENABLE, val);
 
 	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+
+	intel_pmu_lbr_disable(event);
 }
 
 static void intel_pmu_pebs_enable_all(void)
@@ -369,6 +375,70 @@ static void intel_pmu_pebs_disable_all(void)
 	wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 }
 
+#include <asm/insn.h>
+
+#define MAX_INSN_SIZE	16
+
+static inline bool kernel_ip(unsigned long ip)
+{
+#ifdef CONFIG_X86_32
+	return ip > PAGE_OFFSET;
+#else
+	return (long)ip < 0;
+#endif
+}
+
+static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	unsigned long from = cpuc->lbr_entries[0].from;
+	unsigned long old_to, to = cpuc->lbr_entries[0].to;
+	unsigned long ip = regs->ip;
+
+	if (!cpuc->lbr_stack.nr || !from || !to)
+		return 0;
+
+	if (ip < to)
+		return 0;
+
+	/*
+	 * We sampled a branch insn, rewind using the LBR stack
+	 */
+	if (ip == to) {
+		regs->ip = from;
+		return 1;
+	}
+
+	do {
+		struct insn insn;
+		u8 buf[MAX_INSN_SIZE];
+		void *kaddr;
+
+		old_to = to;
+		if (!kernel_ip(ip)) {
+			int bytes, size = min_t(int, MAX_INSN_SIZE, ip - to);
+
+			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+			if (bytes != size)
+				return 0;
+
+			kaddr = buf;
+		} else
+			kaddr = (void *)to;
+
+		kernel_insn_init(&insn, kaddr);
+		insn_get_length(&insn);
+		to += insn.length;
+	} while (to < ip);
+
+	if (to == ip) {
+		regs->ip = old_to;
+		return 1;
+	}
+
+	return 0;
+}
+
 static int intel_pmu_save_and_restart(struct perf_event *event);
 static void intel_pmu_disable_event(struct perf_event *event);
 
@@ -424,6 +494,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	regs.bp = at->bp;
 	regs.sp = at->sp;
 
+	if (intel_pmu_pebs_fixup_ip(&regs))
+		regs.flags |= PERF_EFLAGS_EXACT;
+	else
+		regs.flags &= ~PERF_EFLAGS_EXACT;
+
 	if (perf_event_overflow(event, 1, &data, &regs))
 		intel_pmu_disable_event(event);
 
@@ -487,6 +562,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 		regs.bp = at->bp;
 		regs.sp = at->sp;
 
+		if (intel_pmu_pebs_fixup_ip(&regs))
+			regs.flags |= PERF_EFLAGS_EXACT;
+		else
+			regs.flags &= ~PERF_EFLAGS_EXACT;
+
 		if (perf_event_overflow(event, 1, &data, &regs))
 			intel_pmu_disable_event(event);
 	}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ab4fd9ede264..be85f7c4a94f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -294,6 +294,12 @@ struct perf_event_mmap_page {
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
 
+#define PERF_RECORD_MISC_EXACT			(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
+
 struct perf_event_header {
 	__u32			type;
 	__u16			misc;
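
Note (not part of the patch): the new PERF_RECORD_MISC_EXACT bit reaches user space through the misc field of each sample's perf_event_header, set by the perf_misc_flags() macro added above whenever intel_pmu_pebs_fixup_ip() managed to rewind the PEBS IP via the LBR entry. Below is a minimal, self-contained sketch of how a hypothetical ring-buffer consumer could test that bit to tell precise sample IPs from ones that may still carry the usual one-instruction PEBS skid. The values of PERF_RECORD_MISC_USER and PERF_RECORD_MISC_EXACT are taken from the hunks above; the cpu-mode mask, the PERF_RECORD_MISC_KERNEL value, and the helper itself are illustrative assumptions, not part of this change.

#include <stdint.h>
#include <stdio.h>

/* USER and EXACT values as in the hunks above; KERNEL and the cpu-mode
 * mask follow the perf ABI header of this era (assumption). */
#define PERF_RECORD_MISC_CPUMODE_MASK	(3 << 0)
#define PERF_RECORD_MISC_KERNEL		(1 << 0)
#define PERF_RECORD_MISC_USER		(2 << 0)
#define PERF_RECORD_MISC_EXACT		(1 << 14)

/* Mirrors struct perf_event_header from include/linux/perf_event.h. */
struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

/*
 * Hypothetical helper: classify a sample's IP.  Samples with MISC_EXACT
 * set had their IP rewound to the instruction that caused the PEBS event;
 * others may point one instruction past it.
 */
static void describe_sample(const struct perf_event_header *hdr, uint64_t ip)
{
	int kernel = (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) ==
		     PERF_RECORD_MISC_KERNEL;

	printf("%s ip %#llx (%s)\n",
	       kernel ? "kernel" : "user",
	       (unsigned long long)ip,
	       (hdr->misc & PERF_RECORD_MISC_EXACT) ? "exact" : "skid possible");
}

int main(void)
{
	/* Fake sample header; in real use this comes out of the mmap buffer. */
	struct perf_event_header hdr = {
		.type = 9,	/* PERF_RECORD_SAMPLE */
		.misc = PERF_RECORD_MISC_USER | PERF_RECORD_MISC_EXACT,
		.size = sizeof(hdr),
	};

	describe_sample(&hdr, 0x400512);
	return 0;
}

In-kernel, the bit is only set when the LBR entry allowed walking forward from the branch target to the sampled IP (or rewinding a sampled branch directly), so a consumer can rely on EXACT samples pointing at the triggering instruction rather than the instruction after it.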