/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
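
/*
 * Check whether a perf_event may use this trace event. Raw sample data
 * (PERF_SAMPLE_RAW) can expose kernel memory, so it is limited to
 * privileged users unless the event is per-task and the trace event is
 * flagged TRACE_EVENT_FL_CAP_ANY; pure counting is always allowed.
 */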
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event) &&
	    perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
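
/*
 * Register the trace event for perf use: take a reference on the event
 * and, for its first perf user, allocate the per-cpu hlist heads that
 * perf_trace_add() links events into. The first registration
 * system-wide also allocates the per-context per-cpu scratch buffers
 * handed out by perf_trace_buf_prepare().
 */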
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
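
/*
 * Drop a perf reference on the trace event. The last perf user
 * unregisters the perf callback and, once
 * tracepoint_synchronize_unregister() guarantees no callback is still
 * running, frees the per-cpu hlist heads; the last user system-wide
 * also frees the scratch buffers.
 */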
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
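
/*
 * Open/close forward to the event class's reg() callback with
 * TRACE_REG_PERF_OPEN/CLOSE and the perf_event as payload. Tracepoint
 * classes typically have nothing to do here; the function-trace pseudo
 * event (see perf_ftrace_event_register() below) uses these hooks to
 * register and unregister its ftrace_ops.
 */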
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
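
/*
 * Full initialization path for one perf_event: permission check, then
 * registration, then open; a failing open rolls the registration back.
 */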
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
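
/*
 * Entry point from the perf core: find the trace event whose id matches
 * attr.config, pin the module that provides it, and initialize the
 * perf_event. event_mutex keeps the ftrace_events list stable while it
 * is walked.
 */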
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
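
/*
 * Tear-down counterpart of perf_trace_init(): close and unregister the
 * event under event_mutex.
 */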
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
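
/*
 * Schedule the event in on this CPU: link it into the trace event's
 * per-cpu hlist so the trace callback finds it, honouring PERF_EF_START.
 * perf_trace_del() below is the inverse.
 */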
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
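
/*
 * Hand out a per-cpu scratch buffer for building a raw sample. The
 * recursion context returned through *rctxp selects which of the
 * PERF_NR_CONTEXTS buffers is in use and must be passed on to
 * perf_trace_buf_submit(); a negative context means recursion was
 * detected and no buffer is available.
 */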
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
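
/*
 * ftrace_ops callback for the function-trace perf event: build a
 * struct ftrace_entry in the scratch buffer and submit it, with regs
 * captured from the caller via perf_fetch_caller_regs().
 */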
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;

	head = this_cpu_ptr(event_function.perf_events);
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}
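
/*
 * Register/unregister the per-event ftrace_ops, and enable/disable it
 * on the local CPU as perf schedules the event in and out; the CONTROL
 * flag marks the ops as supporting such per-cpu toggling.
 */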
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
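
/*
 * reg() callback for the function-trace pseudo event: map the
 * TRACE_REG_PERF_* requests issued by the code above onto the
 * ftrace_ops helpers. PERF_REGISTER/UNREGISTER need no work, and the
 * non-perf REGISTER/UNREGISTER requests fall through to -EINVAL.
 */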
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}

#endif /* CONFIG_FUNCTION_TRACER */