// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head rcu_head;
	struct perf_callchain_entry *cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

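/*
 * One callchain entry must be able to hold up to max_stack real addresses
 * plus up to max_contexts_per_stack PERF_CONTEXT_* marker words.
 */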
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

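/*
 * Per-cpu recursion counters, one slot per recursion context (task,
 * softirq, hardirq, NMI), used to bail out if callchain capture
 * re-enters itself on this CPU.
 */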
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

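/*
 * Weak no-op stubs; architectures with callchain support override these
 * to walk the kernel and user stacks respectively.
 */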
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

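/*
 * Unpublish the pointer first, then free the buffers only after an RCU
 * grace period, so that concurrent lockless readers (possibly in NMI
 * context) can safely finish with the old buffers.
 */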
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

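/*
 * Take a reference on the shared callchain buffers, allocating them for
 * the first user. Must be paired with put_callchain_buffers().
 */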
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

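/*
 * Drop a reference; the last user tears the buffers down. The decrement
 * and the teardown are serialized against new users by callchain_mutex,
 * which atomic_dec_and_mutex_lock() only takes when the count hits zero.
 */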
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

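/*
 * Claim this CPU's callchain slot for the current recursion context. On
 * success, *rctx holds the context index that must later be passed to
 * put_callchain_entry(); returns NULL if we recursed or if no buffers
 * are allocated.
 */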
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries) {
		put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
		return NULL;
	}

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

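/*
 * Build a callchain for the current context. @init_nr seeds entry->nr so
 * a caller can reserve leading slots, @kernel and @user select which
 * sides of the stack to walk, @crosstask suppresses walking another
 * task's user stack, and @add_mark inserts PERF_CONTEXT_KERNEL /
 * PERF_CONTEXT_USER markers before each portion of the chain.
 */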
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (!entry)
		return NULL;

	ctx.entry = entry;
	ctx.max_stack = max_stack;
	/*
	 * ctx.nr counts how many addresses are in entry->ip[], excluding
	 * the PERF_CONTEXT_{KERNEL,USER,etc} markers, so that we really
	 * honour the number of entries requested via the
	 * kernel.perf_event_max_stack sysctl or the per event
	 * perf_event_attr.sample_max_stack knob, while entry->nr keeps its
	 * meaning of total entries stored, be they real addresses or
	 * PERF_CONTEXT_ markers.
	 */
	ctx.nr = entry->nr = init_nr;
	ctx.contexts = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = force_uaccess_begin();
			perf_callchain_user(&ctx, regs);
			force_uaccess_end(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
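
/*
 * Example usage (a sketch, not an in-tree caller): capture both the
 * kernel and user sides of the current stack at a sampling interrupt,
 * with PERF_CONTEXT_* markers included:
 *
 *	struct perf_callchain_entry *chain;
 *
 *	chain = get_perf_callchain(regs, 0, true, true,
 *				   sysctl_perf_event_max_stack, false, true);
 *	if (chain)
 *		... copy chain->ip[0 .. chain->nr - 1] into the sample ...
 */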

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
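/*
 * Writes are validated into a local copy and only committed when there
 * are no callchain users, since resizing under a user would invalidate
 * the buffers that alloc_callchain_buffers() sized from the old values.
 */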
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}