Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-02 23:26:50 +07:00)
Commit 339ce1a4dc:
When running perf across all cpus with backtracing (-a -g), sometimes we
get samples without associated backtraces:

    23.44%     init  [kernel]     [k] restore
    11.46%     init  eeba0c       [k] 0x00000000eeba0c
     6.77%  swapper  [kernel]     [k] .perf_ctx_adjust_freq
     5.73%     init  [kernel]     [k] .__trace_hcall_entry
     4.69%     perf  libc-2.9.so  [.] 0x0000000006bb8c
           |
           |--11.11%-- 0xfffa941bbbc

It turns out the backtrace code has a check for the idle task and the IP
sampling does not. This creates problems when profiling an interrupt
heavy workload (in my case 10Gbit ethernet) since we get no backtraces
for interrupts received while idle (ie most of the workload).

Right now x86 and sh check that current is not NULL, which should never
happen so remove that too.

Idle task's exclusion must be performed from the core code, on top of
perf_event_attr:exclude_idle.

Signed-off-by: Anton Blanchard <anton@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mundt <lethal@linux-sh.org>
LKML-Reference: <20100118054707.GT12666@kryten>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
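For context, the guard this commit removes from the arch backtrace paths looked roughly like the sketch below. This is reconstructed from the commit description above, not quoted from the pre-patch sources, so the exact lines varied per architecture:

	/* Reconstructed sketch of the pre-patch bail-out in the arch
	 * callchain code. The plain IP sampling path had no equivalent
	 * check, which is why samples taken while idle arrived without
	 * backtraces. */
	if (!current || current->pid == 0)	/* NULL check + idle task (pid 0) */
		return;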
96 lines · 2.0 KiB · C
/*
 * Performance event callchain support - SuperH architecture code
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>

static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

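/*
 * The unwinder's diagnostic callbacks below are deliberate no-ops:
 * warnings are of no interest while sampling, and callchain_stack()
 * returning 0 lets the walk continue. Only resolved frame addresses
 * (callchain_address) are recorded into the entry.
 */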
static void callchain_warning(void *data, char *msg)
{
}

static void
callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int callchain_stack(void *data, char *name)
{
	return 0;
}

static void callchain_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops callchain_ops = {
	.warning	= callchain_warning,
	.warning_symbol	= callchain_warning_symbol,
	.stack		= callchain_stack,
	.address	= callchain_address,
};

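/*
 * Seed the entry with the kernel context marker and the sampled PC,
 * then let the generic SH unwinder walk the stack; each frame it
 * resolves comes back through callchain_ops.address() above.
 */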
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->pc);

	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}

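/*
 * Note (per the commit message above): this function used to bail out
 * for the idle task (pid 0) and for a NULL current, which cannot
 * happen. Those checks are now gone; idle exclusion belongs to the
 * core code via perf_event_attr.exclude_idle. Only the TASK_RUNNING
 * test on user-mode regs remains here.
 */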
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	/*
	 * Only the kernel side is implemented for now.
	 */
	if (!is_user)
		perf_callchain_kernel(regs, entry);
}

/*
 * No need for separate IRQ and NMI entries.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}
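
For reference, the core perf code of this era consumes this per-arch hook when a sampling event requests PERF_SAMPLE_CALLCHAIN. The sketch below approximates perf_prepare_sample() in kernel/perf_event.c around this commit; treat it as a reconstruction from the surrounding kernel version, not a verbatim excerpt:

	/* Approximate core-side usage, not a verbatim excerpt: */
	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;				/* one u64 for entry->nr */

		data->callchain = perf_callchain(regs);
		if (data->callchain)
			size += data->callchain->nr;	/* one u64 per stored ip */

		header->size += size * sizeof(u64);
	}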