Merge tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Most of the changes are clean ups and small fixes. Some of them have
  stable tags to them. I searched through my INBOX just as the merge
  window opened and found lots of patches to pull. I ran them through
  all my tests and they were in linux-next for a few days.

  Features added this release:
  ----------------------------

   - Module globbing. You can now filter function tracing to several
     modules:

       # echo '*:mod:*snd*' > set_ftrace_filter

     (Dmitry Safonov)

   - Tracer-specific options are now visible even when the tracer is
     not active. It was rather annoying that you could only see and
     modify tracer options after enabling the tracer. Now they appear
     in the options/ directory even when the tracer is not active,
     although they are still only visible in the trace_options file
     while the tracer is active.

   - Trace options are now per instance (although some of the
     tracer-specific options are global).

   - New tracefs file: set_event_pid. If any pid is added to this file,
     then all events in the instance will filter out events that are
     not part of this pid. sched_switch and sched_wakeup events handle
     the next and wakee pids."

* tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
  tracefs: Fix refcount imbalance in start_creating()
  tracing: Put back comma for empty fields in boot string parsing
  tracing: Apply tracer specific options from kernel command line.
  tracing: Add some documentation about set_event_pid
  ring_buffer: Remove unneeded smp_wmb() before wakeup of reader benchmark
  tracing: Allow dumping traces without tracking trace started cpus
  ring_buffer: Fix more races when terminating the producer in the benchmark
  ring_buffer: Do no not complete benchmark reader too early
  tracing: Remove redundant TP_ARGS redefining
  tracing: Rename max_stack_lock to stack_trace_max_lock
  tracing: Allow arch-specific stack tracer
  recordmcount: arm64: Replace the ignored mcount call into nop
  recordmcount: Fix endianness handling bug for nop_mcount
  tracepoints: Fix documentation of RCU lockdep checks
  tracing: ftrace_event_is_function() can return boolean
  tracing: is_legal_op() can return boolean
  ring-buffer: rb_event_is_commit() can return boolean
  ring-buffer: rb_per_cpu_empty() can return boolean
  ring_buffer: ring_buffer_empty{cpu}() can return boolean
  ring-buffer: rb_is_reader_page() can return boolean
  ...
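
Below is a short, self-contained userspace sketch of the two new interfaces
called out above: the ':mod:' glob syntax for set_ftrace_filter and the new
set_event_pid file. It is an illustration, not part of this commit; it assumes
tracefs is mounted at /sys/kernel/tracing (older setups expose the same files
under /sys/kernel/debug/tracing) and that the caller may write there.

    /* sketch: exercise set_ftrace_filter module globbing and set_event_pid */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_tracefs(const char *file, const char *buf)
    {
            char path[256];
            int fd;

            /* assumed mount point; see the note above */
            snprintf(path, sizeof(path), "/sys/kernel/tracing/%s", file);
            fd = open(path, O_WRONLY);
            if (fd < 0) {
                    perror(path);
                    return -1;
            }
            if (write(fd, buf, strlen(buf)) < 0)
                    perror(path);
            return close(fd);
    }

    int main(void)
    {
            char pid[32];

            /* limit function tracing to modules whose name contains "snd" */
            write_tracefs("set_ftrace_filter", "*:mod:*snd*");

            /* filter all events in the instance down to the current pid */
            snprintf(pid, sizeof(pid), "%d\n", (int)getpid());
            return write_tracefs("set_event_pid", pid);
    }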
480 lines · 10 KiB · C
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries = STACK_TRACE_ENTRIES - 1,
        .entries = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
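
/*
 * trace_active is a per-CPU recursion guard: stack_trace_call() only
 * does its work when the count was zero on entry, so a function traced
 * while the stack is already being measured returns immediately. The
 * sysctl and seq_file paths below also raise it around
 * stack_trace_max_lock so the tracer cannot fire and deadlock on a
 * lock this CPU already holds.
 */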
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}
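
/*
 * ftrace entry callback. The ip argument is the address of the
 * mcount/fentry call site in the traced function; adding
 * MCOUNT_INSN_SIZE below makes it equal to the return address that
 * save_stack_trace() records, which is how check_stack() locates the
 * tracer's own frames in the trace so it can skip them.
 */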
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed; this variable is only modified from this CPU */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}
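
/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that stack_trace_call()
 * provides its own recursion protection (the trace_active counter
 * above), so the core does not need to wrap the callback in another
 * recursion guard.
 */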
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * If the stack tracer fired while we hold arch_spin_lock()
         * (or in an NMI on top of it), it would try to take
         * stack_trace_max_lock again and deadlock, so raise the
         * per-CPU trace_active guard here as well.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open = tracing_open_generic,
        .read = stack_max_size_read,
        .write = stack_max_size_write,
        .llseek = default_llseek,
};
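
/*
 * seq_file iterator for the stack_trace file. Position 0 emits
 * SEQ_START_TOKEN (the header line); position n (n >= 1) maps to
 * stack_dump_trace[n - 1]. t_start()/t_stop() pin the snapshot by
 * taking stack_trace_max_lock with trace_active raised.
 */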
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&stack_trace_max_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open = stack_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * __setup("stacktrace") matches any boot parameter that starts with
 * "stacktrace", so "stacktrace_filter=..." also lands here, with str
 * pointing at the "_filter=" remainder.
 */
static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                          &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                          NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
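
To close the loop on the files stack_trace_init() creates, here is a small
userspace sketch (again an illustration, with the same assumed tracefs mount
point as the earlier example) that flips the stack_tracer_enabled sysctl this
file handles and then dumps the resulting stack_trace report:

    /* sketch: enable the stack tracer and print the deepest stack seen */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char line[256];
            FILE *f;

            /* same switch that stack_trace_sysctl() handles above */
            if (system("echo 1 > /proc/sys/kernel/stack_tracer_enabled") != 0)
                    return 1;

            /* assumed tracefs mount point */
            f = fopen("/sys/kernel/tracing/stack_trace", "r");
            if (!f) {
                    perror("stack_trace");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }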