tracing: Add percpu buffers for trace_printk()
Currently, trace_printk() uses a single buffer to write into in order to calculate the size and format needed to save the trace. To do this safely in an SMP environment, a spin_lock() is taken so that only one writer at a time can use the buffer. But this can also affect what is being traced, and it adds synchronization that would not be there otherwise.

Ideally, per-cpu buffers would be used, but since trace_printk() is only used in development, having per-cpu buffers for something never used would be a waste of space. Thus, the trace_bprintk() format section is now used for static formats as well as dynamic ones. At boot we can check whether the section that holds the trace_printk formats is non-empty; if it contains anything, we know a trace_printk() has been added to the kernel, and the trace_printk per-cpu buffers are allocated at that point. The same check is done at module load time in case a loaded module contains a trace_printk(). Once the buffers are allocated, they are never freed. If you use trace_printk(), you should know what you are doing.

A buffer is made for each type of context:

  normal
  softirq
  irq
  nmi

The context is checked and the appropriate buffer is used. This allows for totally lockless usage of trace_printk(); it no longer even disables interrupts.

Requested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent a385ec4f11
commit 07d777fe8c
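The boot-time detection described in the commit message hinges on a linker trick: every trace_printk() call site drops its constant format string into a dedicated ELF section, and the per-cpu buffers are only allocated when the start and stop symbols of that section differ, i.e. the section is non-empty. Below is a minimal user-space sketch of that idea, assuming GCC (or Clang) and GNU ld; the macro name MY_PRINTK and the section name my_printk_fmt are invented for the example and are not the kernel's identifiers.

#include <stdio.h>

/* Each call site stashes its constant format string in a named section. */
#define MY_PRINTK(fmt, args...)						\
do {									\
	static const char *my_fmt					\
		__attribute__((section("my_printk_fmt"), used)) =	\
		__builtin_constant_p(fmt) ? (fmt) : NULL;		\
	(void)my_fmt;							\
	printf(fmt, ##args);						\
} while (0)

/*
 * GNU ld synthesizes __start_<sec>/__stop_<sec> symbols for any section
 * whose name is a valid C identifier, so the section bounds can be
 * compared at run time.
 */
extern const char *__start_my_printk_fmt[];
extern const char *__stop_my_printk_fmt[];

int main(void)
{
	/* Non-empty section: at least one MY_PRINTK() was compiled in. */
	if (__stop_my_printk_fmt != __start_my_printk_fmt)
		puts("format section is non-empty; buffers would be allocated here");

	MY_PRINTK("the answer is %d\n", 42);
	return 0;
}

The patch applies the same comparison in tracer_alloc_buffers() at boot and in hold_module_trace_bprintk_format() at module load, so the per-cpu buffers only exist in kernels (or modules) that actually contain a trace_printk() call.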
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,15 +480,16 @@
 #define trace_printk(fmt, args...)					\
 do {									\
-	__trace_printk_check_format(fmt, ##args);			\
-	if (__builtin_constant_p(fmt)) {				\
-		static const char *trace_printk_fmt			\
-			__attribute__((section("__trace_printk_fmt"))) = \
-			__builtin_constant_p(fmt) ? fmt : NULL;	\
+	static const char *trace_printk_fmt				\
+		__attribute__((section("__trace_printk_fmt"))) =	\
+		__builtin_constant_p(fmt) ? fmt : NULL;		\
+									\
+	__trace_printk_check_format(fmt, ##args);			\
 									\
+	if (__builtin_constant_p(fmt))					\
 		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
-	} else								\
+	else								\
 		__trace_printk(_THIS_IP_, fmt, ##args);			\
 } while (0)
 
 extern __printf(2, 3)
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1498,25 +1498,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+	char buffer[TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct *trace_percpu_sirq_buffer;
+static struct trace_buffer_struct *trace_percpu_irq_buffer;
+static struct trace_buffer_struct *trace_percpu_nmi_buffer;
+
+/*
+ * The buffer used is dependent on the context. There is a per cpu
+ * buffer for normal context, softirq contex, hard irq context and
+ * for NMI context. Thise allows for lockless recording.
+ *
+ * Note, if the buffers failed to be allocated, then this returns NULL
+ */
+static char *get_trace_buf(void)
+{
+	struct trace_buffer_struct *percpu_buffer;
+	struct trace_buffer_struct *buffer;
+
+	/*
+	 * If we have allocated per cpu buffers, then we do not
+	 * need to do any locking.
+	 */
+	if (in_nmi())
+		percpu_buffer = trace_percpu_nmi_buffer;
+	else if (in_irq())
+		percpu_buffer = trace_percpu_irq_buffer;
+	else if (in_softirq())
+		percpu_buffer = trace_percpu_sirq_buffer;
+	else
+		percpu_buffer = trace_percpu_buffer;
+
+	if (!percpu_buffer)
+		return NULL;
+
+	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
+
+	return buffer->buffer;
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+	struct trace_buffer_struct *buffers;
+	struct trace_buffer_struct *sirq_buffers;
+	struct trace_buffer_struct *irq_buffers;
+	struct trace_buffer_struct *nmi_buffers;
+
+	buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!buffers)
+		goto err_warn;
+
+	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!sirq_buffers)
+		goto err_sirq;
+
+	irq_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!irq_buffers)
+		goto err_irq;
+
+	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
+	if (!nmi_buffers)
+		goto err_nmi;
+
+	trace_percpu_buffer = buffers;
+	trace_percpu_sirq_buffer = sirq_buffers;
+	trace_percpu_irq_buffer = irq_buffers;
+	trace_percpu_nmi_buffer = nmi_buffers;
+
+	return 0;
+
+ err_nmi:
+	free_percpu(irq_buffers);
+ err_irq:
+	free_percpu(sirq_buffers);
+ err_sirq:
+	free_percpu(buffers);
+ err_warn:
+	WARN(1, "Could not allocate percpu trace_printk buffer");
+	return -ENOMEM;
+}
+
+void trace_printk_init_buffers(void)
+{
+	static int buffers_allocated;
+
+	if (buffers_allocated)
+		return;
+
+	if (alloc_percpu_trace_buffer())
+		return;
+
+	pr_info("ftrace: Allocated trace_printk buffers\n");
+
+	buffers_allocated = 1;
+}
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	static u32 trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
-	int disable;
-	int cpu, len = 0, size, pc;
+	char *tbuffer;
+	int len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
@@ -1526,43 +1620,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	pc = preempt_count();
 	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
 
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
+	tbuffer = get_trace_buf();
+	if (!tbuffer) {
+		len = 0;
 		goto out;
+	}
+
+	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
-	/* Lockdep uses trace_printk for lock tracing */
-	local_irq_save(flags);
-	arch_spin_lock(&trace_buf_lock);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	if (len > TRACE_BUF_SIZE || len < 0)
-		goto out_unlock;
+	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+		goto out;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 					  flags, pc);
 	if (!event)
-		goto out_unlock;
+		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->fmt = fmt;
 
-	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
-out_unlock:
-	arch_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
 out:
-	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 	unpause_graph_tracing();
 
|
|||||||
int trace_array_vprintk(struct trace_array *tr,
|
int trace_array_vprintk(struct trace_array *tr,
|
||||||
unsigned long ip, const char *fmt, va_list args)
|
unsigned long ip, const char *fmt, va_list args)
|
||||||
{
|
{
|
||||||
static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
|
|
||||||
static char trace_buf[TRACE_BUF_SIZE];
|
|
||||||
|
|
||||||
struct ftrace_event_call *call = &event_print;
|
struct ftrace_event_call *call = &event_print;
|
||||||
struct ring_buffer_event *event;
|
struct ring_buffer_event *event;
|
||||||
struct ring_buffer *buffer;
|
struct ring_buffer *buffer;
|
||||||
struct trace_array_cpu *data;
|
int len = 0, size, pc;
|
||||||
int cpu, len = 0, size, pc;
|
|
||||||
struct print_entry *entry;
|
struct print_entry *entry;
|
||||||
unsigned long irq_flags;
|
unsigned long flags;
|
||||||
int disable;
|
char *tbuffer;
|
||||||
|
|
||||||
if (tracing_disabled || tracing_selftest_running)
|
if (tracing_disabled || tracing_selftest_running)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
/* Don't pollute graph traces with trace_vprintk internals */
|
||||||
|
pause_graph_tracing();
|
||||||
|
|
||||||
pc = preempt_count();
|
pc = preempt_count();
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
cpu = raw_smp_processor_id();
|
|
||||||
data = tr->data[cpu];
|
|
||||||
|
|
||||||
disable = atomic_inc_return(&data->disabled);
|
|
||||||
if (unlikely(disable != 1))
|
tbuffer = get_trace_buf();
|
||||||
|
if (!tbuffer) {
|
||||||
|
len = 0;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
|
||||||
|
if (len > TRACE_BUF_SIZE)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
pause_graph_tracing();
|
local_save_flags(flags);
|
||||||
raw_local_irq_save(irq_flags);
|
|
||||||
arch_spin_lock(&trace_buf_lock);
|
|
||||||
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
|
|
||||||
|
|
||||||
size = sizeof(*entry) + len + 1;
|
size = sizeof(*entry) + len + 1;
|
||||||
buffer = tr->buffer;
|
buffer = tr->buffer;
|
||||||
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
|
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
|
||||||
irq_flags, pc);
|
flags, pc);
|
||||||
if (!event)
|
if (!event)
|
||||||
goto out_unlock;
|
goto out;
|
||||||
entry = ring_buffer_event_data(event);
|
entry = ring_buffer_event_data(event);
|
||||||
entry->ip = ip;
|
entry->ip = ip;
|
||||||
|
|
||||||
memcpy(&entry->buf, trace_buf, len);
|
memcpy(&entry->buf, tbuffer, len);
|
||||||
entry->buf[len] = '\0';
|
entry->buf[len] = '\0';
|
||||||
if (!filter_check_discard(call, entry, buffer, event)) {
|
if (!filter_check_discard(call, entry, buffer, event)) {
|
||||||
ring_buffer_unlock_commit(buffer, event);
|
ring_buffer_unlock_commit(buffer, event);
|
||||||
ftrace_trace_stack(buffer, irq_flags, 6, pc);
|
ftrace_trace_stack(buffer, flags, 6, pc);
|
||||||
}
|
}
|
||||||
|
|
||||||
out_unlock:
|
|
||||||
arch_spin_unlock(&trace_buf_lock);
|
|
||||||
raw_local_irq_restore(irq_flags);
|
|
||||||
unpause_graph_tracing();
|
|
||||||
out:
|
out:
|
||||||
atomic_dec_return(&data->disabled);
|
|
||||||
preempt_enable_notrace();
|
preempt_enable_notrace();
|
||||||
|
unpause_graph_tracing();
|
||||||
|
|
||||||
return len;
|
return len;
|
||||||
}
|
}
|
||||||
@@ -4955,6 +5037,10 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
+	/* Only allocate trace_printk buffers if a trace_printk exists */
+	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		trace_printk_init_buffers();
+
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -826,6 +826,8 @@ extern struct list_head ftrace_events;
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
+void trace_printk_init_buffers(void);
+
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
 	extern struct ftrace_event_call				\
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -51,6 +51,10 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 	const char **iter;
 	char *fmt;
 
+	/* allocate the trace_printk per cpu buffers */
+	if (start != end)
+		trace_printk_init_buffers();
+
 	mutex_lock(&btrace_mutex);
 	for (iter = start; iter < end; iter++) {
 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);