commit 0c97bf863e

Starting with GCC 9, -Warray-bounds detects cases when memset is called starting
on a member of a struct but the size to be cleared ends up writing over further
members. Such a call happens in the trace code to clear, at once, all members
after and including `seq` on struct trace_iterator:

    In function 'memset',
        inlined from 'ftrace_dump' at kernel/trace/trace.c:8914:3:
    ./include/linux/string.h:344:9: warning: '__builtin_memset' offset
    [8505, 8560] from the object at 'iter' is out of the bounds of referenced
    subobject 'seq' with type 'struct trace_seq' at offset 4368 [-Warray-bounds]
      344 |         return __builtin_memset(p, c, size);
          |                ^~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to avoid GCC complaining about it, we compute the address ourselves by
adding the offsetof distance instead of referring directly to the member. Since
there are two places doing this clear (trace.c and trace_kdb.c), take the chance
to move the workaround into a single place in the internal header.

Link: http://lkml.kernel.org/r/20190523124535.GA12931@gmail.com

Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
[ Removed unnecessary parenthesis around "iter" ]
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
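The workaround described above is the pattern behind trace_iterator_reset(),
which the file below calls. A minimal, self-contained sketch of the idea
follows; struct example_iter and example_iter_reset() are hypothetical names
used only for illustration, not the kernel's actual definitions:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a structure whose tail must be wiped in one call. */
struct example_iter {
	void	*private_data;	/* members before 'seq' are preserved    */
	int	seq;		/* clear from here ...                   */
	long	pos;		/* ... through the end of the structure  */
	int	cpu;
};

/*
 * Clear everything from 'seq' to the end of the struct. Passing &iter->seq to
 * memset() directly trips GCC 9's -Warray-bounds, because the size written is
 * larger than the 'seq' subobject; computing the address from the struct base
 * plus offsetof() clears the same bytes without the warning.
 */
static void example_iter_reset(struct example_iter *iter)
{
	const size_t offset = offsetof(struct example_iter, seq);

	memset((char *)iter + offset, 0, sizeof(*iter) - offset);
}

int main(void)
{
	struct example_iter iter = { .seq = 42, .pos = 7, .cpu = 3 };

	example_iter_reset(&iter);
	printf("seq=%d pos=%ld cpu=%d\n", iter.seq, iter.pos, iter.cpu);
	return 0;
}

Per the commit message, the kernel keeps this clear-from-seq helper in the
tracing internal header so that trace.c and trace_kdb.c share a single copy.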
159 lines
3.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	tr = iter.tr;

	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");
	if (skip_entries)
		kdb_printf("(skipping %d entries)\n", skip_entries);

	trace_iterator_reset(&iter);
	iter.iter_flags |= TRACE_FILE_LAT_FMT;

	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_entries) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_entries--;
		}

		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf("   (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	tr->trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_entries = 0;
	long cpu_file;
	char *cp;
	int cnt;
	int cpu;

	if (argc > 2)
		return KDB_ARGCOUNT;

	if (argc) {
		skip_entries = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_entries = 0;
	}

	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

	kdb_trap_printk++;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* A negative skip_entries means skip all but the last entries */
	if (skip_entries < 0) {
		if (cpu_file == RING_BUFFER_ALL_CPUS)
			cnt = trace_total_entries(NULL);
		else
			cnt = trace_total_entries_cpu(NULL, cpu_file);
		skip_entries = max(cnt + skip_entries, 0);
	}

	ftrace_dump_buf(skip_entries, cpu_file);

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	kdb_trap_printk--;

	return 0;
}

static __init int kdb_ftrace_register(void)
{
	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
			    "Dump ftrace log; -skip dumps last #entries", 0,
			    KDB_ENABLE_ALWAYS_SAFE);
	return 0;
}

late_initcall(kdb_ftrace_register);