mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-30 12:16:45 +07:00)
Merge branch 'tip/tracing/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
commit 3daeb4da9a
@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);
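
Editorial note (not part of the patch): the two hunks above drop the old link-time size check. The trick being removed is that ring_buffer_page_too_big() is declared but never defined, so the call only survives compilation, and then breaks the link, if the sizeof comparison is true; when struct buffer_page fits inside struct page, the compiler drops the dead call and the undefined symbol is never referenced. A minimal user-space sketch of the same trick, with hypothetical struct names standing in for buffer_page and page (build with optimization, e.g. -O2, as the kernel does, so the dead call is eliminated):

#include <stdio.h>

/* Hypothetical stand-ins for struct buffer_page / struct page. */
struct bp_sketch { long list, write; };                  /* must stay small   */
struct page_sketch { long flags, count, mapping, index, lru[2]; };

extern int struct_too_big(void);                         /* deliberately never defined */

static inline void size_check(void)
{
	/*
	 * The condition is a compile-time constant.  When it is false the
	 * compiler removes the call, so the undefined symbol is never
	 * referenced and the program links.  If bp_sketch ever outgrows
	 * page_sketch, the call survives and the build fails at link time.
	 */
	if (sizeof(struct bp_sketch) > sizeof(struct page_sketch))
		struct_too_big();
}

int main(void)
{
	size_check();
	printf("buffer page fits: %zu <= %zu\n",
	       sizeof(struct bp_sketch), sizeof(struct page_sketch));
	return 0;
}
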
@@ -668,8 +657,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
-	if (buffer->pages == 1)
-		buffer->pages++;
+	if (buffer->pages < 2)
+		buffer->pages = 2;
 
 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1013,7 +1002,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
 static inline int
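
Editorial note (not part of the patch): the rb_event_index() change above simplifies the expression without changing its value. BUF_PAGE_SIZE is the data area of a buffer page, i.e. PAGE_SIZE minus the buffer-page header, so PAGE_SIZE - BUF_PAGE_SIZE is exactly BUF_PAGE_HDR_SIZE. A small standalone sketch with assumed example values (4 KB pages, 16-byte header; the real header size comes from struct buffer_data_page):

#include <stdio.h>

/* Assumed example values, not the kernel's definitions. */
#define PAGE_SIZE_EX          4096UL
#define PAGE_MASK_EX          (~(PAGE_SIZE_EX - 1))
#define BUF_PAGE_HDR_SIZE_EX  16UL
#define BUF_PAGE_SIZE_EX      (PAGE_SIZE_EX - BUF_PAGE_HDR_SIZE_EX)

int main(void)
{
	/* Pretend an event sits 100 bytes into the data area of its page. */
	unsigned long addr = 0x12345000UL + BUF_PAGE_HDR_SIZE_EX + 100;

	unsigned long old_way = (addr & ~PAGE_MASK_EX) - (PAGE_SIZE_EX - BUF_PAGE_SIZE_EX);
	unsigned long new_way = (addr & ~PAGE_MASK_EX) - BUF_PAGE_HDR_SIZE_EX;

	printf("old=%lu new=%lu\n", old_way, new_way);	/* both print 100 */
	return 0;
}
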
@@ -1334,9 +1323,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
 
@@ -2480,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
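
Editorial note (not part of the patch): the helper added above gates all reader-side locking. Outside NMI/oops context it returns 1 and readers take the per-cpu reader_lock as before; from an NMI it returns 0, permanently disables the buffer via tracing_off_permanent(), and the read proceeds without the lock (interrupts are still masked in the kernel code). The hunks below apply this pattern to ring_buffer_peek(), ring_buffer_consume(), ring_buffer_empty() and ring_buffer_empty_cpu(). As a self-contained user-space analogue of the pattern, with hypothetical names and a pthread mutex standing in for reader_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_nmi_like_context;		/* set when taking the lock would be unsafe */

static int ok_to_lock(void)
{
	return !in_nmi_like_context;
}

static int read_event(void)
{
	int dolock = ok_to_lock();
	int event;

	if (dolock)
		pthread_mutex_lock(&reader_lock);

	event = 42;			/* stand-in for rb_buffer_peek() */

	if (dolock)
		pthread_mutex_unlock(&reader_lock);
	return event;
}

int main(void)
{
	printf("event=%d (locked read)\n", read_event());
	in_nmi_like_context = 1;
	printf("event=%d (lockless read)\n", read_event());
	return 0;
}
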
@@ -2495,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2554,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
  again:
 	/* might be called in atomic */
@@ -2563,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2572,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
  out:
 	preempt_enable();
@@ -2770,12 +2784,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
 
@@ -2791,14 +2818,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
-	cpu_buffer = buffer->buffers[cpu];
-	ret = rb_per_cpu_empty(cpu_buffer);
+	dolock = rb_ok_to_lock();
+
+	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
+	ret = rb_per_cpu_empty(cpu_buffer);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
 
@@ -203,7 +203,7 @@ static void ring_buffer_producer(void)
 	 * Hammer the buffer for 10 secs (this may
 	 * make the system stall)
 	 */
-	pr_info("Starting ring buffer hammer\n");
+	trace_printk("Starting ring buffer hammer\n");
 	do_gettimeofday(&start_tv);
 	do {
 		struct ring_buffer_event *event;
@@ -239,7 +239,7 @@ static void ring_buffer_producer(void)
 #endif
 
 	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
-	pr_info("End ring buffer hammer\n");
+	trace_printk("End ring buffer hammer\n");
 
 	if (consumer) {
 		/* Init both completions here to avoid races */
@@ -262,49 +262,50 @@ static void ring_buffer_producer(void)
 	overruns = ring_buffer_overruns(buffer);
 
 	if (kill_test)
-		pr_info("ERROR!\n");
-	pr_info("Time: %lld (usecs)\n", time);
-	pr_info("Overruns: %lld\n", overruns);
+		trace_printk("ERROR!\n");
+	trace_printk("Time: %lld (usecs)\n", time);
+	trace_printk("Overruns: %lld\n", overruns);
 	if (disable_reader)
-		pr_info("Read: (reader disabled)\n");
+		trace_printk("Read: (reader disabled)\n");
 	else
-		pr_info("Read: %ld (by %s)\n", read,
+		trace_printk("Read: %ld (by %s)\n", read,
 			read_events ? "events" : "pages");
-	pr_info("Entries: %lld\n", entries);
-	pr_info("Total: %lld\n", entries + overruns + read);
-	pr_info("Missed: %ld\n", missed);
-	pr_info("Hit: %ld\n", hit);
+	trace_printk("Entries: %lld\n", entries);
+	trace_printk("Total: %lld\n", entries + overruns + read);
+	trace_printk("Missed: %ld\n", missed);
+	trace_printk("Hit: %ld\n", hit);
 
 	/* Convert time from usecs to millisecs */
 	do_div(time, USEC_PER_MSEC);
 	if (time)
 		hit /= (long)time;
 	else
-		pr_info("TIME IS ZERO??\n");
+		trace_printk("TIME IS ZERO??\n");
 
-	pr_info("Entries per millisec: %ld\n", hit);
+	trace_printk("Entries per millisec: %ld\n", hit);
 
 	if (hit) {
 		/* Calculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / hit;
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 
 	if (missed) {
 		if (time)
 			missed /= (long)time;
 
-		pr_info("Total iterations per millisec: %ld\n", hit + missed);
+		trace_printk("Total iterations per millisec: %ld\n",
+			     hit + missed);
 
 		/* it is possible that hit + missed will overflow and be zero */
 		if (!(hit + missed)) {
-			pr_info("hit + missed overflowed and totalled zero!\n");
+			trace_printk("hit + missed overflowed and totalled zero!\n");
 			hit--; /* make it non zero */
 		}
 
 		/* Caculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / (hit + missed);
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 }
 
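
Editorial note (not part of the patch): the statistics block above reduces the raw counters step by step. do_div() converts the elapsed time from microseconds to milliseconds, hit / time gives entries written per millisecond, and NSEC_PER_MSEC / hit gives the average nanoseconds per entry, with the hit + missed variant guarding against an overflow to zero. A worked example with made-up numbers, a 10-second run writing 5,000,000 entries:

#include <stdio.h>

#define USEC_PER_MSEC	1000L
#define NSEC_PER_MSEC	1000000L

int main(void)
{
	/* Made-up sample: 10 s of hammering, 5,000,000 successful writes. */
	long long time = 10LL * 1000 * 1000;	/* run time in usecs */
	long hit = 5000000;			/* entries written */
	long avg;

	time /= USEC_PER_MSEC;			/* do_div(time, USEC_PER_MSEC): 10000 ms */
	hit /= (long)time;			/* 500 entries per millisec */
	avg = NSEC_PER_MSEC / hit;		/* 2000 ns per entry */

	printf("entries/ms=%ld  ns/entry=%ld\n", hit, avg);
	return 0;
}
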
@@ -355,7 +356,7 @@ static int ring_buffer_producer_thread(void *arg)
 
 	ring_buffer_producer();
 
-	pr_info("Sleeping for 10 secs\n");
+	trace_printk("Sleeping for 10 secs\n");
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(HZ * SLEEP_TIME);
 	__set_current_state(TASK_RUNNING);