Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit 96d3fd0d31 ("rcu: Break call_rcu() deadlock involving scheduler and perf")
Dave Jones got the following lockdep splat:

> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.12.0-rc3+ #92 Not tainted
> -------------------------------------------------------
> trinity-child2/15191 is trying to acquire lock:
>  (&rdp->nocb_wq){......}, at: [<ffffffff8108ff43>] __wake_up+0x23/0x50
>
> but task is already holding lock:
>  (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> which lock already depends on the new lock.
>
> the existing dependency chain (in reverse order) is:
>
> -> #3 (&ctx->lock){-.-...}:
>        [<ffffffff810cc243>] lock_acquire+0x93/0x200
>        [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
>        [<ffffffff811500ff>] __perf_event_task_sched_out+0x2df/0x5e0
>        [<ffffffff81091b83>] perf_event_task_sched_out+0x93/0xa0
>        [<ffffffff81732052>] __schedule+0x1d2/0xa20
>        [<ffffffff81732f30>] preempt_schedule_irq+0x50/0xb0
>        [<ffffffff817352b6>] retint_kernel+0x26/0x30
>        [<ffffffff813eed04>] tty_flip_buffer_push+0x34/0x50
>        [<ffffffff813f0504>] pty_write+0x54/0x60
>        [<ffffffff813e900d>] n_tty_write+0x32d/0x4e0
>        [<ffffffff813e5838>] tty_write+0x158/0x2d0
>        [<ffffffff811c4850>] vfs_write+0xc0/0x1f0
>        [<ffffffff811c52cc>] SyS_write+0x4c/0xa0
>        [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> -> #2 (&rq->lock){-.-.-.}:
>        [<ffffffff810cc243>] lock_acquire+0x93/0x200
>        [<ffffffff81733f90>] _raw_spin_lock+0x40/0x80
>        [<ffffffff810980b2>] wake_up_new_task+0xc2/0x2e0
>        [<ffffffff81054336>] do_fork+0x126/0x460
>        [<ffffffff81054696>] kernel_thread+0x26/0x30
>        [<ffffffff8171ff93>] rest_init+0x23/0x140
>        [<ffffffff81ee1e4b>] start_kernel+0x3f6/0x403
>        [<ffffffff81ee1571>] x86_64_start_reservations+0x2a/0x2c
>        [<ffffffff81ee1664>] x86_64_start_kernel+0xf1/0xf4
>
> -> #1 (&p->pi_lock){-.-.-.}:
>        [<ffffffff810cc243>] lock_acquire+0x93/0x200
>        [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
>        [<ffffffff810979d1>] try_to_wake_up+0x31/0x350
>        [<ffffffff81097d62>] default_wake_function+0x12/0x20
>        [<ffffffff81084af8>] autoremove_wake_function+0x18/0x40
>        [<ffffffff8108ea38>] __wake_up_common+0x58/0x90
>        [<ffffffff8108ff59>] __wake_up+0x39/0x50
>        [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
>        [<ffffffff81111450>] __call_rcu+0x140/0x820
>        [<ffffffff81111b8d>] call_rcu+0x1d/0x20
>        [<ffffffff81093697>] cpu_attach_domain+0x287/0x360
>        [<ffffffff81099d7e>] build_sched_domains+0xe5e/0x10a0
>        [<ffffffff81efa7fc>] sched_init_smp+0x3b7/0x47a
>        [<ffffffff81ee1f4e>] kernel_init_freeable+0xf6/0x202
>        [<ffffffff817200be>] kernel_init+0xe/0x190
>        [<ffffffff8173d22c>] ret_from_fork+0x7c/0xb0
>
> -> #0 (&rdp->nocb_wq){......}:
>        [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
>        [<ffffffff810cc243>] lock_acquire+0x93/0x200
>        [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
>        [<ffffffff8108ff43>] __wake_up+0x23/0x50
>        [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
>        [<ffffffff81111450>] __call_rcu+0x140/0x820
>        [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
>        [<ffffffff81149abf>] put_ctx+0x4f/0x70
>        [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
>        [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
>        [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
>        [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
>        [<ffffffff8173d4e4>] tracesys+0xdd/0xe2
>
> other info that might help us debug this:
>
> Chain exists of:
>   &rdp->nocb_wq --> &rq->lock --> &ctx->lock
>
> Possible unsafe locking scenario:
>
>        CPU0                    CPU1
>        ----                    ----
>   lock(&ctx->lock);
>                                lock(&rq->lock);
>                                lock(&ctx->lock);
>   lock(&rdp->nocb_wq);
>
>  *** DEADLOCK ***
>
> 1 lock held by trinity-child2/15191:
>  #0:  (&ctx->lock){-.-...}, at: [<ffffffff81154c19>] perf_event_exit_task+0x109/0x230
>
> stack backtrace:
> CPU: 2 PID: 15191 Comm: trinity-child2 Not tainted 3.12.0-rc3+ #92
>  ffffffff82565b70 ffff880070c2dbf8 ffffffff8172a363 ffffffff824edf40
>  ffff880070c2dc38 ffffffff81726741 ffff880070c2dc90 ffff88022383b1c0
>  ffff88022383aac0 0000000000000000 ffff88022383b188 ffff88022383b1c0
> Call Trace:
>  [<ffffffff8172a363>] dump_stack+0x4e/0x82
>  [<ffffffff81726741>] print_circular_bug+0x200/0x20f
>  [<ffffffff810cb7ca>] __lock_acquire+0x191a/0x1be0
>  [<ffffffff810c6439>] ? get_lock_stats+0x19/0x60
>  [<ffffffff8100b2f4>] ? native_sched_clock+0x24/0x80
>  [<ffffffff810cc243>] lock_acquire+0x93/0x200
>  [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
>  [<ffffffff8173419b>] _raw_spin_lock_irqsave+0x4b/0x90
>  [<ffffffff8108ff43>] ? __wake_up+0x23/0x50
>  [<ffffffff8108ff43>] __wake_up+0x23/0x50
>  [<ffffffff8110d4f8>] __call_rcu_nocb_enqueue+0xa8/0xc0
>  [<ffffffff81111450>] __call_rcu+0x140/0x820
>  [<ffffffff8109bc8f>] ? local_clock+0x3f/0x50
>  [<ffffffff81111bb0>] kfree_call_rcu+0x20/0x30
>  [<ffffffff81149abf>] put_ctx+0x4f/0x70
>  [<ffffffff81154c3e>] perf_event_exit_task+0x12e/0x230
>  [<ffffffff81056b8d>] do_exit+0x30d/0xcc0
>  [<ffffffff810c9af5>] ? trace_hardirqs_on_caller+0x115/0x1e0
>  [<ffffffff810c9bcd>] ? trace_hardirqs_on+0xd/0x10
>  [<ffffffff8105893c>] do_group_exit+0x4c/0xc0
>  [<ffffffff810589c4>] SyS_exit_group+0x14/0x20
>  [<ffffffff8173d4e4>] tracesys+0xdd/0xe2

The underlying problem is that perf is invoking call_rcu() with the
scheduler locks held, but in NOCB mode, call_rcu() will with high
probability invoke the scheduler -- which just might want to use its
locks.  The reason that call_rcu() needs to invoke the scheduler is to
wake up the corresponding rcuo callback-offload kthread, which does the
job of starting up a grace period and invoking the callbacks afterwards.

One solution (championed on a related problem by Lai Jiangshan) is to
simply defer the wakeup to some point where scheduler locks are no
longer held.  Since we don't want to unnecessarily incur the cost of
such deferral, the task before us is threefold:

1.	Determine when it is likely that a relevant scheduler lock is held.

2.	Defer the wakeup in such cases.

3.	Ensure that all deferred wakeups eventually happen, preferably
	sooner rather than later.

We use irqs_disabled_flags() as a proxy for relevant scheduler locks
being held.  This works because the relevant locks are always acquired
with interrupts disabled.  We may defer more often than needed, but that
is at least safe.

The wakeup deferral is tracked via a new field in the per-CPU and
per-RCU-flavor rcu_data structure, namely ->nocb_defer_wakeup.  This
flag is checked by the RCU core processing.  The __rcu_pending() function
now checks this flag, which causes rcu_check_callbacks() to initiate
RCU core processing at each scheduling-clock interrupt where this flag
is set.

Of course this is not sufficient because scheduling-clock interrupts
are often turned off (the things we used to be able to count on!).
So the flags are also checked on entry to any state that RCU considers
to be idle, which includes both NO_HZ_IDLE idle state and NO_HZ_FULL
user-mode-execution state.

This approach should allow call_rcu() to be invoked regardless of what
locks you might be holding, the key word being "should".

Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
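To make the mechanism concrete, here is a minimal C sketch of the deferral scheme the message describes. It is illustrative, not the verbatim patch: the ->nocb_defer_wakeup field, the irqs_disabled_flags() test, and the flush points follow the commit text, but the signatures are simplified and the callback-list bookkeeping is elided.

/* Sketch only: simplified from the scheme described in the commit message. */

/*
 * Enqueue a callback for the rcuo offload kthread.  If interrupts are
 * disabled -- the proxy for "a scheduler lock may be held" -- defer the
 * wakeup rather than calling wake_up() and recursing into the scheduler.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    unsigned long flags)
{
	/* ... enqueue rhp on rdp's offloaded callback list ... */
	if (irqs_disabled_flags(flags))
		ACCESS_ONCE(rdp->nocb_defer_wakeup) = true; /* defer the wakeup */
	else
		wake_up(&rdp->nocb_wq);	/* safe to wake the rcuo kthread now */
}

/*
 * Flush a deferred wakeup.  Reached from RCU core processing (because
 * __rcu_pending() reports the set flag, causing rcu_check_callbacks()
 * to raise core processing) and on entry to RCU-idle states, i.e.,
 * NO_HZ_IDLE idle and NO_HZ_FULL user-mode execution.
 */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	if (!ACCESS_ONCE(rdp->nocb_defer_wakeup))
		return;
	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
	wake_up(&rdp->nocb_wq);
}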
kernel/rcu/tree_trace.c (502 lines, 13 KiB, C)
/*
 * Read-Copy Update tracing for classic implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Papers:  http://www.rdrop.com/users/paulmck/RCU
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define RCU_TREE_NONCORE
#include "tree.h"

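/*
 * Generic open function for the per-flavor seq_file interfaces below:
 * start the seq_file and stash the inode's rcu_state pointer (set up at
 * debugfs_create_file() time) for use by the show functions.
 */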
static int r_open(struct inode *inode, struct file *file,
					const struct seq_operations *op)
{
	int ret = seq_open(file, op);
	if (!ret) {
		struct seq_file *m = (struct seq_file *)file->private_data;
		m->private = inode->i_private;
	}
	return ret;
}

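/*
 * seq_file iterators that walk every possible CPU's rcu_data structure
 * for the RCU flavor recorded in m->private.
 */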
static void *r_start(struct seq_file *m, loff_t *pos)
{
	struct rcu_state *rsp = (struct rcu_state *)m->private;
	*pos = cpumask_next(*pos - 1, cpu_possible_mask);
	if ((*pos) < nr_cpu_ids)
		return per_cpu_ptr(rsp->rda, *pos);
	return NULL;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return r_start(m, pos);
}

static void r_stop(struct seq_file *m, void *v)
{
}

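/* Show rcu_barrier() progress: callback count (bcc) and done count (nbd). */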
static int show_rcubarrier(struct seq_file *m, void *v)
{
	struct rcu_state *rsp = (struct rcu_state *)m->private;
	seq_printf(m, "bcc: %d nbd: %lu\n",
		   atomic_read(&rsp->barrier_cpu_count),
		   rsp->n_barrier_done);
	return 0;
}

static int rcubarrier_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcubarrier, inode->i_private);
}

static const struct file_operations rcubarrier_fops = {
	.owner = THIS_MODULE,
	.open = rcubarrier_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = single_release,
};

#ifdef CONFIG_RCU_BOOST

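/*
 * Map an RCU kthread status value (RCU_KTHREAD_* in tree.h) to a single
 * status character, or '?' if the value is out of range.
 */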
static char convert_kthread_status(unsigned int kthread_status)
{
	if (kthread_status > RCU_KTHREAD_MAX)
		return '?';
	return "SRWOY"[kthread_status];
}

#endif /* #ifdef CONFIG_RCU_BOOST */

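/*
 * Dump one CPU's rcu_data: grace-period and quiescent-state numbers,
 * dyntick-idle state, callback-queue segments, and invocation counters.
 */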
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
	long ql, qll;

	if (!rdp->beenonline)
		return;
	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
		   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
		   rdp->passed_quiesce, rdp->qs_pending);
	seq_printf(m, " dt=%d/%llx/%d df=%lu",
		   atomic_read(&rdp->dynticks->dynticks),
		   rdp->dynticks->dynticks_nesting,
		   rdp->dynticks->dynticks_nmi_nesting,
		   rdp->dynticks_fqs);
	seq_printf(m, " of=%lu", rdp->offline_fqs);
	rcu_nocb_q_lengths(rdp, &ql, &qll);
	qll += rdp->qlen_lazy;
	ql += rdp->qlen;
	seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
		   qll, ql,
		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
			rdp->nxttail[RCU_NEXT_TAIL]],
		   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
			rdp->nxttail[RCU_NEXT_READY_TAIL]],
		   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
			rdp->nxttail[RCU_WAIT_TAIL]],
		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, " kt=%d/%c ktl=%x",
		   per_cpu(rcu_cpu_has_work, rdp->cpu),
		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
					  rdp->cpu)),
		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
#endif /* #ifdef CONFIG_RCU_BOOST */
	seq_printf(m, " b=%ld", rdp->blimit);
	seq_printf(m, " ci=%lu nci=%lu co=%lu ca=%lu\n",
		   rdp->n_cbs_invoked, rdp->n_nocbs_invoked,
		   rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}

static int show_rcudata(struct seq_file *m, void *v)
{
	print_one_rcu_data(m, (struct rcu_data *)v);
	return 0;
}

static const struct seq_operations rcudate_op = {
	.start = r_start,
	.next = r_next,
	.stop = r_stop,
	.show = show_rcudata,
};

static int rcudata_open(struct inode *inode, struct file *file)
{
	return r_open(inode, file, &rcudate_op);
}

static const struct file_operations rcudata_fops = {
	.owner = THIS_MODULE,
	.open = rcudata_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = seq_release,
};

static int show_rcuexp(struct seq_file *m, void *v)
{
	struct rcu_state *rsp = (struct rcu_state *)m->private;

	seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
		   atomic_long_read(&rsp->expedited_start),
		   atomic_long_read(&rsp->expedited_done),
		   atomic_long_read(&rsp->expedited_wrap),
		   atomic_long_read(&rsp->expedited_tryfail),
		   atomic_long_read(&rsp->expedited_workdone1),
		   atomic_long_read(&rsp->expedited_workdone2),
		   atomic_long_read(&rsp->expedited_normal),
		   atomic_long_read(&rsp->expedited_stoppedcpus),
		   atomic_long_read(&rsp->expedited_done_tries),
		   atomic_long_read(&rsp->expedited_done_lost),
		   atomic_long_read(&rsp->expedited_done_exit));
	return 0;
}

static int rcuexp_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcuexp, inode->i_private);
}

static const struct file_operations rcuexp_fops = {
	.owner = THIS_MODULE,
	.open = rcuexp_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = single_release,
};

#ifdef CONFIG_RCU_BOOST

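/* Dump one rcu_node's RCU priority-boosting statistics. */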
static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
{
	seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ",
		   rnp->grplo, rnp->grphi,
		   "T."[list_empty(&rnp->blkd_tasks)],
		   "N."[!rnp->gp_tasks],
		   "E."[!rnp->exp_tasks],
		   "B."[!rnp->boost_tasks],
		   convert_kthread_status(rnp->boost_kthread_status),
		   rnp->n_tasks_boosted, rnp->n_exp_boosts,
		   rnp->n_normal_boosts);
	seq_printf(m, "j=%04x bt=%04x\n",
		   (int)(jiffies & 0xffff),
		   (int)(rnp->boost_time & 0xffff));
	seq_printf(m, " balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
		   rnp->n_balk_blkd_tasks,
		   rnp->n_balk_exp_gp_tasks,
		   rnp->n_balk_boost_tasks,
		   rnp->n_balk_notblocked,
		   rnp->n_balk_notyet,
		   rnp->n_balk_nos);
}

static int show_rcu_node_boost(struct seq_file *m, void *unused)
{
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(&rcu_preempt_state, rnp)
		print_one_rcu_node_boost(m, rnp);
	return 0;
}

static int rcu_node_boost_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcu_node_boost, NULL);
}

static const struct file_operations rcu_node_boost_fops = {
	.owner = THIS_MODULE,
	.open = rcu_node_boost_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = single_release,
};

#endif /* #ifdef CONFIG_RCU_BOOST */

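/*
 * Dump a flavor's global state followed by its rcu_node hierarchy,
 * one tree level per output line.
 */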
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
	unsigned long gpnum;
	int level = 0;
	struct rcu_node *rnp;

	gpnum = rsp->gpnum;
	seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ",
		   ulong2long(rsp->completed), ulong2long(gpnum),
		   rsp->fqs_state,
		   (long)(rsp->jiffies_force_qs - jiffies),
		   (int)(jiffies & 0xffff));
	seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
		   rsp->n_force_qs, rsp->n_force_qs_ngp,
		   rsp->n_force_qs - rsp->n_force_qs_ngp,
		   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
		if (rnp->level != level) {
			seq_puts(m, "\n");
			level = rnp->level;
		}
		seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ",
			   rnp->qsmask, rnp->qsmaskinit,
			   ".G"[rnp->gp_tasks != NULL],
			   ".E"[rnp->exp_tasks != NULL],
			   ".T"[!list_empty(&rnp->blkd_tasks)],
			   rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	seq_puts(m, "\n");
}

static int show_rcuhier(struct seq_file *m, void *v)
{
	struct rcu_state *rsp = (struct rcu_state *)m->private;
	print_one_rcu_state(m, rsp);
	return 0;
}

static int rcuhier_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcuhier, inode->i_private);
}

static const struct file_operations rcuhier_fops = {
	.owner = THIS_MODULE,
	.open = rcuhier_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = single_release,
};

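/*
 * Dump the completed/current grace-period numbers, the current grace
 * period's age, and the maximum grace-period duration (all in jiffies),
 * sampled under the root rcu_node's lock.
 */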
static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gpage;
	unsigned long gpmax;
	struct rcu_node *rnp = &rsp->node[0];

	raw_spin_lock_irqsave(&rnp->lock, flags);
	completed = ACCESS_ONCE(rsp->completed);
	gpnum = ACCESS_ONCE(rsp->gpnum);
	if (completed == gpnum)
		gpage = 0;
	else
		gpage = jiffies - rsp->gp_start;
	gpmax = rsp->gp_max;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	seq_printf(m, "completed=%ld gpnum=%ld age=%ld max=%ld\n",
		   ulong2long(completed), ulong2long(gpnum), gpage, gpmax);
}

static int show_rcugp(struct seq_file *m, void *v)
{
	struct rcu_state *rsp = (struct rcu_state *)m->private;
	show_one_rcugp(m, rsp);
	return 0;
}

static int rcugp_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcugp, inode->i_private);
}

static const struct file_operations rcugp_fops = {
	.owner = THIS_MODULE,
	.open = rcugp_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = single_release,
};

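/*
 * Dump one CPU's rcu_pending() statistics: the total number of calls
 * (np=) followed by a breakdown of why RCU core processing was needed,
 * now including the deferred no-CBs wakeup check (ndw) added by this
 * commit.
 */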
static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
{
	if (!rdp->beenonline)
		return;
	seq_printf(m, "%3d%cnp=%ld ",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
		   rdp->n_rcu_pending);
	seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
		   rdp->n_rp_qs_pending,
		   rdp->n_rp_report_qs,
		   rdp->n_rp_cb_ready,
		   rdp->n_rp_cpu_needs_gp);
	seq_printf(m, "gpc=%ld gps=%ld nn=%ld ndw%ld\n",
		   rdp->n_rp_gp_completed,
		   rdp->n_rp_gp_started,
		   rdp->n_rp_nocb_defer_wakeup,
		   rdp->n_rp_need_nothing);
}

static int show_rcu_pending(struct seq_file *m, void *v)
{
	print_one_rcu_pending(m, (struct rcu_data *)v);
	return 0;
}

static const struct seq_operations rcu_pending_op = {
	.start = r_start,
	.next = r_next,
	.stop = r_stop,
	.show = show_rcu_pending,
};

static int rcu_pending_open(struct inode *inode, struct file *file)
{
	return r_open(inode, file, &rcu_pending_op);
}

static const struct file_operations rcu_pending_fops = {
	.owner = THIS_MODULE,
	.open = rcu_pending_open,
	.read = seq_read,
	.llseek = no_llseek,
	.release = seq_release,
};

static int show_rcutorture(struct seq_file *m, void *unused)
{
	seq_printf(m, "rcutorture test sequence: %lu %s\n",
		   rcutorture_testseq >> 1,
		   (rcutorture_testseq & 0x1) ? "(test in progress)" : "");
	seq_printf(m, "rcutorture update version number: %lu\n",
		   rcutorture_vernum);
	return 0;
}

static int rcutorture_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_rcutorture, NULL);
}

static const struct file_operations rcutorture_fops = {
	.owner = THIS_MODULE,
	.open = rcutorture_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *rcudir;

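/*
 * Create the debugfs hierarchy: an "rcu" directory containing one
 * subdirectory per RCU flavor plus a global "rcutorture" file.
 */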
static int __init rcutree_trace_init(void)
{
	struct rcu_state *rsp;
	struct dentry *retval;
	struct dentry *rspdir;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;

	for_each_rcu_flavor(rsp) {
		rspdir = debugfs_create_dir(rsp->name, rcudir);
		if (!rspdir)
			goto free_out;

		retval = debugfs_create_file("rcudata", 0444,
				rspdir, rsp, &rcudata_fops);
		if (!retval)
			goto free_out;

		retval = debugfs_create_file("rcuexp", 0444,
				rspdir, rsp, &rcuexp_fops);
		if (!retval)
			goto free_out;

		retval = debugfs_create_file("rcu_pending", 0444,
				rspdir, rsp, &rcu_pending_fops);
		if (!retval)
			goto free_out;

		retval = debugfs_create_file("rcubarrier", 0444,
				rspdir, rsp, &rcubarrier_fops);
		if (!retval)
			goto free_out;

#ifdef CONFIG_RCU_BOOST
		if (rsp == &rcu_preempt_state) {
			retval = debugfs_create_file("rcuboost", 0444,
				rspdir, NULL, &rcu_node_boost_fops);
			if (!retval)
				goto free_out;
		}
#endif

		retval = debugfs_create_file("rcugp", 0444,
				rspdir, rsp, &rcugp_fops);
		if (!retval)
			goto free_out;

		retval = debugfs_create_file("rcuhier", 0444,
				rspdir, rsp, &rcuhier_fops);
		if (!retval)
			goto free_out;
	}

	retval = debugfs_create_file("rcutorture", 0444, rcudir,
						NULL, &rcutorture_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}

static void __exit rcutree_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutree_trace_init);
module_exit(rcutree_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
MODULE_LICENSE("GPL");
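Usage note: on a kernel built with CONFIG_RCU_TRACE (which selects this file) and with debugfs mounted, conventionally at /sys/kernel/debug, the files created by rcutree_trace_init() above appear under rcu/<flavor>/ (for example rcu/rcu_sched/rcu_pending, whose ndw field reflects the deferred-wakeup counter introduced by this commit) and can simply be read; each read drives the corresponding seq_file show functions. Exact flavor names depend on the running kernel's configuration.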