rcu: Remove rcu_state structure's ->rda field
The rcu_state structure's ->rda field was used to find the per-CPU rcu_data structures corresponding to that rcu_state structure. But now there is only one rcu_state structure (creatively named "rcu_state") and one set of per-CPU rcu_data structures (creatively named "rcu_data"). Therefore, uses of the ->rda field can always be replaced by "rcu_data", and this commit makes that change and removes the ->rda field.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
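The conversion is mechanical throughout the diff below: every lookup that previously went through the ->rda pointer now names the rcu_data per-CPU variable directly. A representative before/after fragment of the recurring pattern from the hunks (shown here for orientation only, not a standalone program):

/* Before: reach the per-CPU rcu_data indirectly through the rcu_state. */
rdp = per_cpu_ptr(rsp->rda, cpu);

/* After: reference the single remaining per-CPU rcu_data set directly. */
rdp = per_cpu_ptr(&rcu_data, cpu);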
parent ec5dd444b6
commit da1df50d16
@@ -75,7 +75,6 @@
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data);
 struct rcu_state rcu_state = {
 .level = { &rcu_state.node[0] },
-.rda = &rcu_data,
 .gp_state = RCU_GP_IDLE,
 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
@@ -586,7 +585,7 @@ void show_rcu_gp_kthreads(void)
 if (!rcu_is_leaf_node(rnp))
 continue;
 for_each_leaf_node_possible_cpu(rnp, cpu) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 if (rdp->gpwrap ||
 ULONG_CMP_GE(rsp->gp_seq,
 rdp->gp_seq_needed))
@@ -660,7 +659,7 @@ static void rcu_eqs_enter(bool user)
 trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 for_each_rcu_flavor(rsp) {
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 do_nocb_deferred_wakeup(rdp);
 }
 rcu_prepare_for_idle();
@@ -1034,7 +1033,7 @@ bool rcu_lockdep_current_cpu_online(void)
 return true;
 preempt_disable();
 for_each_rcu_flavor(rsp) {
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 rnp = rdp->mynode;
 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
 preempt_enable();
@@ -1352,7 +1351,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)

 print_cpu_stall_info_end();
 for_each_possible_cpu(cpu)
-totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 cpu)->cblist);
 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
 smp_processor_id(), (long)(jiffies - rsp->gp_start),
@@ -1392,7 +1391,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 {
 int cpu;
 unsigned long flags;
-struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 struct rcu_node *rnp = rcu_get_root(rsp);
 long totqlen = 0;

@@ -1413,7 +1412,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 print_cpu_stall_info_end();
 for_each_possible_cpu(cpu)
-totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 cpu)->cblist);
 pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
 jiffies - rsp->gp_start,
@@ -1624,7 +1623,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 bool needmore;
-struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
 if (!needmore)
@@ -1936,7 +1935,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 rcu_for_each_node_breadth_first(rsp, rnp) {
 rcu_gp_slow(rsp, gp_init_delay);
 raw_spin_lock_irqsave_rcu_node(rnp, flags);
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 rcu_preempt_check_blocked_tasks(rsp, rnp);
 rnp->qsmask = rnp->qsmaskinit;
 WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
@@ -2050,7 +2049,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 dump_blkd_tasks(rsp, rnp, 10);
 WARN_ON_ONCE(rnp->qsmask);
 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 if (rnp == rdp->mynode)
 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 /* smp_mb() provided by prior unlock-lock pair. */
@@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
 rsp->gp_state = RCU_GP_IDLE;
 /* Check for GP requests since above loop. */
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
 TPS("CleanupMore"));
@@ -2405,7 +2404,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 RCU_TRACE(bool blkd;)
-RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
+RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)

 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -2469,7 +2468,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 */
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */

 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -2622,7 +2621,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 for_each_leaf_node_possible_cpu(rnp, cpu) {
 unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
 if ((rnp->qsmask & bit) != 0) {
-if (f(per_cpu_ptr(rsp->rda, cpu)))
+if (f(per_cpu_ptr(&rcu_data, cpu)))
 mask |= bit;
 }
 }
@@ -2648,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 struct rcu_node *rnp_old = NULL;

 /* Funnel through hierarchy to reduce memory contention. */
-rnp = __this_cpu_read(rsp->rda->mynode);
+rnp = __this_cpu_read(rcu_data.mynode);
 for (; rnp != NULL; rnp = rnp->parent) {
 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 !raw_spin_trylock(&rnp->fqslock);
@@ -2740,7 +2739,7 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
 unsigned long flags;
-struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
+struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 struct rcu_node *rnp = rdp->mynode;

 WARN_ON_ONCE(!rdp->beenonline);
@@ -2894,14 +2893,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 head->func = func;
 head->next = NULL;
 local_irq_save(flags);
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);

 /* Add the callback to our list. */
 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
 int offline;

 if (cpu != -1)
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 if (likely(rdp->mynode)) {
 /* Post-boot, so this should be for a no-CBs CPU. */
 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
@@ -3135,7 +3134,7 @@ static int rcu_pending(void)
 struct rcu_state *rsp;

 for_each_rcu_flavor(rsp)
-if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
+if (__rcu_pending(rsp, this_cpu_ptr(&rcu_data)))
 return 1;
 return 0;
 }
@@ -3153,7 +3152,7 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
 struct rcu_state *rsp;

 for_each_rcu_flavor(rsp) {
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 if (rcu_segcblist_empty(&rdp->cblist))
 continue;
 hc = true;
@@ -3202,7 +3201,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 static void rcu_barrier_func(void *type)
 {
 struct rcu_state *rsp = type;
-struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
+struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);

 _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
 rdp->barrier_head.func = rcu_barrier_callback;
@@ -3262,7 +3261,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 for_each_possible_cpu(cpu) {
 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
 continue;
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 if (rcu_is_nocb_cpu(cpu)) {
 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
 _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
@@ -3372,7 +3371,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

 /* Set up local state, ensuring consistent view of global state. */
 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
@@ -3398,7 +3397,7 @@ static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 unsigned long flags;
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_node *rnp = rcu_get_root(rsp);

 /* Set up local state, ensuring consistent view of global state. */
@@ -3454,7 +3453,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 */
 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
 {
-struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
 }
@@ -3471,7 +3470,7 @@ int rcutree_online_cpu(unsigned int cpu)
 struct rcu_state *rsp;

 for_each_rcu_flavor(rsp) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 rnp = rdp->mynode;
 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 rnp->ffmask |= rdp->grpmask;
@@ -3498,7 +3497,7 @@ int rcutree_offline_cpu(unsigned int cpu)
 struct rcu_state *rsp;

 for_each_rcu_flavor(rsp) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 rnp = rdp->mynode;
 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 rnp->ffmask &= ~rdp->grpmask;
@@ -3532,7 +3531,7 @@ int rcutree_dead_cpu(unsigned int cpu)

 for_each_rcu_flavor(rsp) {
 rcu_cleanup_dead_cpu(cpu, rsp);
-do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
 }
 return 0;
 }
@@ -3566,7 +3565,7 @@ void rcu_cpu_starting(unsigned int cpu)
 per_cpu(rcu_cpu_started, cpu) = 1;

 for_each_rcu_flavor(rsp) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 rnp = rdp->mynode;
 mask = rdp->grpmask;
 raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -3600,7 +3599,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 {
 unsigned long flags;
 unsigned long mask;
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */

 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
@@ -3633,7 +3632,7 @@ void rcu_report_dead(unsigned int cpu)

 /* QS for any half-done expedited RCU-sched GP. */
 preempt_disable();
-rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(rcu_state.rda));
+rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
 preempt_enable();
 rcu_preempt_deferred_qs(current);
 for_each_rcu_flavor(rsp)
@@ -3647,7 +3646,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 {
 unsigned long flags;
 struct rcu_data *my_rdp;
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 bool needwake;

@@ -3655,7 +3654,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 return; /* No callbacks to migrate. */

 local_irq_save(flags);
-my_rdp = this_cpu_ptr(rsp->rda);
+my_rdp = this_cpu_ptr(&rcu_data);
 if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
 local_irq_restore(flags);
 return;
@@ -3857,7 +3856,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 for_each_possible_cpu(i) {
 while (i > rnp->grphi)
 rnp++;
-per_cpu_ptr(rsp->rda, i)->mynode = rnp;
+per_cpu_ptr(&rcu_data, i)->mynode = rnp;
 rcu_boot_init_percpu_data(i, rsp);
 }
 list_add(&rsp->flavors, &rcu_struct_flavors);
@@ -312,7 +312,6 @@ struct rcu_state {
 struct rcu_node *level[RCU_NUM_LVLS + 1];
 /* Hierarchy levels (+1 to */
 /* shut bogus gcc warning) */
-struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
 int ncpus; /* # CPUs seen so far. */

 /* The following fields are guarded by the root rcu_node's lock. */
@@ -286,7 +286,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
 */
 static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 struct rcu_node *rnp = rdp->mynode;
 struct rcu_node *rnp_root = rcu_get_root(rsp);

@@ -361,7 +361,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 mask_ofl_test = 0;
 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
 int snap;

@@ -390,7 +390,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 /* IPI the remaining CPUs for expedited quiescent state. */
 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

 if (!(mask_ofl_ipi & mask))
 continue;
@@ -509,7 +509,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 if (!(rnp->expmask & mask))
 continue;
 ndetected++;
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 pr_cont(" %d-%c%c%c", cpu,
 "O."[!!cpu_online(cpu)],
 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
@@ -642,7 +642,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 }

 /* Wait for expedited grace period to complete. */
-rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 rnp = rcu_get_root(rsp);
 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 sync_exp_work_done(rsp, s));
@@ -665,7 +665,7 @@ static void sync_rcu_exp_handler(void *info)
 {
 unsigned long flags;
 struct rcu_state *rsp = info;
-struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 struct rcu_node *rnp = rdp->mynode;
 struct task_struct *t = current;

@@ -772,13 +772,12 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 #else /* #ifdef CONFIG_PREEMPT_RCU */

 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *data)
+static void sync_sched_exp_handler(void *unused)
 {
 struct rcu_data *rdp;
 struct rcu_node *rnp;
-struct rcu_state *rsp = data;

-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 rnp = rdp->mynode;
 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
@@ -801,7 +800,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
 struct rcu_node *rnp;
 struct rcu_state *rsp = &rcu_state;

-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 rnp = rdp->mynode;
 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 return;
@@ -328,7 +328,7 @@ static void rcu_qs(void)
 void rcu_note_context_switch(bool preempt)
 {
 struct task_struct *t = current;
-struct rcu_data *rdp = this_cpu_ptr(rcu_state_p->rda);
+struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 struct rcu_node *rnp;

 barrier(); /* Avoid RCU read-side critical sections leaking down. */
@@ -488,7 +488,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 * t->rcu_read_unlock_special cannot change.
 */
 special = t->rcu_read_unlock_special;
-rdp = this_cpu_ptr(rcu_state_p->rda);
+rdp = this_cpu_ptr(&rcu_data);
 if (!special.s && !rdp->deferred_qs) {
 local_irq_restore(flags);
 return;
@@ -911,7 +911,7 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 }
 pr_cont("\n");
 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
 cpu, ".o"[onl],
@@ -1437,7 +1437,7 @@ static void __init rcu_spawn_boost_kthreads(void)

 static void rcu_prepare_kthreads(int cpu)
 {
-struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_node *rnp = rdp->mynode;

 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
@@ -1574,7 +1574,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 rdtp->last_advance_all = jiffies;

 for_each_rcu_flavor(rsp) {
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 rnp = rdp->mynode;

 /*
@@ -1692,7 +1692,7 @@ static void rcu_prepare_for_idle(void)
 return;
 rdtp->last_accelerate = jiffies;
 for_each_rcu_flavor(rsp) {
-rdp = this_cpu_ptr(rsp->rda);
+rdp = this_cpu_ptr(&rcu_data);
 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
 continue;
 rnp = rdp->mynode;
@@ -1778,7 +1778,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
 unsigned long delta;
 char fast_no_hz[72];
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 struct rcu_dynticks *rdtp = rdp->dynticks;
 char *ticks_title;
 unsigned long ticks_value;
@@ -1833,7 +1833,7 @@ static void increment_cpu_stall_ticks(void)
 struct rcu_state *rsp;

 for_each_rcu_flavor(rsp)
-raw_cpu_inc(rsp->rda->ticks_this_gp);
+raw_cpu_inc(rcu_data.ticks_this_gp);
 }

 #ifdef CONFIG_RCU_NOCB_CPU
@@ -1965,7 +1965,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
 */
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 {
-struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 unsigned long ret;
 #ifdef CONFIG_PROVE_RCU
 struct rcu_head *rhp;
@@ -2426,7 +2426,7 @@ void __init rcu_init_nohz(void)

 for_each_rcu_flavor(rsp) {
 for_each_cpu(cpu, rcu_nocb_mask)
-init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
+init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
 rcu_organize_nocb_kthreads(rsp);
 }
 }
@@ -2452,7 +2452,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 struct rcu_data *rdp;
 struct rcu_data *rdp_last;
 struct rcu_data *rdp_old_leader;
-struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu);
 struct task_struct *t;

 /*
@@ -2545,7 +2545,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
 */
 for_each_cpu(cpu, rcu_nocb_mask) {
-rdp = per_cpu_ptr(rsp->rda, cpu);
+rdp = per_cpu_ptr(&rcu_data, cpu);
 if (rdp->cpu >= nl) {
 /* New leader, set up for followers & next leader. */
 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;