srcu: Use rnp->lock wrappers to replace explicit memory barriers
This commit uses TREE RCU's rnp->lock wrappers to replace a few explicit memory barriers. This change also has the advantage of making SRCU's memory-ordering properties be implemented in roughly the same way as they are in Tree RCU.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 83d40bd3bc
commit a3883df393
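For context: Tree RCU's ->lock wrapper macros (kernel/rcu/rcu.h in kernels of this era) acquire the lock and then execute smp_mb__after_unlock_lock(), so each acquisition of an rcu_node-style ->lock doubles as a full memory barrier on architectures where an unlock+lock pair is not already one. Below is a minimal paraphrased sketch of the lock-side wrappers used in this commit; exact comments and formatting in the kernel source may differ.

/*
 * Sketch: acquire the __private ->lock field of *p and upgrade the
 * acquisition to a full memory barrier.  smp_mb__after_unlock_lock()
 * is a no-op except on architectures (e.g., powerpc) where an unlock
 * followed by a lock does not already imply smp_mb().
 */
#define raw_spin_lock_rcu_node(p)                                       \
do {                                                                    \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_lock_irq_rcu_node(p)                                   \
do {                                                                    \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));                    \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)                        \
do {                                                                    \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);         \
        smp_mb__after_unlock_lock();                                    \
} while (0)

/* The unlock-side wrappers add no extra barrier. */
#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

This is why the explicit smp_mb() calls in srcu_gp_end(), srcu_funnel_gp_start(), and srcu_invoke_callbacks() can simply be dropped in the diff below: the ordering they provided now comes from the lock acquisitions themselves. Marking the field __private and accessing it only through ACCESS_PRIVATE() also lets sparse flag any access that bypasses the wrappers.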
include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
         unsigned long srcu_unlock_count[2];     /* Unlocks per CPU. */
 
         /* Update-side state. */
-        spinlock_t lock ____cacheline_internodealigned_in_smp;
+        raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
         struct rcu_segcblist srcu_cblist;       /* List of callbacks.*/
         unsigned long srcu_gp_seq_needed;       /* Furthest future GP needed. */
         unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
 struct srcu_node {
-        spinlock_t lock;
+        raw_spinlock_t __private lock;
         unsigned long srcu_have_cbs[4];         /* GP seq for children */
                                                 /*  having CBs, but only */
                                                 /*  is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
         struct srcu_node *level[RCU_NUM_LVLS + 1];
                                                 /* First node at each level. */
         struct mutex srcu_cb_mutex;             /* Serialize CB preparation. */
-        spinlock_t gp_lock;                     /* protect ->srcu_cblist */
+        raw_spinlock_t __private lock;          /* Protect counters */
         struct mutex srcu_gp_mutex;             /* Serialize GP work. */
         unsigned int srcu_idx;                  /* Current rdr array element. */
         unsigned long srcu_gp_seq;              /* Grace-period seq #. */
@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *work);
 #define __SRCU_STRUCT_INIT(name)                                        \
         {                                                               \
                 .sda = &name##_srcu_data,                               \
-                .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock),          \
+                .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),            \
                 .srcu_gp_seq_needed = 0 - 1,                            \
                 __SRCU_DEP_MAP_INIT(name)                               \
         }
kernel/rcu/srcutree.c
@@ -76,7 +76,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 
         /* Each pass through this loop initializes one srcu_node structure. */
         rcu_for_each_node_breadth_first(sp, snp) {
-                spin_lock_init(&snp->lock);
+                raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
                 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                              ARRAY_SIZE(snp->srcu_data_have_cbs));
                 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -110,7 +110,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
         snp_first = sp->level[level];
         for_each_possible_cpu(cpu) {
                 sdp = per_cpu_ptr(sp->sda, cpu);
-                spin_lock_init(&sdp->lock);
+                raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
                 rcu_segcblist_init(&sdp->srcu_cblist);
                 sdp->srcu_cblist_invoking = false;
                 sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -169,7 +169,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
         /* Don't re-initialize a lock while it is held. */
         debug_check_no_locks_freed((void *)sp, sizeof(*sp));
         lockdep_init_map(&sp->dep_map, name, key, 0);
-        spin_lock_init(&sp->gp_lock);
+        raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
         return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-        spin_lock_init(&sp->gp_lock);
+        raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
         return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -197,7 +197,7 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
  * First-use initialization of statically allocated srcu_struct
  * structure.  Wiring up the combining tree is more than can be
  * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
+ * to each update-side SRCU primitive.  Use sp->lock, which -is-
  * compile-time initialized, to resolve races involving multiple
  * CPUs trying to garner first-use privileges.
  */
@@ -209,13 +209,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
         /* The smp_load_acquire() pairs with the smp_store_release(). */
         if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
                 return; /* Already initialized. */
-        spin_lock_irqsave(&sp->gp_lock, flags);
+        raw_spin_lock_irqsave_rcu_node(sp, flags);
         if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-                spin_unlock_irqrestore(&sp->gp_lock, flags);
+                raw_spin_unlock_irqrestore_rcu_node(sp, flags);
                 return;
         }
         init_srcu_struct_fields(sp, true);
-        spin_unlock_irqrestore(&sp->gp_lock, flags);
+        raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -411,8 +411,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
         struct srcu_data *sdp = this_cpu_ptr(sp->sda);
         int state;
 
-        RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
-                         "Invoked srcu_gp_start() without ->gp_lock!");
+        lockdep_assert_held(&sp->lock);
         WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
         rcu_segcblist_advance(&sdp->srcu_cblist,
                               rcu_seq_current(&sp->srcu_gp_seq));
@@ -513,7 +512,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
         mutex_lock(&sp->srcu_cb_mutex);
 
         /* End the current grace period. */
-        spin_lock_irq(&sp->gp_lock);
+        raw_spin_lock_irq_rcu_node(sp);
         idx = rcu_seq_state(sp->srcu_gp_seq);
         WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
         cbdelay = srcu_get_delay(sp);
@@ -522,7 +521,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
         gpseq = rcu_seq_current(&sp->srcu_gp_seq);
         if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
                 sp->srcu_gp_seq_needed_exp = gpseq;
-        spin_unlock_irq(&sp->gp_lock);
+        raw_spin_unlock_irq_rcu_node(sp);
         mutex_unlock(&sp->srcu_gp_mutex);
         /* A new grace period can start at this point.  But only one. */
 
@@ -530,7 +529,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
         idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
         idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
         rcu_for_each_node_breadth_first(sp, snp) {
-                spin_lock_irq(&snp->lock);
+                raw_spin_lock_irq_rcu_node(snp);
                 cbs = false;
                 if (snp >= sp->level[rcu_num_lvls - 1])
                         cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -540,21 +539,19 @@ static void srcu_gp_end(struct srcu_struct *sp)
                         snp->srcu_gp_seq_needed_exp = gpseq;
                 mask = snp->srcu_data_have_cbs[idx];
                 snp->srcu_data_have_cbs[idx] = 0;
-                spin_unlock_irq(&snp->lock);
-                if (cbs) {
-                        smp_mb(); /* GP end before CB invocation. */
+                raw_spin_unlock_irq_rcu_node(snp);
+                if (cbs)
                         srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
-                }
 
                 /* Occasionally prevent srcu_data counter wrap. */
                 if (!(gpseq & counter_wrap_check))
                         for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
                                 sdp = per_cpu_ptr(sp->sda, cpu);
-                                spin_lock_irqsave(&sdp->lock, flags);
+                                raw_spin_lock_irqsave_rcu_node(sdp, flags);
                                 if (ULONG_CMP_GE(gpseq,
                                                  sdp->srcu_gp_seq_needed + 100))
                                         sdp->srcu_gp_seq_needed = gpseq;
-                                spin_unlock_irqrestore(&sdp->lock, flags);
+                                raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
                         }
         }
 
@@ -562,17 +559,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
         mutex_unlock(&sp->srcu_cb_mutex);
 
         /* Start a new grace period if needed. */
-        spin_lock_irq(&sp->gp_lock);
+        raw_spin_lock_irq_rcu_node(sp);
         gpseq = rcu_seq_current(&sp->srcu_gp_seq);
         if (!rcu_seq_state(gpseq) &&
             ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
                 srcu_gp_start(sp);
-                spin_unlock_irq(&sp->gp_lock);
+                raw_spin_unlock_irq_rcu_node(sp);
                 /* Throttle expedited grace periods: Should be rare! */
                 srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
                                     ? 0 : SRCU_INTERVAL);
         } else {
-                spin_unlock_irq(&sp->gp_lock);
+                raw_spin_unlock_irq_rcu_node(sp);
         }
 }
 
@@ -592,18 +589,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
                 if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
                     ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
                         return;
-                spin_lock_irqsave(&snp->lock, flags);
+                raw_spin_lock_irqsave_rcu_node(snp, flags);
                 if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
-                        spin_unlock_irqrestore(&snp->lock, flags);
+                        raw_spin_unlock_irqrestore_rcu_node(snp, flags);
                         return;
                 }
                 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
-                spin_unlock_irqrestore(&snp->lock, flags);
+                raw_spin_unlock_irqrestore_rcu_node(snp, flags);
         }
-        spin_lock_irqsave(&sp->gp_lock, flags);
+        raw_spin_lock_irqsave_rcu_node(sp, flags);
         if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
                 sp->srcu_gp_seq_needed_exp = s;
-        spin_unlock_irqrestore(&sp->gp_lock, flags);
+        raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -625,14 +622,13 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
         for (; snp != NULL; snp = snp->srcu_parent) {
                 if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
                         return; /* GP already done and CBs recorded. */
-                spin_lock_irqsave(&snp->lock, flags);
+                raw_spin_lock_irqsave_rcu_node(snp, flags);
                 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
                         snp_seq = snp->srcu_have_cbs[idx];
                         if (snp == sdp->mynode && snp_seq == s)
                                 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
-                        spin_unlock_irqrestore(&snp->lock, flags);
+                        raw_spin_unlock_irqrestore_rcu_node(snp, flags);
                         if (snp == sdp->mynode && snp_seq != s) {
-                                smp_mb(); /* CBs after GP! */
                                 srcu_schedule_cbs_sdp(sdp, do_norm
                                                            ? SRCU_INTERVAL
                                                            : 0);
@@ -647,11 +643,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                         snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
                         snp->srcu_gp_seq_needed_exp = s;
-                spin_unlock_irqrestore(&snp->lock, flags);
+                raw_spin_unlock_irqrestore_rcu_node(snp, flags);
         }
 
         /* Top of tree, must ensure the grace period will be started. */
-        spin_lock_irqsave(&sp->gp_lock, flags);
+        raw_spin_lock_irqsave_rcu_node(sp, flags);
         if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
                 /*
                  * Record need for grace period s.  Pair with load
@@ -670,7 +666,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                 queue_delayed_work(system_power_efficient_wq, &sp->work,
                                    srcu_get_delay(sp));
         }
-        spin_unlock_irqrestore(&sp->gp_lock, flags);
+        raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -833,7 +829,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
         rhp->func = func;
         local_irq_save(flags);
         sdp = this_cpu_ptr(sp->sda);
-        spin_lock(&sdp->lock);
+        raw_spin_lock_rcu_node(sdp);
         rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
         rcu_segcblist_advance(&sdp->srcu_cblist,
                               rcu_seq_current(&sp->srcu_gp_seq));
@@ -847,7 +843,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
                 sdp->srcu_gp_seq_needed_exp = s;
                 needexp = true;
         }
-        spin_unlock_irqrestore(&sdp->lock, flags);
+        raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
         if (needgp)
                 srcu_funnel_gp_start(sp, sdp, s, do_norm);
         else if (needexp)
@@ -1018,7 +1014,7 @@ void srcu_barrier(struct srcu_struct *sp)
          */
         for_each_possible_cpu(cpu) {
                 sdp = per_cpu_ptr(sp->sda, cpu);
-                spin_lock_irq(&sdp->lock);
+                raw_spin_lock_irq_rcu_node(sdp);
                 atomic_inc(&sp->srcu_barrier_cpu_cnt);
                 sdp->srcu_barrier_head.func = srcu_barrier_cb;
                 debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1027,7 +1023,7 @@ void srcu_barrier(struct srcu_struct *sp)
                         debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
                         atomic_dec(&sp->srcu_barrier_cpu_cnt);
                 }
-                spin_unlock_irq(&sdp->lock);
+                raw_spin_unlock_irq_rcu_node(sdp);
         }
 
         /* Remove the initial count, at which point reaching zero can happen. */
@@ -1076,17 +1072,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
          */
         idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
         if (idx == SRCU_STATE_IDLE) {
-                spin_lock_irq(&sp->gp_lock);
+                raw_spin_lock_irq_rcu_node(sp);
                 if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
                         WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-                        spin_unlock_irq(&sp->gp_lock);
+                        raw_spin_unlock_irq_rcu_node(sp);
                         mutex_unlock(&sp->srcu_gp_mutex);
                         return;
                 }
                 idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
                 if (idx == SRCU_STATE_IDLE)
                         srcu_gp_start(sp);
-                spin_unlock_irq(&sp->gp_lock);
+                raw_spin_unlock_irq_rcu_node(sp);
                 if (idx != SRCU_STATE_IDLE) {
                         mutex_unlock(&sp->srcu_gp_mutex);
                         return; /* Someone else started the grace period. */
@@ -1135,20 +1131,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
         sdp = container_of(work, struct srcu_data, work.work);
         sp = sdp->sp;
         rcu_cblist_init(&ready_cbs);
-        spin_lock_irq(&sdp->lock);
-        smp_mb(); /* Old grace periods before callback invocation! */
+        raw_spin_lock_irq_rcu_node(sdp);
         rcu_segcblist_advance(&sdp->srcu_cblist,
                               rcu_seq_current(&sp->srcu_gp_seq));
         if (sdp->srcu_cblist_invoking ||
             !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
-                spin_unlock_irq(&sdp->lock);
+                raw_spin_unlock_irq_rcu_node(sdp);
                 return; /* Someone else on the job or nothing to do. */
         }
 
         /* We are on the job!  Extract and invoke ready callbacks. */
         sdp->srcu_cblist_invoking = true;
         rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
-        spin_unlock_irq(&sdp->lock);
+        raw_spin_unlock_irq_rcu_node(sdp);
         rhp = rcu_cblist_dequeue(&ready_cbs);
         for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
                 debug_rcu_head_unqueue(rhp);
@@ -1161,13 +1156,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
          * Update counts, accelerate new callbacks, and if needed,
          * schedule another round of callback invocation.
          */
-        spin_lock_irq(&sdp->lock);
+        raw_spin_lock_irq_rcu_node(sdp);
         rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
         (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                                        rcu_seq_snap(&sp->srcu_gp_seq));
         sdp->srcu_cblist_invoking = false;
         more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
-        spin_unlock_irq(&sdp->lock);
+        raw_spin_unlock_irq_rcu_node(sdp);
         if (more)
                 srcu_schedule_cbs_sdp(sdp, 0);
 }
@@ -1180,7 +1175,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 {
         bool pushgp = true;
 
-        spin_lock_irq(&sp->gp_lock);
+        raw_spin_lock_irq_rcu_node(sp);
         if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
                 if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
                         /* All requests fulfilled, time to go idle. */
@@ -1190,7 +1185,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
                         /* Outstanding request and no GP.  Start one. */
                         srcu_gp_start(sp);
                 }
-        spin_unlock_irq(&sp->gp_lock);
+        raw_spin_unlock_irq_rcu_node(sp);
 
         if (pushgp)
                 queue_delayed_work(system_power_efficient_wq, &sp->work, delay);