sched/fair: Add some serialization to the sched_domain load-balance walk

Since the sched_domain walk is completely unserialized (!SD_SERIALIZE)
it is possible that multiple cpus in the group get elected to do the
next level. Avoid this by adding some serialization.
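
The serialization itself is just a compare-and-swap election on the new
per-group balance_cpu field. As a standalone illustration of the pattern
(userspace C11 atomics standing in for the kernel's cmpxchg(); the helper
names are made up for the sketch, not taken from the kernel):

  /*
   * Sketch (not kernel code): the first CPU to swing balance_cpu from
   * -1 to its own id wins the right to balance the group; everyone
   * else backs off until the winner releases the claim.
   */
  #include <stdatomic.h>
  #include <stdio.h>

  struct group { atomic_int balance_cpu; };

  /* Returns 1 iff this_cpu won the election for g. */
  static int claim_group(struct group *g, int this_cpu)
  {
          int expected = -1;

          /* kernel analogue: cmpxchg(&g->balance_cpu, -1, this_cpu) == -1 */
          return atomic_compare_exchange_strong(&g->balance_cpu,
                                                &expected, this_cpu);
  }

  /* Owner-checked release: resets the claim only if this_cpu holds it. */
  static void release_group(struct group *g, int this_cpu)
  {
          int expected = this_cpu;

          atomic_compare_exchange_strong(&g->balance_cpu, &expected, -1);
  }

  int main(void)
  {
          struct group g = { .balance_cpu = -1 };

          printf("cpu0 claims: %d\n", claim_group(&g, 0)); /* 1: won   */
          printf("cpu1 claims: %d\n", claim_group(&g, 1)); /* 0: taken */
          release_group(&g, 0);
          printf("cpu1 claims: %d\n", claim_group(&g, 1)); /* 1: won   */
          return 0;
  }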

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-vqh9ai6s0ewmeakjz80w4qz6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0ce90475dc
parent c22402a2f7
Author: Peter Zijlstra
Date:   2012-04-25 00:30:36 +02:00
Commit: Ingo Molnar

 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -927,6 +927,7 @@ struct sched_group_power {
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
 	atomic_t ref;
+	int balance_cpu;
 
 	unsigned int group_weight;
 	struct sched_group_power *sgp;

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6060,6 +6060,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
+		sg->balance_cpu = -1;
 
 		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;
@@ -6135,6 +6136,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
+		sg->balance_cpu = -1;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3828,7 +3828,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 		 */
 		if (local_group) {
 			if (idle != CPU_NEWLY_IDLE) {
-				if (balance_cpu != this_cpu) {
+				if (balance_cpu != this_cpu ||
+				    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
 					*balance = 0;
 					return;
 				}
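
The effect of the gate above: when not newly idle, a CPU may balance at
this level only if it is the designated balance_cpu of its local group
*and* it wins the group-wide cmpxchg() on the new balance_cpu field,
swinging it from -1 to its own id. A losing CPU clears *balance, which
makes rebalance_domains() stop walking up. Annotated paraphrase of the
new condition:

  if (balance_cpu != this_cpu ||        /* not this group's designated CPU */
      cmpxchg(&group->balance_cpu,      /* try to claim the group ...      */
              -1, balance_cpu) != -1) { /* ... fails if already claimed    */
          *balance = 0;                 /* tell the caller to stop walking */
          return;
  }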
@@ -4929,7 +4930,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long interval;
-	struct sched_domain *sd;
+	struct sched_domain *sd, *last = NULL;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
@@ -4939,6 +4940,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
+		last = sd;
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
@@ -4983,6 +4985,9 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		if (!balance)
 			break;
 	}
+	for (sd = last; sd; sd = sd->child)
+		(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
+
 	rcu_read_unlock();
 
 	/*
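
The unwind loop above releases only the claims this CPU itself took:
walking down from the last visited domain via sd->child covers every
level the walk reached, and the cmpxchg() swings balance_cpu back to -1
only where it still holds this CPU's id. Presumably that is why the
release is a cmpxchg() rather than a plain store:

  /* Owner-checked release: only undo our own claim. */
  (void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
  /*
   * A blind "sd->groups->balance_cpu = -1" could wipe out a claim
   * still legitimately held by another CPU in the same group.
   */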