sched/topology: Rename sched_group_cpus()
There's a discrepancy in naming between the sched_domain and sched_group
cpumask accessor. Since we're doing changes, fix it.

  $ git grep sched_group_cpus | wc -l
  28

  $ git grep sched_domain_span | wc -l
  38

Suggests changing sched_group_cpus() into sched_group_span():

  for i in `git grep -l sched_group_cpus`
  do
    sed -ie 's/sched_group_cpus/sched_group_span/g' $i
  done

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent e5c14b1fb8
commit ae4df9d6c9
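To make the naming convention the rename settles on concrete, here is a minimal, self-contained userspace sketch rather than the kernel code: the toy struct cpumask, the single-word bitmask, and NR_CPUS below are simplified stand-ins I introduce for illustration; only the two accessor names mirror what the diff uses.

  #include <stdio.h>

  #define NR_CPUS 8

  /* Toy one-word bitmask standing in for the kernel's struct cpumask. */
  struct cpumask { unsigned long bits; };

  struct sched_domain { struct cpumask span; };
  struct sched_group  { struct cpumask span; };

  /* The domain-side accessor already used the "span" wording. */
  static const struct cpumask *sched_domain_span(const struct sched_domain *sd)
  {
          return &sd->span;
  }

  /* The group-side accessor is what this commit renames from sched_group_cpus(). */
  static const struct cpumask *sched_group_span(const struct sched_group *sg)
  {
          return &sg->span;
  }

  int main(void)
  {
          struct sched_domain sd = { .span = { 0x0f } }; /* CPUs 0-3 */
          struct sched_group  sg = { .span = { 0x03 } }; /* CPUs 0-1 */
          int cpu;

          /* Call sites now read the same way on both sides of the hierarchy. */
          for (cpu = 0; cpu < NR_CPUS; cpu++)
                  if (sched_group_span(&sg)->bits & (1UL << cpu))
                          printf("group contains CPU%d\n", cpu);

          printf("domain span: 0x%lx\n", sched_domain_span(&sd)->bits);
          return 0;
  }

After running the sed loop from the changelog, `git grep sched_group_cpus` should come back empty, which is a quick way to confirm no call site was missed.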
@@ -5484,12 +5484,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		int i;
 
 		/* Skip over this group if it has no CPUs allowed */
-		if (!cpumask_intersects(sched_group_cpus(group),
+		if (!cpumask_intersects(sched_group_span(group),
 					&p->cpus_allowed))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
-					       sched_group_cpus(group));
+					       sched_group_span(group));
 
 		/*
 		 * Tally up the load of all CPUs in the group and find
@@ -5499,7 +5499,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		runnable_load = 0;
 		max_spare_cap = 0;
 
-		for_each_cpu(i, sched_group_cpus(group)) {
+		for_each_cpu(i, sched_group_span(group)) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -5602,10 +5602,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
 	/* Check if we have any choice: */
 	if (group->group_weight == 1)
-		return cpumask_first(sched_group_cpus(group));
+		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -7192,7 +7192,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 		 * span the current group.
 		 */
 
-		for_each_cpu(cpu, sched_group_cpus(sdg)) {
+		for_each_cpu(cpu, sched_group_span(sdg)) {
 			struct sched_group_capacity *sgc;
 			struct rq *rq = cpu_rq(cpu);
 
@@ -7371,7 +7371,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 	memset(sgs, 0, sizeof(*sgs));
 
-	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		/* Bias balancing toward cpus of our domain */
@@ -7535,7 +7535,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		struct sg_lb_stats *sgs = &tmp_sgs;
 		int local_group;
 
-		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
+		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
 		if (local_group) {
 			sds->local = sg;
 			sgs = local;
@@ -7890,7 +7890,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
-	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
 
@@ -8043,7 +8043,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.sd		= sd,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
-		.dst_grpmask	= sched_group_cpus(sd->groups),
+		.dst_grpmask	= sched_group_span(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
 		.cpus		= cpus,
@@ -1048,7 +1048,7 @@ struct sched_group {
 	unsigned long cpumask[0];
 };
 
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+static inline struct cpumask *sched_group_span(struct sched_group *sg)
 {
 	return to_cpumask(sg->cpumask);
 }
@@ -1067,7 +1067,7 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
  */
 static inline unsigned int group_first_cpu(struct sched_group *group)
 {
-	return cpumask_first(sched_group_cpus(group));
+	return cpumask_first(sched_group_span(group));
 }
 
 extern int group_balance_cpu(struct sched_group *sg);
@@ -53,7 +53,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		printk(KERN_ERR "ERROR: domain->span does not contain "
 				"CPU%d\n", cpu);
 	}
-	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
+	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain"
 				" CPU%d\n", cpu);
 	}
@@ -66,27 +66,27 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!cpumask_weight(sched_group_cpus(group))) {
+		if (!cpumask_weight(sched_group_span(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (!(sd->flags & SD_OVERLAP) &&
-		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
+		    cpumask_intersects(groupmask, sched_group_span(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
-		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
+		cpumask_or(groupmask, groupmask, sched_group_span(group));
 
 		printk(KERN_CONT " %d:{ span=%*pbl",
 				group->sgc->id,
-				cpumask_pr_args(sched_group_cpus(group)));
+				cpumask_pr_args(sched_group_span(group)));
 
 		if ((sd->flags & SD_OVERLAP) &&
-		    !cpumask_equal(group_balance_mask(group), sched_group_cpus(group))) {
+		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
 			printk(KERN_CONT " mask=%*pbl",
 				cpumask_pr_args(group_balance_mask(group)));
 		}
@@ -96,7 +96,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		if (group == sd->groups && sd->child &&
 		    !cpumask_equal(sched_domain_span(sd->child),
-				   sched_group_cpus(group))) {
+				   sched_group_span(group))) {
 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
 		}
 
@@ -618,7 +618,7 @@ int group_balance_cpu(struct sched_group *sg)
 static void
 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
 {
-	const struct cpumask *sg_span = sched_group_cpus(sg);
+	const struct cpumask *sg_span = sched_group_span(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
@@ -664,7 +664,7 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 	if (!sg)
 		return NULL;
 
-	sg_span = sched_group_cpus(sg);
+	sg_span = sched_group_span(sg);
 	if (sd->child)
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
 	else
@@ -682,7 +682,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
 	int cpu;
 
 	build_balance_mask(sd, sg, mask);
-	cpu = cpumask_first_and(sched_group_cpus(sg), mask);
+	cpu = cpumask_first_and(sched_group_span(sg), mask);
 
 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 	if (atomic_inc_return(&sg->sgc->ref) == 1)
@@ -695,7 +695,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
 	 * domains and no possible iteration will get us here, we won't
 	 * die on a /0 trap.
 	 */
-	sg_span = sched_group_cpus(sg);
+	sg_span = sched_group_span(sg);
 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 }
@@ -737,7 +737,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		if (!sg)
 			goto fail;
 
-		sg_span = sched_group_cpus(sg);
+		sg_span = sched_group_span(sg);
 		cpumask_or(covered, covered, sg_span);
 
 		init_overlap_sched_group(sd, sg);
@@ -848,14 +848,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	atomic_inc(&sg->sgc->ref);
 
 	if (child) {
-		cpumask_copy(sched_group_cpus(sg), sched_domain_span(child));
-		cpumask_copy(group_balance_mask(sg), sched_group_cpus(sg));
+		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
+		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
 	} else {
-		cpumask_set_cpu(cpu, sched_group_cpus(sg));
+		cpumask_set_cpu(cpu, sched_group_span(sg));
 		cpumask_set_cpu(cpu, group_balance_mask(sg));
 	}
 
-	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_cpus(sg));
+	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
 	return sg;
@@ -890,7 +890,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg = get_group(i, sdd);
 
-		cpumask_or(covered, covered, sched_group_cpus(sg));
+		cpumask_or(covered, covered, sched_group_span(sg));
 
 		if (!first)
 			first = sg;
@@ -923,12 +923,12 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	do {
 		int cpu, max_cpu = -1;
 
-		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg->group_weight = cpumask_weight(sched_group_span(sg));
 
 		if (!(sd->flags & SD_ASYM_PACKING))
 			goto next;
 
-		for_each_cpu(cpu, sched_group_cpus(sg)) {
+		for_each_cpu(cpu, sched_group_span(sg)) {
 			if (max_cpu < 0)
 				max_cpu = cpu;
 			else if (sched_asym_prefer(cpu, max_cpu))