sched/fair: Change update_load_avg() arguments
Most call sites of update_load_avg() already have cfs_rq_of(se)
available, pass it down instead of recomputing it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
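Condensed, the interface change in the diff below amounts to the following (prototypes only, bodies elided; a sketch for orientation rather than a literal excerpt):

/* Before: each call recomputed the cfs_rq from the entity. */
static inline void update_load_avg(struct sched_entity *se, int flags)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	/* ... */
}

/* After: the caller passes down the cfs_rq it already holds. */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/* ... */
}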
parent c7b5021681
commit 88c0616ee7
@@ -3480,9 +3480,8 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #define SKIP_AGE_LOAD	0x2
 
 /* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 	struct rq *rq = rq_of(cfs_rq);
 	int cpu = cpu_of(rq);
@@ -3643,9 +3642,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 
-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq_of(se));
+	cfs_rq_util_change(cfs_rq);
 }
 
 static inline void
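The second definition above is the stub flavour of update_load_avg(): the 0x0 flag values and the not_used1 parameter mark it as the no-op path, and the surrounding #ifdef is outside the hunk, so treating it as the !CONFIG_SMP build is an assumption here. Even the stub now saves a cfs_rq_of(se) lookup, roughly:

/* Assumed context: the !CONFIG_SMP stub, shown only to illustrate the saved lookup. */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
	/* no per-entity load tracking to age here; only signal the utilization change */
	cfs_rq_util_change(cfs_rq);
}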
@@ -3796,7 +3795,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *     its group cfs_rq
 	 *   - Add its new weight to cfs_rq->load.weight
 	 */
-	update_load_avg(se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, UPDATE_TG);
 	enqueue_entity_load_avg(cfs_rq, se);
 	update_cfs_shares(se);
 	account_entity_enqueue(cfs_rq, se);
@@ -3880,7 +3879,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
 	 */
-	update_load_avg(se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, UPDATE_TG);
 	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se, flags);
@@ -3968,7 +3967,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		 */
 		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -4070,7 +4069,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */
-		update_load_avg(prev, 0);
+		update_load_avg(cfs_rq, prev, 0);
 	}
 	cfs_rq->curr = NULL;
 }
@@ -4086,7 +4085,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-	update_load_avg(curr, UPDATE_TG);
+	update_load_avg(cfs_rq, curr, UPDATE_TG);
 	update_cfs_shares(curr);
 
 #ifdef CONFIG_SCHED_HRTICK
@@ -5004,7 +5003,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 		update_cfs_shares(se);
 	}
 
@@ -5063,7 +5062,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 		update_cfs_shares(se);
 	}
 
@@ -7121,7 +7120,7 @@ static void update_blocked_averages(int cpu)
 		/* Propagate pending load changes to the parent, if any: */
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
-			update_load_avg(se, 0);
+			update_load_avg(cfs_rq_of(se), se, 0);
 
 		/*
 		 * There can be a lot of idle CPU cgroups. Don't let fully
@@ -9295,7 +9294,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
-		update_load_avg(se, UPDATE_TG);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
 	}
 }
 #else
@@ -9307,7 +9306,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/* Catch up with the cfs_rq and remove our load when we leave */
-	update_load_avg(se, 0);
+	update_load_avg(cfs_rq, se, 0);
 	detach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
@@ -9326,7 +9325,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 #endif
 
 	/* Synchronize entity with its cfs_rq */
-	update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
 	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
@@ -9610,7 +9609,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		rq_lock_irqsave(rq, &rf);
 		update_rq_clock(rq);
 		for_each_sched_entity(se) {
-			update_load_avg(se, UPDATE_TG);
+			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 			update_cfs_shares(se);
 		}
 		rq_unlock_irqrestore(rq, &rf);
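As the hunks show, the call sites fall into two groups: most already hold the cfs_rq and simply pass it through, while update_blocked_averages() and sched_group_set_shares() only have the entity and now do the lookup at the call site. A minimal sketch of the two patterns (abbreviated, not a literal copy of the kernel code):

	/* Caller already has the cfs_rq in scope (e.g. enqueue_entity()). */
	update_load_avg(cfs_rq, se, UPDATE_TG);

	/* Caller only has the entity, so the cfs_rq_of() lookup moves to the
	 * call site (e.g. sched_group_set_shares()). */
	update_load_avg(cfs_rq_of(se), se, UPDATE_TG);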