Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-01-19 20:07:04 +07:00)
sched: Rework CPU hotplug task selection
The CPU hotplug task selection is the only place where we used put_prev_task() on a task that is not current. While looking at that, it occurred to me that we can simplify all that by using a custom pick loop.

Since we don't need to put current, we can do away with the fake task too.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
parent f95d4eaee6
commit 10e7071b2f
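
Before the diff itself, here is a minimal standalone sketch of the pattern the new __pick_migrate_task() relies on: walk the scheduling classes in priority order and take the first task a class offers, with an always-populated idle class guaranteeing the walk finds something. This is plain userspace C written purely for illustration; toy_rq, toy_sched_class and the *_pick helpers are invented names, not kernel API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_task {
	const char *name;
};

struct toy_rq {
	struct toy_task *rt_task;	/* pretend RT runqueue: one slot */
	struct toy_task *fair_task;	/* pretend CFS runqueue: one slot */
	struct toy_task idle;		/* per-rq idle task, always present */
};

/* Each "class" only knows how to offer (and give back) a task. */
struct toy_sched_class {
	struct toy_task *(*pick_next_task)(struct toy_rq *rq);
	void (*put_prev_task)(struct toy_rq *rq, struct toy_task *p);
};

static struct toy_task *rt_pick(struct toy_rq *rq)   { return rq->rt_task; }
static struct toy_task *fair_pick(struct toy_rq *rq) { return rq->fair_task; }
static struct toy_task *idle_pick(struct toy_rq *rq) { return &rq->idle; }
static void noop_put(struct toy_rq *rq, struct toy_task *p) { (void)rq; (void)p; }

/* Priority order: rt, then fair, then idle (which never comes back empty). */
static const struct toy_sched_class classes[] = {
	{ rt_pick,   noop_put },
	{ fair_pick, noop_put },
	{ idle_pick, noop_put },
};

/* The shape of __pick_migrate_task(): first class with a task wins. */
static struct toy_task *pick_migrate_task(struct toy_rq *rq)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct toy_task *next = classes[i].pick_next_task(rq);

		if (next) {
			/* undo the pick right away; we only wanted the task */
			classes[i].put_prev_task(rq, next);
			return next;
		}
	}
	/* unreachable: the idle class above always returns a task */
	assert(0);
	return NULL;
}

int main(void)
{
	struct toy_task fair = { "kworker" };
	struct toy_rq rq = { .rt_task = NULL, .fair_task = &fair, .idle = { "idle" } };

	printf("picked: %s\n", pick_migrate_task(&rq)->name);	/* -> kworker */
	rq.fair_task = NULL;
	printf("picked: %s\n", pick_migrate_task(&rq)->name);	/* -> idle */
	return 0;
}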
kernel/sched/core.c
@@ -6082,22 +6082,23 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
-static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+static struct task_struct *__pick_migrate_task(struct rq *rq)
 {
-}
+	const struct sched_class *class;
+	struct task_struct *next;
 
-static const struct sched_class fake_sched_class = {
-	.put_prev_task = put_prev_task_fake,
-};
+	for_each_class(class) {
+		next = class->pick_next_task(rq, NULL, NULL);
+		if (next) {
+			next->sched_class->put_prev_task(rq, next);
+			return next;
+		}
+	}
 
-static struct task_struct fake_task = {
-	/*
-	 * Avoid pull_{rt,dl}_task()
-	 */
-	.prio = MAX_PRIO + 1,
-	.sched_class = &fake_sched_class,
-};
+	/* The idle class should always have a runnable task */
+	BUG();
+}
 
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -6139,12 +6140,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
 		if (rq->nr_running == 1)
 			break;
 
-		/*
-		 * pick_next_task() assumes pinned rq->lock:
-		 */
-		next = pick_next_task(rq, &fake_task, rf);
-		BUG_ON(!next);
-		put_prev_task(rq, next);
+		next = __pick_migrate_task(rq);
 
 		/*
 		 * Rules for changing task_struct::cpus_mask are holding
kernel/sched/sched.h
@@ -1751,6 +1751,7 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	WARN_ON_ONCE(rq->curr != prev);
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
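
One closing observation, paraphrasing the commit message: the hotplug drain above was the only caller that handed put_prev_task() a task other than rq->curr, and it now goes through __pick_migrate_task(), where each class puts back its own pick. That is what lets the sched.h hunk assert the stricter contract. A tiny standalone illustration of such a contract check, using invented toy types rather than the kernel's:

#include <stdio.h>

struct toy_task { const char *name; };
struct toy_rq   { struct toy_task *curr; };

/* Callers must hand back the task that is currently running on this rq. */
static void put_prev_task(struct toy_rq *rq, struct toy_task *prev)
{
	if (rq->curr != prev)
		fprintf(stderr, "put_prev_task: %s is not rq->curr\n", prev->name);
	/* ... class-specific bookkeeping would go here ... */
}

int main(void)
{
	struct toy_task a = { "a" }, b = { "b" };
	struct toy_rq rq = { .curr = &a };

	put_prev_task(&rq, &a);	/* fine: a is current */
	put_prev_task(&rq, &b);	/* warns: b is not current */
	return 0;
}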