sched: refine negative nice level granularity
refine the granularity of negative nice level tasks: let them reschedule more often to offset the effect of them consuming their wait_runtime proportionately slower. (This makes nice-0 task scheduling smoother in the presence of negatively reniced tasks.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 7cff8cf61c
parent a69edb5560
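Read as arithmetic, the diff below amounts to the following (a sketch of the effective formulas, assuming the usual constants of this era of the scheduler, which are not part of the commit itself: NICE_0_LOAD = 1 << NICE_0_SHIFT = 1024, WMULT_SHIFT = 32, and load.inv_weight precomputed as roughly 2^32 / load.weight):

	weight == NICE_0_LOAD:  granularity' = granularity                            (nice-0, unchanged)
	weight <  NICE_0_LOAD:  granularity' = weight * granularity >> NICE_0_SHIFT   (positive nice, as before)
	weight >  NICE_0_LOAD:  granularity' = inv_weight * granularity >> WMULT_SHIFT
	                                     ≈ granularity / weight                   (negative nice, new)

Under these assumed constants, heavier (negatively reniced) tasks get a finer effective granularity and so are rescheduled more often, offsetting their proportionately slower wait_runtime consumption.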
kernel/sched_fair.c

@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 {
 	u64 tmp;
 
-	/*
-	 * Negative nice levels get the same granularity as nice-0:
-	 */
-	if (likely(curr->load.weight >= NICE_0_LOAD))
+	if (likely(curr->load.weight == NICE_0_LOAD))
 		return granularity;
 	/*
-	 * Positive nice level tasks get linearly finer
+	 * Positive nice levels get the same granularity as nice-0:
+	 */
+	if (likely(curr->load.weight < NICE_0_LOAD)) {
+		tmp = curr->load.weight * (u64)granularity;
+		return (long) (tmp >> NICE_0_SHIFT);
+	}
+	/*
+	 * Negative nice level tasks get linearly finer
 	 * granularity:
 	 */
-	tmp = curr->load.weight * (u64)granularity;
+	tmp = curr->load.inv_weight * (u64)granularity;
 
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> NICE_0_SHIFT);
+	return (long) (tmp >> WMULT_SHIFT);
 }
 
 static inline void
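For illustration, here is a small standalone C sketch of the new scaling — not the kernel code. The struct layout, the constants, and the sample weights are assumptions for demonstration, with inv_weight filled in as 2^32 / weight the way the kernel's precomputed reciprocal table approximates it:

/*
 * Standalone sketch (not kernel code) of niced_granularity() after
 * this commit. Assumptions: NICE_0_SHIFT = 10, WMULT_SHIFT = 32, and
 * inv_weight ~= 2^32 / weight. Sample weights are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_SHIFT	10
#define NICE_0_LOAD	(1UL << NICE_0_SHIFT)
#define WMULT_SHIFT	32

struct load_weight {
	unsigned long weight;		/* task's load weight */
	unsigned long inv_weight;	/* ~= 2^WMULT_SHIFT / weight */
};

static long niced_granularity(const struct load_weight *load,
			      unsigned long granularity)
{
	uint64_t tmp;

	/* Nice-0 tasks keep the granularity unchanged: */
	if (load->weight == NICE_0_LOAD)
		return (long)granularity;
	/* Positive nice (lighter): scale down by weight / 1024: */
	if (load->weight < NICE_0_LOAD) {
		tmp = load->weight * (uint64_t)granularity;
		return (long)(tmp >> NICE_0_SHIFT);
	}
	/*
	 * Negative nice (heavier): multiply by the fixed-point
	 * reciprocal instead of dividing, i.e. ~granularity / weight:
	 */
	tmp = load->inv_weight * (uint64_t)granularity;
	return (long)(tmp >> WMULT_SHIFT);
}

static struct load_weight make_load(unsigned long weight)
{
	struct load_weight lw = {
		.weight = weight,
		.inv_weight = (unsigned long)((1ULL << WMULT_SHIFT) / weight),
	};
	return lw;
}

int main(void)
{
	unsigned long g = 2000000;	/* 2 ms in ns, for illustration */
	struct load_weight nice0 = make_load(NICE_0_LOAD);
	struct load_weight light = make_load(512);	/* hypothetical positive nice */
	struct load_weight heavy = make_load(2048);	/* hypothetical negative nice */

	printf("nice-0 : %ld ns\n", niced_granularity(&nice0, g));
	printf("lighter: %ld ns\n", niced_granularity(&light, g));
	printf("heavier: %ld ns\n", niced_granularity(&heavy, g));
	return 0;
}

With these assumed numbers the lighter task gets 1,000,000 ns, while the heavier task gets about 976 ns versus 2,000,000 ns for nice-0 — a sharp reduction that matches the commit's "reschedule more often" intent.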