sched / idle: Call idle_set_state() from cpuidle_enter_state()
Introduce a wrapper function around idle_set_state() called sched_idle_set_state() that will pass this_rq() to it as the first argument and make cpuidle_enter_state() call the new function before and after entering the target state. At the same time, remove direct invocations of idle_set_state() from call_cpuidle().

This will allow the invocation of default_idle_call() to be moved from call_cpuidle() to cpuidle_enter_state() safely and call_cpuidle() to be simplified a bit as a result.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Kevin Hilman <khilman@linaro.org>
This commit is contained in:
parent 7312280bd2
commit faad384928
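The net effect of the change, condensed: the scheduler keeps sole ownership of this_rq() behind sched_idle_set_state(), and cpuidle_enter_state() brackets the actual state entry with it. Below is a minimal sketch of the resulting shape, condensed from the hunks that follow; broadcast handling, tracing, timekeeping and error paths are omitted, so it is an outline of the idea rather than the full functions.

/* kernel/sched/idle.c: the scheduler side keeps this_rq() to itself. */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

/* drivers/cpuidle/cpuidle.c: simplified outline of cpuidle_enter_state(). */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	int entered_state;

	sched_idle_set_state(target_state);	/* take note of the planned idle state */
	entered_state = target_state->enter(dev, drv, index);
	sched_idle_set_state(NULL);		/* the CPU is no longer (about to be) idle */

	return entered_state;
}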
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -170,6 +170,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (broadcast && tick_broadcast_enter())
 		return -EBUSY;
 
+	/* Take note of the planned idle state. */
+	sched_idle_set_state(target_state);
+
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
 
@@ -178,6 +181,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
 			local_irq_disable();
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -200,6 +200,9 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 					struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -15,6 +15,15 @@
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+	idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -100,9 +109,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		return -EBUSY;
 	}
 
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
 	/*
 	 * Enter the idle state previously returned by the governor decision.
 	 * This function will block until an interrupt occurs and will take
@@ -110,9 +116,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 */
 	entered_state = cpuidle_enter(drv, dev, next_state);
 
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
 	if (entered_state == -EBUSY)
 		default_idle_call();
 
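As the changelog notes, once call_cpuidle() no longer touches the run queue, default_idle_call() can later be pulled into cpuidle_enter_state() as well, leaving the caller as little more than a pass-through. The following is a rough, hypothetical sketch of that simplified caller, not the actual follow-up patch; the real function keeps its polling and error guards.

/*
 * Hypothetical shape of call_cpuidle() once default_idle_call() also moves
 * into cpuidle_enter_state(): it simply forwards the governor's choice.
 */
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * Enter the idle state previously returned by the governor decision.
	 * cpuidle_enter() blocks until an interrupt occurs and takes care of
	 * re-enabling local interrupts; the -EBUSY fallback would now live in
	 * cpuidle_enter_state() instead of here.
	 */
	return cpuidle_enter(drv, dev, next_state);
}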