8234f6734c PM-runtime: Switch autosuspend over to using hrtimers
PM-runtime uses the timer infrastructure for autosuspend. This implies
that the minimum time before autosuspending a device is between 1 tick
(inclusive) and 2 ticks (exclusive):
- On arm64, with the default jiffies configuration (HZ=250), this means
  between 4 ms and 8 ms.
- On arm (HZ=100 by default), it is between 10 ms and 20 ms.
These values are quite high for embedded systems, which sometimes want
the duration to be on the order of 1 ms.
It is possible to switch autosuspend over to using hrtimers to get
finer granularity for short durations, while the hrtimer slack retains
some margin and keeps wakeups to a minimum for long timeouts.
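As a point of reference, here is a minimal, hedged sketch of the generic
hrtimer-with-slack pattern this relies on. It is not the code added to
drivers/base/power/runtime.c; the example_* names are made up for
illustration:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

/* Runs in hard-irq context when the (possibly coalesced) timeout fires. */
static enum hrtimer_restart example_expire(struct hrtimer *timer)
{
	/* e.g. queue the actual suspend work from here */
	return HRTIMER_NORESTART;
}

static void example_init(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	example_timer.function = example_expire;
}

static void example_arm(u64 expires_ns, u64 slack_ns)
{
	/*
	 * expires_ns is an absolute CLOCK_MONOTONIC instant; slack_ns lets
	 * the core expire the timer anywhere in [expires_ns, expires_ns +
	 * slack_ns], so short delays keep ns granularity while long ones
	 * can be coalesced with other wakeups.
	 */
	hrtimer_start_range_ns(&example_timer, ns_to_ktime(expires_ns),
			       slack_ns, HRTIMER_MODE_ABS);
}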
On an arm64 platform that uses a 1 ms autosuspend timeout for its GPU,
idle power is reduced by 10% with hrtimers.
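As a hedged illustration of the driver-side setup that benefits from
this (the foo_* names and the 1 ms value are hypothetical; the
pm_runtime_* helpers are the ones declared in pm_runtime.h below):

#include <linux/pm_runtime.h>

static void foo_runtime_pm_setup(struct device *dev)
{
	/* Ask for a 1 ms inactivity timeout before autosuspend. */
	pm_runtime_set_autosuspend_delay(dev, 1);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void foo_io_done(struct device *dev)
{
	/* The device was held active with pm_runtime_get_sync() during I/O. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

With timers, a 1 ms delay like this was effectively rounded up to whole
jiffies; with hrtimers it can expire close to the requested 1 ms.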
The latency impact on the arm64 HiKey octa-core board is:
- mark_last_busy: from 1.11 us to 1.25 us
- rpm_suspend: from 15.54 us to 15.38 us
[Only the code path of rpm_suspend() that starts the hrtimer has been
measured.]
The arm64 image (arm64 defconfig) shrinks by around 3 KB, with the
following details:
$ size vmlinux-timer
text data bss dec hex filename
12034646 6869268 386840 19290754 1265a82 vmlinux
$ size vmlinux-hrtimer
text data bss dec hex filename
12030550 6870164 387032 19287746 1264ec2 vmlinux
The latency impact on the 32-bit arm Snowball dual-core board is:
- mark_last_busy: from 0.31 us to 0.77 us
- rpm_suspend: from 6.83 us to 6.67 us
The image size increase for the Snowball platform, which was used for
the performance testing, is negligible (244 B).
$ size vmlinux-timer
text data bss dec hex filename
7157961 2119580 264120 9541661 91981d build-ux500/vmlinux
$ size vmlinux-hrtimer
text data bss dec hex filename
7157773 2119884 264248 9541905 919911 vmlinux-hrtimer
And the 32-bit arm image (multi_v7_defconfig) grows by around 1.7 KB,
with the following details:
$ size vmlinux-timer
text data bss dec hex filename
13304443 6803420 402768 20510631 138f7a7 vmlinux
$ size vmlinux-hrtimer
text data bss dec hex filename
13304299 6805276 402768 20512343 138fe57 vmlinux
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					    state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					    usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);

static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}

static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

static inline bool pm_runtime_callbacks_present(struct device *dev)
{
	return !dev->power.no_callbacks;
}

static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
}

static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable){}
static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device *dev) {}

#endif /* !CONFIG_PM */

static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}

static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}

#endif
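The header above ends at the final #endif. The following is not part of
pm_runtime.h; it is a hedged usage sketch (the foo_xfer() name is
hypothetical) showing the usual get/put pairing around a transfer with
the helpers declared above. Thanks to the !CONFIG_PM stubs, the same
code builds with runtime PM compiled out:

static int foo_xfer(struct device *dev)
{
	int ret;

	/* Resume the device (if needed) and bump usage_count. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync() bumped the count even on failure; balance it. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... talk to the hardware ... */

	/* Record activity and drop the reference, arming autosuspend. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}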