2011-07-02 03:12:45 +07:00
|
|
|
/*
|
|
|
|
* drivers/base/power/domain.c - Common code related to device power domains.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
|
|
|
|
*
|
|
|
|
* This file is released under the GPLv2.
|
|
|
|
*/
|
|
|
|
|
2015-06-26 16:14:14 +07:00
|
|
|
#include <linux/delay.h>
|
2011-07-02 03:12:45 +07:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/io.h>
|
2014-09-20 01:27:36 +07:00
|
|
|
#include <linux/platform_device.h>
|
2011-07-02 03:12:45 +07:00
|
|
|
#include <linux/pm_runtime.h>
|
|
|
|
#include <linux/pm_domain.h>
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 02:34:07 +07:00
|
|
|
#include <linux/pm_qos.h>
|
2014-12-01 18:50:21 +07:00
|
|
|
#include <linux/pm_clock.h>
|
2011-07-02 03:12:45 +07:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/err.h>
|
2011-07-12 05:39:29 +07:00
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/suspend.h>
|
2011-11-27 19:11:36 +07:00
|
|
|
#include <linux/export.h>
|
|
|
|
|
2016-01-07 22:46:14 +07:00
|
|
|
#include "power.h"
|
|
|
|
|
2015-06-26 16:14:14 +07:00
|
|
|
#define GENPD_RETRY_MAX_MS 250 /* Approximate */
|
|
|
|
|
2011-11-27 19:11:36 +07:00
|
|
|
/*
 * Invoke an optional per-device callback from @genpd's dev_ops, returning
 * (type)0 when the callback is not set.  Implemented as a GCC statement
 * expression so the result can be used as a value of the requested type.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})
|
2011-07-02 03:12:45 +07:00
|
|
|
|
2011-07-13 17:31:52 +07:00
|
|
|
/* Global list of registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
|
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
/*
 * Per-domain locking operations.  Ordinary domains use a mutex-backed
 * implementation; IRQ-safe domains use a spinlock-backed one, selected via
 * genpd->lock_ops (see genpd_mtx_ops and genpd_spin_ops below).
 */
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
|
|
|
|
|
|
|
|
/* Acquire the lock of a mutex-protected genpd. */
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}
|
|
|
|
|
|
|
|
/*
 * Acquire the lock of a mutex-protected genpd with an explicit lockdep
 * subclass (@depth), used when locking a master while a slave is held.
 */
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}
|
|
|
|
|
|
|
|
/*
 * Interruptibly acquire the lock of a mutex-protected genpd.
 * Returns 0 on success or a negative error if interrupted by a signal.
 */
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}
|
|
|
|
|
|
|
|
static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
|
|
|
|
{
|
|
|
|
return mutex_unlock(&genpd->mlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Lock operations for ordinary (sleepable) genpds. */
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
|
|
|
|
|
2016-10-15 00:47:55 +07:00
|
|
|
/*
 * Acquire the lock of an IRQ-safe genpd, disabling local interrupts.
 * The saved IRQ flags are stashed in genpd->lock_flags; a single field
 * suffices because it is only written while the lock is held.
 */
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}
|
|
|
|
|
|
|
|
/*
 * Acquire the lock of an IRQ-safe genpd with an explicit lockdep subclass
 * (@depth), used when locking a master while a slave is held.
 */
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}
|
|
|
|
|
|
|
|
/*
 * "Interruptible" lock for an IRQ-safe genpd.  Spinlock acquisition cannot
 * be interrupted, so this always succeeds and returns 0; it exists to
 * satisfy the genpd_lock_ops interface.
 */
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Release the lock of an IRQ-safe genpd, restoring the IRQ flags saved by
 * the matching genpd_lock_spin() variant.
 */
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
|
|
|
|
|
|
|
|
/* Lock operations for IRQ-safe genpds. */
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
|
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
/*
 * Dispatch locking through the per-domain lock_ops table, so mutex-based
 * and spinlock-based domains share one call site.  Macro parameters are
 * parenthesized so any pointer-valued expression expands safely.
 */
#define genpd_lock(p)			(p)->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		(p)->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	(p)->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			(p)->lock_ops->unlock(p)

#define genpd_is_irq_safe(genpd)	((genpd)->flags & GENPD_FLAG_IRQ_SAFE)
|
|
|
|
|
|
|
|
/*
 * Return true when @dev is runtime-PM IRQ safe but @genpd is not an
 * IRQ-safe domain.  Such a combination means @genpd cannot be powered off
 * (callers count the device as "not suspended"); warn once about it.
 */
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/* Warn once if IRQ safe dev in no sleep domain */
	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}
|
|
|
|
|
2015-03-21 00:20:33 +07:00
|
|
|
/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/*
	 * Only a domain found on the global gpd_list is genuinely a genpd;
	 * the walk is done under gpd_list_lock to keep the list stable.
	 */
	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
|
|
|
|
|
|
|
|
/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.  Unlike genpd_lookup_dev(),
 * no list lookup is performed; an invalid pm_domain yields -EINVAL.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}
|
2011-07-02 03:12:45 +07:00
|
|
|
|
2015-10-15 22:02:19 +07:00
|
|
|
/* Invoke the domain's optional ->stop() device callback for @dev. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
|
|
|
|
|
2015-10-15 22:02:19 +07:00
|
|
|
/* Invoke the domain's optional ->start() device callback for @dev. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
|
|
|
|
|
2011-08-09 04:43:04 +07:00
|
|
|
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
|
2011-07-02 03:12:45 +07:00
|
|
|
{
|
2011-08-09 04:43:04 +07:00
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
|
|
|
|
ret = !!atomic_dec_and_test(&genpd->sd_count);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Increment the subdomain counter of @genpd.  The barrier orders the
 * increment before any subsequent memory accesses made by the caller.
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
|
|
|
|
|
2016-12-08 20:45:20 +07:00
|
|
|
/*
 * _genpd_power_on - Invoke the domain's ->power_on() callback.
 * @genpd: PM domain to power on.
 * @timed: Whether to measure the callback's latency.
 *
 * Returns 0 when no ->power_on() callback is set.  When @timed, a
 * measured latency that exceeds the recorded worst case for the current
 * state updates power_on_latency_ns and flags max_off_time_changed so
 * governor results are recomputed.
 */
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	/* New worst-case latency observed; record it for the governor. */
	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}
|
|
|
|
|
2016-12-08 20:45:20 +07:00
|
|
|
/*
 * _genpd_power_off - Invoke the domain's ->power_off() callback.
 * @genpd: PM domain to power off.
 * @timed: Whether to measure the callback's latency.
 *
 * Returns 0 when no ->power_off() callback is set.  Only -EBUSY aborts
 * before the latency accounting; other error codes still flow through the
 * latency update below, matching the power-on path's bookkeeping.
 */
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	/* New worst-case latency observed; record it for the governor. */
	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}
|
|
|
|
|
2015-09-02 15:16:13 +07:00
|
|
|
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.  The work runs on the PM workqueue (pm_wq).
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
|
|
|
|
|
2017-02-17 16:55:23 +07:00
|
|
|
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the releated device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: lockdep nesting level for recursing into master domains.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 *
 * Must be called with @genpd's lock held.  Returns 0 on success, -EBUSY or
 * -EAGAIN when the domain cannot be powered off right now.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	/* A powered-on subdomain keeps this domain powered on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		/* Devices with these QoS flags veto the power off. */
		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	/* @one_dev_on permits exactly one not-yet-suspended device. */
	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it. In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Propagate upwards: each master may now be able to power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}
|
|
|
|
|
2011-07-02 03:13:10 +07:00
|
|
|
/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with @genpd's lock held.  On failure, any masters powered
 * on by this call are rolled back.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE)
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	/* Unwind: power off the masters already handled, in reverse order. */
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}
|
|
|
|
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 02:34:07 +07:00
|
|
|
/*
 * PM QoS notifier: a device's PM QoS constraint changed.  Mark the timing
 * data of the device (and of each ancestor that doesn't ignore children)
 * as stale so the default governors recompute their cached results.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	/* Walk up the device hierarchy, flagging each affected genpd. */
	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
|
|
|
|
|
2011-07-02 03:12:45 +07:00
|
|
|
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}
|
|
|
|
|
2016-03-31 16:21:27 +07:00
|
|
|
/**
|
|
|
|
* __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
|
|
|
|
* @dev: Device to handle.
|
|
|
|
*/
|
|
|
|
static int __genpd_runtime_suspend(struct device *dev)
|
|
|
|
{
|
|
|
|
int (*cb)(struct device *__dev);
|
|
|
|
|
|
|
|
if (dev->type && dev->type->pm)
|
|
|
|
cb = dev->type->pm->runtime_suspend;
|
|
|
|
else if (dev->class && dev->class->pm)
|
|
|
|
cb = dev->class->pm->runtime_suspend;
|
|
|
|
else if (dev->bus && dev->bus->pm)
|
|
|
|
cb = dev->bus->pm->runtime_suspend;
|
|
|
|
else
|
|
|
|
cb = NULL;
|
|
|
|
|
|
|
|
if (!cb && dev->driver && dev->driver->pm)
|
|
|
|
cb = dev->driver->pm->runtime_suspend;
|
|
|
|
|
|
|
|
return cb ? cb(dev) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
|
|
|
|
* @dev: Device to handle.
|
|
|
|
*/
|
|
|
|
static int __genpd_runtime_resume(struct device *dev)
|
|
|
|
{
|
|
|
|
int (*cb)(struct device *__dev);
|
|
|
|
|
|
|
|
if (dev->type && dev->type->pm)
|
|
|
|
cb = dev->type->pm->runtime_resume;
|
|
|
|
else if (dev->class && dev->class->pm)
|
|
|
|
cb = dev->class->pm->runtime_resume;
|
|
|
|
else if (dev->bus && dev->bus->pm)
|
|
|
|
cb = dev->bus->pm->runtime_resume;
|
|
|
|
else
|
|
|
|
cb = NULL;
|
|
|
|
|
|
|
|
if (!cb && dev->driver && dev->driver->pm)
|
|
|
|
cb = dev->driver->pm->runtime_resume;
|
|
|
|
|
|
|
|
return cb ? cb(dev) : 0;
|
|
|
|
}
|
|
|
|
|
2011-07-02 03:12:45 +07:00
|
|
|
/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 *
 * Returns -EINVAL if the device is not in a genpd, -EBUSY if the domain
 * governor rejects the suspend, or the error from the device's
 * ->runtime_suspend() / ->stop() path; 0 on success.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	/* Run the device's own ->runtime_suspend() hierarchy first ... */
	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	/* ... then let the domain stop the device (clocks etc.). */
	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		/* Undo the just-completed ->runtime_suspend() on failure. */
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			/* Tell the governors their cached results are stale. */
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	/*
	 * Try to power off the whole domain now that this device is stopped.
	 * NOTE(review): the second genpd_power_off() argument is true here but
	 * false in genpd_power_off_work_fn() - presumably it flags that one
	 * device is known to be suspending; confirm against genpd_power_off().
	 */
	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
2016-03-31 16:21:26 +07:00
|
|
|
* genpd_runtime_resume - Resume a device belonging to I/O PM domain.
|
2011-07-02 03:12:45 +07:00
|
|
|
* @dev: Device to resume.
|
|
|
|
*
|
|
|
|
* Carry out a runtime resume of a device under the assumption that its
|
|
|
|
* pm_domain field points to the domain member of an object of type
|
|
|
|
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
|
|
|
|
*/
|
2016-03-31 16:21:26 +07:00
|
|
|
static int genpd_runtime_resume(struct device *dev)
|
2011-07-02 03:12:45 +07:00
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2015-10-15 22:02:19 +07:00
|
|
|
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
|
2015-11-30 22:21:38 +07:00
|
|
|
bool runtime_pm = pm_runtime_enabled(dev);
|
2015-10-15 22:02:19 +07:00
|
|
|
ktime_t time_start;
|
|
|
|
s64 elapsed_ns;
|
2011-07-02 03:12:45 +07:00
|
|
|
int ret;
|
PM / Domains: Remove intermediate states from the power off sequence
Genpd's ->runtime_suspend() (assigned to pm_genpd_runtime_suspend())
doesn't immediately walk the hierarchy of ->runtime_suspend() callbacks.
Instead, pm_genpd_runtime_suspend() calls pm_genpd_poweroff() which
postpones that until *all* the devices in the genpd are runtime suspended.
When pm_genpd_poweroff() discovers that the last device in the genpd is
about to be runtime suspended, it calls __pm_genpd_save_device() for *all*
the devices in the genpd sequentially. Furthermore,
__pm_genpd_save_device() invokes the ->start() callback, walks the
hierarchy of the ->runtime_suspend() callbacks and invokes the ->stop()
callback. This causes a "thundering herd" problem.
Let's address this issue by having pm_genpd_runtime_suspend() immediately
walk the hierarchy of the ->runtime_suspend() callbacks, instead of
postponing that to the power off sequence via pm_genpd_poweroff(). If the
selected ->runtime_suspend() callback doesn't return an error code, call
pm_genpd_poweroff() to see if it's feasible to also power off the PM
domain.
Adopting this change enables us to simplify parts of the code in genpd,
for example the locking mechanism. Additionally, it gives some positive
side effects, as described below.
i)
One device's ->runtime_resume() latency is no longer affected by other
devices' latencies in a genpd.
The complexity genpd has to support the option to abort the power off
sequence suffers from latency issues. More precisely, a device that is
requested to be runtime resumed, may end up waiting for
__pm_genpd_save_device() to complete its operations for *another* device.
That's because pm_genpd_poweroff() can't confirm an abort request while it
waits for __pm_genpd_save_device() to return.
As this patch removes the intermediate states in pm_genpd_poweroff() while
powering off the PM domain, we no longer need the ability to abort that
sequence.
ii)
Make pm_runtime[_status]_suspended() reliable when used with genpd.
Until the last device in a genpd becomes idle, pm_genpd_runtime_suspend()
will return 0 without actually walking the hierarchy of the
->runtime_suspend() callbacks. However, by returning 0 the runtime PM core
considers the device as runtime_suspended, so
pm_runtime[_status]_suspended() will return true, even though the device
isn't (yet) runtime suspended.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks,
pm_runtime[_status]_suspended() will accurately reflect the status of the
device.
iii)
Enable fine-grained PM through runtime PM callbacks in drivers/subsystems.
There are currently cases where drivers/subsystems implement runtime PM
callbacks to deploy fine-grained PM (e.g. gate clocks, move pinctrl to
power-save state, etc.). While using the genpd, pm_genpd_runtime_suspend()
postpones invoking these callbacks until *all* the devices in the genpd
are runtime suspended. In essence, one runtime resumed device prevents
fine-grained PM for other devices within the same genpd.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks, fine-grained PM is enabled
throughout all the levels of runtime PM callbacks.
iiii)
Enable fine-grained PM for IRQ safe devices
Per the definition for an IRQ safe device, its runtime PM callbacks must
be able to execute in atomic context. In the path while genpd walks the
hierarchy of the ->runtime_suspend() callbacks for the device, it uses a
mutex. Therefore, genpd prevents that path to be executed for IRQ safe
devices.
As this patch changes pm_genpd_runtime_suspend() to immediately walk the
hierarchy of the ->runtime_suspend() callbacks and without needing to use
a mutex, fine-grained PM is enabled throughout all the levels of runtime
PM callbacks for IRQ safe devices.
Unfortunately this patch also comes with a drawback, as described in the
summary below.
Driver's/subsystem's runtime PM callbacks may be invoked even when the
genpd hasn't actually powered off the PM domain, potentially introducing
unnecessary latency.
However, in most cases, saving/restoring register contexts for devices are
typically fast operations or can be optimized in device specific ways
(e.g. shadow copies of register contents in memory, device-specific checks
to see if context has been lost before restoring context, etc.).
Still, in some cases the driver/subsystem may suffer from latency if
runtime PM is used in a very fine-grained manner (e.g. for each IO request
or xfer). To prevent that extra overhead, the driver/subsystem may deploy
the runtime PM autosuspend feature.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2015-06-18 20:17:53 +07:00
|
|
|
bool timed = true;
|
2011-07-02 03:12:45 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
2011-07-02 03:13:10 +07:00
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
2011-07-02 03:12:45 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2016-10-15 00:47:55 +07:00
|
|
|
/*
|
|
|
|
* As we don't power off a non IRQ safe domain, which holds
|
|
|
|
* an IRQ safe device, we don't need to restore power to it.
|
|
|
|
*/
|
|
|
|
if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
|
PM / Domains: Remove intermediate states from the power off sequence
Genpd's ->runtime_suspend() (assigned to pm_genpd_runtime_suspend())
doesn't immediately walk the hierarchy of ->runtime_suspend() callbacks.
Instead, pm_genpd_runtime_suspend() calls pm_genpd_poweroff() which
postpones that until *all* the devices in the genpd are runtime suspended.
When pm_genpd_poweroff() discovers that the last device in the genpd is
about to be runtime suspended, it calls __pm_genpd_save_device() for *all*
the devices in the genpd sequentially. Furthermore,
__pm_genpd_save_device() invokes the ->start() callback, walks the
hierarchy of the ->runtime_suspend() callbacks and invokes the ->stop()
callback. This causes a "thundering herd" problem.
Let's address this issue by having pm_genpd_runtime_suspend() immediately
walk the hierarchy of the ->runtime_suspend() callbacks, instead of
postponing that to the power off sequence via pm_genpd_poweroff(). If the
selected ->runtime_suspend() callback doesn't return an error code, call
pm_genpd_poweroff() to see if it's feasible to also power off the PM
domain.
Adopting this change enables us to simplify parts of the code in genpd,
for example the locking mechanism. Additionally, it gives some positive
side effects, as described below.
i)
One device's ->runtime_resume() latency is no longer affected by other
devices' latencies in a genpd.
The complexity genpd has to support the option to abort the power off
sequence suffers from latency issues. More precisely, a device that is
requested to be runtime resumed, may end up waiting for
__pm_genpd_save_device() to complete its operations for *another* device.
That's because pm_genpd_poweroff() can't confirm an abort request while it
waits for __pm_genpd_save_device() to return.
As this patch removes the intermediate states in pm_genpd_poweroff() while
powering off the PM domain, we no longer need the ability to abort that
sequence.
ii)
Make pm_runtime[_status]_suspended() reliable when used with genpd.
Until the last device in a genpd becomes idle, pm_genpd_runtime_suspend()
will return 0 without actually walking the hierarchy of the
->runtime_suspend() callbacks. However, by returning 0 the runtime PM core
considers the device as runtime_suspended, so
pm_runtime[_status]_suspended() will return true, even though the device
isn't (yet) runtime suspended.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks,
pm_runtime[_status]_suspended() will accurately reflect the status of the
device.
iii)
Enable fine-grained PM through runtime PM callbacks in drivers/subsystems.
There are currently cases where drivers/subsystems implement runtime PM
callbacks to deploy fine-grained PM (e.g. gate clocks, move pinctrl to
power-save state, etc.). While using the genpd, pm_genpd_runtime_suspend()
postpones invoking these callbacks until *all* the devices in the genpd
are runtime suspended. In essence, one runtime resumed device prevents
fine-grained PM for other devices within the same genpd.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks, fine-grained PM is enabled
throughout all the levels of runtime PM callbacks.
iiii)
Enable fine-grained PM for IRQ safe devices
Per the definition for an IRQ safe device, its runtime PM callbacks must
be able to execute in atomic context. In the path while genpd walks the
hierarchy of the ->runtime_suspend() callbacks for the device, it uses a
mutex. Therefore, genpd prevents that path to be executed for IRQ safe
devices.
As this patch changes pm_genpd_runtime_suspend() to immediately walk the
hierarchy of the ->runtime_suspend() callbacks and without needing to use
a mutex, fine-grained PM is enabled throughout all the levels of runtime
PM callbacks for IRQ safe devices.
Unfortunately this patch also comes with a drawback, as described in the
summary below.
Driver's/subsystem's runtime PM callbacks may be invoked even when the
genpd hasn't actually powered off the PM domain, potentially introducing
unnecessary latency.
However, in most cases, saving/restoring register contexts for devices are
typically fast operations or can be optimized in device specific ways
(e.g. shadow copies of register contents in memory, device-specific checks
to see if context has been lost before restoring context, etc.).
Still, in some cases the driver/subsystem may suffer from latency if
runtime PM is used in a very fine-grained manner (e.g. for each IO request
or xfer). To prevent that extra overhead, the driver/subsystem may deploy
the runtime PM autosuspend feature.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2015-06-18 20:17:53 +07:00
|
|
|
timed = false;
|
|
|
|
goto out;
|
|
|
|
}
|
2011-08-25 20:37:04 +07:00
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(genpd);
|
2016-12-08 20:45:20 +07:00
|
|
|
ret = genpd_power_on(genpd, 0);
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
PM / Domains: Allow callbacks to execute all runtime PM helpers
A deadlock may occur if one of the PM domains' .start_device() or
.stop_device() callbacks or a device driver's .runtime_suspend() or
.runtime_resume() callback executed by the core generic PM domain
code uses a "wrong" runtime PM helper function. This happens, for
example, if .runtime_resume() from one device's driver calls
pm_runtime_resume() for another device in the same PM domain.
A similar situation may take place if a device's parent is in the
same PM domain, in which case the runtime PM framework may execute
pm_genpd_runtime_resume() automatically for the parent (if it is
suspended at the moment). This, of course, is undesirable, so
the generic PM domains code should be modified to prevent it from
happening.
The runtime PM framework guarantees that pm_genpd_runtime_suspend()
and pm_genpd_runtime_resume() won't be executed in parallel for
the same device, so the generic PM domains code need not worry
about those cases. Still, it needs to prevent the other possible
race conditions between pm_genpd_runtime_suspend(),
pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff()
from happening and it needs to avoid deadlocks at the same time.
To this end, modify the generic PM domains code to relax
synchronization rules so that:
* pm_genpd_poweron() doesn't wait for the PM domain status to
change from GPD_STATE_BUSY. If it finds that the status is
not GPD_STATE_POWER_OFF, it returns without powering the domain on
(it may modify the status depending on the circumstances).
* pm_genpd_poweroff() returns as soon as it finds that the PM
domain's status changed from GPD_STATE_BUSY after it's released
the PM domain's lock.
* pm_genpd_runtime_suspend() doesn't wait for the PM domain status
to change from GPD_STATE_BUSY after executing the domain's
.stop_device() callback and executes pm_genpd_poweroff() only
if pm_genpd_runtime_resume() is not executed in parallel.
* pm_genpd_runtime_resume() doesn't wait for the PM domain status
to change from GPD_STATE_BUSY after executing pm_genpd_poweron()
and sets the domain's status to GPD_STATE_BUSY and increments its
counter of resuming devices (introduced by this change) immediately
after acquiring the lock. The counter of resuming devices is then
decremented after executing __pm_genpd_runtime_resume() for the
device and the domain's status is reset to GPD_STATE_ACTIVE (unless
there are more resuming devices in the domain, in which case the
status remains GPD_STATE_BUSY).
This way, for example, if a device driver's .runtime_resume()
callback executes pm_runtime_resume() for another device in the same
PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume()
invoked by the runtime PM framework will not block and it will see
that there's nothing to do for it. Next, the PM domain's lock will
be acquired without waiting for its status to change from
GPD_STATE_BUSY and the device driver's .runtime_resume() callback
will be executed. In turn, if pm_runtime_suspend() is executed by
one device driver's .runtime_resume() callback for another device in
the same PM domain, pm_genpd_poweroff() executed by
pm_genpd_runtime_suspend() invoked by the runtime PM framework as a
result will notice that one of the devices in the domain is being
resumed, so it will return immediately.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2011-07-12 05:39:36 +07:00
|
|
|
|
PM / Domains: Remove intermediate states from the power off sequence
Genpd's ->runtime_suspend() (assigned to pm_genpd_runtime_suspend())
doesn't immediately walk the hierarchy of ->runtime_suspend() callbacks.
Instead, pm_genpd_runtime_suspend() calls pm_genpd_poweroff() which
postpones that until *all* the devices in the genpd are runtime suspended.
When pm_genpd_poweroff() discovers that the last device in the genpd is
about to be runtime suspended, it calls __pm_genpd_save_device() for *all*
the devices in the genpd sequentially. Furthermore,
__pm_genpd_save_device() invokes the ->start() callback, walks the
hierarchy of the ->runtime_suspend() callbacks and invokes the ->stop()
callback. This causes a "thundering herd" problem.
Let's address this issue by having pm_genpd_runtime_suspend() immediately
walk the hierarchy of the ->runtime_suspend() callbacks, instead of
postponing that to the power off sequence via pm_genpd_poweroff(). If the
selected ->runtime_suspend() callback doesn't return an error code, call
pm_genpd_poweroff() to see if it's feasible to also power off the PM
domain.
Adopting this change enables us to simplify parts of the code in genpd,
for example the locking mechanism. Additionally, it gives some positive
side effects, as described below.
i)
One device's ->runtime_resume() latency is no longer affected by other
devices' latencies in a genpd.
The complexity genpd has to support the option to abort the power off
sequence suffers from latency issues. More precisely, a device that is
requested to be runtime resumed, may end up waiting for
__pm_genpd_save_device() to complete its operations for *another* device.
That's because pm_genpd_poweroff() can't confirm an abort request while it
waits for __pm_genpd_save_device() to return.
As this patch removes the intermediate states in pm_genpd_poweroff() while
powering off the PM domain, we no longer need the ability to abort that
sequence.
ii)
Make pm_runtime[_status]_suspended() reliable when used with genpd.
Until the last device in a genpd becomes idle, pm_genpd_runtime_suspend()
will return 0 without actually walking the hierarchy of the
->runtime_suspend() callbacks. However, by returning 0 the runtime PM core
considers the device as runtime_suspended, so
pm_runtime[_status]_suspended() will return true, even though the device
isn't (yet) runtime suspended.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks,
pm_runtime[_status]_suspended() will accurately reflect the status of the
device.
iii)
Enable fine-grained PM through runtime PM callbacks in drivers/subsystems.
There are currently cases where drivers/subsystems implement runtime PM
callbacks to deploy fine-grained PM (e.g. gate clocks, move pinctrl to
power-save state, etc.). While using the genpd, pm_genpd_runtime_suspend()
postpones invoking these callbacks until *all* the devices in the genpd
are runtime suspended. In essence, one runtime resumed device prevents
fine-grained PM for other devices within the same genpd.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks, fine-grained PM is enabled
throughout all the levels of runtime PM callbacks.
iiii)
Enable fine-grained PM for IRQ safe devices
Per the definition for an IRQ safe device, its runtime PM callbacks must
be able to execute in atomic context. In the path while genpd walks the
hierarchy of the ->runtime_suspend() callbacks for the device, it uses a
mutex. Therefore, genpd prevents that path to be executed for IRQ safe
devices.
As this patch changes pm_genpd_runtime_suspend() to immediately walk the
hierarchy of the ->runtime_suspend() callbacks and without needing to use
a mutex, fine-grained PM is enabled throughout all the levels of runtime
PM callbacks for IRQ safe devices.
Unfortunately this patch also comes with a drawback, as described in the
summary below.
Driver's/subsystem's runtime PM callbacks may be invoked even when the
genpd hasn't actually powered off the PM domain, potentially introducing
unnecessary latency.
However, in most cases, saving/restoring register contexts for devices are
typically fast operations or can be optimized in device specific ways
(e.g. shadow copies of register contents in memory, device-specific checks
to see if context has been lost before restoring context, etc.).
Still, in some cases the driver/subsystem may suffer from latency if
runtime PM is used in a very fine-grained manner (e.g. for each IO request
or xfer). To prevent that extra overhead, the driver/subsystem may deploy
the runtime PM autosuspend feature.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2015-06-18 20:17:53 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
PM / Domains: Allow callbacks to execute all runtime PM helpers
A deadlock may occur if one of the PM domains' .start_device() or
.stop_device() callbacks or a device driver's .runtime_suspend() or
.runtime_resume() callback executed by the core generic PM domain
code uses a "wrong" runtime PM helper function. This happens, for
example, if .runtime_resume() from one device's driver calls
pm_runtime_resume() for another device in the same PM domain.
A similar situation may take place if a device's parent is in the
same PM domain, in which case the runtime PM framework may execute
pm_genpd_runtime_resume() automatically for the parent (if it is
suspended at the moment). This, of course, is undesirable, so
the generic PM domains code should be modified to prevent it from
happening.
The runtime PM framework guarantees that pm_genpd_runtime_suspend()
and pm_genpd_runtime_resume() won't be executed in parallel for
the same device, so the generic PM domains code need not worry
about those cases. Still, it needs to prevent the other possible
race conditions between pm_genpd_runtime_suspend(),
pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff()
from happening and it needs to avoid deadlocks at the same time.
To this end, modify the generic PM domains code to relax
synchronization rules so that:
* pm_genpd_poweron() doesn't wait for the PM domain status to
change from GPD_STATE_BUSY. If it finds that the status is
not GPD_STATE_POWER_OFF, it returns without powering the domain on
(it may modify the status depending on the circumstances).
* pm_genpd_poweroff() returns as soon as it finds that the PM
domain's status changed from GPD_STATE_BUSY after it's released
the PM domain's lock.
* pm_genpd_runtime_suspend() doesn't wait for the PM domain status
to change from GPD_STATE_BUSY after executing the domain's
.stop_device() callback and executes pm_genpd_poweroff() only
if pm_genpd_runtime_resume() is not executed in parallel.
* pm_genpd_runtime_resume() doesn't wait for the PM domain status
to change from GPD_STATE_BUSY after executing pm_genpd_poweron()
and sets the domain's status to GPD_STATE_BUSY and increments its
counter of resuming devices (introduced by this change) immediately
after acquiring the lock. The counter of resuming devices is then
decremented after executing __pm_genpd_runtime_resume() for the
device and the domain's status is reset to GPD_STATE_ACTIVE (unless
there are more resuming devices in the domain, in which case the
status remains GPD_STATE_BUSY).
This way, for example, if a device driver's .runtime_resume()
callback executes pm_runtime_resume() for another device in the same
PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume()
invoked by the runtime PM framework will not block and it will see
that there's nothing to do for it. Next, the PM domain's lock will
be acquired without waiting for its status to change from
GPD_STATE_BUSY and the device driver's .runtime_resume() callback
will be executed. In turn, if pm_runtime_suspend() is executed by
one device driver's .runtime_resume() callback for another device in
the same PM domain, pm_genpd_poweroff() executed by
pm_genpd_runtime_suspend() invoked by the runtime PM framework as a
result will notice that one of the devices in the domain is being
resumed, so it will return immediately.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2011-07-12 05:39:36 +07:00
|
|
|
|
PM / Domains: Remove intermediate states from the power off sequence
Genpd's ->runtime_suspend() (assigned to pm_genpd_runtime_suspend())
doesn't immediately walk the hierarchy of ->runtime_suspend() callbacks.
Instead, pm_genpd_runtime_suspend() calls pm_genpd_poweroff() which
postpones that until *all* the devices in the genpd are runtime suspended.
When pm_genpd_poweroff() discovers that the last device in the genpd is
about to be runtime suspended, it calls __pm_genpd_save_device() for *all*
the devices in the genpd sequentially. Furthermore,
__pm_genpd_save_device() invokes the ->start() callback, walks the
hierarchy of the ->runtime_suspend() callbacks and invokes the ->stop()
callback. This causes a "thundering herd" problem.
Let's address this issue by having pm_genpd_runtime_suspend() immediately
walk the hierarchy of the ->runtime_suspend() callbacks, instead of
postponing that to the power off sequence via pm_genpd_poweroff(). If the
selected ->runtime_suspend() callback doesn't return an error code, call
pm_genpd_poweroff() to see if it's feasible to also power off the PM
domain.
Adopting this change enables us to simplify parts of the code in genpd,
for example the locking mechanism. Additionally, it gives some positive
side effects, as described below.
i)
One device's ->runtime_resume() latency is no longer affected by other
devices' latencies in a genpd.
The complexity genpd has to support the option to abort the power off
sequence suffers from latency issues. More precisely, a device that is
requested to be runtime resumed, may end up waiting for
__pm_genpd_save_device() to complete its operations for *another* device.
That's because pm_genpd_poweroff() can't confirm an abort request while it
waits for __pm_genpd_save_device() to return.
As this patch removes the intermediate states in pm_genpd_poweroff() while
powering off the PM domain, we no longer need the ability to abort that
sequence.
ii)
Make pm_runtime[_status]_suspended() reliable when used with genpd.
Until the last device in a genpd becomes idle, pm_genpd_runtime_suspend()
will return 0 without actually walking the hierarchy of the
->runtime_suspend() callbacks. However, by returning 0 the runtime PM core
considers the device as runtime_suspended, so
pm_runtime[_status]_suspended() will return true, even though the device
isn't (yet) runtime suspended.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks,
pm_runtime[_status]_suspended() will accurately reflect the status of the
device.
iii)
Enable fine-grained PM through runtime PM callbacks in drivers/subsystems.
There are currently cases where drivers/subsystems implement runtime PM
callbacks to deploy fine-grained PM (e.g. gate clocks, move pinctrl to
power-save state, etc.). While using the genpd, pm_genpd_runtime_suspend()
postpones invoking these callbacks until *all* the devices in the genpd
are runtime suspended. In essence, one runtime resumed device prevents
fine-grained PM for other devices within the same genpd.
After this patch, since pm_genpd_runtime_suspend() immediately walks the
hierarchy of the ->runtime_suspend() callbacks, fine-grained PM is enabled
throughout all the levels of runtime PM callbacks.
iiii)
Enable fine-grained PM for IRQ safe devices
Per the definition for an IRQ safe device, its runtime PM callbacks must
be able to execute in atomic context. In the path while genpd walks the
hierarchy of the ->runtime_suspend() callbacks for the device, it uses a
mutex. Therefore, genpd prevents that path to be executed for IRQ safe
devices.
As this patch changes pm_genpd_runtime_suspend() to immediately walk the
hierarchy of the ->runtime_suspend() callbacks and without needing to use
a mutex, fine-grained PM is enabled throughout all the levels of runtime
PM callbacks for IRQ safe devices.
Unfortunately this patch also comes with a drawback, as described in the
summary below.
Driver's/subsystem's runtime PM callbacks may be invoked even when the
genpd hasn't actually powered off the PM domain, potentially introducing
unnecessary latency.
However, in most cases, saving/restoring register contexts for devices are
typically fast operations or can be optimized in device specific ways
(e.g. shadow copies of register contents in memory, device-specific checks
to see if context has been lost before restoring context, etc.).
Still, in some cases the driver/subsystem may suffer from latency if
runtime PM is used in a very fine-grained manner (e.g. for each IO request
or xfer). To prevent that extra overhead, the driver/subsystem may deploy
the runtime PM autosuspend feature.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2015-06-18 20:17:53 +07:00
|
|
|
out:
|
2015-10-15 22:02:19 +07:00
|
|
|
/* Measure resume latency. */
|
2016-12-30 18:34:08 +07:00
|
|
|
time_start = 0;
|
2015-11-30 22:21:38 +07:00
|
|
|
if (timed && runtime_pm)
|
2015-10-15 22:02:19 +07:00
|
|
|
time_start = ktime_get();
|
|
|
|
|
2016-03-02 06:20:38 +07:00
|
|
|
ret = genpd_start_dev(genpd, dev);
|
|
|
|
if (ret)
|
|
|
|
goto err_poweroff;
|
|
|
|
|
2016-03-31 16:21:27 +07:00
|
|
|
ret = __genpd_runtime_resume(dev);
|
2016-03-02 06:20:38 +07:00
|
|
|
if (ret)
|
|
|
|
goto err_stop;
|
2015-10-15 22:02:19 +07:00
|
|
|
|
|
|
|
/* Update resume latency value if the measured time exceeds it. */
|
2015-11-30 22:21:38 +07:00
|
|
|
if (timed && runtime_pm) {
|
2015-10-15 22:02:19 +07:00
|
|
|
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
|
|
|
|
if (elapsed_ns > td->resume_latency_ns) {
|
|
|
|
td->resume_latency_ns = elapsed_ns;
|
|
|
|
dev_dbg(dev, "resume latency exceeded, %lld ns\n",
|
|
|
|
elapsed_ns);
|
|
|
|
genpd->max_off_time_changed = true;
|
|
|
|
td->constraint_changed = true;
|
|
|
|
}
|
|
|
|
}
|
2011-07-12 05:39:29 +07:00
|
|
|
|
2011-07-02 03:12:45 +07:00
|
|
|
return 0;
|
2016-03-02 06:20:38 +07:00
|
|
|
|
|
|
|
err_stop:
|
|
|
|
genpd_stop_dev(genpd, dev);
|
|
|
|
err_poweroff:
|
2016-10-15 00:47:55 +07:00
|
|
|
if (!pm_runtime_is_irq_safe(dev) ||
|
|
|
|
(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(genpd);
|
2017-02-17 16:55:25 +07:00
|
|
|
genpd_power_off(genpd, true, 0);
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
2016-03-02 06:20:38 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2011-07-02 03:12:45 +07:00
|
|
|
}
|
|
|
|
|
2014-03-28 12:20:21 +07:00
|
|
|
/*
 * "pd_ignore_unused" kernel command line option: when set,
 * genpd_power_off_unused() below leaves all power domains on instead of
 * powering off the unused ones (useful e.g. while debugging a platform
 * bring-up).
 */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;	/* non-zero: option consumed, don't pass it to init */
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
|
|
|
|
|
2011-08-14 18:34:31 +07:00
|
|
|
/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 *
 * Late initcall that walks the global list of generic PM domains and queues
 * a power-off work item for each of them; the work function presumably
 * skips domains that still have devices in use.  Honors the
 * "pd_ignore_unused" command line option (see pd_ignore_unused_setup()).
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	/* Hold the list lock only to queue the work, not to execute it. */
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
|
2014-09-03 17:52:26 +07:00
|
|
|
|
2016-09-12 18:01:10 +07:00
|
|
|
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2012-08-06 06:39:57 +07:00
|
|
|
/**
|
|
|
|
* pm_genpd_present - Check if the given PM domain has been initialized.
|
|
|
|
* @genpd: PM domain to check.
|
|
|
|
*/
|
2014-11-11 01:37:39 +07:00
|
|
|
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
|
2012-08-06 06:39:57 +07:00
|
|
|
{
|
2014-11-11 01:37:39 +07:00
|
|
|
const struct generic_pm_domain *gpd;
|
2012-08-06 06:39:57 +07:00
|
|
|
|
|
|
|
if (IS_ERR_OR_NULL(genpd))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
list_for_each_entry(gpd, &gpd_list, gpd_list_node)
|
|
|
|
if (gpd == genpd)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-09-12 18:01:10 +07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
|
2011-11-27 19:11:36 +07:00
|
|
|
/*
 * Invoke the domain's optional ->active_wakeup() device callback for @dev
 * through GENPD_DEV_CALLBACK().  Used by resume_needed() below to decide
 * whether @dev must stay powered to signal wakeup during system sleep.
 */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
|
|
|
|
|
2011-07-02 03:13:19 +07:00
|
|
|
/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/*
	 * Bail out if some device in the domain is not suspended or if any
	 * subdomain is still powered on (sd_count > 0).
	 */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	_genpd_power_off(genpd, false);

	genpd->status = GPD_STATE_POWER_OFF;

	/*
	 * Drop each master's subdomain refcount and recursively try to power
	 * the masters off too.  genpd_lock_nested() with depth + 1 keeps
	 * lockdep happy about the nested domain locks.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}
|
|
|
|
|
2012-08-06 06:39:16 +07:00
|
|
|
/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	/*
	 * Power on the masters first and take a subdomain reference on each
	 * of them, so they cannot be powered off while this domain is on.
	 * genpd_lock_nested() with depth + 1 keeps lockdep happy about the
	 * nested domain locks.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}
|
|
|
|
|
PM / Domains: Improve handling of wakeup devices during system suspend
Kevin points out that if there's a device that can wake up the system
from sleep states, but it doesn't generate wakeup signals by itself
(they are generated on its behalf by other parts of the system) and
it currently is not enabled to wake up the system (that is,
device_may_wakeup() returns "false" for it), we may need to change
its wakeup settings during system suspend (for example, the device
might have been configured to signal remote wakeup from the system's
working state, as needed by runtime PM). Therefore the generic PM
domains code should invoke the system suspend callbacks provided by
the device's driver, which it doesn't do if the PM domain is powered
off during the system suspend's "prepare" stage. This is a valid
point. Moreover, this code also should make sure that system wakeup
devices that are enabled to wake up the system from sleep states and
have to remain active for this purpose are not suspended while the
system is in a sleep state.
To avoid the above issues, make the generic PM domains' .prepare()
routine, pm_genpd_prepare(), force runtime resume of devices whose
system wakeup settings may need to be changed during system suspend
or that should remain active while the system is in a sleep state to
be able to wake it up from that state.
Reported-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2011-07-12 05:39:57 +07:00
|
|
|
/**
|
|
|
|
* resume_needed - Check whether to resume a device before system suspend.
|
|
|
|
* @dev: Device to check.
|
|
|
|
* @genpd: PM domain the device belongs to.
|
|
|
|
*
|
|
|
|
* There are two cases in which a device that can wake up the system from sleep
|
|
|
|
* states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
|
|
|
|
* to wake up the system and it has to remain active for this purpose while the
|
|
|
|
* system is in the sleep state and (2) if the device is not enabled to wake up
|
|
|
|
* the system from sleep states and it generally doesn't generate wakeup signals
|
|
|
|
* by itself (those signals are generated on its behalf by other parts of the
|
|
|
|
* system). In the latter case it may be necessary to reconfigure the device's
|
|
|
|
* wakeup settings during system suspend, because it may have been set up to
|
|
|
|
* signal remote wakeup from the system's working state as needed by runtime PM.
|
|
|
|
* Return 'true' in either of the above cases.
|
|
|
|
*/
|
|
|
|
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
|
|
|
|
{
|
|
|
|
bool active_wakeup;
|
|
|
|
|
|
|
|
if (!device_can_wakeup(dev))
|
|
|
|
return false;
|
|
|
|
|
2011-11-27 19:11:36 +07:00
|
|
|
active_wakeup = genpd_dev_active_wakeup(genpd, dev);
|
PM / Domains: Improve handling of wakeup devices during system suspend
Kevin points out that if there's a device that can wake up the system
from sleep states, but it doesn't generate wakeup signals by itself
(they are generated on its behalf by other parts of the system) and
it currently is not enabled to wake up the system (that is,
device_may_wakeup() returns "false" for it), we may need to change
its wakeup settings during system suspend (for example, the device
might have been configured to signal remote wakeup from the system's
working state, as needed by runtime PM). Therefore the generic PM
domains code should invoke the system suspend callbacks provided by
the device's driver, which it doesn't do if the PM domain is powered
off during the system suspend's "prepare" stage. This is a valid
point. Moreover, this code also should make sure that system wakeup
devices that are enabled to wake up the system from sleep states and
have to remain active for this purpose are not suspended while the
system is in a sleep state.
To avoid the above issues, make the generic PM domains' .prepare()
routine, pm_genpd_prepare(), force runtime resume of devices whose
system wakeup settings may need to be changed during system suspend
or that should remain active while the system is in a sleep state to
be able to wake it up from that state.
Reported-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2011-07-12 05:39:57 +07:00
|
|
|
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
|
|
|
|
}
|
|
|
|
|
2011-07-02 03:13:19 +07:00
|
|
|
/**
|
|
|
|
* pm_genpd_prepare - Start power transition of a device in a PM domain.
|
|
|
|
* @dev: Device to start the transition of.
|
|
|
|
*
|
|
|
|
* Start a power transition of a device (during a system-wide power transition)
|
|
|
|
* under the assumption that its pm_domain field points to the domain member of
|
|
|
|
* an object of type struct generic_pm_domain representing a PM domain
|
|
|
|
* consisting of I/O devices.
|
|
|
|
*/
|
|
|
|
static int pm_genpd_prepare(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2011-07-12 05:39:21 +07:00
|
|
|
int ret;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2011-07-12 05:39:29 +07:00
|
|
|
/*
|
|
|
|
* If a wakeup request is pending for the device, it should be woken up
|
|
|
|
* at this point and a system wakeup event should be reported if it's
|
|
|
|
* set up to wake up the system from sleep states.
|
|
|
|
*/
|
PM / Domains: Improve handling of wakeup devices during system suspend
Kevin points out that if there's a device that can wake up the system
from sleep states, but it doesn't generate wakeup signals by itself
(they are generated on its behalf by other parts of the system) and
it currently is not enabled to wake up the system (that is,
device_may_wakeup() returns "false" for it), we may need to change
its wakeup settings during system suspend (for example, the device
might have been configured to signal remote wakeup from the system's
working state, as needed by runtime PM). Therefore the generic PM
domains code should invoke the system suspend callbacks provided by
the device's driver, which it doesn't do if the PM domain is powered
off during the system suspend's "prepare" stage. This is a valid
point. Moreover, this code also should make sure that system wakeup
devices that are enabled to wake up the system from sleep states and
have to remain active for this purpose are not suspended while the
system is in a sleep state.
To avoid the above issues, make the generic PM domains' .prepare()
routine, pm_genpd_prepare(), force runtime resume of devices whose
system wakeup settings may need to be changed during system suspend
or that should remain active while the system is in a sleep state to
be able to wake it up from that state.
Reported-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2011-07-12 05:39:57 +07:00
|
|
|
if (resume_needed(dev, genpd))
|
|
|
|
pm_runtime_resume(dev);
|
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
if (genpd->prepared_count++ == 0)
|
2012-03-14 04:39:37 +07:00
|
|
|
genpd->suspended_count = 0;
|
2011-07-12 05:39:29 +07:00
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2011-07-12 05:39:21 +07:00
|
|
|
ret = pm_generic_prepare(dev);
|
|
|
|
if (ret) {
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(genpd);
|
2011-07-12 05:39:21 +07:00
|
|
|
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
genpd->prepared_count--;
|
2011-07-12 05:39:21 +07:00
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
2011-07-12 05:39:21 +07:00
|
|
|
}
|
2011-07-12 05:39:29 +07:00
|
|
|
|
2011-07-12 05:39:21 +07:00
|
|
|
return ret;
|
2011-07-02 03:13:19 +07:00
|
|
|
}
|
|
|
|
|
2012-01-30 02:39:02 +07:00
|
|
|
/**
|
|
|
|
* pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
|
|
|
|
* @dev: Device to suspend.
|
|
|
|
*
|
|
|
|
* Stop the device and remove power from the domain if all devices in it have
|
|
|
|
* been stopped.
|
|
|
|
*/
|
|
|
|
static int pm_genpd_suspend_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-05-30 16:33:14 +07:00
|
|
|
int ret;
|
2012-01-30 02:39:02 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
|
PM / Domains: Wakeup devices support for system sleep transitions
There is the problem how to handle devices set up to wake up the
system from sleep states during system-wide power transitions.
In some cases, those devices can be turned off entirely, because the
wakeup signals will be generated on their behalf anyway. In some
other cases, they will generate wakeup signals if their clocks are
stopped, but only if power is not removed from them. Finally, in
some cases, they can only generate wakeup signals if power is not
removed from them and their clocks are enabled.
To allow platform-specific code to decide whether or not to put
wakeup devices (and their PM domains) into low-power state during
system-wide transitions, such as system suspend, introduce a new
generic PM domain callback, .active_wakeup(), that will be used
during the "noirq" phase of system suspend and hibernation (after
image creation) to decide what to do with wakeup devices.
Specifically, if this callback is present and returns "true", the
generic PM domain code will not execute .stop_device() for the
given wakeup device and its PM domain won't be powered off.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Kevin Hilman <khilman@ti.com>
2011-07-02 03:13:29 +07:00
|
|
|
return 0;
|
|
|
|
|
2016-05-30 16:33:14 +07:00
|
|
|
if (genpd->dev_ops.stop && genpd->dev_ops.start) {
|
|
|
|
ret = pm_runtime_force_suspend(dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_lock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
genpd->suspended_count++;
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_sync_power_off(genpd, true, 0);
|
|
|
|
genpd_unlock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2012-01-30 02:39:02 +07:00
|
|
|
* pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
|
2011-07-02 03:13:19 +07:00
|
|
|
* @dev: Device to resume.
|
|
|
|
*
|
2012-01-30 02:39:02 +07:00
|
|
|
* Restore power to the device's PM domain, if necessary, and start the device.
|
2011-07-02 03:13:19 +07:00
|
|
|
*/
|
|
|
|
static int pm_genpd_resume_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-05-30 16:33:14 +07:00
|
|
|
int ret = 0;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
|
2011-07-02 03:13:19 +07:00
|
|
|
return 0;
|
|
|
|
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_lock(genpd);
|
|
|
|
genpd_sync_power_on(genpd, true, 0);
|
2011-07-02 03:13:19 +07:00
|
|
|
genpd->suspended_count--;
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_unlock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2016-05-30 16:33:14 +07:00
|
|
|
if (genpd->dev_ops.stop && genpd->dev_ops.start)
|
|
|
|
ret = pm_runtime_force_resume(dev);
|
|
|
|
|
|
|
|
return ret;
|
2011-07-02 03:13:19 +07:00
|
|
|
}
|
|
|
|
|
2012-01-30 02:39:02 +07:00
|
|
|
/**
|
|
|
|
* pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
|
2011-07-02 03:13:19 +07:00
|
|
|
* @dev: Device to freeze.
|
|
|
|
*
|
|
|
|
* Carry out a late freeze of a device under the assumption that its
|
|
|
|
* pm_domain field points to the domain member of an object of type
|
|
|
|
* struct generic_pm_domain representing a power domain consisting of I/O
|
|
|
|
* devices.
|
|
|
|
*/
|
|
|
|
static int pm_genpd_freeze_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-05-30 16:33:14 +07:00
|
|
|
int ret = 0;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-05-30 16:33:14 +07:00
|
|
|
if (genpd->dev_ops.stop && genpd->dev_ops.start)
|
|
|
|
ret = pm_runtime_force_suspend(dev);
|
|
|
|
|
|
|
|
return ret;
|
2012-01-30 02:39:02 +07:00
|
|
|
}
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2012-01-30 02:39:02 +07:00
|
|
|
/**
|
|
|
|
* pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
|
|
|
|
* @dev: Device to thaw.
|
|
|
|
*
|
|
|
|
* Start the device, unless power has been removed from the domain already
|
|
|
|
* before the system transition.
|
|
|
|
*/
|
|
|
|
static int pm_genpd_thaw_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-05-30 16:33:14 +07:00
|
|
|
int ret = 0;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2012-01-30 02:39:02 +07:00
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2012-01-30 02:39:02 +07:00
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-05-30 16:33:14 +07:00
|
|
|
if (genpd->dev_ops.stop && genpd->dev_ops.start)
|
|
|
|
ret = pm_runtime_force_resume(dev);
|
|
|
|
|
|
|
|
return ret;
|
2011-07-02 03:13:19 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2012-01-30 02:39:02 +07:00
|
|
|
* pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
|
2011-07-02 03:13:19 +07:00
|
|
|
* @dev: Device to resume.
|
|
|
|
*
|
2012-01-30 02:39:02 +07:00
|
|
|
* Make sure the domain will be in the same power state as before the
|
|
|
|
* hibernation the system is resuming from and start the device if necessary.
|
2011-07-02 03:13:19 +07:00
|
|
|
*/
|
|
|
|
static int pm_genpd_restore_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-05-30 16:33:14 +07:00
|
|
|
int ret = 0;
|
2011-07-02 03:13:19 +07:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
2012-03-14 04:39:37 +07:00
|
|
|
* At this point suspended_count == 0 means we are being run for the
|
|
|
|
* first time for the given domain in the present cycle.
|
2011-07-02 03:13:19 +07:00
|
|
|
*/
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_lock(genpd);
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
if (genpd->suspended_count++ == 0)
|
2011-07-02 03:13:19 +07:00
|
|
|
/*
|
2012-03-14 04:39:37 +07:00
|
|
|
* The boot kernel might put the domain into arbitrary state,
|
2016-12-08 20:45:20 +07:00
|
|
|
* so make it appear as powered off to genpd_sync_power_on(),
|
2012-08-06 06:39:16 +07:00
|
|
|
* so that it tries to power it on in case it was really off.
|
2011-07-02 03:13:19 +07:00
|
|
|
*/
|
2012-03-14 04:39:37 +07:00
|
|
|
genpd->status = GPD_STATE_POWER_OFF;
|
2012-03-19 16:38:14 +07:00
|
|
|
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_sync_power_on(genpd, true, 0);
|
|
|
|
genpd_unlock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2016-05-30 16:33:14 +07:00
|
|
|
if (genpd->dev_ops.stop && genpd->dev_ops.start)
|
|
|
|
ret = pm_runtime_force_resume(dev);
|
|
|
|
|
|
|
|
return ret;
|
2011-07-02 03:13:19 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* pm_genpd_complete - Complete power transition of a device in a power domain.
|
|
|
|
* @dev: Device to complete the transition of.
|
|
|
|
*
|
|
|
|
* Complete a power transition of a device (during a system-wide power
|
|
|
|
* transition) under the assumption that its pm_domain field points to the
|
|
|
|
* domain member of an object of type struct generic_pm_domain representing
|
|
|
|
* a power domain consisting of I/O devices.
|
|
|
|
*/
|
|
|
|
static void pm_genpd_complete(struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(genpd))
|
|
|
|
return;
|
|
|
|
|
PM / Domains: Allow runtime PM during system PM phases
In cases when a PM domain isn't powered off when genpd's ->prepare()
callback is invoked, genpd runtime resumes and disables runtime PM for the
device. This behaviour was needed when genpd managed intermediate states
during the power off sequence, as to maintain proper low power states of
devices during system PM suspend/resume.
Commit ba2bbfbf6307 (PM / Domains: Remove intermediate states from the
power off sequence), enables genpd to improve its behaviour in that
respect.
The PM core disables runtime PM at __device_suspend_late() before it calls
a system PM "late" callback for a device. When resuming a device, after a
corresponding "early" callback has been invoked, the PM core re-enables
runtime PM.
By changing genpd to allow runtime PM according to the same system PM
phases as the PM core, devices can be runtime resumed by their
corresponding subsystem/driver when really needed.
In this way, genpd no longer need to runtime resume the device from its
->prepare() callback. In most cases that avoids unnecessary and energy-
wasting operations of runtime resuming devices that have nothing to do,
only to runtime suspend them shortly after.
Although, because of changing this behaviour in genpd and due to that
genpd powers on the PM domain unconditionally in the system PM resume
"noirq" phase, it could potentially cause a PM domain to stay powered
on even if it's unused after the system has resumed. To avoid this,
schedule a power off work when genpd's system PM ->complete() callback
has been invoked for the last device in the PM domain.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:33:13 +07:00
|
|
|
pm_generic_complete(dev);
|
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
PM / Domains: Allow genpd to power on during system PM phases
If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase is finalized for all devices in the PM domain, genpd again
allows it to be powered on.
This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.
To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.
This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it's also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.
More precisely, the so called SMU block powers on the ACP at system PM
resume, unconditionally if it's being used or not. This may lead to that
genpd's internal status of the power state, may not correctly reflect the
power state of the HW after a system PM resume.
Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer have to deal with this
corner case. So let's just drop the related code in this driver.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:43:07 +07:00
|
|
|
genpd->prepared_count--;
|
PM / Domains: Allow runtime PM during system PM phases
In cases when a PM domain isn't powered off when genpd's ->prepare()
callback is invoked, genpd runtime resumes and disables runtime PM for the
device. This behaviour was needed when genpd managed intermediate states
during the power off sequence, as to maintain proper low power states of
devices during system PM suspend/resume.
Commit ba2bbfbf6307 (PM / Domains: Remove intermediate states from the
power off sequence), enables genpd to improve its behaviour in that
respect.
The PM core disables runtime PM at __device_suspend_late() before it calls
a system PM "late" callback for a device. When resuming a device, after a
corresponding "early" callback has been invoked, the PM core re-enables
runtime PM.
By changing genpd to allow runtime PM according to the same system PM
phases as the PM core, devices can be runtime resumed by their
corresponding subsystem/driver when really needed.
In this way, genpd no longer need to runtime resume the device from its
->prepare() callback. In most cases that avoids unnecessary and energy-
wasting operations of runtime resuming devices that have nothing to do,
only to runtime suspend them shortly after.
Although, because of changing this behaviour in genpd and due to that
genpd powers on the PM domain unconditionally in the system PM resume
"noirq" phase, it could potentially cause a PM domain to stay powered
on even if it's unused after the system has resumed. To avoid this,
schedule a power off work when genpd's system PM ->complete() callback
has been invoked for the last device in the PM domain.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-05-30 16:33:13 +07:00
|
|
|
if (!genpd->prepared_count)
|
|
|
|
genpd_queue_power_off_work(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
2011-07-02 03:13:19 +07:00
|
|
|
}
|
|
|
|
|
2012-08-06 06:39:57 +07:00
|
|
|
/**
|
2014-09-03 17:52:24 +07:00
|
|
|
* genpd_syscore_switch - Switch power during system core suspend or resume.
|
2012-08-06 06:39:57 +07:00
|
|
|
* @dev: Device that normally is marked as "always on" to switch power for.
|
|
|
|
*
|
|
|
|
* This routine may only be called during the system core (syscore) suspend or
|
|
|
|
* resume phase for devices whose "always on" flags are set.
|
|
|
|
*/
|
2014-09-03 17:52:24 +07:00
|
|
|
static void genpd_syscore_switch(struct device *dev, bool suspend)
|
2012-08-06 06:39:57 +07:00
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
|
|
|
|
|
|
|
genpd = dev_to_genpd(dev);
|
|
|
|
if (!pm_genpd_present(genpd))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (suspend) {
|
|
|
|
genpd->suspended_count++;
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_sync_power_off(genpd, false, 0);
|
2012-08-06 06:39:57 +07:00
|
|
|
} else {
|
2017-02-08 19:39:00 +07:00
|
|
|
genpd_sync_power_on(genpd, false, 0);
|
2012-08-06 06:39:57 +07:00
|
|
|
genpd->suspended_count--;
|
|
|
|
}
|
|
|
|
}
|
2014-09-03 17:52:24 +07:00
|
|
|
|
|
|
|
void pm_genpd_syscore_poweroff(struct device *dev)
|
|
|
|
{
|
|
|
|
genpd_syscore_switch(dev, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
|
|
|
|
|
|
|
|
void pm_genpd_syscore_poweron(struct device *dev)
|
|
|
|
{
|
|
|
|
genpd_syscore_switch(dev, false);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
|
2012-08-06 06:39:57 +07:00
|
|
|
|
2014-11-28 04:38:05 +07:00
|
|
|
#else /* !CONFIG_PM_SLEEP */

/* Without CONFIG_PM_SLEEP the system PM callbacks are simply not provided. */
#define pm_genpd_prepare		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
|
|
|
|
|
/**
 * genpd_alloc_dev_data - Allocate and initialize per-device genpd data.
 * @dev: Device to allocate the data for.
 * @genpd: PM domain the device is about to be attached to.
 * @td: Optional timing data to seed the device's gpd_timing_data with.
 *
 * Allocates a struct generic_pm_domain_data for @dev, installs it as the
 * device's subsystem domain_data (under dev->power.lock) and points the
 * device's pm_domain at @genpd.
 *
 * Returns the new data on success or an ERR_PTR() on failure (-ENOMEM on
 * allocation failure, -EINVAL if the device already has domain data).
 */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	/* Force the governor to recompute on first use. */
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	/* Refuse to attach a device that already belongs to a domain. */
	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}
|
|
|
|
|
/**
 * genpd_free_dev_data - Undo genpd_alloc_dev_data() for a device.
 * @dev: Device whose genpd data is being released.
 * @gpd_data: Data previously returned by genpd_alloc_dev_data().
 *
 * Clears the device's pm_domain pointer and subsystem domain_data (under
 * dev->power.lock), then frees @gpd_data and drops the subsys_data
 * reference taken at allocation time.
 */
static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}
|
|
|
|
|
/**
 * genpd_add_device - Add a device to an I/O PM domain (locked variant).
 * @dev: Device to be added.
 * @genpd: PM domain to add the device to.
 * @td: Optional timing data for the device.
 *
 * Caller context: callers are expected to hold gpd_list_lock (see
 * __pm_genpd_add_device()) — TODO confirm against the other callers.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EAGAIN while a system
 * suspend transition is in progress (prepared_count > 0), or the error
 * returned by data allocation or the domain's attach_dev() callback.
 */
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	/* Devices cannot be added while a suspend transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	/* New member device invalidates the cached governor result. */
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	/*
	 * The PM QoS notifier is registered (or the data torn down) outside
	 * the domain lock to avoid lock-ordering issues with the QoS core.
	 */
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
|
2016-09-12 18:01:11 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* __pm_genpd_add_device - Add a device to an I/O PM domain.
|
|
|
|
* @genpd: PM domain to add the device to.
|
|
|
|
* @dev: Device to be added.
|
|
|
|
* @td: Set of PM QoS timing parameters to attach to the device.
|
|
|
|
*/
|
|
|
|
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
|
|
|
|
struct gpd_timing_data *td)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
ret = genpd_add_device(genpd, dev, td);
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2015-11-18 02:42:00 +07:00
|
|
|
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
|
2011-07-02 03:12:45 +07:00
|
|
|
|
/**
 * genpd_remove_device - Remove a device from an I/O PM domain (locked variant).
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Unregisters the device's PM QoS notifier, detaches the device from the
 * domain and frees its per-device genpd data.
 *
 * Returns 0 on success or -EAGAIN while a system suspend transition is in
 * progress (prepared_count > 0); in the latter case the QoS notifier is
 * re-registered so the device remains fully attached.
 */
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	/* Remove the notifier before taking the domain lock. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	/* Devices cannot be removed while a suspend transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	/* Removing a member device invalidates the cached governor result. */
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	/* Removal was refused: restore the notifier removed above. */
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
|
2016-09-21 20:38:50 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
|
|
|
|
* @genpd: PM domain to remove the device from.
|
|
|
|
* @dev: Device to be removed.
|
|
|
|
*/
|
|
|
|
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
|
|
|
|
struct device *dev)
|
|
|
|
{
|
|
|
|
if (!genpd || genpd != genpd_lookup_dev(dev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return genpd_remove_device(genpd, dev);
|
|
|
|
}
|
2015-11-18 02:42:00 +07:00
|
|
|
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
|
2011-07-02 03:12:45 +07:00
|
|
|
|
/**
 * genpd_add_subdomain - Add a subdomain to an I/O PM domain (locked variant).
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 *
 * Links @subdomain under @genpd via a newly allocated gpd_link. Fails with
 * -EINVAL on bad arguments, an IRQ-safe mismatch, a powered-off master with
 * a powered-on subdomain, or a pre-existing link; -ENOMEM on allocation
 * failure.
 */
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	/* Allocate outside the locks; freed below if linking fails. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* Lock order: subdomain first, then master (nested annotation). */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* A powered-off master cannot parent a powered-on subdomain. */
	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject duplicate links. */
	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}
|
2016-09-12 18:01:11 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
|
|
|
|
* @genpd: Master PM domain to add the subdomain to.
|
|
|
|
* @subdomain: Subdomain to be added.
|
|
|
|
*/
|
|
|
|
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
|
|
|
|
struct generic_pm_domain *subdomain)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
ret = genpd_add_subdomain(genpd, subdomain);
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2015-10-02 02:22:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
|
2011-07-02 03:12:45 +07:00
|
|
|
|
|
|
|
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Unlinks @subdomain from @genpd and frees the connecting gpd_link.
 * Returns 0 on success, -EINVAL on bad arguments or if no link exists,
 * and -EBUSY if @subdomain still has its own subdomains or devices.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	/* Lock order: subdomain first, then master (nested annotation). */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* The subdomain must itself be a leaf with no attached devices. */
	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
|
2011-07-02 03:12:45 +07:00
|
|
|
|
2016-10-15 00:47:49 +07:00
|
|
|
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
|
|
|
|
{
|
|
|
|
struct genpd_power_state *state;
|
|
|
|
|
|
|
|
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
|
|
|
if (!state)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
genpd->states = state;
|
|
|
|
genpd->state_count = 1;
|
|
|
|
genpd->free = state;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-15 00:47:55 +07:00
|
|
|
static void genpd_lock_init(struct generic_pm_domain *genpd)
|
|
|
|
{
|
|
|
|
if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
|
|
|
|
spin_lock_init(&genpd->slock);
|
|
|
|
genpd->lock_ops = &genpd_spin_ops;
|
|
|
|
} else {
|
|
|
|
mutex_init(&genpd->mlock);
|
|
|
|
genpd->lock_ops = &genpd_mtx_ops;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Sets up the domain's lists, lock, PM callbacks and bookkeeping fields,
 * gives it a default one-entry power-state table if none was declared, and
 * adds it to the global gpd_list.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	/* Force the governor to recompute on first use. */
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	/* Poweroff (hibernation) reuses the suspend_noirq path. */
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.complete = pm_genpd_complete;

	/* Optionally manage device clocks through the PM clock framework. */
	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
|
2014-09-20 01:27:36 +07:00
|
|
|
|
/**
 * genpd_remove - Remove a generic PM domain (locked variant).
 * @genpd: PM domain to be removed.
 *
 * Caller context: expected to run under gpd_list_lock (see
 * pm_genpd_remove()) — TODO confirm against other callers.
 *
 * Refuses removal (-EBUSY) while the domain still has a DT provider, any
 * subdomains, or attached devices; otherwise unlinks it from its masters,
 * removes it from gpd_list and frees the default state table, if any.
 */
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	/* Detach this domain from all of its masters. */
	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	/* Free the default state table allocated by genpd_set_default_power_state(). */
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* pm_genpd_remove - Remove a generic I/O PM domain
|
|
|
|
* @genpd: Pointer to PM domain that is to be removed.
|
|
|
|
*
|
|
|
|
* To remove the PM domain, this function:
|
|
|
|
* - Removes the PM domain as a subdomain to any parent domains,
|
|
|
|
* if it was added.
|
|
|
|
* - Removes the PM domain from the list of registered PM domains.
|
|
|
|
*
|
|
|
|
* The PM domain will only be removed, if the associated provider has
|
|
|
|
* been removed, it is not a parent to any other PM domain and has no
|
|
|
|
* devices associated with it.
|
|
|
|
*/
|
|
|
|
int pm_genpd_remove(struct generic_pm_domain *genpd)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
ret = genpd_remove(genpd);
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pm_genpd_remove);
|
|
|
|
|
2014-09-20 01:27:36 +07:00
|
|
|
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
|
/* Provider-specific callback mapping an OF specifier to a PM domain. */
typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);
|
|
|
|
/**
|
2016-09-12 18:01:09 +07:00
|
|
|
* genpd_xlate_simple() - Xlate function for direct node-domain mapping
|
2014-09-20 01:27:36 +07:00
|
|
|
* @genpdspec: OF phandle args to map into a PM domain
|
|
|
|
* @data: xlate function private data - pointer to struct generic_pm_domain
|
|
|
|
*
|
|
|
|
* This is a generic xlate function that can be used to model PM domains that
|
|
|
|
* have their own device tree nodes. The private data of xlate function needs
|
|
|
|
* to be a valid pointer to struct generic_pm_domain.
|
|
|
|
*/
|
2016-09-12 18:01:09 +07:00
|
|
|
static struct generic_pm_domain *genpd_xlate_simple(
|
2014-09-20 01:27:36 +07:00
|
|
|
struct of_phandle_args *genpdspec,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
if (genpdspec->args_count != 0)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-09-12 18:01:09 +07:00
|
|
|
* genpd_xlate_onecell() - Xlate function using a single index.
|
2014-09-20 01:27:36 +07:00
|
|
|
* @genpdspec: OF phandle args to map into a PM domain
|
|
|
|
* @data: xlate function private data - pointer to struct genpd_onecell_data
|
|
|
|
*
|
|
|
|
* This is a generic xlate function that can be used to model simple PM domain
|
|
|
|
* controllers that have one device tree node and provide multiple PM domains.
|
|
|
|
* A single cell is used as an index into an array of PM domains specified in
|
|
|
|
* the genpd_onecell_data struct when registering the provider.
|
|
|
|
*/
|
2016-09-12 18:01:09 +07:00
|
|
|
static struct generic_pm_domain *genpd_xlate_onecell(
|
2014-09-20 01:27:36 +07:00
|
|
|
struct of_phandle_args *genpdspec,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct genpd_onecell_data *genpd_data = data;
|
|
|
|
unsigned int idx = genpdspec->args[0];
|
|
|
|
|
|
|
|
if (genpdspec->args_count != 1)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
if (idx >= genpd_data->num_domains) {
|
|
|
|
pr_err("%s: invalid domain index %u\n", __func__, idx);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!genpd_data->domains[idx])
|
|
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
return genpd_data->domains[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-09-12 18:01:09 +07:00
|
|
|
* genpd_add_provider() - Register a PM domain provider for a node
|
2014-09-20 01:27:36 +07:00
|
|
|
* @np: Device node pointer associated with the PM domain provider.
|
|
|
|
* @xlate: Callback for decoding PM domain from phandle arguments.
|
|
|
|
* @data: Context pointer for @xlate callback.
|
|
|
|
*/
|
2016-09-12 18:01:09 +07:00
|
|
|
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
|
|
|
|
void *data)
|
2014-09-20 01:27:36 +07:00
|
|
|
{
|
|
|
|
struct of_genpd_provider *cp;
|
|
|
|
|
|
|
|
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
|
|
|
|
if (!cp)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
cp->node = of_node_get(np);
|
|
|
|
cp->data = data;
|
|
|
|
cp->xlate = xlate;
|
|
|
|
|
|
|
|
mutex_lock(&of_genpd_mutex);
|
|
|
|
list_add(&cp->link, &of_genpd_providers);
|
|
|
|
mutex_unlock(&of_genpd_mutex);
|
|
|
|
pr_debug("Added domain provider from %s\n", np->full_name);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2016-09-12 18:01:09 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* of_genpd_add_provider_simple() - Register a simple PM domain provider
|
|
|
|
* @np: Device node pointer associated with the PM domain provider.
|
|
|
|
* @genpd: Pointer to PM domain associated with the PM domain provider.
|
|
|
|
*/
|
|
|
|
int of_genpd_add_provider_simple(struct device_node *np,
|
|
|
|
struct generic_pm_domain *genpd)
|
|
|
|
{
|
2016-09-12 18:01:10 +07:00
|
|
|
int ret = -EINVAL;
|
|
|
|
|
|
|
|
if (!np || !genpd)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
|
|
|
|
if (pm_genpd_present(genpd))
|
|
|
|
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
|
|
|
|
|
2016-09-12 18:01:12 +07:00
|
|
|
if (!ret) {
|
|
|
|
genpd->provider = &np->fwnode;
|
|
|
|
genpd->has_provider = true;
|
|
|
|
}
|
|
|
|
|
2016-09-12 18:01:10 +07:00
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
2016-09-12 18:01:09 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* of_genpd_add_provider_onecell() - Register a onecell PM domain provider
|
|
|
|
* @np: Device node pointer associated with the PM domain provider.
|
|
|
|
* @data: Pointer to the data associated with the PM domain provider.
|
|
|
|
*/
|
|
|
|
int of_genpd_add_provider_onecell(struct device_node *np,
|
|
|
|
struct genpd_onecell_data *data)
|
|
|
|
{
|
2016-09-12 18:01:10 +07:00
|
|
|
unsigned int i;
|
2016-09-12 18:01:12 +07:00
|
|
|
int ret = -EINVAL;
|
2016-09-12 18:01:10 +07:00
|
|
|
|
|
|
|
if (!np || !data)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
|
|
|
|
for (i = 0; i < data->num_domains; i++) {
|
2016-09-15 19:05:23 +07:00
|
|
|
if (!data->domains[i])
|
|
|
|
continue;
|
2016-09-12 18:01:12 +07:00
|
|
|
if (!pm_genpd_present(data->domains[i]))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
data->domains[i]->provider = &np->fwnode;
|
|
|
|
data->domains[i]->has_provider = true;
|
2016-09-12 18:01:10 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = genpd_add_provider(np, genpd_xlate_onecell, data);
|
2016-09-12 18:01:12 +07:00
|
|
|
if (ret < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
while (i--) {
|
2016-09-15 19:05:23 +07:00
|
|
|
if (!data->domains[i])
|
|
|
|
continue;
|
2016-09-12 18:01:12 +07:00
|
|
|
data->domains[i]->provider = NULL;
|
|
|
|
data->domains[i]->has_provider = false;
|
|
|
|
}
|
2016-09-12 18:01:10 +07:00
|
|
|
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
2016-09-12 18:01:09 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
|
2014-09-20 01:27:36 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* of_genpd_del_provider() - Remove a previously registered PM domain provider
|
|
|
|
* @np: Device node pointer associated with the PM domain provider
|
|
|
|
*/
|
|
|
|
void of_genpd_del_provider(struct device_node *np)
|
|
|
|
{
|
|
|
|
struct of_genpd_provider *cp;
|
2016-09-12 18:01:12 +07:00
|
|
|
struct generic_pm_domain *gpd;
|
2014-09-20 01:27:36 +07:00
|
|
|
|
2016-09-12 18:01:12 +07:00
|
|
|
mutex_lock(&gpd_list_lock);
|
2014-09-20 01:27:36 +07:00
|
|
|
mutex_lock(&of_genpd_mutex);
|
|
|
|
list_for_each_entry(cp, &of_genpd_providers, link) {
|
|
|
|
if (cp->node == np) {
|
2016-09-12 18:01:12 +07:00
|
|
|
/*
|
|
|
|
* For each PM domain associated with the
|
|
|
|
* provider, set the 'has_provider' to false
|
|
|
|
* so that the PM domain can be safely removed.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(gpd, &gpd_list, gpd_list_node)
|
|
|
|
if (gpd->provider == &np->fwnode)
|
|
|
|
gpd->has_provider = false;
|
|
|
|
|
2014-09-20 01:27:36 +07:00
|
|
|
list_del(&cp->link);
|
|
|
|
of_node_put(cp->node);
|
|
|
|
kfree(cp);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&of_genpd_mutex);
|
2016-09-12 18:01:12 +07:00
|
|
|
mutex_unlock(&gpd_list_lock);
|
2014-09-20 01:27:36 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
|
|
|
|
|
|
|
|
/**
|
2016-09-12 18:01:08 +07:00
|
|
|
* genpd_get_from_provider() - Look-up PM domain
|
2014-09-20 01:27:36 +07:00
|
|
|
* @genpdspec: OF phandle args to use for look-up
|
|
|
|
*
|
|
|
|
* Looks for a PM domain provider under the node specified by @genpdspec and if
|
|
|
|
* found, uses xlate function of the provider to map phandle args to a PM
|
|
|
|
* domain.
|
|
|
|
*
|
|
|
|
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
|
|
|
|
* on failure.
|
|
|
|
*/
|
2016-09-12 18:01:08 +07:00
|
|
|
static struct generic_pm_domain *genpd_get_from_provider(
|
2014-09-20 01:27:36 +07:00
|
|
|
struct of_phandle_args *genpdspec)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
|
|
|
|
struct of_genpd_provider *provider;
|
|
|
|
|
2016-03-04 17:55:15 +07:00
|
|
|
if (!genpdspec)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2014-09-20 01:27:36 +07:00
|
|
|
mutex_lock(&of_genpd_mutex);
|
|
|
|
|
|
|
|
/* Check if we have such a provider in our array */
|
|
|
|
list_for_each_entry(provider, &of_genpd_providers, link) {
|
|
|
|
if (provider->node == genpdspec->np)
|
|
|
|
genpd = provider->xlate(genpdspec, provider->data);
|
|
|
|
if (!IS_ERR(genpd))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_unlock(&of_genpd_mutex);
|
|
|
|
|
|
|
|
return genpd;
|
|
|
|
}
|
|
|
|
|
2016-09-12 18:01:05 +07:00
|
|
|
/**
|
|
|
|
* of_genpd_add_device() - Add a device to an I/O PM domain
|
|
|
|
* @genpdspec: OF phandle args to use for look-up PM domain
|
|
|
|
* @dev: Device to be added.
|
|
|
|
*
|
|
|
|
* Looks-up an I/O PM domain based upon phandle args provided and adds
|
|
|
|
* the device to the PM domain. Returns a negative error code on failure.
|
|
|
|
*/
|
|
|
|
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd;
|
2016-09-12 18:01:11 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
2016-09-12 18:01:05 +07:00
|
|
|
|
2016-09-12 18:01:08 +07:00
|
|
|
genpd = genpd_get_from_provider(genpdspec);
|
2016-09-12 18:01:11 +07:00
|
|
|
if (IS_ERR(genpd)) {
|
|
|
|
ret = PTR_ERR(genpd);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = genpd_add_device(genpd, dev, NULL);
|
2016-09-12 18:01:05 +07:00
|
|
|
|
2016-09-12 18:01:11 +07:00
|
|
|
out:
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
2016-09-12 18:01:05 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_add_device);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
|
|
|
|
* @parent_spec: OF phandle args to use for parent PM domain look-up
|
|
|
|
* @subdomain_spec: OF phandle args to use for subdomain look-up
|
|
|
|
*
|
|
|
|
* Looks-up a parent PM domain and subdomain based upon phandle args
|
|
|
|
* provided and adds the subdomain to the parent PM domain. Returns a
|
|
|
|
* negative error code on failure.
|
|
|
|
*/
|
|
|
|
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
|
|
|
|
struct of_phandle_args *subdomain_spec)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *parent, *subdomain;
|
2016-09-12 18:01:11 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
2016-09-12 18:01:05 +07:00
|
|
|
|
2016-09-12 18:01:08 +07:00
|
|
|
parent = genpd_get_from_provider(parent_spec);
|
2016-09-12 18:01:11 +07:00
|
|
|
if (IS_ERR(parent)) {
|
|
|
|
ret = PTR_ERR(parent);
|
|
|
|
goto out;
|
|
|
|
}
|
2016-09-12 18:01:05 +07:00
|
|
|
|
2016-09-12 18:01:08 +07:00
|
|
|
subdomain = genpd_get_from_provider(subdomain_spec);
|
2016-09-12 18:01:11 +07:00
|
|
|
if (IS_ERR(subdomain)) {
|
|
|
|
ret = PTR_ERR(subdomain);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = genpd_add_subdomain(parent, subdomain);
|
2016-09-12 18:01:05 +07:00
|
|
|
|
2016-09-12 18:01:11 +07:00
|
|
|
out:
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
2016-09-12 18:01:05 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
|
|
|
|
|
2016-09-12 18:01:14 +07:00
|
|
|
/**
|
|
|
|
* of_genpd_remove_last - Remove the last PM domain registered for a provider
|
|
|
|
* @provider: Pointer to device structure associated with provider
|
|
|
|
*
|
|
|
|
* Find the last PM domain that was added by a particular provider and
|
|
|
|
* remove this PM domain from the list of PM domains. The provider is
|
|
|
|
* identified by the 'provider' device structure that is passed. The PM
|
|
|
|
* domain will only be removed, if the provider associated with domain
|
|
|
|
* has been removed.
|
|
|
|
*
|
|
|
|
* Returns a valid pointer to struct generic_pm_domain on success or
|
|
|
|
* ERR_PTR() on failure.
|
|
|
|
*/
|
|
|
|
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (IS_ERR_OR_NULL(np))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
mutex_lock(&gpd_list_lock);
|
|
|
|
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
|
|
|
|
if (gpd->provider == &np->fwnode) {
|
|
|
|
ret = genpd_remove(gpd);
|
|
|
|
genpd = ret ? ERR_PTR(ret) : gpd;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return genpd;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
|
|
|
|
|
2014-09-20 01:27:36 +07:00
|
|
|
/**
|
|
|
|
* genpd_dev_pm_detach - Detach a device from its PM domain.
|
2015-08-27 16:17:00 +07:00
|
|
|
* @dev: Device to detach.
|
2014-09-20 01:27:36 +07:00
|
|
|
* @power_off: Currently not used
|
|
|
|
*
|
|
|
|
* Try to locate a corresponding generic PM domain, which the device was
|
|
|
|
* attached to previously. If such is found, the device is detached from it.
|
|
|
|
*/
|
|
|
|
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
|
|
|
|
{
|
2015-03-21 00:20:33 +07:00
|
|
|
struct generic_pm_domain *pd;
|
2015-06-26 16:14:14 +07:00
|
|
|
unsigned int i;
|
2014-09-20 01:27:36 +07:00
|
|
|
int ret = 0;
|
|
|
|
|
2016-09-21 20:38:50 +07:00
|
|
|
pd = dev_to_genpd(dev);
|
|
|
|
if (IS_ERR(pd))
|
2014-09-20 01:27:36 +07:00
|
|
|
return;
|
|
|
|
|
|
|
|
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
|
|
|
|
|
2015-06-26 16:14:14 +07:00
|
|
|
for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
|
2016-09-21 20:38:50 +07:00
|
|
|
ret = genpd_remove_device(pd, dev);
|
2014-09-20 01:27:36 +07:00
|
|
|
if (ret != -EAGAIN)
|
|
|
|
break;
|
2015-06-26 16:14:14 +07:00
|
|
|
|
|
|
|
mdelay(i);
|
2014-09-20 01:27:36 +07:00
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(dev, "failed to remove from PM domain %s: %d",
|
|
|
|
pd->name, ret);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if PM domain can be powered off after removing this device. */
|
|
|
|
genpd_queue_power_off_work(pd);
|
|
|
|
}
|
|
|
|
|
2015-03-20 21:55:12 +07:00
|
|
|
/*
 * genpd_dev_pm_sync - Queue a power-off check for the device's PM domain.
 * Does nothing if the device is not attached to a generic PM domain.
 */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}
|
|
|
|
|
2014-09-20 01:27:36 +07:00
|
|
|
/**
|
|
|
|
* genpd_dev_pm_attach - Attach a device to its PM domain using DT.
|
|
|
|
* @dev: Device to attach.
|
|
|
|
*
|
|
|
|
* Parse device's OF node to find a PM domain specifier. If such is found,
|
|
|
|
* attaches the device to retrieved pm_domain ops.
|
|
|
|
*
|
|
|
|
* Both generic and legacy Samsung-specific DT bindings are supported to keep
|
|
|
|
* backwards compatibility with existing DTBs.
|
|
|
|
*
|
2015-07-31 16:20:00 +07:00
|
|
|
* Returns 0 on successfully attached PM domain or negative error code. Note
|
|
|
|
* that if a power-domain exists for the device, but it cannot be found or
|
|
|
|
* turned on, then return -EPROBE_DEFER to ensure that the device is not
|
|
|
|
* probed and to re-try again later.
|
2014-09-20 01:27:36 +07:00
|
|
|
*/
|
|
|
|
int genpd_dev_pm_attach(struct device *dev)
|
|
|
|
{
|
|
|
|
struct of_phandle_args pd_args;
|
|
|
|
struct generic_pm_domain *pd;
|
2015-06-26 16:14:14 +07:00
|
|
|
unsigned int i;
|
2014-09-20 01:27:36 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!dev->of_node)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (dev->pm_domain)
|
|
|
|
return -EEXIST;
|
|
|
|
|
|
|
|
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
|
|
|
|
"#power-domain-cells", 0, &pd_args);
|
|
|
|
if (ret < 0) {
|
|
|
|
if (ret != -ENOENT)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try legacy Samsung-specific bindings
|
|
|
|
* (for backwards compatibility of DT ABI)
|
|
|
|
*/
|
|
|
|
pd_args.args_count = 0;
|
|
|
|
pd_args.np = of_parse_phandle(dev->of_node,
|
|
|
|
"samsung,power-domain", 0);
|
|
|
|
if (!pd_args.np)
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2016-09-12 18:01:11 +07:00
|
|
|
mutex_lock(&gpd_list_lock);
|
2016-09-12 18:01:08 +07:00
|
|
|
pd = genpd_get_from_provider(&pd_args);
|
2015-12-02 00:39:31 +07:00
|
|
|
of_node_put(pd_args.np);
|
2014-09-20 01:27:36 +07:00
|
|
|
if (IS_ERR(pd)) {
|
2016-09-12 18:01:11 +07:00
|
|
|
mutex_unlock(&gpd_list_lock);
|
2014-09-20 01:27:36 +07:00
|
|
|
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
|
|
|
|
__func__, PTR_ERR(pd));
|
2015-07-31 16:20:00 +07:00
|
|
|
return -EPROBE_DEFER;
|
2014-09-20 01:27:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
|
|
|
|
|
2015-06-26 16:14:14 +07:00
|
|
|
for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
|
2016-09-12 18:01:11 +07:00
|
|
|
ret = genpd_add_device(pd, dev, NULL);
|
2014-09-20 01:27:36 +07:00
|
|
|
if (ret != -EAGAIN)
|
|
|
|
break;
|
2015-06-26 16:14:14 +07:00
|
|
|
|
|
|
|
mdelay(i);
|
2014-09-20 01:27:36 +07:00
|
|
|
cond_resched();
|
|
|
|
}
|
2016-09-12 18:01:11 +07:00
|
|
|
mutex_unlock(&gpd_list_lock);
|
2014-09-20 01:27:36 +07:00
|
|
|
|
|
|
|
if (ret < 0) {
|
2016-11-30 19:24:56 +07:00
|
|
|
if (ret != -EPROBE_DEFER)
|
|
|
|
dev_err(dev, "failed to add to PM domain %s: %d",
|
|
|
|
pd->name, ret);
|
2015-07-31 16:20:00 +07:00
|
|
|
goto out;
|
2014-09-20 01:27:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
dev->pm_domain->detach = genpd_dev_pm_detach;
|
2015-03-20 21:55:12 +07:00
|
|
|
dev->pm_domain->sync = genpd_dev_pm_sync;
|
2014-09-20 01:27:36 +07:00
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_lock(pd);
|
2016-12-08 20:45:20 +07:00
|
|
|
ret = genpd_power_on(pd, 0);
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(pd);
|
2015-07-31 16:20:00 +07:00
|
|
|
out:
|
|
|
|
return ret ? -EPROBE_DEFER : 0;
|
2014-09-20 01:27:36 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
|
2016-10-15 00:47:51 +07:00
|
|
|
|
|
|
|
/* DT compatibles accepted as genpd idle-state nodes. */
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};
|
|
|
|
|
|
|
|
static int genpd_parse_state(struct genpd_power_state *genpd_state,
|
|
|
|
struct device_node *state_node)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
u32 residency;
|
|
|
|
u32 entry_latency, exit_latency;
|
|
|
|
const struct of_device_id *match_id;
|
|
|
|
|
|
|
|
match_id = of_match_node(idle_state_match, state_node);
|
|
|
|
if (!match_id)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
err = of_property_read_u32(state_node, "entry-latency-us",
|
|
|
|
&entry_latency);
|
|
|
|
if (err) {
|
|
|
|
pr_debug(" * %s missing entry-latency-us property\n",
|
|
|
|
state_node->full_name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = of_property_read_u32(state_node, "exit-latency-us",
|
|
|
|
&exit_latency);
|
|
|
|
if (err) {
|
|
|
|
pr_debug(" * %s missing exit-latency-us property\n",
|
|
|
|
state_node->full_name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = of_property_read_u32(state_node, "min-residency-us", &residency);
|
|
|
|
if (!err)
|
|
|
|
genpd_state->residency_ns = 1000 * residency;
|
|
|
|
|
|
|
|
genpd_state->power_on_latency_ns = 1000 * exit_latency;
|
|
|
|
genpd_state->power_off_latency_ns = 1000 * entry_latency;
|
2016-10-15 00:47:52 +07:00
|
|
|
genpd_state->fwnode = &state_node->fwnode;
|
2016-10-15 00:47:51 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* of_genpd_parse_idle_states: Return array of idle states for the genpd.
|
|
|
|
*
|
|
|
|
* @dn: The genpd device node
|
|
|
|
* @states: The pointer to which the state array will be saved.
|
|
|
|
* @n: The count of elements in the array returned from this function.
|
|
|
|
*
|
|
|
|
* Returns the device states parsed from the OF node. The memory for the states
|
|
|
|
* is allocated by this function and is the responsibility of the caller to
|
|
|
|
* free the memory after use.
|
|
|
|
*/
|
|
|
|
int of_genpd_parse_idle_states(struct device_node *dn,
|
|
|
|
struct genpd_power_state **states, int *n)
|
|
|
|
{
|
|
|
|
struct genpd_power_state *st;
|
|
|
|
struct device_node *np;
|
|
|
|
int i = 0;
|
|
|
|
int err, ret;
|
|
|
|
int count;
|
|
|
|
struct of_phandle_iterator it;
|
|
|
|
|
|
|
|
count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
|
2016-10-25 23:33:27 +07:00
|
|
|
if (count <= 0)
|
2016-10-15 00:47:51 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
st = kcalloc(count, sizeof(*st), GFP_KERNEL);
|
|
|
|
if (!st)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* Loop over the phandles until all the requested entry is found */
|
|
|
|
of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
|
|
|
|
np = it.node;
|
|
|
|
ret = genpd_parse_state(&st[i++], np);
|
|
|
|
if (ret) {
|
|
|
|
pr_err
|
|
|
|
("Parsing idle state node %s failed with err %d\n",
|
|
|
|
np->full_name, ret);
|
|
|
|
of_node_put(np);
|
|
|
|
kfree(st);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*n = count;
|
|
|
|
*states = st;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
|
|
|
|
|
2014-11-28 04:38:05 +07:00
|
|
|
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
|
|
|
|
|
|
|
|
/*** debugfs support ***/
|
|
|
|
|
2016-08-11 17:40:05 +07:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
#include <linux/pm.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kobject.h>
|
|
|
|
static struct dentry *pm_genpd_debugfs_dir;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO: This function is a slightly modified version of rtpm_status_show
|
2014-11-28 04:38:05 +07:00
|
|
|
* from sysfs.c, so generalize it.
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
*/
|
|
|
|
static void rtpm_status_str(struct seq_file *s, struct device *dev)
|
|
|
|
{
|
|
|
|
static const char * const status_lookup[] = {
|
|
|
|
[RPM_ACTIVE] = "active",
|
|
|
|
[RPM_RESUMING] = "resuming",
|
|
|
|
[RPM_SUSPENDED] = "suspended",
|
|
|
|
[RPM_SUSPENDING] = "suspending"
|
|
|
|
};
|
|
|
|
const char *p = "";
|
|
|
|
|
|
|
|
if (dev->power.runtime_error)
|
|
|
|
p = "error";
|
|
|
|
else if (dev->power.disable_depth)
|
|
|
|
p = "unsupported";
|
|
|
|
else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
|
|
|
|
p = status_lookup[dev->power.runtime_status];
|
|
|
|
else
|
|
|
|
WARN_ON(1);
|
|
|
|
|
|
|
|
seq_puts(s, p);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pm_genpd_summary_one(struct seq_file *s,
|
2015-03-03 02:24:28 +07:00
|
|
|
struct generic_pm_domain *genpd)
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
{
|
|
|
|
static const char * const status_lookup[] = {
|
|
|
|
[GPD_STATE_ACTIVE] = "on",
|
|
|
|
[GPD_STATE_POWER_OFF] = "off"
|
|
|
|
};
|
|
|
|
struct pm_domain_data *pm_data;
|
|
|
|
const char *kobj_path;
|
|
|
|
struct gpd_link *link;
|
2016-02-23 23:49:17 +07:00
|
|
|
char state[16];
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
int ret;
|
|
|
|
|
2016-10-15 00:47:54 +07:00
|
|
|
ret = genpd_lock_interruptible(genpd);
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
if (ret)
|
|
|
|
return -ERESTARTSYS;
|
|
|
|
|
2015-03-03 02:24:28 +07:00
|
|
|
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
goto exit;
|
2016-02-15 17:10:51 +07:00
|
|
|
if (genpd->status == GPD_STATE_POWER_OFF)
|
2016-02-23 23:49:18 +07:00
|
|
|
snprintf(state, sizeof(state), "%s-%u",
|
2016-02-23 23:49:17 +07:00
|
|
|
status_lookup[genpd->status], genpd->state_idx);
|
2016-02-15 17:10:51 +07:00
|
|
|
else
|
2016-02-23 23:49:17 +07:00
|
|
|
snprintf(state, sizeof(state), "%s",
|
|
|
|
status_lookup[genpd->status]);
|
|
|
|
seq_printf(s, "%-30s %-15s ", genpd->name, state);
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Modifications on the list require holding locks on both
|
|
|
|
* master and slave, so we are safe.
|
2015-03-03 02:24:28 +07:00
|
|
|
* Also genpd->name is immutable.
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
*/
|
2015-03-03 02:24:28 +07:00
|
|
|
list_for_each_entry(link, &genpd->master_links, master_node) {
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
seq_printf(s, "%s", link->slave->name);
|
2015-03-03 02:24:28 +07:00
|
|
|
if (!list_is_last(&link->master_node, &genpd->master_links))
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
seq_puts(s, ", ");
|
|
|
|
}
|
|
|
|
|
2015-03-03 02:24:28 +07:00
|
|
|
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
|
2016-10-15 00:47:55 +07:00
|
|
|
kobj_path = kobject_get_path(&pm_data->dev->kobj,
|
|
|
|
genpd_is_irq_safe(genpd) ?
|
|
|
|
GFP_ATOMIC : GFP_KERNEL);
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
if (kobj_path == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
seq_printf(s, "\n %-50s ", kobj_path);
|
|
|
|
rtpm_status_str(s, pm_data->dev);
|
|
|
|
kfree(kobj_path);
|
|
|
|
}
|
|
|
|
|
|
|
|
seq_puts(s, "\n");
|
|
|
|
exit:
|
2016-10-15 00:47:54 +07:00
|
|
|
genpd_unlock(genpd);
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pm_genpd_summary_show(struct seq_file *s, void *data)
|
|
|
|
{
|
2015-03-03 02:24:28 +07:00
|
|
|
struct generic_pm_domain *genpd;
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
int ret = 0;
|
|
|
|
|
2015-08-11 19:50:49 +07:00
|
|
|
seq_puts(s, "domain status slaves\n");
|
|
|
|
seq_puts(s, " /device runtime status\n");
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
seq_puts(s, "----------------------------------------------------------------------\n");
|
|
|
|
|
|
|
|
ret = mutex_lock_interruptible(&gpd_list_lock);
|
|
|
|
if (ret)
|
|
|
|
return -ERESTARTSYS;
|
|
|
|
|
2015-03-03 02:24:28 +07:00
|
|
|
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
|
|
|
|
ret = pm_genpd_summary_one(s, genpd);
|
PM / Domains: add debugfs listing of struct generic_pm_domain-s
Add /sys/kernel/debug/pm_genpd/pm_genpd_summary file, which
lists power domains in the system, their statuses and attached devices,
resembling /sys/kernel/debug/clk/clk_summary.
Currently it is impossible to inspect (from userland) whether
a power domain is on or off. And, if it is on, which device blocks it
from powering down. This change allows developers working on
embedded devices power efficiency to list all necessary information
about generic power domains in one place.
The content of pm_genpd/pm_genpd_summary file is generated by iterating
over all generic power domain in the system, and, for each,
over registered devices and over the subdomains, if present.
Example output:
$ cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
domain status slaves
/device runtime status
----------------------------------------------------------------------
a4su off
a3sg off
a3sm on
a3sp on
/devices/e6600000.pwm suspended
/devices/e6c50000.serial active
/devices/e6850000.sd suspended
/devices/e6bd0000.mmc active
a4s on a3sp, a3sm, a3sg
/devices/e6900000.irqpin unsupported
/devices/e6900004.irqpin unsupported
/devices/e6900008.irqpin unsupported
/devices/e690000c.irqpin unsupported
/devices/e9a00000.ethernet active
a3rv off
a4r off a3rv
/devices/fff20000.i2c suspended
a4lc off
c5 on a4lc, a4r, a4s, a4su
/devices/e6050000.pfc unsupported
/devices/e6138000.timer active
To enable this feature, compile the kernel with debugfs
and CONFIG_PM_ADVANCED_DEBUG enabled.
Signed-off-by: Maciej Matraszek <m.matraszek@samsung.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-09-15 18:09:10 +07:00
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
mutex_unlock(&gpd_list_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Open handler for the pm_genpd_summary debugfs file: binds
 * pm_genpd_summary_show() to a single-shot seq_file so the whole
 * summary is regenerated on every read.
 */
static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}
|
|
|
|
|
|
|
|
/* Read-only, seq_file-backed file operations for pm_genpd_summary. */
static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
|
|
|
|
|
|
|
|
/*
 * pm_genpd_debug_init - create the pm_genpd debugfs directory and its
 * pm_genpd_summary file.
 *
 * Returns 0 on success, -ENOMEM if either debugfs node could not be
 * created. On partial failure the directory is removed again so no
 * empty debugfs entry is left behind (for built-in code the __exitcall
 * cleanup never runs, so the error path must clean up itself).
 */
static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d) {
		/* Don't leak the just-created directory on failure. */
		debugfs_remove_recursive(pm_genpd_debugfs_dir);
		pm_genpd_debugfs_dir = NULL;
		return -ENOMEM;
	}

	return 0;
}
late_initcall(pm_genpd_debug_init);
|
|
|
|
|
|
|
|
/*
 * Tear down the whole pm_genpd debugfs tree created by
 * pm_genpd_debug_init(). debugfs_remove_recursive() tolerates a NULL
 * or error dentry, so this is safe even if init failed.
 */
static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
|
2016-08-11 17:40:05 +07:00
|
|
|
#endif /* CONFIG_DEBUG_FS */
|