/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_domain.h - Definitions and headers related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */

#ifndef _LINUX_PM_DOMAIN_H
#define _LINUX_PM_DOMAIN_H

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

/*
 * Flags to control the behaviour of a genpd.
 *
 * These flags may be set in the struct generic_pm_domain's flags field by a
 * genpd backend driver. The flags must be set before the driver calls
 * pm_genpd_init(), which initializes the genpd.
 *
 * GENPD_FLAG_PM_CLK:		Instructs genpd to use the PM clk framework,
 *				while powering on/off attached devices.
 *
 * GENPD_FLAG_IRQ_SAFE:		This informs genpd that its backend callbacks,
 *				->power_on|off(), do not sleep. Hence, they
 *				can be invoked from within atomic context, which
 *				enables genpd to power on/off the PM domain,
 *				even when pm_runtime_is_irq_safe() returns true
 *				for any of its attached devices. Note that a
 *				genpd having this flag set requires its
 *				master domains to have it set as well.
 *
 * GENPD_FLAG_ALWAYS_ON:	Instructs genpd to always keep the PM domain
 *				powered on.
 *
 * GENPD_FLAG_ACTIVE_WAKEUP:	Instructs genpd to keep the PM domain powered
 *				on, in case any of its attached devices is used
 *				in the wakeup path to serve system wakeups.
 *
 * GENPD_FLAG_CPU_DOMAIN:	Instructs genpd that it should expect to get
 *				devices attached, which may belong to CPUs or
 *				possibly have subdomains with CPUs attached.
 *				This flag enables the genpd backend driver to
 *				deploy idle power management support for CPUs
 *				and groups of CPUs. Note that the backend
 *				driver must then comply with the so-called
 *				last-man-standing algorithm for the CPUs in the
 *				PM domain.
 *
 * GENPD_FLAG_RPM_ALWAYS_ON:	Instructs genpd to always keep the PM domain
 *				powered on except for system suspend.
 */
#define GENPD_FLAG_PM_CLK	 (1U << 0)
#define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
#define GENPD_FLAG_ALWAYS_ON	 (1U << 2)
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
#define GENPD_FLAG_CPU_DOMAIN	 (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
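
/*
 * A minimal usage sketch (not from any real driver; the "foo" names and
 * callbacks are hypothetical): a backend driver sets the flags field of its
 * generic_pm_domain before handing the domain to pm_genpd_init(), which is
 * declared below.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo",
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *		.flags		= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP,
 *	};
 *
 *	// Register the domain, initially powered off; a governor such as
 *	// &simple_qos_governor may be passed instead of NULL.
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */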

enum gpd_status {
	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
	GPD_STATE_POWER_OFF,	/* PM domain is off */
};

struct dev_power_governor {
	bool (*power_down_ok)(struct dev_pm_domain *domain);
	bool (*suspend_ok)(struct device *dev);
};
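
/*
 * Illustrative only (assumed names): a trivial custom governor that never
 * lets the domain power down at runtime, passed to pm_genpd_init() in place
 * of one of the stock governors declared further down in this header.
 *
 *	static bool foo_power_down_ok(struct dev_pm_domain *pd)
 *	{
 *		return false;	// keep the domain powered at runtime
 *	}
 *
 *	static struct dev_power_governor foo_gov = {
 *		.power_down_ok	= foo_power_down_ok,
 *	};
 */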

struct gpd_dev_ops {
	int (*start)(struct device *dev);
	int (*stop)(struct device *dev);
};

struct genpd_power_state {
	s64 power_off_latency_ns;
	s64 power_on_latency_ns;
	s64 residency_ns;
	struct fwnode_handle *fwnode;
	ktime_t idle_time;
	void *data;
};
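
/*
 * A sketch, under assumed names and made-up latency numbers, of a provider
 * supplying its own idle-state table instead of parsing one from firmware;
 * the table is wired up before pm_genpd_init() runs.
 *
 *	static struct genpd_power_state foo_pd_states[] = {
 *		{
 *			.power_off_latency_ns	= 100000,
 *			.power_on_latency_ns	= 150000,
 *			.residency_ns		= 500000,
 *		},
 *	};
 *
 *	foo_pd.states = foo_pd_states;
 *	foo_pd.state_count = ARRAY_SIZE(foo_pd_states);
 */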

struct genpd_lock_ops;
struct dev_pm_opp;
struct opp_table;

struct generic_pm_domain {
	struct device dev;
	struct dev_pm_domain domain;	/* PM domain operations */
	struct list_head gpd_list_node;	/* Node in the global PM domains list */
	struct list_head master_links;	/* Links with PM domain as a master */
	struct list_head slave_links;	/* Links with PM domain as a slave */
	struct list_head dev_list;	/* List of devices */
	struct dev_power_governor *gov;
	struct work_struct power_off_work;
	struct fwnode_handle *provider;	/* Identity of the domain provider */
	bool has_provider;
	const char *name;
	atomic_t sd_count;	/* Number of subdomains with power "on" */
	enum gpd_status status;	/* Current state of the domain */
	unsigned int device_count;	/* Number of devices */
	unsigned int suspended_count;	/* System suspend device counter */
	unsigned int prepared_count;	/* Suspend counter of prepared devices */
	unsigned int performance_state;	/* Aggregated max performance state */
	cpumask_var_t cpus;		/* A cpumask of the attached CPUs */
	int (*power_off)(struct generic_pm_domain *domain);
	int (*power_on)(struct generic_pm_domain *domain);
	struct opp_table *opp_table;	/* OPP table of the genpd */
	unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp);
	int (*set_performance_state)(struct generic_pm_domain *genpd,
				     unsigned int state);
	struct gpd_dev_ops dev_ops;
	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
	bool max_off_time_changed;
	bool cached_power_down_ok;
	bool cached_power_down_state_idx;
	int (*attach_dev)(struct generic_pm_domain *domain,
			  struct device *dev);
	void (*detach_dev)(struct generic_pm_domain *domain,
			   struct device *dev);
	unsigned int flags;		/* Bit field of configs for genpd */
	struct genpd_power_state *states;
	void (*free_states)(struct genpd_power_state *states,
			    unsigned int state_count);
	unsigned int state_count; /* number of states */
	unsigned int state_idx; /* state that genpd will go to when off */
	ktime_t on_time;
	ktime_t accounting_time;
	const struct genpd_lock_ops *lock_ops;
	union {
		struct mutex mlock;
		struct {
			spinlock_t slock;
			unsigned long lock_flags;
		};
	};

};

static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
{
	return container_of(pd, struct generic_pm_domain, domain);
}

struct gpd_link {
	struct generic_pm_domain *master;
	struct list_head master_node;
	struct generic_pm_domain *slave;
	struct list_head slave_node;

	/* Sub-domain's per-master domain performance state */
	unsigned int performance_state;
	unsigned int prev_performance_state;
};

struct gpd_timing_data {
	s64 suspend_latency_ns;
	s64 resume_latency_ns;
	s64 effective_constraint_ns;
	bool constraint_changed;
	bool cached_suspend_ok;
};

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct generic_pm_domain_data {
	struct pm_domain_data base;
	struct gpd_timing_data td;
	struct notifier_block nb;
	int cpu;
	unsigned int performance_state;
	void *data;
};

#ifdef CONFIG_PM_GENERIC_DOMAINS
static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
{
	return container_of(pdd, struct generic_pm_domain_data, base);
}

static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
	return to_gpd_data(dev->power.subsys_data->domain_data);
}

int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev);
int pm_genpd_remove_device(struct device *dev);
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain);
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain);
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
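
/*
 * Sketch of a provider building a parent/child domain hierarchy; the names
 * and the (abbreviated) error handling are assumptions for illustration:
 *
 *	ret = pm_genpd_init(&foo_parent_pd, &simple_qos_governor, true);
 *	if (!ret)
 *		ret = pm_genpd_init(&foo_child_pd, &simple_qos_governor, true);
 *	if (!ret)
 *		ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *
 * Devices are then attached either through the OF provider path below or
 * directly with pm_genpd_add_device(&foo_child_pd, dev).
 */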

extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
#ifdef CONFIG_CPU_IDLE
extern struct dev_power_governor pm_domain_cpu_gov;
#endif
#else

static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
	return ERR_PTR(-ENOSYS);
}
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
				      struct device *dev)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove_device(struct device *dev)
{
	return -ENOSYS;
}
static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *subdomain)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
					    struct generic_pm_domain *subdomain)
{
	return -ENOSYS;
}
static inline int pm_genpd_init(struct generic_pm_domain *genpd,
				struct dev_power_governor *gov, bool is_off)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	return -ENOTSUPP;
}

static inline int dev_pm_genpd_set_performance_state(struct device *dev,
						     unsigned int state)
{
	return -ENOTSUPP;
}

#define simple_qos_governor		(*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov		(*(struct dev_power_governor *)(NULL))
#endif

#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
void pm_genpd_syscore_poweroff(struct device *dev);
void pm_genpd_syscore_poweron(struct device *dev);
#else
static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
static inline void pm_genpd_syscore_poweron(struct device *dev) {}
#endif

/* OF PM domain providers */
struct of_device_id;

typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

struct genpd_onecell_data {
	struct generic_pm_domain **domains;
	unsigned int num_domains;
	genpd_xlate_t xlate;
};
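
/*
 * Rough sketch of a provider exposing several domains behind one DT node;
 * "foo" is a hypothetical driver and error handling is omitted. With xlate
 * left NULL, the default one-cell translation is used, i.e. the consumer's
 * power-domain cell indexes the domains[] array.
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a,
 *		&foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains	= foo_domains,
 *		.num_domains	= ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_onecell_data);
 */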

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd);
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n);
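
/*
 * Hypothetical provider-side use of of_genpd_parse_idle_states(), which
 * fills an allocated genpd_power_state array from the domain-idle-states
 * description in DT; the variable names are assumptions:
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!ret) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 */
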
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp);

int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index);
struct device *genpd_dev_pm_attach_by_name(struct device *dev,
					   const char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
					struct generic_pm_domain *genpd)
{
	return -ENOTSUPP;
}

static inline int of_genpd_add_provider_onecell(struct device_node *np,
					struct genpd_onecell_data *data)
{
	return -ENOTSUPP;
}

static inline void of_genpd_del_provider(struct device_node *np) {}

static inline int of_genpd_add_device(struct of_phandle_args *args,
				      struct device *dev)
{
	return -ENODEV;
}

static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
					 struct of_phandle_args *subdomain_spec)
{
	return -ENODEV;
}

static inline int of_genpd_parse_idle_states(struct device_node *dn,
			struct genpd_power_state **states, int *n)
{
	return -ENODEV;
}

static inline unsigned int
pm_genpd_opp_to_performance_state(struct device *genpd_dev,
				  struct dev_pm_opp *opp)
{
	return 0;
}

static inline int genpd_dev_pm_attach(struct device *dev)
{
	return 0;
}

static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
							unsigned int index)
{
	return NULL;
}

static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
							  const char *name)
{
	return NULL;
}

static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

#ifdef CONFIG_PM
int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
					   unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
					    const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
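
/*
 * Hedged sketch of a consumer driver whose device spans two PM domains; the
 * "foo" names, the domain index, the domain name and the device-link flags
 * are assumptions for illustration, and error handling is omitted.
 *
 *	// In ->probe(): attach each domain; a virtual device is returned
 *	// for every attachment.
 *	foo->pd_core = dev_pm_domain_attach_by_id(dev, 0);
 *	foo->pd_mem  = dev_pm_domain_attach_by_name(dev, "mem");
 *
 *	// Optionally link the virtual devices to the original one, so that
 *	// runtime PM of dev propagates to both domains.
 *	device_link_add(dev, foo->pd_core,
 *			DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *
 *	// In ->remove(): detach what was attached.
 *	dev_pm_domain_detach(foo->pd_mem, true);
 *	dev_pm_domain_detach(foo->pd_core, true);
 */
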
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
	return 0;
}
static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
							unsigned int index)
{
	return NULL;
}
static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
							   const char *name)
{
	return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline int dev_pm_domain_start(struct device *dev)
{
	return 0;
}
static inline void dev_pm_domain_set(struct device *dev,
				     struct dev_pm_domain *pd) {}
#endif

#endif /* _LINUX_PM_DOMAIN_H */