Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 11:10:53 +07:00
Merge branch 'pm-cpufreq'

* pm-cpufreq: (36 commits)
  cpufreq: Add qcs404 to cpufreq-dt-platdev blacklist
  cpufreq: qcom: Add support for qcs404 on nvmem driver
  cpufreq: qcom: Refactor the driver to make it easier to extend
  cpufreq: qcom: Re-organise kryo cpufreq to use it for other nvmem based qcom socs
  dt-bindings: opp: Add qcom-opp bindings with properties needed for CPR
  dt-bindings: opp: qcom-nvmem: Support pstates provided by a power domain
  Documentation: cpufreq: Update policy notifier documentation
  cpufreq: Remove CPUFREQ_ADJUST and CPUFREQ_NOTIFY policy notifier events
  sched/cpufreq: Align trace event behavior of fast switching
  ACPI: cpufreq: Switch to QoS requests instead of cpufreq notifier
  video: pxafb: Remove cpufreq policy notifier
  video: sa1100fb: Remove cpufreq policy notifier
  arch_topology: Use CPUFREQ_CREATE_POLICY instead of CPUFREQ_NOTIFY
  cpufreq: powerpc_cbe: Switch to QoS requests for freq limits
  cpufreq: powerpc: macintosh: Switch to QoS requests for freq limits
  cpufreq: Print driver name if cpufreq_suspend() fails
  cpufreq: mediatek: Add support for mt8183
  cpufreq: mediatek: change to regulator_get_optional
  cpufreq: imx-cpufreq-dt: Add i.MX8MN support
  cpufreq: Use imx-cpufreq-dt for i.MX8MN's speed grading
  ...

commit ca61a72ac3
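Several commits in this series (the ACPI, powerpc_cbe, macintosh and intel_pstate changes below) replace CPUFREQ_ADJUST policy-notifier clamping with per-device PM QoS frequency requests. The following is only a minimal sketch of that pattern, assuming placeholder example_* names and an arbitrary limit; it is not code from this series.

```c
#include <linux/cpu.h>
#include <linux/pm_qos.h>

/*
 * Minimal sketch: cap a CPU's frequency through a DEV_PM_QOS_MAX_FREQUENCY
 * request instead of clamping the policy from a CPUFREQ_ADJUST notifier.
 */
static struct dev_pm_qos_request example_max_freq_req;

static int example_add_freq_cap(int cpu, s32 max_khz)
{
	/* Add a max-frequency constraint; INT_MAX would mean "no limit". */
	return dev_pm_qos_add_request(get_cpu_device(cpu),
				      &example_max_freq_req,
				      DEV_PM_QOS_MAX_FREQUENCY, max_khz);
}

static int example_update_freq_cap(s32 new_max_khz)
{
	/* Update the request when the limit changes, e.g. for thermal reasons. */
	return dev_pm_qos_update_request(&example_max_freq_req, new_max_khz);
}

static void example_remove_freq_cap(void)
{
	dev_pm_qos_remove_request(&example_max_freq_req);
}
```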
@@ -57,19 +57,11 @@ transition notifiers.
 2.1 CPUFreq policy notifiers
 ----------------------------
 
-These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called twice for a policy transition:
+These are notified when a new policy is created or removed.
 
-1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
-    they see a need for this - may it be thermal considerations or
-    hardware limitations.
-
-2.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
-   - if two hardware drivers failed to agree on a new policy before this
-   stage, the incompatible hardware shall be shut down, and the user
-   informed of this.
-
-The phase is specified in the second argument to the notifier.
+The phase is specified in the second argument to the notifier. The phase is
+CPUFREQ_CREATE_POLICY when the policy is first created and it is
+CPUFREQ_REMOVE_POLICY when the policy is removed.
 
 The third argument, a void *pointer, points to a struct cpufreq_policy
 consisting of several values, including min, max (the lower and upper
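A minimal policy notifier that handles only the two remaining events could look like the sketch below; the example_* names are placeholders, and the ACPI changes later in this diff follow the same shape with real setup and teardown work in each branch.

```c
#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/module.h>

/*
 * Hypothetical notifier: set up per-policy state when a policy is
 * created and tear it down when the policy goes away.
 */
static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_CREATE_POLICY)
		pr_info("policy created for CPU%u\n", policy->cpu);
	else if (event == CPUFREQ_REMOVE_POLICY)
		pr_info("policy removed for CPU%u\n", policy->cpu);

	return 0;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notifier,
};

static int __init example_init(void)
{
	return cpufreq_register_notifier(&example_policy_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}
module_init(example_init);
```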
@@ -1,25 +1,38 @@
-Qualcomm Technologies, Inc. KRYO CPUFreq and OPP bindings
+Qualcomm Technologies, Inc. NVMEM CPUFreq and OPP bindings
 ===================================
 
-In Certain Qualcomm Technologies, Inc. SoCs like apq8096 and msm8996
-that have KRYO processors, the CPU ferequencies subset and voltage value
-of each OPP varies based on the silicon variant in use.
+In Certain Qualcomm Technologies, Inc. SoCs like apq8096 and msm8996,
+the CPU frequencies subset and voltage value of each OPP varies based on
+the silicon variant in use.
 Qualcomm Technologies, Inc. Process Voltage Scaling Tables
 defines the voltage and frequency value based on the msm-id in SMEM
 and speedbin blown in the efuse combination.
-The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
+The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
 to provide the OPP framework with required information (existing HW bitmap).
 This is used to determine the voltage and frequency value for each OPP of
 operating-points-v2 table when it is parsed by the OPP framework.
 
 Required properties:
 --------------------
-In 'cpus' nodes:
+In 'cpu' nodes:
 - operating-points-v2: Phandle to the operating-points-v2 table to use.
 
 In 'operating-points-v2' table:
 - compatible: Should be
 	- 'operating-points-v2-kryo-cpu' for apq8096 and msm8996.
 
+Optional properties:
+--------------------
+In 'cpu' nodes:
+- power-domains: A phandle pointing to the PM domain specifier which provides
+		 the performance states available for active state management.
+		 Please refer to the power-domains bindings
+		 Documentation/devicetree/bindings/power/power_domain.txt
+		 and also examples below.
+- power-domain-names: Should be
+	- 'cpr' for qcs404.
+
+In 'operating-points-v2' table:
 - nvmem-cells: A phandle pointing to a nvmem-cells node representing the
 		efuse registers that has information about the
 		speedbin that is used to select the right frequency/voltage
@@ -678,3 +691,105 @@ soc {
		};
	};
};

Example 2:
---------

cpus {
	#address-cells = <1>;
	#size-cells = <0>;

	CPU0: cpu@100 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x100>;
		....
		clocks = <&apcs_glb>;
		operating-points-v2 = <&cpu_opp_table>;
		power-domains = <&cpr>;
		power-domain-names = "cpr";
	};

	CPU1: cpu@101 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x101>;
		....
		clocks = <&apcs_glb>;
		operating-points-v2 = <&cpu_opp_table>;
		power-domains = <&cpr>;
		power-domain-names = "cpr";
	};

	CPU2: cpu@102 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x102>;
		....
		clocks = <&apcs_glb>;
		operating-points-v2 = <&cpu_opp_table>;
		power-domains = <&cpr>;
		power-domain-names = "cpr";
	};

	CPU3: cpu@103 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x103>;
		....
		clocks = <&apcs_glb>;
		operating-points-v2 = <&cpu_opp_table>;
		power-domains = <&cpr>;
		power-domain-names = "cpr";
	};
};

cpu_opp_table: cpu-opp-table {
	compatible = "operating-points-v2-kryo-cpu";
	opp-shared;

	opp-1094400000 {
		opp-hz = /bits/ 64 <1094400000>;
		required-opps = <&cpr_opp1>;
	};
	opp-1248000000 {
		opp-hz = /bits/ 64 <1248000000>;
		required-opps = <&cpr_opp2>;
	};
	opp-1401600000 {
		opp-hz = /bits/ 64 <1401600000>;
		required-opps = <&cpr_opp3>;
	};
};

cpr_opp_table: cpr-opp-table {
	compatible = "operating-points-v2-qcom-level";

	cpr_opp1: opp1 {
		opp-level = <1>;
		qcom,opp-fuse-level = <1>;
	};
	cpr_opp2: opp2 {
		opp-level = <2>;
		qcom,opp-fuse-level = <2>;
	};
	cpr_opp3: opp3 {
		opp-level = <3>;
		qcom,opp-fuse-level = <3>;
	};
};

....

soc {
	....
	cpr: power-controller@b018000 {
		compatible = "qcom,qcs404-cpr", "qcom,cpr";
		reg = <0x0b018000 0x1000>;
		....
		vdd-apc-supply = <&pms405_s3>;
		#power-domain-cells = <0>;
		operating-points-v2 = <&cpr_opp_table>;
		....
	};
};
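On the driver side, the speedbin cell referenced by the nvmem-cells property above is read and turned into a supported-hw version bitmap for the OPP framework, so that only matching opp-supported-hw entries stay enabled. The sketch below only illustrates that flow, with placeholder example_* names; the real implementation is the qcom-cpufreq-nvmem.c driver added further down in this diff.

```c
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

/* Minimal sketch: map a fused speedbin value to an OPP supported-hw bitmap. */
static int example_set_supported_hw(struct device *cpu_dev,
				    struct nvmem_cell *speedbin_nvmem)
{
	struct opp_table *opp_table;
	u32 versions;
	size_t len;
	u8 *speedbin;

	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
	if (IS_ERR(speedbin))
		return PTR_ERR(speedbin);

	versions = 1 << *speedbin;	/* one bit per silicon variant */
	kfree(speedbin);

	opp_table = dev_pm_opp_set_supported_hw(cpu_dev, &versions, 1);
	return PTR_ERR_OR_ZERO(opp_table);
}
```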
19  Documentation/devicetree/bindings/opp/qcom-opp.txt  Normal file
@@ -0,0 +1,19 @@
Qualcomm OPP bindings to describe OPP nodes

The bindings are based on top of the operating-points-v2 bindings
described in Documentation/devicetree/bindings/opp/opp.txt
Additional properties are described below.

* OPP Table Node

Required properties:
- compatible: Allow OPPs to express their compatibility. It should be:
  "operating-points-v2-qcom-level"

* OPP Node

Required properties:
- qcom,opp-fuse-level: A positive value representing the fuse corner/level
  associated with this OPP node. Sometimes several corners/levels share
  a certain fuse corner/level. A fuse corner/level contains e.g. ref uV,
  min uV, and max uV.
167  Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt  Normal file
@@ -0,0 +1,167 @@
Allwinner Technologies, Inc. NVMEM CPUFreq and OPP bindings
===================================

For some SoCs, the CPU frequency subset and voltage value of each OPP
varies based on the silicon variant in use. Allwinner Process Voltage
Scaling Tables defines the voltage and frequency value based on the
speedbin blown in the efuse combination. The sun50i-cpufreq-nvmem driver
reads the efuse value from the SoC to provide the OPP framework with
required information.

Required properties:
--------------------
In 'cpus' nodes:
- operating-points-v2: Phandle to the operating-points-v2 table to use.

In 'operating-points-v2' table:
- compatible: Should be
	- 'allwinner,sun50i-h6-operating-points'.
- nvmem-cells: A phandle pointing to a nvmem-cells node representing the
		efuse registers that has information about the speedbin
		that is used to select the right frequency/voltage value
		pair. Please refer to the nvmem-cells bindings in
		Documentation/devicetree/bindings/nvmem/nvmem.txt and
		also examples below.

In every OPP node:
- opp-microvolt-<name>: Voltage in micro Volts.
			At runtime, the platform can pick a <name> and
			matching opp-microvolt-<name> property.
			[See: opp.txt]
			HW:		<name>:
			sun50i-h6	speed0 speed1 speed2

Example 1:
|
||||
---------
|
||||
|
||||
cpus {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
cpu0: cpu@0 {
|
||||
compatible = "arm,cortex-a53";
|
||||
device_type = "cpu";
|
||||
reg = <0>;
|
||||
enable-method = "psci";
|
||||
clocks = <&ccu CLK_CPUX>;
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>;
|
||||
};
|
||||
|
||||
cpu1: cpu@1 {
|
||||
compatible = "arm,cortex-a53";
|
||||
device_type = "cpu";
|
||||
reg = <1>;
|
||||
enable-method = "psci";
|
||||
clocks = <&ccu CLK_CPUX>;
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>;
|
||||
};
|
||||
|
||||
cpu2: cpu@2 {
|
||||
compatible = "arm,cortex-a53";
|
||||
device_type = "cpu";
|
||||
reg = <2>;
|
||||
enable-method = "psci";
|
||||
clocks = <&ccu CLK_CPUX>;
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>;
|
||||
};
|
||||
|
||||
cpu3: cpu@3 {
|
||||
compatible = "arm,cortex-a53";
|
||||
device_type = "cpu";
|
||||
reg = <3>;
|
||||
enable-method = "psci";
|
||||
clocks = <&ccu CLK_CPUX>;
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>;
|
||||
};
|
||||
};
|
||||
|
||||
cpu_opp_table: opp_table {
|
||||
compatible = "allwinner,sun50i-h6-operating-points";
|
||||
nvmem-cells = <&speedbin_efuse>;
|
||||
opp-shared;
|
||||
|
||||
opp@480000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <480000000>;
|
||||
|
||||
opp-microvolt-speed0 = <880000>;
|
||||
opp-microvolt-speed1 = <820000>;
|
||||
opp-microvolt-speed2 = <800000>;
|
||||
};
|
||||
|
||||
opp@720000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <720000000>;
|
||||
|
||||
opp-microvolt-speed0 = <880000>;
|
||||
opp-microvolt-speed1 = <820000>;
|
||||
opp-microvolt-speed2 = <800000>;
|
||||
};
|
||||
|
||||
opp@816000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <816000000>;
|
||||
|
||||
opp-microvolt-speed0 = <880000>;
|
||||
opp-microvolt-speed1 = <820000>;
|
||||
opp-microvolt-speed2 = <800000>;
|
||||
};
|
||||
|
||||
opp@888000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <888000000>;
|
||||
|
||||
opp-microvolt-speed0 = <940000>;
|
||||
opp-microvolt-speed1 = <820000>;
|
||||
opp-microvolt-speed2 = <800000>;
|
||||
};
|
||||
|
||||
opp@1080000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <1080000000>;
|
||||
|
||||
opp-microvolt-speed0 = <1060000>;
|
||||
opp-microvolt-speed1 = <880000>;
|
||||
opp-microvolt-speed2 = <840000>;
|
||||
};
|
||||
|
||||
opp@1320000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <1320000000>;
|
||||
|
||||
opp-microvolt-speed0 = <1160000>;
|
||||
opp-microvolt-speed1 = <940000>;
|
||||
opp-microvolt-speed2 = <900000>;
|
||||
};
|
||||
|
||||
opp@1488000000 {
|
||||
clock-latency-ns = <244144>; /* 8 32k periods */
|
||||
opp-hz = /bits/ 64 <1488000000>;
|
||||
|
||||
opp-microvolt-speed0 = <1160000>;
|
||||
opp-microvolt-speed1 = <1000000>;
|
||||
opp-microvolt-speed2 = <960000>;
|
||||
};
|
||||
};
|
||||
....
|
||||
soc {
|
||||
....
|
||||
sid: sid@3006000 {
|
||||
compatible = "allwinner,sun50i-h6-sid";
|
||||
reg = <0x03006000 0x400>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
....
|
||||
speedbin_efuse: speed@1c {
|
||||
reg = <0x1c 4>;
|
||||
};
|
||||
};
|
||||
};
|
11  MAINTAINERS
@@ -676,6 +676,13 @@ L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/media/allegro-dvt/
 
+ALLWINNER CPUFREQ DRIVER
+M:	Yangtao Li <tiny.windzz@gmail.com>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F:	drivers/cpufreq/sun50i-cpufreq-nvmem.c
+
 ALLWINNER SECURITY SYSTEM
 M:	Corentin Labbe <clabbe.montjoie@gmail.com>
 L:	linux-crypto@vger.kernel.org
@@ -13308,8 +13315,8 @@ QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
 M:	Ilia Lin <ilia.lin@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
-F:	drivers/cpufreq/qcom-cpufreq-kryo.c
+F:	Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
+F:	drivers/cpufreq/qcom-cpufreq-nvmem.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:	Timur Tabi <timur@kernel.org>
|
@ -284,6 +284,29 @@ static int acpi_processor_stop(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool acpi_processor_cpufreq_init;
|
||||
|
||||
static int acpi_processor_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
struct cpufreq_policy *policy = data;
|
||||
int cpu = policy->cpu;
|
||||
|
||||
if (event == CPUFREQ_CREATE_POLICY) {
|
||||
acpi_thermal_cpufreq_init(cpu);
|
||||
acpi_processor_ppc_init(cpu);
|
||||
} else if (event == CPUFREQ_REMOVE_POLICY) {
|
||||
acpi_processor_ppc_exit(cpu);
|
||||
acpi_thermal_cpufreq_exit(cpu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block acpi_processor_notifier_block = {
|
||||
.notifier_call = acpi_processor_notifier,
|
||||
};
|
||||
|
||||
/*
|
||||
* We keep the driver loaded even when ACPI is not running.
|
||||
* This is needed for the powernow-k8 driver, that works even without
|
||||
@ -310,8 +333,12 @@ static int __init acpi_processor_driver_init(void)
|
||||
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
|
||||
NULL, acpi_soft_cpu_dead);
|
||||
|
||||
acpi_thermal_cpufreq_init();
|
||||
acpi_processor_ppc_init();
|
||||
if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER)) {
|
||||
acpi_processor_cpufreq_init = true;
|
||||
acpi_processor_ignore_ppc_init();
|
||||
}
|
||||
|
||||
acpi_processor_throttling_init();
|
||||
return 0;
|
||||
err:
|
||||
@ -324,8 +351,12 @@ static void __exit acpi_processor_driver_exit(void)
|
||||
if (acpi_disabled)
|
||||
return;
|
||||
|
||||
acpi_processor_ppc_exit();
|
||||
acpi_thermal_cpufreq_exit();
|
||||
if (acpi_processor_cpufreq_init) {
|
||||
cpufreq_unregister_notifier(&acpi_processor_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
acpi_processor_cpufreq_init = false;
|
||||
}
|
||||
|
||||
cpuhp_remove_state_nocalls(hp_online);
|
||||
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
|
||||
driver_unregister(&acpi_processor_driver);
|
||||
|
@ -50,57 +50,13 @@ module_param(ignore_ppc, int, 0644);
|
||||
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
|
||||
"limited by BIOS, this should help");
|
||||
|
||||
#define PPC_REGISTERED 1
|
||||
#define PPC_IN_USE 2
|
||||
|
||||
static int acpi_processor_ppc_status;
|
||||
|
||||
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
struct cpufreq_policy *policy = data;
|
||||
struct acpi_processor *pr;
|
||||
unsigned int ppc = 0;
|
||||
|
||||
if (ignore_ppc < 0)
|
||||
ignore_ppc = 0;
|
||||
|
||||
if (ignore_ppc)
|
||||
return 0;
|
||||
|
||||
if (event != CPUFREQ_ADJUST)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&performance_mutex);
|
||||
|
||||
pr = per_cpu(processors, policy->cpu);
|
||||
if (!pr || !pr->performance)
|
||||
goto out;
|
||||
|
||||
ppc = (unsigned int)pr->performance_platform_limit;
|
||||
|
||||
if (ppc >= pr->performance->state_count)
|
||||
goto out;
|
||||
|
||||
cpufreq_verify_within_limits(policy, 0,
|
||||
pr->performance->states[ppc].
|
||||
core_frequency * 1000);
|
||||
|
||||
out:
|
||||
mutex_unlock(&performance_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block acpi_ppc_notifier_block = {
|
||||
.notifier_call = acpi_processor_ppc_notifier,
|
||||
};
|
||||
static bool acpi_processor_ppc_in_use;
|
||||
|
||||
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
|
||||
{
|
||||
acpi_status status = 0;
|
||||
unsigned long long ppc = 0;
|
||||
|
||||
int ret;
|
||||
|
||||
if (!pr)
|
||||
return -EINVAL;
|
||||
@ -112,7 +68,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
|
||||
status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
|
||||
|
||||
if (status != AE_NOT_FOUND)
|
||||
acpi_processor_ppc_status |= PPC_IN_USE;
|
||||
acpi_processor_ppc_in_use = true;
|
||||
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
|
||||
@ -124,6 +80,17 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
|
||||
|
||||
pr->performance_platform_limit = (int)ppc;
|
||||
|
||||
if (ppc >= pr->performance->state_count ||
|
||||
unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
|
||||
return 0;
|
||||
|
||||
ret = dev_pm_qos_update_request(&pr->perflib_req,
|
||||
pr->performance->states[ppc].core_frequency * 1000);
|
||||
if (ret < 0) {
|
||||
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
|
||||
pr->id, ret);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -184,23 +151,32 @@ int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
|
||||
|
||||
void acpi_processor_ppc_init(void)
|
||||
void acpi_processor_ignore_ppc_init(void)
|
||||
{
|
||||
if (!cpufreq_register_notifier
|
||||
(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
|
||||
acpi_processor_ppc_status |= PPC_REGISTERED;
|
||||
else
|
||||
printk(KERN_DEBUG
|
||||
"Warning: Processor Platform Limit not supported.\n");
|
||||
if (ignore_ppc < 0)
|
||||
ignore_ppc = 0;
|
||||
}
|
||||
|
||||
void acpi_processor_ppc_exit(void)
|
||||
void acpi_processor_ppc_init(int cpu)
|
||||
{
|
||||
if (acpi_processor_ppc_status & PPC_REGISTERED)
|
||||
cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
int ret;
|
||||
|
||||
acpi_processor_ppc_status &= ~PPC_REGISTERED;
|
||||
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
|
||||
&pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
|
||||
INT_MAX);
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
|
||||
ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void acpi_processor_ppc_exit(int cpu)
|
||||
{
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
|
||||
dev_pm_qos_remove_request(&pr->perflib_req);
|
||||
}
|
||||
|
||||
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
|
||||
@ -477,7 +453,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
|
||||
static int is_done = 0;
|
||||
int result;
|
||||
|
||||
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
|
||||
if (!acpi_processor_cpufreq_init)
|
||||
return -EBUSY;
|
||||
|
||||
if (!try_module_get(calling_module))
|
||||
@ -513,7 +489,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
|
||||
* we can allow the cpufreq driver to be rmmod'ed. */
|
||||
is_done = 1;
|
||||
|
||||
if (!(acpi_processor_ppc_status & PPC_IN_USE))
|
||||
if (!acpi_processor_ppc_in_use)
|
||||
module_put(calling_module);
|
||||
|
||||
return 0;
|
||||
@ -742,7 +718,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
|
||||
{
|
||||
struct acpi_processor *pr;
|
||||
|
||||
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
|
||||
if (!acpi_processor_cpufreq_init)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&performance_mutex);
|
||||
|
@ -35,7 +35,6 @@ ACPI_MODULE_NAME("processor_thermal");
|
||||
#define CPUFREQ_THERMAL_MAX_STEP 3
|
||||
|
||||
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
|
||||
static unsigned int acpi_thermal_cpufreq_is_init = 0;
|
||||
|
||||
#define reduction_pctg(cpu) \
|
||||
per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
|
||||
@ -61,35 +60,11 @@ static int phys_package_first_cpu(int cpu)
|
||||
static int cpu_has_cpufreq(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy policy;
|
||||
if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
|
||||
if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
struct cpufreq_policy *policy = data;
|
||||
unsigned long max_freq = 0;
|
||||
|
||||
if (event != CPUFREQ_ADJUST)
|
||||
goto out;
|
||||
|
||||
max_freq = (
|
||||
policy->cpuinfo.max_freq *
|
||||
(100 - reduction_pctg(policy->cpu) * 20)
|
||||
) / 100;
|
||||
|
||||
cpufreq_verify_within_limits(policy, 0, max_freq);
|
||||
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
|
||||
.notifier_call = acpi_thermal_cpufreq_notifier,
|
||||
};
|
||||
|
||||
static int cpufreq_get_max_state(unsigned int cpu)
|
||||
{
|
||||
if (!cpu_has_cpufreq(cpu))
|
||||
@ -108,7 +83,10 @@ static int cpufreq_get_cur_state(unsigned int cpu)
|
||||
|
||||
static int cpufreq_set_cur_state(unsigned int cpu, int state)
|
||||
{
|
||||
int i;
|
||||
struct cpufreq_policy *policy;
|
||||
struct acpi_processor *pr;
|
||||
unsigned long max_freq;
|
||||
int i, ret;
|
||||
|
||||
if (!cpu_has_cpufreq(cpu))
|
||||
return 0;
|
||||
@ -121,33 +99,53 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
|
||||
* frequency.
|
||||
*/
|
||||
for_each_online_cpu(i) {
|
||||
if (topology_physical_package_id(i) ==
|
||||
if (topology_physical_package_id(i) !=
|
||||
topology_physical_package_id(cpu))
|
||||
cpufreq_update_policy(i);
|
||||
continue;
|
||||
|
||||
pr = per_cpu(processors, i);
|
||||
|
||||
if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
|
||||
continue;
|
||||
|
||||
policy = cpufreq_cpu_get(i);
|
||||
if (!policy)
|
||||
return -EINVAL;
|
||||
|
||||
max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
|
||||
ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
|
||||
if (ret < 0) {
|
||||
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
|
||||
pr->id, ret);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void acpi_thermal_cpufreq_init(void)
|
||||
void acpi_thermal_cpufreq_init(int cpu)
|
||||
{
|
||||
int i;
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
int ret;
|
||||
|
||||
i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
if (!i)
|
||||
acpi_thermal_cpufreq_is_init = 1;
|
||||
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
|
||||
&pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
|
||||
INT_MAX);
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
|
||||
ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void acpi_thermal_cpufreq_exit(void)
|
||||
void acpi_thermal_cpufreq_exit(int cpu)
|
||||
{
|
||||
if (acpi_thermal_cpufreq_is_init)
|
||||
cpufreq_unregister_notifier
|
||||
(&acpi_thermal_cpufreq_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
|
||||
acpi_thermal_cpufreq_is_init = 0;
|
||||
dev_pm_qos_remove_request(&pr->thermal_req);
|
||||
}
|
||||
|
||||
#else /* ! CONFIG_CPU_FREQ */
|
||||
static int cpufreq_get_max_state(unsigned int cpu)
|
||||
{
|
||||
|
@@ -174,7 +174,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	if (!raw_capacity)
 		return 0;
 
-	if (val != CPUFREQ_NOTIFY)
+	if (val != CPUFREQ_CREATE_POLICY)
 		return 0;
 
 	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
@ -19,6 +19,18 @@ config ACPI_CPPC_CPUFREQ
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
|
||||
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
|
||||
depends on ARCH_SUNXI
|
||||
depends on NVMEM_SUNXI_SID
|
||||
select PM_OPP
|
||||
help
|
||||
This adds the nvmem based CPUFreq driver for Allwinner
|
||||
h6 SoC.
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called sun50i-cpufreq-nvmem.
|
||||
|
||||
config ARM_ARMADA_37XX_CPUFREQ
|
||||
tristate "Armada 37xx CPUFreq support"
|
||||
depends on ARCH_MVEBU && CPUFREQ_DT
|
||||
@ -120,8 +132,8 @@ config ARM_OMAP2PLUS_CPUFREQ
|
||||
depends on ARCH_OMAP2PLUS
|
||||
default ARCH_OMAP2PLUS
|
||||
|
||||
config ARM_QCOM_CPUFREQ_KRYO
|
||||
tristate "Qualcomm Kryo based CPUFreq"
|
||||
config ARM_QCOM_CPUFREQ_NVMEM
|
||||
tristate "Qualcomm nvmem based CPUFreq"
|
||||
depends on ARM64
|
||||
depends on QCOM_QFPROM
|
||||
depends on QCOM_SMEM
|
||||
|
@ -64,7 +64,7 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
|
||||
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
|
||||
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
|
||||
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
|
||||
obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o
|
||||
obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
|
||||
obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
|
||||
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
|
||||
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
|
||||
@ -80,6 +80,7 @@ obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
|
||||
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
|
||||
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
|
||||
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
|
||||
obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
|
||||
obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
|
||||
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
|
||||
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
|
||||
|
@@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
 
 	nb_cpus = num_possible_cpus();
 	freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+	if (!freq_tables)
+		return -ENOMEM;
 	cpumask_copy(&cpus, cpu_possible_mask);
 
 	/*
@@ -101,12 +101,15 @@ static const struct of_device_id whitelist[] __initconst = {
  * platforms using "operating-points-v2" property.
  */
 static const struct of_device_id blacklist[] __initconst = {
+	{ .compatible = "allwinner,sun50i-h6", },
+
 	{ .compatible = "calxeda,highbank", },
 	{ .compatible = "calxeda,ecx-2000", },
 
 	{ .compatible = "fsl,imx7d", },
 	{ .compatible = "fsl,imx8mq", },
 	{ .compatible = "fsl,imx8mm", },
+	{ .compatible = "fsl,imx8mn", },
 
 	{ .compatible = "marvell,armadaxp", },
 
@@ -117,12 +120,14 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "mediatek,mt817x", },
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
+	{ .compatible = "mediatek,mt8183", },
 
 	{ .compatible = "nvidia,tegra124", },
 	{ .compatible = "nvidia,tegra210", },
 
 	{ .compatible = "qcom,apq8096", },
 	{ .compatible = "qcom,msm8996", },
+	{ .compatible = "qcom,qcs404", },
 
 	{ .compatible = "st,stih407", },
 	{ .compatible = "st,stih410", },
@ -1266,7 +1266,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
|
||||
DEV_PM_QOS_MAX_FREQUENCY);
|
||||
dev_pm_qos_remove_notifier(dev, &policy->nb_min,
|
||||
DEV_PM_QOS_MIN_FREQUENCY);
|
||||
dev_pm_qos_remove_request(policy->max_freq_req);
|
||||
|
||||
if (policy->max_freq_req) {
|
||||
/*
|
||||
* CPUFREQ_CREATE_POLICY notification is sent only after
|
||||
* successfully adding max_freq_req request.
|
||||
*/
|
||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||
CPUFREQ_REMOVE_POLICY, policy);
|
||||
dev_pm_qos_remove_request(policy->max_freq_req);
|
||||
}
|
||||
|
||||
dev_pm_qos_remove_request(policy->min_freq_req);
|
||||
kfree(policy->min_freq_req);
|
||||
|
||||
@ -1391,6 +1401,9 @@ static int cpufreq_online(unsigned int cpu)
|
||||
ret);
|
||||
goto out_destroy_policy;
|
||||
}
|
||||
|
||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||
CPUFREQ_CREATE_POLICY, policy);
|
||||
}
|
||||
|
||||
if (cpufreq_driver->get && has_target()) {
|
||||
@ -1807,8 +1820,8 @@ void cpufreq_suspend(void)
|
||||
}
|
||||
|
||||
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
|
||||
pr_err("%s: Failed to suspend driver: %p\n", __func__,
|
||||
policy);
|
||||
pr_err("%s: Failed to suspend driver: %s\n", __func__,
|
||||
cpufreq_driver->name);
|
||||
}
|
||||
|
||||
suspend:
|
||||
@ -2140,7 +2153,7 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
int ret;
|
||||
|
||||
down_write(&policy->rwsem);
|
||||
|
||||
@ -2347,15 +2360,13 @@ EXPORT_SYMBOL(cpufreq_get_policy);
|
||||
* @policy: Policy object to modify.
|
||||
* @new_policy: New policy data.
|
||||
*
|
||||
* Pass @new_policy to the cpufreq driver's ->verify() callback, run the
|
||||
* installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
|
||||
* the driver's ->verify() callback again and run the notifiers for it again
|
||||
* with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
|
||||
* of @new_policy to @policy and either invoke the driver's ->setpolicy()
|
||||
* callback (if present) or carry out a governor update for @policy. That is,
|
||||
* run the current governor's ->limits() callback (if the governor field in
|
||||
* @new_policy points to the same object as the one in @policy) or replace the
|
||||
* governor for @policy with the new one stored in @new_policy.
|
||||
* Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
|
||||
* min and max parameters of @new_policy to @policy and either invoke the
|
||||
* driver's ->setpolicy() callback (if present) or carry out a governor update
|
||||
* for @policy. That is, run the current governor's ->limits() callback (if the
|
||||
* governor field in @new_policy points to the same object as the one in
|
||||
* @policy) or replace the governor for @policy with the new one stored in
|
||||
* @new_policy.
|
||||
*
|
||||
* The cpuinfo part of @policy is not updated by this function.
|
||||
*/
|
||||
@ -2383,26 +2394,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* The notifier-chain shall be removed once all the users of
|
||||
* CPUFREQ_ADJUST are moved to use the QoS framework.
|
||||
*/
|
||||
/* adjust if necessary - all reasons */
|
||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||
CPUFREQ_ADJUST, new_policy);
|
||||
|
||||
/*
|
||||
* verify the cpu speed can be set within this limit, which might be
|
||||
* different to the first one
|
||||
*/
|
||||
ret = cpufreq_driver->verify(new_policy);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* notification of the new policy */
|
||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||
CPUFREQ_NOTIFY, new_policy);
|
||||
|
||||
policy->min = new_policy->min;
|
||||
policy->max = new_policy->max;
|
||||
trace_cpu_frequency_limits(policy);
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
|
||||
#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
|
||||
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
|
||||
#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
|
||||
#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
|
||||
|
||||
@ -34,7 +35,12 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
|
||||
if (of_machine_is_compatible("fsl,imx8mn"))
|
||||
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
|
||||
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
|
||||
else
|
||||
speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
|
||||
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
|
||||
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
|
||||
|
||||
/*
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/fs.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <trace/events/power.h>
|
||||
|
||||
#include <asm/div64.h>
|
||||
@ -1085,6 +1086,47 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct cpufreq_driver intel_pstate;
|
||||
|
||||
static void update_qos_request(enum dev_pm_qos_req_type type)
|
||||
{
|
||||
int max_state, turbo_max, freq, i, perf_pct;
|
||||
struct dev_pm_qos_request *req;
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct cpudata *cpu = all_cpu_data[i];
|
||||
|
||||
policy = cpufreq_cpu_get(i);
|
||||
if (!policy)
|
||||
continue;
|
||||
|
||||
req = policy->driver_data;
|
||||
cpufreq_cpu_put(policy);
|
||||
|
||||
if (!req)
|
||||
continue;
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
|
||||
else
|
||||
turbo_max = cpu->pstate.turbo_pstate;
|
||||
|
||||
if (type == DEV_PM_QOS_MIN_FREQUENCY) {
|
||||
perf_pct = global.min_perf_pct;
|
||||
} else {
|
||||
req++;
|
||||
perf_pct = global.max_perf_pct;
|
||||
}
|
||||
|
||||
freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
|
||||
freq *= cpu->pstate.scaling;
|
||||
|
||||
if (dev_pm_qos_update_request(req, freq) < 0)
|
||||
pr_warn("Failed to update freq constraint: CPU%d\n", i);
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
@ -1108,7 +1150,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
if (intel_pstate_driver == &intel_pstate)
|
||||
intel_pstate_update_policies();
|
||||
else
|
||||
update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
@ -1139,7 +1184,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
if (intel_pstate_driver == &intel_pstate)
|
||||
intel_pstate_update_policies();
|
||||
else
|
||||
update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
@ -2332,8 +2380,16 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
|
||||
|
||||
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int ret = __intel_pstate_cpu_init(policy);
|
||||
int max_state, turbo_max, min_freq, max_freq, ret;
|
||||
struct dev_pm_qos_request *req;
|
||||
struct cpudata *cpu;
|
||||
struct device *dev;
|
||||
|
||||
dev = get_cpu_device(policy->cpu);
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
ret = __intel_pstate_cpu_init(policy);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2342,7 +2398,63 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
/* This reflects the intel_pstate_get_cpu_pstates() setting. */
|
||||
policy->cur = policy->cpuinfo.min_freq;
|
||||
|
||||
req = kcalloc(2, sizeof(*req), GFP_KERNEL);
|
||||
if (!req) {
|
||||
ret = -ENOMEM;
|
||||
goto pstate_exit;
|
||||
}
|
||||
|
||||
cpu = all_cpu_data[policy->cpu];
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
|
||||
else
|
||||
turbo_max = cpu->pstate.turbo_pstate;
|
||||
|
||||
min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
|
||||
min_freq *= cpu->pstate.scaling;
|
||||
max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
|
||||
max_freq *= cpu->pstate.scaling;
|
||||
|
||||
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
|
||||
min_freq);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
|
||||
goto free_req;
|
||||
}
|
||||
|
||||
ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
|
||||
max_freq);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
|
||||
goto remove_min_req;
|
||||
}
|
||||
|
||||
policy->driver_data = req;
|
||||
|
||||
return 0;
|
||||
|
||||
remove_min_req:
|
||||
dev_pm_qos_remove_request(req);
|
||||
free_req:
|
||||
kfree(req);
|
||||
pstate_exit:
|
||||
intel_pstate_exit_perf_limits(policy);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct dev_pm_qos_request *req;
|
||||
|
||||
req = policy->driver_data;
|
||||
|
||||
dev_pm_qos_remove_request(req + 1);
|
||||
dev_pm_qos_remove_request(req);
|
||||
kfree(req);
|
||||
|
||||
return intel_pstate_cpu_exit(policy);
|
||||
}
|
||||
|
||||
static struct cpufreq_driver intel_cpufreq = {
|
||||
@ -2351,7 +2463,7 @@ static struct cpufreq_driver intel_cpufreq = {
|
||||
.target = intel_cpufreq_target,
|
||||
.fast_switch = intel_cpufreq_fast_switch,
|
||||
.init = intel_cpufreq_cpu_init,
|
||||
.exit = intel_pstate_cpu_exit,
|
||||
.exit = intel_cpufreq_cpu_exit,
|
||||
.stop_cpu = intel_cpufreq_stop_cpu,
|
||||
.update_limits = intel_pstate_update_limits,
|
||||
.name = "intel_cpufreq",
|
||||
|
@ -338,7 +338,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
|
||||
goto out_free_resources;
|
||||
}
|
||||
|
||||
proc_reg = regulator_get_exclusive(cpu_dev, "proc");
|
||||
proc_reg = regulator_get_optional(cpu_dev, "proc");
|
||||
if (IS_ERR(proc_reg)) {
|
||||
if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
|
||||
pr_warn("proc regulator for cpu%d not ready, retry.\n",
|
||||
@ -535,6 +535,8 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
|
||||
{ .compatible = "mediatek,mt817x", },
|
||||
{ .compatible = "mediatek,mt8173", },
|
||||
{ .compatible = "mediatek,mt8176", },
|
||||
{ .compatible = "mediatek,mt8183", },
|
||||
{ .compatible = "mediatek,mt8516", },
|
||||
|
||||
{ }
|
||||
};
|
||||
|
@ -110,6 +110,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
#endif
|
||||
|
||||
policy->freq_table = cbe_freqs;
|
||||
cbe_cpufreq_pmi_policy_init(policy);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
cbe_cpufreq_pmi_policy_exit(policy);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -129,6 +136,7 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
|
||||
.verify = cpufreq_generic_frequency_table_verify,
|
||||
.target_index = cbe_cpufreq_target,
|
||||
.init = cbe_cpufreq_cpu_init,
|
||||
.exit = cbe_cpufreq_cpu_exit,
|
||||
.name = "cbe-cpufreq",
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
};
|
||||
@ -139,15 +147,24 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
|
||||
|
||||
static int __init cbe_cpufreq_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!machine_is(cell))
|
||||
return -ENODEV;
|
||||
|
||||
return cpufreq_register_driver(&cbe_cpufreq_driver);
|
||||
cbe_cpufreq_pmi_init();
|
||||
|
||||
ret = cpufreq_register_driver(&cbe_cpufreq_driver);
|
||||
if (ret)
|
||||
cbe_cpufreq_pmi_exit();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit cbe_cpufreq_exit(void)
|
||||
{
|
||||
cpufreq_unregister_driver(&cbe_cpufreq_driver);
|
||||
cbe_cpufreq_pmi_exit();
|
||||
}
|
||||
|
||||
module_init(cbe_cpufreq_init);
|
||||
|
@ -20,6 +20,14 @@ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
|
||||
|
||||
#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
|
||||
extern bool cbe_cpufreq_has_pmi;
|
||||
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
|
||||
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
|
||||
void cbe_cpufreq_pmi_init(void);
|
||||
void cbe_cpufreq_pmi_exit(void);
|
||||
#else
|
||||
#define cbe_cpufreq_has_pmi (0)
|
||||
static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
|
||||
static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
|
||||
static inline void cbe_cpufreq_pmi_init(void) {}
|
||||
static inline void cbe_cpufreq_pmi_exit(void) {}
|
||||
#endif
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/timer.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pm_qos.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/prom.h>
|
||||
@ -24,8 +25,6 @@
|
||||
|
||||
#include "ppc_cbe_cpufreq.h"
|
||||
|
||||
static u8 pmi_slow_mode_limit[MAX_CBE];
|
||||
|
||||
bool cbe_cpufreq_has_pmi = false;
|
||||
EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
|
||||
|
||||
@ -65,64 +64,89 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
|
||||
|
||||
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
|
||||
{
|
||||
struct cpufreq_policy *policy;
|
||||
struct dev_pm_qos_request *req;
|
||||
u8 node, slow_mode;
|
||||
int cpu, ret;
|
||||
|
||||
BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
|
||||
|
||||
node = pmi_msg.data1;
|
||||
slow_mode = pmi_msg.data2;
|
||||
|
||||
pmi_slow_mode_limit[node] = slow_mode;
|
||||
cpu = cbe_node_to_cpu(node);
|
||||
|
||||
pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
|
||||
}
|
||||
|
||||
static int pmi_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
struct cpufreq_policy *policy = data;
|
||||
struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
|
||||
u8 node;
|
||||
|
||||
/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
|
||||
* policy events?)
|
||||
*/
|
||||
node = cbe_cpu_to_node(policy->cpu);
|
||||
|
||||
pr_debug("got notified, event=%lu, node=%u\n", event, node);
|
||||
|
||||
if (pmi_slow_mode_limit[node] != 0) {
|
||||
pr_debug("limiting node %d to slow mode %d\n",
|
||||
node, pmi_slow_mode_limit[node]);
|
||||
|
||||
cpufreq_verify_within_limits(policy, 0,
|
||||
|
||||
cbe_freqs[pmi_slow_mode_limit[node]].frequency);
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (!policy) {
|
||||
pr_warn("cpufreq policy not found cpu%d\n", cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
req = policy->driver_data;
|
||||
|
||||
static struct notifier_block pmi_notifier_block = {
|
||||
.notifier_call = pmi_notifier,
|
||||
};
|
||||
ret = dev_pm_qos_update_request(req,
|
||||
policy->freq_table[slow_mode].frequency);
|
||||
if (ret < 0)
|
||||
pr_warn("Failed to update freq constraint: %d\n", ret);
|
||||
else
|
||||
pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
|
||||
static struct pmi_handler cbe_pmi_handler = {
|
||||
.type = PMI_TYPE_FREQ_CHANGE,
|
||||
.handle_pmi_message = cbe_cpufreq_handle_pmi,
|
||||
};
|
||||
|
||||
|
||||
|
||||
static int __init cbe_cpufreq_pmi_init(void)
|
||||
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
|
||||
struct dev_pm_qos_request *req;
|
||||
int ret;
|
||||
|
||||
if (!cbe_cpufreq_has_pmi)
|
||||
return -ENODEV;
|
||||
return;
|
||||
|
||||
cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
|
||||
req = kzalloc(sizeof(*req), GFP_KERNEL);
|
||||
if (!req)
|
||||
return;
|
||||
|
||||
return 0;
|
||||
ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
|
||||
DEV_PM_QOS_MAX_FREQUENCY,
|
||||
policy->freq_table[0].frequency);
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to add freq constraint (%d)\n", ret);
|
||||
kfree(req);
|
||||
return;
|
||||
}
|
||||
|
||||
policy->driver_data = req;
|
||||
}
|
||||
device_initcall(cbe_cpufreq_pmi_init);
|
||||
EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
|
||||
|
||||
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct dev_pm_qos_request *req = policy->driver_data;
|
||||
|
||||
if (cbe_cpufreq_has_pmi) {
|
||||
dev_pm_qos_remove_request(req);
|
||||
kfree(req);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
|
||||
|
||||
void cbe_cpufreq_pmi_init(void)
|
||||
{
|
||||
if (!pmi_register_handler(&cbe_pmi_handler))
|
||||
cbe_cpufreq_has_pmi = true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
|
||||
|
||||
void cbe_cpufreq_pmi_exit(void)
|
||||
{
|
||||
pmi_unregister_handler(&cbe_pmi_handler);
|
||||
cbe_cpufreq_has_pmi = false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
|
||||
|
@ -20,6 +20,7 @@
|
||||
#define LUT_VOLT GENMASK(11, 0)
|
||||
#define LUT_ROW_SIZE 32
|
||||
#define CLK_HW_DIV 2
|
||||
#define LUT_TURBO_IND 1
|
||||
|
||||
/* Register offsets */
|
||||
#define REG_ENABLE 0x0
|
||||
@ -34,9 +35,12 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
|
||||
unsigned int index)
|
||||
{
|
||||
void __iomem *perf_state_reg = policy->driver_data;
|
||||
unsigned long freq = policy->freq_table[index].frequency;
|
||||
|
||||
writel_relaxed(index, perf_state_reg);
|
||||
|
||||
arch_set_freq_scale(policy->related_cpus, freq,
|
||||
policy->cpuinfo.max_freq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -63,6 +67,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
|
||||
{
|
||||
void __iomem *perf_state_reg = policy->driver_data;
|
||||
int index;
|
||||
unsigned long freq;
|
||||
|
||||
index = policy->cached_resolved_idx;
|
||||
if (index < 0)
|
||||
@ -70,16 +75,19 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
|
||||
|
||||
writel_relaxed(index, perf_state_reg);
|
||||
|
||||
return policy->freq_table[index].frequency;
|
||||
freq = policy->freq_table[index].frequency;
|
||||
arch_set_freq_scale(policy->related_cpus, freq,
|
||||
policy->cpuinfo.max_freq);
|
||||
|
||||
return freq;
|
||||
}
|
||||
|
||||
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
|
||||
struct cpufreq_policy *policy,
|
||||
void __iomem *base)
|
||||
{
|
||||
u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
|
||||
u32 data, src, lval, i, core_count, prev_freq = 0, freq;
|
||||
u32 volt;
|
||||
unsigned int max_cores = cpumask_weight(policy->cpus);
|
||||
struct cpufreq_frequency_table *table;
|
||||
|
||||
table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
|
||||
@ -102,12 +110,12 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
|
||||
else
|
||||
freq = cpu_hw_rate / 1000;
|
||||
|
||||
if (freq != prev_freq && core_count == max_cores) {
|
||||
if (freq != prev_freq && core_count != LUT_TURBO_IND) {
|
||||
table[i].frequency = freq;
|
||||
dev_pm_opp_add(cpu_dev, freq * 1000, volt);
|
||||
dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
|
||||
freq, core_count);
|
||||
} else {
|
||||
} else if (core_count == LUT_TURBO_IND) {
|
||||
table[i].frequency = CPUFREQ_ENTRY_INVALID;
|
||||
}
|
||||
|
||||
@ -115,14 +123,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
|
||||
* Two of the same frequencies with the same core counts means
|
||||
* end of table
|
||||
*/
|
||||
if (i > 0 && prev_freq == freq && prev_cc == core_count) {
|
||||
if (i > 0 && prev_freq == freq) {
|
||||
struct cpufreq_frequency_table *prev = &table[i - 1];
|
||||
|
||||
/*
|
||||
* Only treat the last frequency that might be a boost
|
||||
* as the boost frequency
|
||||
*/
|
||||
if (prev_cc != max_cores) {
|
||||
if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
|
||||
prev->frequency = prev_freq;
|
||||
prev->flags = CPUFREQ_BOOST_FREQ;
|
||||
dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
|
||||
@ -131,7 +139,6 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
|
||||
break;
|
||||
}
|
||||
|
||||
prev_cc = core_count;
|
||||
prev_freq = freq;
|
||||
}
|
||||
|
||||
|
@ -1,249 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
/*
|
||||
* In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
|
||||
* the CPU frequency subset and voltage value of each OPP varies
|
||||
* based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
|
||||
* defines the voltage and frequency value based on the msm-id in SMEM
|
||||
* and speedbin blown in the efuse combination.
|
||||
* The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
|
||||
* to provide the OPP framework with required information.
|
||||
* This is used to determine the voltage and frequency value for each OPP of
|
||||
* operating-points-v2 table when it is parsed by the OPP framework.
|
||||
*/
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/nvmem-consumer.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/soc/qcom/smem.h>
|
||||
|
||||
#define MSM_ID_SMEM 137
|
||||
|
||||
enum _msm_id {
|
||||
MSM8996V3 = 0xF6ul,
|
||||
APQ8096V3 = 0x123ul,
|
||||
MSM8996SG = 0x131ul,
|
||||
APQ8096SG = 0x138ul,
|
||||
};
|
||||
|
||||
enum _msm8996_version {
|
||||
MSM8996_V3,
|
||||
MSM8996_SG,
|
||||
NUM_OF_MSM8996_VERSIONS,
|
||||
};
|
||||
|
||||
static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
|
||||
|
||||
static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
|
||||
{
|
||||
size_t len;
|
||||
u32 *msm_id;
|
||||
enum _msm8996_version version;
|
||||
|
||||
msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
|
||||
if (IS_ERR(msm_id))
|
||||
return NUM_OF_MSM8996_VERSIONS;
|
||||
|
||||
/* The first 4 bytes are format, next to them is the actual msm-id */
|
||||
msm_id++;
|
||||
|
||||
switch ((enum _msm_id)*msm_id) {
|
||||
case MSM8996V3:
|
||||
case APQ8096V3:
|
||||
version = MSM8996_V3;
|
||||
break;
|
||||
case MSM8996SG:
|
||||
case APQ8096SG:
|
||||
version = MSM8996_SG;
|
||||
break;
|
||||
default:
|
||||
version = NUM_OF_MSM8996_VERSIONS;
|
||||
}
|
||||
|
||||
return version;
|
||||
}
|
||||
|
||||
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct opp_table **opp_tables;
|
||||
enum _msm8996_version msm8996_version;
|
||||
struct nvmem_cell *speedbin_nvmem;
|
||||
struct device_node *np;
|
||||
struct device *cpu_dev;
|
||||
unsigned cpu;
|
||||
u8 *speedbin;
|
||||
u32 versions;
|
||||
size_t len;
|
||||
int ret;
|
||||
|
||||
cpu_dev = get_cpu_device(0);
|
||||
if (!cpu_dev)
|
||||
return -ENODEV;
|
||||
|
||||
msm8996_version = qcom_cpufreq_kryo_get_msm_id();
|
||||
	if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
		dev_err(cpu_dev, "Not Snapdragon 820/821!");
		return -ENODEV;
	}

	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np)
		return -ENOENT;

	ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
	if (!ret) {
		of_node_put(np);
		return -ENOENT;
	}

	speedbin_nvmem = of_nvmem_cell_get(np, NULL);
	of_node_put(np);
	if (IS_ERR(speedbin_nvmem)) {
		if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
			dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
				PTR_ERR(speedbin_nvmem));
		return PTR_ERR(speedbin_nvmem);
	}

	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
	nvmem_cell_put(speedbin_nvmem);
	if (IS_ERR(speedbin))
		return PTR_ERR(speedbin);

	switch (msm8996_version) {
	case MSM8996_V3:
		versions = 1 << (unsigned int)(*speedbin);
		break;
	case MSM8996_SG:
		versions = 1 << ((unsigned int)(*speedbin) + 4);
		break;
	default:
		BUG();
		break;
	}
	kfree(speedbin);

	opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
	if (!opp_tables)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_dev = get_cpu_device(cpu);
		if (NULL == cpu_dev) {
			ret = -ENODEV;
			goto free_opp;
		}

		opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev,
							      &versions, 1);
		if (IS_ERR(opp_tables[cpu])) {
			ret = PTR_ERR(opp_tables[cpu]);
			dev_err(cpu_dev, "Failed to set supported hardware\n");
			goto free_opp;
		}
	}

	cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
							  NULL, 0);
	if (!IS_ERR(cpufreq_dt_pdev)) {
		platform_set_drvdata(pdev, opp_tables);
		return 0;
	}

	ret = PTR_ERR(cpufreq_dt_pdev);
	dev_err(cpu_dev, "Failed to register platform device\n");

free_opp:
	for_each_possible_cpu(cpu) {
		if (IS_ERR_OR_NULL(opp_tables[cpu]))
			break;
		dev_pm_opp_put_supported_hw(opp_tables[cpu]);
	}
	kfree(opp_tables);

	return ret;
}

static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
{
	struct opp_table **opp_tables = platform_get_drvdata(pdev);
	unsigned int cpu;

	platform_device_unregister(cpufreq_dt_pdev);

	for_each_possible_cpu(cpu)
		dev_pm_opp_put_supported_hw(opp_tables[cpu]);

	kfree(opp_tables);

	return 0;
}

static struct platform_driver qcom_cpufreq_kryo_driver = {
	.probe = qcom_cpufreq_kryo_probe,
	.remove = qcom_cpufreq_kryo_remove,
	.driver = {
		.name = "qcom-cpufreq-kryo",
	},
};

static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
	{ .compatible = "qcom,apq8096", },
	{ .compatible = "qcom,msm8996", },
	{}
};

/*
 * Since the driver depends on smem and nvmem drivers, which may
 * return EPROBE_DEFER, all the real activity is done in the probe,
 * which may be defered as well. The init here is only registering
 * the driver and the platform device.
 */
static int __init qcom_cpufreq_kryo_init(void)
{
	struct device_node *np = of_find_node_by_path("/");
	const struct of_device_id *match;
	int ret;

	if (!np)
		return -ENODEV;

	match = of_match_node(qcom_cpufreq_kryo_match_list, np);
	of_node_put(np);
	if (!match)
		return -ENODEV;

	ret = platform_driver_register(&qcom_cpufreq_kryo_driver);
	if (unlikely(ret < 0))
		return ret;

	kryo_cpufreq_pdev = platform_device_register_simple(
		"qcom-cpufreq-kryo", -1, NULL, 0);
	ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
	if (0 == ret)
		return 0;

	platform_driver_unregister(&qcom_cpufreq_kryo_driver);
	return ret;
}
module_init(qcom_cpufreq_kryo_init);

static void __exit qcom_cpufreq_kryo_exit(void)
{
	platform_device_unregister(kryo_cpufreq_pdev);
	platform_driver_unregister(&qcom_cpufreq_kryo_driver);
}
module_exit(qcom_cpufreq_kryo_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
MODULE_LICENSE("GPL v2");
drivers/cpufreq/qcom-cpufreq-nvmem.c (new file, 352 lines)
@@ -0,0 +1,352 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

/*
 * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
 * the CPU frequency subset and voltage value of each OPP varies
 * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
 * defines the voltage and frequency value based on the msm-id in SMEM
 * and speedbin blown in the efuse combination.
 * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
 * to provide the OPP framework with required information.
 * This is used to determine the voltage and frequency value for each OPP of
 * operating-points-v2 table when it is parsed by the OPP framework.
 */

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

#define MSM_ID_SMEM	137

enum _msm_id {
	MSM8996V3 = 0xF6ul,
	APQ8096V3 = 0x123ul,
	MSM8996SG = 0x131ul,
	APQ8096SG = 0x138ul,
};

enum _msm8996_version {
	MSM8996_V3,
	MSM8996_SG,
	NUM_OF_MSM8996_VERSIONS,
};

struct qcom_cpufreq_drv;

struct qcom_cpufreq_match_data {
	int (*get_version)(struct device *cpu_dev,
			   struct nvmem_cell *speedbin_nvmem,
			   struct qcom_cpufreq_drv *drv);
	const char **genpd_names;
};

struct qcom_cpufreq_drv {
	struct opp_table **opp_tables;
	struct opp_table **genpd_opp_tables;
	u32 versions;
	const struct qcom_cpufreq_match_data *data;
};

static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;

static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
{
	size_t len;
	u32 *msm_id;
	enum _msm8996_version version;

	msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
	if (IS_ERR(msm_id))
		return NUM_OF_MSM8996_VERSIONS;

	/* The first 4 bytes are format, next to them is the actual msm-id */
	msm_id++;

	switch ((enum _msm_id)*msm_id) {
	case MSM8996V3:
	case APQ8096V3:
		version = MSM8996_V3;
		break;
	case MSM8996SG:
	case APQ8096SG:
		version = MSM8996_SG;
		break;
	default:
		version = NUM_OF_MSM8996_VERSIONS;
	}

	return version;
}

static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
					  struct nvmem_cell *speedbin_nvmem,
					  struct qcom_cpufreq_drv *drv)
{
	size_t len;
	u8 *speedbin;
	enum _msm8996_version msm8996_version;

	msm8996_version = qcom_cpufreq_get_msm_id();
	if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
		dev_err(cpu_dev, "Not Snapdragon 820/821!");
		return -ENODEV;
	}

	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
	if (IS_ERR(speedbin))
		return PTR_ERR(speedbin);

	switch (msm8996_version) {
	case MSM8996_V3:
		drv->versions = 1 << (unsigned int)(*speedbin);
		break;
	case MSM8996_SG:
		drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
		break;
	default:
		BUG();
		break;
	}

	kfree(speedbin);
	return 0;
}

static const struct qcom_cpufreq_match_data match_data_kryo = {
	.get_version = qcom_cpufreq_kryo_name_version,
};

static const char *qcs404_genpd_names[] = { "cpr", NULL };

static const struct qcom_cpufreq_match_data match_data_qcs404 = {
	.genpd_names = qcs404_genpd_names,
};

static int qcom_cpufreq_probe(struct platform_device *pdev)
{
	struct qcom_cpufreq_drv *drv;
	struct nvmem_cell *speedbin_nvmem;
	struct device_node *np;
	struct device *cpu_dev;
	unsigned cpu;
	const struct of_device_id *match;
	int ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -ENODEV;

	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np)
		return -ENOENT;

	ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
	if (!ret) {
		of_node_put(np);
		return -ENOENT;
	}

	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	match = pdev->dev.platform_data;
	drv->data = match->data;
	if (!drv->data) {
		ret = -ENODEV;
		goto free_drv;
	}

	if (drv->data->get_version) {
		speedbin_nvmem = of_nvmem_cell_get(np, NULL);
		if (IS_ERR(speedbin_nvmem)) {
			if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
				dev_err(cpu_dev,
					"Could not get nvmem cell: %ld\n",
					PTR_ERR(speedbin_nvmem));
			ret = PTR_ERR(speedbin_nvmem);
			goto free_drv;
		}

		ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv);
		if (ret) {
			nvmem_cell_put(speedbin_nvmem);
			goto free_drv;
		}
		nvmem_cell_put(speedbin_nvmem);
	}
	of_node_put(np);

	drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
				  GFP_KERNEL);
	if (!drv->opp_tables) {
		ret = -ENOMEM;
		goto free_drv;
	}

	drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
					sizeof(*drv->genpd_opp_tables),
					GFP_KERNEL);
	if (!drv->genpd_opp_tables) {
		ret = -ENOMEM;
		goto free_opp;
	}

	for_each_possible_cpu(cpu) {
		cpu_dev = get_cpu_device(cpu);
		if (NULL == cpu_dev) {
			ret = -ENODEV;
			goto free_genpd_opp;
		}

		if (drv->data->get_version) {
			drv->opp_tables[cpu] =
				dev_pm_opp_set_supported_hw(cpu_dev,
							    &drv->versions, 1);
			if (IS_ERR(drv->opp_tables[cpu])) {
				ret = PTR_ERR(drv->opp_tables[cpu]);
				dev_err(cpu_dev,
					"Failed to set supported hardware\n");
				goto free_genpd_opp;
			}
		}

		if (drv->data->genpd_names) {
			drv->genpd_opp_tables[cpu] =
				dev_pm_opp_attach_genpd(cpu_dev,
							drv->data->genpd_names,
							NULL);
			if (IS_ERR(drv->genpd_opp_tables[cpu])) {
				ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
				if (ret != -EPROBE_DEFER)
					dev_err(cpu_dev,
						"Could not attach to pm_domain: %d\n",
						ret);
				goto free_genpd_opp;
			}
		}
	}

	cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
							  NULL, 0);
	if (!IS_ERR(cpufreq_dt_pdev)) {
		platform_set_drvdata(pdev, drv);
		return 0;
	}

	ret = PTR_ERR(cpufreq_dt_pdev);
	dev_err(cpu_dev, "Failed to register platform device\n");

free_genpd_opp:
	for_each_possible_cpu(cpu) {
		if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
			break;
		dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
	}
	kfree(drv->genpd_opp_tables);
free_opp:
	for_each_possible_cpu(cpu) {
		if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
			break;
		dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
	}
	kfree(drv->opp_tables);
free_drv:
	kfree(drv);

	return ret;
}

static int qcom_cpufreq_remove(struct platform_device *pdev)
{
	struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
	unsigned int cpu;

	platform_device_unregister(cpufreq_dt_pdev);

	for_each_possible_cpu(cpu) {
		if (drv->opp_tables[cpu])
			dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
		if (drv->genpd_opp_tables[cpu])
			dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
	}

	kfree(drv->opp_tables);
	kfree(drv->genpd_opp_tables);
	kfree(drv);

	return 0;
}

static struct platform_driver qcom_cpufreq_driver = {
	.probe = qcom_cpufreq_probe,
	.remove = qcom_cpufreq_remove,
	.driver = {
		.name = "qcom-cpufreq-nvmem",
	},
};

static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
	{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
	{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
	{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
	{},
};

/*
 * Since the driver depends on smem and nvmem drivers, which may
 * return EPROBE_DEFER, all the real activity is done in the probe,
 * which may be defered as well. The init here is only registering
 * the driver and the platform device.
 */
static int __init qcom_cpufreq_init(void)
{
	struct device_node *np = of_find_node_by_path("/");
	const struct of_device_id *match;
	int ret;

	if (!np)
		return -ENODEV;

	match = of_match_node(qcom_cpufreq_match_list, np);
	of_node_put(np);
	if (!match)
		return -ENODEV;

	ret = platform_driver_register(&qcom_cpufreq_driver);
	if (unlikely(ret < 0))
		return ret;

	cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem",
						     -1, match, sizeof(*match));
	ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
	if (0 == ret)
		return 0;

	platform_driver_unregister(&qcom_cpufreq_driver);
	return ret;
}
module_init(qcom_cpufreq_init);

static void __exit qcom_cpufreq_exit(void)
{
	platform_device_unregister(cpufreq_pdev);
	platform_driver_unregister(&qcom_cpufreq_driver);
}
module_exit(qcom_cpufreq_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver");
MODULE_LICENSE("GPL v2");
drivers/cpufreq/sun50i-cpufreq-nvmem.c (new file, 226 lines)
@@ -0,0 +1,226 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Allwinner CPUFreq nvmem based driver
 *
 * The sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
 * provide the OPP framework with required information.
 *
 * Copyright (C) 2019 Yangtao Li <tiny.windzz@gmail.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

#define MAX_NAME_LEN	7

#define NVMEM_MASK	0x7
#define NVMEM_SHIFT	5

static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;

/**
 * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
 * @versions: Set to the value parsed from efuse
 *
 * Returns 0 if success.
 */
static int sun50i_cpufreq_get_efuse(u32 *versions)
{
	struct nvmem_cell *speedbin_nvmem;
	struct device_node *np;
	struct device *cpu_dev;
	u32 *speedbin, efuse_value;
	size_t len;
	int ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -ENODEV;

	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np)
		return -ENOENT;

	ret = of_device_is_compatible(np,
				      "allwinner,sun50i-h6-operating-points");
	if (!ret) {
		of_node_put(np);
		return -ENOENT;
	}

	speedbin_nvmem = of_nvmem_cell_get(np, NULL);
	of_node_put(np);
	if (IS_ERR(speedbin_nvmem)) {
		if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
			pr_err("Could not get nvmem cell: %ld\n",
			       PTR_ERR(speedbin_nvmem));
		return PTR_ERR(speedbin_nvmem);
	}

	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
	nvmem_cell_put(speedbin_nvmem);
	if (IS_ERR(speedbin))
		return PTR_ERR(speedbin);

	efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
	switch (efuse_value) {
	case 0b0001:
		*versions = 1;
		break;
	case 0b0011:
		*versions = 2;
		break;
	default:
		/*
		 * For other situations, we treat it as bin0.
		 * This vf table can be run for any good cpu.
		 */
		*versions = 0;
		break;
	}

	kfree(speedbin);
	return 0;
};

static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
	struct opp_table **opp_tables;
	char name[MAX_NAME_LEN];
	unsigned int cpu;
	u32 speed = 0;
	int ret;

	opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
			     GFP_KERNEL);
	if (!opp_tables)
		return -ENOMEM;

	ret = sun50i_cpufreq_get_efuse(&speed);
	if (ret)
		return ret;

	snprintf(name, MAX_NAME_LEN, "speed%d", speed);

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);

		if (!cpu_dev) {
			ret = -ENODEV;
			goto free_opp;
		}

		opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
		if (IS_ERR(opp_tables[cpu])) {
			ret = PTR_ERR(opp_tables[cpu]);
			pr_err("Failed to set prop name\n");
			goto free_opp;
		}
	}

	cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
							  NULL, 0);
	if (!IS_ERR(cpufreq_dt_pdev)) {
		platform_set_drvdata(pdev, opp_tables);
		return 0;
	}

	ret = PTR_ERR(cpufreq_dt_pdev);
	pr_err("Failed to register platform device\n");

free_opp:
	for_each_possible_cpu(cpu) {
		if (IS_ERR_OR_NULL(opp_tables[cpu]))
			break;
		dev_pm_opp_put_prop_name(opp_tables[cpu]);
	}
	kfree(opp_tables);

	return ret;
}

static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
	struct opp_table **opp_tables = platform_get_drvdata(pdev);
	unsigned int cpu;

	platform_device_unregister(cpufreq_dt_pdev);

	for_each_possible_cpu(cpu)
		dev_pm_opp_put_prop_name(opp_tables[cpu]);

	kfree(opp_tables);

	return 0;
}

static struct platform_driver sun50i_cpufreq_driver = {
	.probe = sun50i_cpufreq_nvmem_probe,
	.remove = sun50i_cpufreq_nvmem_remove,
	.driver = {
		.name = "sun50i-cpufreq-nvmem",
	},
};

static const struct of_device_id sun50i_cpufreq_match_list[] = {
	{ .compatible = "allwinner,sun50i-h6" },
	{}
};

static const struct of_device_id *sun50i_cpufreq_match_node(void)
{
	const struct of_device_id *match;
	struct device_node *np;

	np = of_find_node_by_path("/");
	match = of_match_node(sun50i_cpufreq_match_list, np);
	of_node_put(np);

	return match;
}

/*
 * Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
 * all the real activity is done in the probe, which may be defered as well.
 * The init here is only registering the driver and the platform device.
 */
static int __init sun50i_cpufreq_init(void)
{
	const struct of_device_id *match;
	int ret;

	match = sun50i_cpufreq_match_node();
	if (!match)
		return -ENODEV;

	ret = platform_driver_register(&sun50i_cpufreq_driver);
	if (unlikely(ret < 0))
		return ret;

	sun50i_cpufreq_pdev =
		platform_device_register_simple("sun50i-cpufreq-nvmem",
						-1, NULL, 0);
	ret = PTR_ERR_OR_ZERO(sun50i_cpufreq_pdev);
	if (ret == 0)
		return 0;

	platform_driver_unregister(&sun50i_cpufreq_driver);
	return ret;
}
module_init(sun50i_cpufreq_init);

static void __exit sun50i_cpufreq_exit(void)
{
	platform_device_unregister(sun50i_cpufreq_pdev);
	platform_driver_unregister(&sun50i_cpufreq_driver);
}
module_exit(sun50i_cpufreq_exit);

MODULE_DESCRIPTION("Sun50i-h6 cpufreq driver");
MODULE_LICENSE("GPL v2");
@@ -77,6 +77,7 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
	case DRA7_EFUSE_HAS_ALL_MPU_OPP:
	case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
		calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
		/* Fall through */
	case DRA7_EFUSE_HAS_OD_MPU_OPP:
		calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
	}

@@ -3,9 +3,11 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>

#include <asm/prom.h>
@@ -16,36 +18,24 @@

static int clamped;
static struct wf_control *clamp_control;

static int clamp_notifier_call(struct notifier_block *self,
			       unsigned long event, void *data)
{
	struct cpufreq_policy *p = data;
	unsigned long max_freq;

	if (event != CPUFREQ_ADJUST)
		return 0;

	max_freq = clamped ? (p->cpuinfo.min_freq) : (p->cpuinfo.max_freq);
	cpufreq_verify_within_limits(p, 0, max_freq);

	return 0;
}

static struct notifier_block clamp_notifier = {
	.notifier_call = clamp_notifier_call,
};
static struct dev_pm_qos_request qos_req;
static unsigned int min_freq, max_freq;

static int clamp_set(struct wf_control *ct, s32 value)
{
	if (value)
	unsigned int freq;

	if (value) {
		freq = min_freq;
		printk(KERN_INFO "windfarm: Clamping CPU frequency to "
		       "minimum !\n");
	else
	} else {
		freq = max_freq;
		printk(KERN_INFO "windfarm: CPU frequency unclamped !\n");
	}
	clamped = value;
	cpufreq_update_policy(0);
	return 0;

	return dev_pm_qos_update_request(&qos_req, freq);
}

static int clamp_get(struct wf_control *ct, s32 *value)
@@ -74,27 +64,60 @@ static const struct wf_control_ops clamp_ops = {

static int __init wf_cpufreq_clamp_init(void)
{
	struct cpufreq_policy *policy;
	struct wf_control *clamp;
	struct device *dev;
	int ret;

	policy = cpufreq_cpu_get(0);
	if (!policy) {
		pr_warn("%s: cpufreq policy not found cpu0\n", __func__);
		return -EPROBE_DEFER;
	}

	min_freq = policy->cpuinfo.min_freq;
	max_freq = policy->cpuinfo.max_freq;
	cpufreq_cpu_put(policy);

	dev = get_cpu_device(0);
	if (unlikely(!dev)) {
		pr_warn("%s: No cpu device for cpu0\n", __func__);
		return -ENODEV;
	}

	clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
	if (clamp == NULL)
		return -ENOMEM;
	cpufreq_register_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);

	ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
				     max_freq);
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		goto free;
	}

	clamp->ops = &clamp_ops;
	clamp->name = "cpufreq-clamp";
	if (wf_register_control(clamp))
	ret = wf_register_control(clamp);
	if (ret)
		goto fail;
	clamp_control = clamp;
	return 0;
fail:
	dev_pm_qos_remove_request(&qos_req);

free:
	kfree(clamp);
	return -ENODEV;
	return ret;
}

static void __exit wf_cpufreq_clamp_exit(void)
{
	if (clamp_control)
	if (clamp_control) {
		wf_unregister_control(clamp_control);
		dev_pm_qos_remove_request(&qos_req);
	}
}

@@ -401,6 +401,54 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
@@ -1771,6 +1819,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
 * @dev: Consumer device for which the genpd is getting attached.
 * @names: Null terminated array of pointers containing names of genpd to attach.
 * @virt_devs: Pointer to return the array of virtual devices.
 *
 * Multiple generic power domains for a device are supported with the help of
 * virtual genpd devices, which are created for each consumer device - genpd
@@ -1784,12 +1833,16 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
 *
 * This helper needs to be called once with a list of all genpd to attach.
 * Otherwise the original device structure will be used instead by the OPP core.
 *
 * The order of entries in the names array must match the order in which
 * "required-opps" are added in DT.
 */
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names)
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
		const char **names, struct device ***virt_devs)
{
	struct opp_table *opp_table;
	struct device *virt_dev;
	int index, ret = -EINVAL;
	int index = 0, ret = -EINVAL;
	const char **name = names;

	opp_table = dev_pm_opp_get_opp_table(dev);
@@ -1815,14 +1868,6 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
		goto unlock;

	while (*name) {
		index = of_property_match_string(dev->of_node,
						 "power-domain-names", *name);
		if (index < 0) {
			dev_err(dev, "Failed to find power domain: %s (%d)\n",
				*name, index);
			goto err;
		}

		if (index >= opp_table->required_opp_count) {
			dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
				*name, opp_table->required_opp_count, index);
@@ -1843,9 +1888,12 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
		}

		opp_table->genpd_virt_devs[index] = virt_dev;
		index++;
		name++;
	}

	if (virt_devs)
		*virt_devs = opp_table->genpd_virt_devs;
	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return opp_table;
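The kerneldoc above spells out the contract for the new helper; a minimal caller sketch (not part of this series, the device pointer and the level value 3 are illustrative) looks up an OPP by exact performance level and drops the reference with dev_pm_opp_put(), as required:

	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_level_exact(dev, 3);	/* "dev" and level 3 are placeholders */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* ... use the OPP, e.g. dev_pm_opp_get_voltage(opp) ... */

	dev_pm_opp_put(opp);	/* callers must drop the reference after use */
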
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
@@ -66,8 +67,6 @@ struct time_in_idle {
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *	frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @freq_table: Freq table in descending order of frequencies
@@ -84,12 +83,12 @@ struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int clipped_freq;
	unsigned int max_level;
	struct freq_table *freq_table;	/* In descending order */
	struct cpufreq_policy *policy;
	struct list_head node;
	struct time_in_idle *idle_time;
	struct dev_pm_qos_request qos_req;
};

static DEFINE_IDA(cpufreq_ida);
@@ -118,59 +117,6 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
	return level - 1;
}

/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb:	struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: 0 (success)
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long clipped_freq;
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
		/*
		 * A new copy of the policy is sent to the notifier and can't
		 * compare that directly.
		 */
		if (policy->cpu != cpufreq_cdev->policy->cpu)
			continue;

		/*
		 * policy->max is the maximum allowed frequency defined by user
		 * and clipped_freq is the maximum that thermal constraints
		 * allow.
		 *
		 * If clipped_freq is lower than policy->max, then we need to
		 * readjust policy->max.
		 *
		 * But, if clipped_freq is greater than policy->max, we don't
		 * need to do anything.
		 */
		clipped_freq = cpufreq_cdev->clipped_freq;

		if (policy->max > clipped_freq)
			cpufreq_verify_within_limits(policy, 0, clipped_freq);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return NOTIFY_OK;
}

/**
 * update_freq_table() - Update the freq table with power numbers
 * @cpufreq_cdev:	the cpufreq cooling device in which to update the table
@@ -374,7 +320,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int clip_freq;

	/* Request state should be less than max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
@@ -384,13 +329,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	clip_freq = cpufreq_cdev->freq_table[state].frequency;
	cpufreq_cdev->cpufreq_state = state;
	cpufreq_cdev->clipped_freq = clip_freq;

	cpufreq_update_policy(cpufreq_cdev->policy->cpu);

	return 0;
	return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
				cpufreq_cdev->freq_table[state].frequency);
}

/**
@@ -554,11 +496,6 @@ static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
	.power2state		= cpufreq_power2state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

static unsigned int find_next_max(struct cpufreq_frequency_table *table,
				  unsigned int prev_max)
{
@@ -596,9 +533,16 @@ __cpufreq_cooling_register(struct device_node *np,
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int freq, i, num_cpus;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	bool first;

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}


	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
@@ -671,25 +615,29 @@ __cpufreq_cooling_register(struct device_node *np,
		cooling_ops = &cpufreq_cooling_ops;
	}

	ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     cpufreq_cdev->freq_table[0].frequency);
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto remove_ida;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_ida;

	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
		goto remove_qos_req;

	mutex_lock(&cooling_list_lock);
	/* Register the notifier for first cpufreq cooling device */
	first = list_empty(&cpufreq_cdev_list);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (first)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);

	return cdev;

remove_qos_req:
	dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
@@ -777,7 +725,6 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;
	bool last;

	if (!cdev)
		return;
@@ -786,15 +733,10 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_cdev->node);
	/* Unregister the notifier for the last cpufreq cooling device */
	last = list_empty(&cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (last)
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	thermal_cooling_device_unregister(cdev);
	dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
	kfree(cpufreq_cdev->idle_time);
	kfree(cpufreq_cdev->freq_table);
@@ -1678,24 +1678,6 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
	}
	return 0;
}

static int
pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
{
	struct pxafb_info *fbi = TO_INF(nb, freq_policy);
	struct fb_var_screeninfo *var = &fbi->fb.var;
	struct cpufreq_policy *policy = data;

	switch (val) {
	case CPUFREQ_ADJUST:
		pr_debug("min dma period: %d ps, "
			"new clock %d kHz\n", pxafb_display_dma_period(var),
			policy->max);
		/* TODO: fill in min/max values */
		break;
	}
	return 0;
}
#endif

#ifdef CONFIG_PM
@@ -2400,11 +2382,8 @@ static int pxafb_probe(struct platform_device *dev)

#ifdef CONFIG_CPU_FREQ
	fbi->freq_transition.notifier_call = pxafb_freq_transition;
	fbi->freq_policy.notifier_call = pxafb_freq_policy;
	cpufreq_register_notifier(&fbi->freq_transition,
				CPUFREQ_TRANSITION_NOTIFIER);
	cpufreq_register_notifier(&fbi->freq_policy,
				CPUFREQ_POLICY_NOTIFIER);
#endif

	/*
@@ -162,7 +162,6 @@ struct pxafb_info {

#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
	struct notifier_block	freq_policy;
#endif

	struct regulator *lcd_supply;
@@ -1005,31 +1005,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
	}
	return 0;
}

static int
sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct sa1100fb_info *fbi = TO_INF(nb, freq_policy);
	struct cpufreq_policy *policy = data;

	switch (val) {
	case CPUFREQ_ADJUST:
		dev_dbg(fbi->dev, "min dma period: %d ps, "
			"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
			policy->max);
		/* todo: fill in min/max values */
		break;
	case CPUFREQ_NOTIFY:
		do {} while(0);
		/* todo: panic if min/max values aren't fulfilled
		 * [can't really happen unless there's a bug in the
		 * CPU policy verififcation process *
		 */
		break;
	}
	return 0;
}
#endif

#ifdef CONFIG_PM
@@ -1242,9 +1217,7 @@ static int sa1100fb_probe(struct platform_device *pdev)

#ifdef CONFIG_CPU_FREQ
	fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
	fbi->freq_policy.notifier_call = sa1100fb_freq_policy;
	cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
	cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
#endif

	/* This driver cannot be unloaded at the moment */

@@ -64,7 +64,6 @@ struct sa1100fb_info {

#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
	struct notifier_block	freq_policy;
#endif

	const struct sa1100fb_mach_info *inf;
@@ -4,6 +4,8 @@

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
#include <linux/thermal.h>
#include <asm/acpi.h>

@@ -230,6 +232,8 @@ struct acpi_processor {
	struct acpi_processor_limit limit;
	struct thermal_cooling_device *cdev;
	struct device *dev; /* Processor device. */
	struct dev_pm_qos_request perflib_req;
	struct dev_pm_qos_request thermal_req;
};

struct acpi_processor_errata {
@@ -296,16 +300,22 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
/* in processor_perflib.c */

#ifdef CONFIG_CPU_FREQ
void acpi_processor_ppc_init(void);
void acpi_processor_ppc_exit(void);
extern bool acpi_processor_cpufreq_init;
void acpi_processor_ignore_ppc_init(void);
void acpi_processor_ppc_init(int cpu);
void acpi_processor_ppc_exit(int cpu);
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
static inline void acpi_processor_ppc_init(void)
static inline void acpi_processor_ignore_ppc_init(void)
{
	return;
}
static inline void acpi_processor_ppc_exit(void)
static inline void acpi_processor_ppc_init(int cpu)
{
	return;
}
static inline void acpi_processor_ppc_exit(int cpu)
{
	return;
}
@@ -421,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
void acpi_thermal_cpufreq_init(void);
void acpi_thermal_cpufreq_exit(void);
void acpi_thermal_cpufreq_init(int cpu);
void acpi_thermal_cpufreq_exit(int cpu);
#else
static inline void acpi_thermal_cpufreq_init(void)
static inline void acpi_thermal_cpufreq_init(int cpu)
{
	return;
}
static inline void acpi_thermal_cpufreq_exit(void)
static inline void acpi_thermal_cpufreq_exit(int cpu)
{
	return;
}
@@ -456,8 +456,8 @@ static inline void cpufreq_resume(void) {}
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_ADJUST			(0)
#define CPUFREQ_NOTIFY			(1)
#define CPUFREQ_CREATE_POLICY		(0)
#define CPUFREQ_REMOVE_POLICY		(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
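With only CPUFREQ_CREATE_POLICY and CPUFREQ_REMOVE_POLICY left, a policy notifier reduces to reacting to policy creation and removal. A hedged sketch (the callback name is hypothetical, not from this series) of a callback registered with cpufreq_register_notifier(&nb, CPUFREQ_POLICY_NOTIFIER):

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_CREATE_POLICY)
		pr_info("cpufreq policy created for CPU%u\n", policy->cpu);
	else if (event == CPUFREQ_REMOVE_POLICY)
		pr_info("cpufreq policy removed for CPU%u\n", policy->cpu);

	return 0;
}
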
@@ -96,6 +96,8 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level);

struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq);
@@ -128,7 +130,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names);
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -200,6 +202,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
	return ERR_PTR(-ENOTSUPP);
}

static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					unsigned int level)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					unsigned long *freq)
{
@@ -292,7 +300,7 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const

static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}

static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names)
static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
{
	return ERR_PTR(-ENOTSUPP);
}
@@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	int cpu;

	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;
@@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
		return;

	policy->cur = next_freq;
	trace_cpu_frequency(next_freq, smp_processor_id());

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(next_freq, cpu);
	}
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,