Merge branch 'pm-cpufreq'

* pm-cpufreq: (60 commits)
  cpufreq: pmac32-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: pmac64-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: maple-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: arm_big_little: remove device tree parsing for cpu nodes
  cpufreq: kirkwood-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: spear-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: highbank-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: cpufreq-cpu0: remove device tree parsing for cpu nodes
  cpufreq: imx6q-cpufreq: remove device tree parsing for cpu nodes
  drivers/bus: arm-cci: avoid parsing DT for cpu device nodes
  ARM: mvebu: remove device tree parsing for cpu nodes
  ARM: topology: remove hwid/MPIDR dependency from cpu_capacity
  of/device: add helper to get cpu device node from logical cpu index
  driver/core: cpu: initialize of_node in cpu's device struture
  ARM: DT/kernel: define ARM specific arch_match_cpu_phys_id
  of: move of_get_cpu_node implementation to DT core library
  powerpc: refactor of_get_cpu_node to support other architectures
  openrisc: remove undefined of_get_cpu_node declaration
  microblaze: remove undefined of_get_cpu_node declaration
  cpufreq: fix bad unlock balance on !CONFIG_SMP
  ...
Author: Rafael J. Wysocki
Date:   2013-08-27 01:44:40 +02:00
Commit: 7a330a5416
77 changed files with 965 additions and 1168 deletions
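Taken together, the series replaces per-driver walks over /cpus (matching "reg" or MPIDR values by hand) with the DT core helpers of_get_cpu_node() and of_cpu_device_node_get(), plus an of_node populated on each CPU's struct device. As a rough sketch of the resulting calling convention (illustrative only, not code from any single file below):

    #include <linux/of.h>
    #include <linux/printk.h>

    static struct device_node *example_get_cpu_node(int cpu)
    {
            /* Works even in early boot, before the cpu device is registered. */
            struct device_node *np = of_get_cpu_node(cpu, NULL);

            if (!np)
                    pr_err("missing device node for CPU %d\n", cpu);
            return np;
    }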

View File

@@ -50,8 +50,6 @@ What shall this struct cpufreq_driver contain?
 cpufreq_driver.name -		The name of this driver.
 
-cpufreq_driver.owner -		THIS_MODULE;
-
 cpufreq_driver.init -		A pointer to the per-CPU initialization
 				function.

View File

@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
 	}
 }
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
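arch_match_cpu_phys_id() is the hook the now-generic of_get_cpu_node() uses to decide whether a cpu node's hardware id belongs to a given logical CPU; on ARM that means masking the MPIDR as above. A hedged sketch of how such a matcher gets consumed (names other than the two DT helpers are invented for illustration):

    static bool example_cpu_node_matches(struct device_node *cpun, int cpu)
    {
            const __be32 *reg;
            int len;

            reg = of_get_property(cpun, "reg", &len);
            if (!reg || len < (int)sizeof(*reg))
                    return false;

            /* Compare the node's "reg" value with the logical cpu's hw id. */
            return arch_match_cpu_phys_id(cpu, be32_to_cpup(reg));
    }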

View File

@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = {
 	{NULL, },
 };
 
-struct cpu_capacity {
-	unsigned long hwid;
-	unsigned long capacity;
-};
-
-struct cpu_capacity *cpu_capacity;
+unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu)	__cpu_capacity[cpu]
 
 unsigned long middle_capacity = 1;
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void)
 	unsigned long capacity = 0;
 	int alloc_size, cpu = 0;
 
-	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
-	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
+	alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
+	__cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
 
-	while ((cn = of_find_node_by_type(cn, "cpu"))) {
-		const u32 *rate, *reg;
+	for_each_possible_cpu(cpu) {
+		const u32 *rate;
 		int len;
 
-		if (cpu >= num_possible_cpus())
-			break;
+		/* too early to use cpu->of_node */
+		cn = of_get_cpu_node(cpu, NULL);
+		if (!cn) {
+			pr_err("missing device node for CPU %d\n", cpu);
+			continue;
+		}
 
 		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
 			if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void)
 			continue;
 		}
 
-		reg = of_get_property(cn, "reg", &len);
-		if (!reg || len != 4) {
-			pr_err("%s missing reg property\n", cn->full_name);
-			continue;
-		}
-
 		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
 
 		/* Save min capacity of the system */
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void)
 		if (capacity > max_capacity)
 			max_capacity = capacity;
 
-		cpu_capacity[cpu].capacity = capacity;
-		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+		cpu_capacity(cpu) = capacity;
 	}
 
-	if (cpu < num_possible_cpus())
-		cpu_capacity[cpu].hwid = (unsigned long)(-1);
-
 	/* If min and max capacities are equals, we bypass the update of the
 	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
 	 * compute a middle_capacity factor that will ensure that the capacity
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void)
 	 * SCHED_POWER_SCALE, which is the default value, but with the
 	 * constraint explained near table_efficiency[].
 	 */
-	if (min_capacity == max_capacity)
-		cpu_capacity[0].hwid = (unsigned long)(-1);
-	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+	if (4*max_capacity < (3*(max_capacity + min_capacity)))
 		middle_capacity = (min_capacity + max_capacity)
 				>> (SCHED_POWER_SHIFT+1);
 	else
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void)
 * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
 * function returns directly for SMP system.
 */
-void update_cpu_power(unsigned int cpu, unsigned long hwid)
+void update_cpu_power(unsigned int cpu)
 {
-	unsigned int idx = 0;
-
-	/* look for the cpu's hwid in the cpu capacity table */
-	for (idx = 0; idx < num_possible_cpus(); idx++) {
-		if (cpu_capacity[idx].hwid == hwid)
-			break;
-
-		if (cpu_capacity[idx].hwid == -1)
-			return;
-	}
-
-	if (idx == num_possible_cpus())
+	if (!cpu_capacity(cpu))
 		return;
 
-	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+	set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
 	       cpu, arch_scale_freq_power(NULL, cpu));
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid)
 
 #else
 static inline void parse_dt_topology(void) {}
-static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+static inline void update_cpu_power(unsigned int cpuid) {}
 #endif
 
 /*
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid)
 
 	update_siblings_masks(cpuid);
 
-	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+	update_cpu_power(cpuid);
 
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
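With the per-cpu __cpu_capacity[] table in place, the scaling itself is unchanged: each CPU's cpu_power becomes capacity / middle_capacity. A worked example with invented numbers (SCHED_POWER_SHIFT is 10 in this era's scheduler, so the default scale is 1024):

    static unsigned long example_middle_capacity(unsigned long min_capacity,
                                                 unsigned long max_capacity)
    {
            /* e.g. min = 1536, max = 3072: 4*3072 = 12288 < 3*(3072+1536) = 13824 */
            if (4 * max_capacity < 3 * (max_capacity + min_capacity))
                    return (min_capacity + max_capacity) >> (10 + 1);   /* -> 2 */
            /* the other branch lies outside the hunks shown above */
            return 1;
    }
    /* update_cpu_power() then yields 1536 / 2 = 768 and 3072 / 2 = 1536,
     * straddling the 1024 default. */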

View File

@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev)
 {
 	struct device_node *np;
 
-	np = of_find_node_by_path("/cpus/cpu@0");
+	np = of_node_get(cpu_dev->of_node);
 	if (!np) {
 		pr_warn("failed to find cpu0 node\n");
 		return;
 	}
 
-	cpu_dev->of_node = np;
 	if (of_init_opp_table(cpu_dev)) {
 		pr_warn("failed to init OPP table\n");
 		goto put_node;

View File

@@ -29,45 +29,40 @@
 #include "pmsu.h"
 #include "coherency.h"
 
+static struct clk *__init get_cpu_clk(int cpu)
+{
+	struct clk *cpu_clk;
+	struct device_node *np = of_get_cpu_node(cpu, NULL);
+
+	if (WARN(!np, "missing cpu node\n"))
+		return NULL;
+	cpu_clk = of_clk_get(np, 0);
+	if (WARN_ON(IS_ERR(cpu_clk)))
+		return NULL;
+	return cpu_clk;
+}
+
 void __init set_secondary_cpus_clock(void)
 {
-	int thiscpu;
+	int thiscpu, cpu;
 	unsigned long rate;
-	struct clk *cpu_clk = NULL;
-	struct device_node *np = NULL;
+	struct clk *cpu_clk;
 
 	thiscpu = smp_processor_id();
-	for_each_node_by_type(np, "cpu") {
-		int err;
-		int cpu;
-
-		err = of_property_read_u32(np, "reg", &cpu);
-		if (WARN_ON(err))
-			return;
-
-		if (cpu == thiscpu) {
-			cpu_clk = of_clk_get(np, 0);
-			break;
-		}
-	}
-	if (WARN_ON(IS_ERR(cpu_clk)))
+	cpu_clk = get_cpu_clk(thiscpu);
+	if (!cpu_clk)
 		return;
 	clk_prepare_enable(cpu_clk);
 	rate = clk_get_rate(cpu_clk);
 
 	/* set all the other CPU clk to the same rate than the boot CPU */
-	for_each_node_by_type(np, "cpu") {
-		int err;
-		int cpu;
-
-		err = of_property_read_u32(np, "reg", &cpu);
-		if (WARN_ON(err))
+	for_each_possible_cpu(cpu) {
+		if (cpu == thiscpu)
+			continue;
+		cpu_clk = get_cpu_clk(cpu);
+		if (!cpu_clk)
 			return;
-
-		if (cpu != thiscpu) {
-			cpu_clk = of_clk_get(np, 0);
-			clk_set_rate(cpu_clk, rate);
-		}
+		clk_set_rate(cpu_clk, rate);
 	}
 }

View File

@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */

View File

@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* Get the MAC address */
 extern const void *of_get_mac_address(struct device_node *np);

View File

@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* cache lookup */
 struct device_node *of_find_next_cache_node(struct device_node *np);

View File

@@ -865,49 +865,10 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	int hardid;
-	struct device_node *np;
-
-	hardid = get_hard_smp_processor_id(cpu);
-
-	for_each_node_by_type(np, "cpu") {
-		const u32 *intserv;
-		unsigned int plen, t;
-
-		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
-		 * fallback to "reg" property and assume no threads
-		 */
-		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
-				&plen);
-		if (intserv == NULL) {
-			const u32 *reg = of_get_property(np, "reg", NULL);
-			if (reg == NULL)
-				continue;
-			if (*reg == hardid) {
-				if (thread)
-					*thread = 0;
-				return np;
-			}
-		} else {
-			plen /= sizeof(u32);
-			for (t = 0; t < plen; t++) {
-				if (hardid == intserv[t]) {
-					if (thread)
-						*thread = t;
-					return np;
-				}
-			}
-		}
-	}
-	return NULL;
+	return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
-EXPORT_SYMBOL(of_get_cpu_node);
 
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;

View File

@@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val);
 
 extern u16 amd_get_nb_id(int cpu);
 
-struct aperfmperf {
-	u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
-{
-	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
-	rdmsrl(MSR_IA32_APERF, am->aperf);
-	rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
-
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
-				    struct aperfmperf *new)
-{
-	u64 aperf = new->aperf - old->aperf;
-	u64 mperf = new->mperf - old->mperf;
-	unsigned long ratio = aperf;
-
-	mperf >>= APERFMPERF_SHIFT;
-	if (mperf)
-		ratio = div64_u64(aperf, mperf);
-
-	return ratio;
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

View File

@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/percpu.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 
 #include "base.h"
@@ -289,6 +290,7 @@ int register_cpu(struct cpu *cpu, int num)
 	cpu->dev.release = cpu_device_release;
 	cpu->dev.offline_disabled = !cpu->hotpluggable;
 	cpu->dev.offline = !cpu_online(num);
+	cpu->dev.of_node = of_get_cpu_node(num, NULL);
 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
 	cpu->dev.bus->uevent = arch_cpu_uevent;
 #endif
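Once register_cpu() fills in cpu->dev.of_node, drivers can reach a CPU's DT node through its device instead of hard-coded paths such as "/cpus/cpu@0". A minimal sketch (assuming the cpu device has already been registered):

    #include <linux/cpu.h>
    #include <linux/of.h>

    static struct device_node *example_cpu0_node(void)
    {
            struct device *cpu_dev = get_cpu_device(0);

            if (!cpu_dev)
                    return NULL;
            /* caller must drop the reference with of_node_put() */
            return of_node_get(cpu_dev->of_node);
    }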

View File

@@ -122,17 +122,8 @@ EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
 static void __init cci_ace_init_ports(void)
 {
-	int port, ac, cpu;
-	u64 hwid;
-	const u32 *cell;
-	struct device_node *cpun, *cpus;
-
-	cpus = of_find_node_by_path("/cpus");
-	if (WARN(!cpus, "Missing cpus node, bailing out\n"))
-		return;
-
-	if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
-		ac = of_n_addr_cells(cpus);
+	int port, cpu;
+	struct device_node *cpun;
 
 	/*
 	 * Port index look-up speeds up the function disabling ports by CPU,
@@ -141,18 +132,13 @@ static void __init cci_ace_init_ports(void)
 	 * The stashed index array is initialized for all possible CPUs
 	 * at probe time.
 	 */
-	for_each_child_of_node(cpus, cpun) {
-		if (of_node_cmp(cpun->type, "cpu"))
-			continue;
-		cell = of_get_property(cpun, "reg", NULL);
-		if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
+	for_each_possible_cpu(cpu) {
+		/* too early to use cpu->of_node */
+		cpun = of_get_cpu_node(cpu, NULL);
+		if (WARN(!cpun, "Missing cpu device node\n"))
 			continue;
 
-		hwid = of_read_number(cell, ac);
-		cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);
-
-		if (cpu < 0 || !cpu_possible(cpu))
-			continue;
-
 		port = __cci_ace_get_port(cpun, ACE_PORT);
 		if (port < 0)
 			continue;

View File

@@ -17,37 +17,47 @@ config ARM_DT_BL_CPUFREQ
 	  big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
-	bool "SAMSUNG EXYNOS SoCs"
-	depends on ARCH_EXYNOS
+	bool
 	select CPU_FREQ_TABLE
-	default y
-	help
-	  This adds the CPUFreq driver common part for Samsung
-	  EXYNOS SoCs.
-
-	  If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
-	def_bool CPU_EXYNOS4210
+	bool "SAMSUNG EXYNOS4210"
+	depends on CPU_EXYNOS4210
+	default y
+	select ARM_EXYNOS_CPUFREQ
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4210
 	  SoC (S5PV310 or S5PC210).
 
+	  If in doubt, say N.
+
 config ARM_EXYNOS4X12_CPUFREQ
-	def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+	bool "SAMSUNG EXYNOS4x12"
+	depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+	default y
+	select ARM_EXYNOS_CPUFREQ
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4X12
 	  SoC (EXYNOS4212 or EXYNOS4412).
 
+	  If in doubt, say N.
+
 config ARM_EXYNOS5250_CPUFREQ
-	def_bool SOC_EXYNOS5250
+	bool "SAMSUNG EXYNOS5250"
+	depends on SOC_EXYNOS5250
+	default y
+	select ARM_EXYNOS_CPUFREQ
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5250
 	  SoC.
 
+	  If in doubt, say N.
+
 config ARM_EXYNOS5440_CPUFREQ
-	def_bool SOC_EXYNOS5440
+	bool "SAMSUNG EXYNOS5440"
+	depends on SOC_EXYNOS5440
 	depends on HAVE_CLK && PM_OPP && OF
+	default y
 	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -55,6 +65,8 @@ config ARM_EXYNOS5440_CPUFREQ
 	  different than previous exynos controllers so not using
 	  the common exynos framework.
 
+	  If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
 	tristate "Calxeda Highbank-based"
 	depends on ARCH_HIGHBANK

View File

@@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
 # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o

View File

@@ -45,7 +45,6 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
-#include "mperf.h"
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -198,7 +197,7 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
 	return sprintf(buf, "%u\n", boost_enabled);
 }
 
-static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+cpufreq_freq_attr_rw(cpb);
 #endif
 
 static int check_est_cpu(unsigned int cpuid)
@@ -710,7 +709,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return blacklisted;
 #endif
 
-	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -800,7 +799,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		goto err_unreg;
 	}
 
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+	data->freq_table = kmalloc(sizeof(*data->freq_table) *
 		    (perf->state_count+1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
@@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	/* Check for APERF/MPERF support in hardware */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-
 	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)
 		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
@@ -941,7 +936,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.exit		= acpi_cpufreq_cpu_exit,
 	.resume		= acpi_cpufreq_resume,
 	.name		= "acpi-cpufreq",
-	.owner		= THIS_MODULE,
 	.attr		= acpi_cpufreq_attr,
 };
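cpufreq_freq_attr_rw() is the helper macro from <linux/cpufreq.h>; it generates the same read-write freq_attr object the removed open-coded line declared. Paraphrasing its definition from this era (a sketch, not copied verbatim):

    #define example_cpufreq_freq_attr_rw(_name)             \
    static struct freq_attr _name =                         \
            __ATTR(_name, 0644, show_##_name, store_##_name)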

View File

@@ -19,12 +19,11 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
-#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -34,27 +33,13 @@
 /* get cpu node with valid operating-points */
 static struct device_node *get_cpu_node_with_valid_op(int cpu)
 {
-	struct device_node *np = NULL, *parent;
-	int count = 0;
+	struct device_node *np = of_cpu_device_node_get(cpu);
 
-	parent = of_find_node_by_path("/cpus");
-	if (!parent) {
-		pr_err("failed to find OF /cpus\n");
-		return NULL;
+	if (!of_get_property(np, "operating-points", NULL)) {
+		of_node_put(np);
+		np = NULL;
 	}
 
-	for_each_child_of_node(parent, np) {
-		if (count++ != cpu)
-			continue;
-		if (!of_get_property(np, "operating-points", NULL)) {
-			of_node_put(np);
-			np = NULL;
-		}
-		break;
-	}
-
-	of_node_put(parent);
 	return np;
 }
@@ -63,11 +48,12 @@ static int dt_init_opp_table(struct device *cpu_dev)
 	struct device_node *np;
 	int ret;
 
-	np = get_cpu_node_with_valid_op(cpu_dev->id);
-	if (!np)
-		return -ENODATA;
+	np = of_node_get(cpu_dev->of_node);
+	if (!np) {
+		pr_err("failed to find cpu%d node\n", cpu_dev->id);
+		return -ENOENT;
+	}
 
-	cpu_dev->of_node = np;
 	ret = of_init_opp_table(cpu_dev);
 	of_node_put(np);
@@ -79,9 +65,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
 	struct device_node *np;
 	u32 transition_latency = CPUFREQ_ETERNAL;
 
-	np = get_cpu_node_with_valid_op(cpu_dev->id);
-	if (!np)
+	np = of_node_get(cpu_dev->of_node);
+	if (!np) {
+		pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
 		return CPUFREQ_ETERNAL;
+	}
 
 	of_property_read_u32(np, "clock-latency", &transition_latency);
 	of_node_put(np);

View File

@@ -108,7 +108,6 @@ static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver at32_driver = {
 	.name		= "at32ap",
-	.owner		= THIS_MODULE,
 	.init		= at32_cpufreq_driver_init,
 	.verify		= at32_verify_speed,
 	.target		= at32_set_target,

View File

@@ -225,7 +225,6 @@ static struct cpufreq_driver bfin_driver = {
 	.get = bfin_getfreq_khz,
 	.init = __bfin_cpu_init,
 	.name = "bfin cpufreq",
-	.owner = THIS_MODULE,
 	.attr = bfin_freq_attr,
 };

View File

@@ -69,7 +69,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
-	if (cpu_reg) {
+	if (!IS_ERR(cpu_reg)) {
 		rcu_read_lock();
 		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
 		if (IS_ERR(opp)) {
@@ -90,7 +90,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 		 freqs.new / 1000, volt ? volt / 1000 : -1);
 
 	/* scaling up?  scale voltage before frequency */
-	if (cpu_reg && freqs.new > freqs.old) {
+	if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
 		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
 		if (ret) {
 			pr_err("failed to scale voltage up: %d\n", ret);
@@ -102,14 +102,14 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 	ret = clk_set_rate(cpu_clk, freq_exact);
 	if (ret) {
 		pr_err("failed to set clock rate: %d\n", ret);
-		if (cpu_reg)
+		if (!IS_ERR(cpu_reg))
 			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
 		freqs.new = freqs.old;
 		goto post_notify;
 	}
 
 	/* scaling down?  scale voltage after frequency */
-	if (cpu_reg && freqs.new < freqs.old) {
+	if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
 		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
 		if (ret) {
 			pr_err("failed to scale voltage down: %d\n", ret);
@@ -174,29 +174,17 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
 {
-	struct device_node *np, *parent;
+	struct device_node *np;
 	int ret;
 
-	parent = of_find_node_by_path("/cpus");
-	if (!parent) {
-		pr_err("failed to find OF /cpus\n");
-		return -ENOENT;
-	}
-
-	for_each_child_of_node(parent, np) {
-		if (of_get_property(np, "operating-points", NULL))
-			break;
-	}
+	cpu_dev = &pdev->dev;
 
+	np = of_node_get(cpu_dev->of_node);
 	if (!np) {
 		pr_err("failed to find cpu0 node\n");
-		ret = -ENOENT;
-		goto out_put_parent;
+		return -ENOENT;
 	}
 
-	cpu_dev = &pdev->dev;
-	cpu_dev->of_node = np;
-
 	cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
 	if (IS_ERR(cpu_reg)) {
 		/*
@@ -210,7 +198,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		}
 		pr_warn("failed to get cpu0 regulator: %ld\n",
 			PTR_ERR(cpu_reg));
-		cpu_reg = NULL;
 	}
 
 	cpu_clk = devm_clk_get(cpu_dev, NULL);
@@ -269,15 +256,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 	}
 
 	of_node_put(np);
-	of_node_put(parent);
 	return 0;
 
 out_free_table:
 	opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
 	of_node_put(np);
-out_put_parent:
-	of_node_put(parent);
 	return ret;
 }

View File

@@ -379,7 +379,6 @@ static struct cpufreq_driver nforce2_driver = {
 	.get = nforce2_get,
 	.init = nforce2_cpu_init,
 	.exit = nforce2_cpu_exit,
-	.owner = THIS_MODULE,
 };
 
 #ifdef MODULE

File diff suppressed because it is too large.

View File

@@ -11,19 +11,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
 #include "cpufreq_governor.h"
 
 /* Conservative governor macros */
@@ -329,7 +317,7 @@ static int cs_init(struct dbs_data *dbs_data)
 {
 	struct cs_dbs_tuners *tuners;
 
-	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
 		return -ENOMEM;

View File

@@ -16,15 +16,9 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
 
 #include "cpufreq_governor.h"
@@ -53,7 +47,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
 	policy = cdbs->cur_policy;
 
-	/* Get Absolute Load (in terms of freq for ondemand gov) */
+	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_common_info *j_cdbs;
 		u64 cur_wall_time, cur_idle_time;
@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
-		if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-			int freq_avg = __cpufreq_driver_getavg(policy, j);
-			if (freq_avg <= 0)
-				freq_avg = policy->cur;
-
-			load *= freq_avg;
-		}
-
 		if (load > max_load)
 			max_load = load;
 	}

View File

@@ -18,10 +18,9 @@
 #define _CPUFREQ_GOVERNOR_H
 
 #include <linux/cpufreq.h>
-#include <linux/kobject.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
 
 /*
  * The polling frequency depends on the capability of the processor. Default
@@ -169,7 +168,6 @@ struct od_dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
-	unsigned int adj_up_threshold;
 	unsigned int powersave_bias;
 	unsigned int io_is_busy;
 };
@@ -223,7 +221,7 @@ struct od_ops {
 	void (*powersave_bias_init_cpu)(int cpu);
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
 			unsigned int freq_next, unsigned int relation);
-	void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
+	void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
 };
 
 struct cs_ops {

View File

@@ -12,28 +12,16 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/cpu.h>
 #include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
 #include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
 	}
 }
 
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 {
-	struct dbs_data *dbs_data = p->governor_data;
+	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	if (od_tuners->powersave_bias)
-		freq = od_ops.powersave_bias_target(p, freq,
+		freq = od_ops.powersave_bias_target(policy, freq,
 				CPUFREQ_RELATION_H);
-	else if (p->cur == p->max)
+	else if (policy->cur == policy->max)
 		return;
 
-	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
+	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners->up_threshold * policy->cur) {
+	if (load > od_tuners->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
 				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
-	}
-
-	/* Check for frequency decrease */
-	/* if we cannot reduce the frequency anymore, break out early */
-	if (policy->cur == policy->min)
-		return;
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that can
-	 * support the current CPU usage without triggering the up policy. To be
-	 * safe, we focus 10 points under the threshold.
-	 */
-	if (load_freq < od_tuners->adj_up_threshold
-			* policy->cur) {
+	} else {
+		/* Calculate the next frequency proportional to load */
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners->adj_up_threshold;
+		freq_next = load * policy->cpuinfo.max_freq / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
@@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-	/* Calculate the new adj_up_threshold */
-	od_tuners->adj_up_threshold += input;
-	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
 	od_tuners->up_threshold = input;
 	return count;
@@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data)
 	u64 idle_time;
 	int cpu;
 
-	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
 		return -ENOMEM;
@@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data)
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
 		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
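The net effect in ondemand: a load above up_threshold still jumps straight to policy->max, while anything below it now picks a frequency proportional to load rather than one derived from the removed adj_up_threshold. Worked example with invented numbers:

    static unsigned int example_od_freq_next(unsigned int load,      /* in % */
                                             unsigned int max_freq)  /* in kHz */
    {
            /* load = 60, max_freq = 2000000 -> 1200000 kHz */
            return load * max_freq / 100;
    }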

View File

@@ -12,10 +12,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 					unsigned int event)

View File

@@ -12,10 +12,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 					unsigned int event)

View File

@@ -9,17 +9,10 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/kernel.h>
-#include <linux/slab.h>
 #include <linux/cpu.h>
-#include <linux/sysfs.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/percpu.h>
-#include <linux/kobject.h>
-#include <linux/spinlock.h>
-#include <linux/notifier.h>
+#include <linux/slab.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
 {
 	unsigned int i, j, count = 0, ret = 0;
 	struct cpufreq_stats *stat;
-	struct cpufreq_policy *data;
+	struct cpufreq_policy *current_policy;
 	unsigned int alloc_size;
 	unsigned int cpu = policy->cpu;
 	if (per_cpu(cpufreq_stats_table, cpu))
 		return -EBUSY;
-	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
 	if ((stat) == NULL)
 		return -ENOMEM;
 
-	data = cpufreq_cpu_get(cpu);
-	if (data == NULL) {
+	current_policy = cpufreq_cpu_get(cpu);
+	if (current_policy == NULL) {
 		ret = -EINVAL;
 		goto error_get_fail;
 	}
 
-	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
 	if (ret)
 		goto error_out;
@@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
 	stat->last_time = get_jiffies_64();
 	stat->last_index = freq_table_get_index(stat, policy->cur);
 	spin_unlock(&cpufreq_stats_lock);
-	cpufreq_cpu_put(data);
+	cpufreq_cpu_put(current_policy);
 	return 0;
 error_out:
-	cpufreq_cpu_put(data);
+	cpufreq_cpu_put(current_policy);
 error_get_fail:
 	kfree(stat);
 	per_cpu(cpufreq_stats_table, cpu) = NULL;
@@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		cpufreq_update_policy(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 		cpufreq_stats_free_sysfs(cpu);
 		break;
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
 		cpufreq_stats_free_table(cpu);
 		break;
 	}
@@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void)
 		return ret;
 
 	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	for_each_online_cpu(cpu)
-		cpufreq_update_policy(cpu);
 
 	ret = cpufreq_register_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);

View File

@@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = {
 	.init	= cris_freq_cpu_init,
 	.exit	= cris_freq_cpu_exit,
 	.name	= "cris_freq",
-	.owner	= THIS_MODULE,
 	.attr	= cris_freq_attr,
 };

View File

@@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = {
 	.init	= cris_freq_cpu_init,
 	.exit	= cris_freq_cpu_exit,
 	.name	= "cris_freq",
-	.owner	= THIS_MODULE,
 	.attr	= cris_freq_attr,
 };

View File

@@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf;
 /* Minimum necessary to get acpi_processor_get_bios_limit() working */
 static int eps_acpi_init(void)
 {
-	eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance),
+	eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
 				      GFP_KERNEL);
 	if (!eps_acpi_cpu_perf)
 		return -ENOMEM;
@@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 		states = 2;
 
 	/* Allocate private data and frequency table for current cpu */
-	centaur = kzalloc(sizeof(struct eps_cpu_data)
+	centaur = kzalloc(sizeof(*centaur)
 		    + (states + 1) * sizeof(struct cpufreq_frequency_table),
 		    GFP_KERNEL);
 	if (!centaur)
@@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = {
 	.exit		= eps_cpu_exit,
 	.get		= eps_get,
 	.name		= "e_powersaver",
-	.owner		= THIS_MODULE,
 	.attr		= eps_attr,
 };

View File

@@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = {
 	.init		= elanfreq_cpu_init,
 	.exit		= elanfreq_cpu_exit,
 	.name		= "elanfreq",
-	.owner		= THIS_MODULE,
 	.attr		= elanfreq_attr,
 };

View File

@@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void)
 {
 	int ret = -EINVAL;
 
-	exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+	exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
 	if (!exynos_info)
 		return -ENOMEM;
@@ -332,7 +332,6 @@ static int __init exynos_cpufreq_init(void)
 	regulator_put(arm_regulator);
 err_vdd_arm:
 	kfree(exynos_info);
-	pr_debug("%s: failed initialization\n", __func__);
 	return -EINVAL;
 }
 late_initcall(exynos_cpufreq_init);

View File

@@ -43,6 +43,27 @@ struct exynos_dvfs_info {
 	bool (*need_apll_change)(unsigned int, unsigned int);
 };
 
+#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
 extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
 extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
 extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
+{
+	return -EOPNOTSUPP;
+}
+#endif

View File

@@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy,
 	freqs.old = dvfs_info->cur_frequency;
 	freqs.new = freq_table[index].frequency;
 
+	if (freqs.old == freqs.new)
+		goto out;
+
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Set the target frequency in all C0_3_PSTATE register */

View File

@@ -11,10 +11,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/cpufreq.h>
+#include <linux/module.h>
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *

View File

@@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value)
  * gx_detect_chipset:
  *
  **/
-static __init struct pci_dev *gx_detect_chipset(void)
+static struct pci_dev * __init gx_detect_chipset(void)
 {
 	struct pci_dev *gx_pci = NULL;
@@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = {
 	.target		= cpufreq_gx_target,
 	.init		= cpufreq_gx_cpu_init,
 	.name		= "gx-suspmod",
-	.owner		= THIS_MODULE,
 };
 
 static int __init cpufreq_gx_init(void)
@@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void)
 
 	pr_debug("geode suspend modulation available.\n");
 
-	params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
 	if (params == NULL)
 		return -ENOMEM;

View File

@@ -69,24 +69,18 @@ static int hb_cpufreq_driver_init(void)
 	if (!of_machine_is_compatible("calxeda,highbank"))
 		return -ENODEV;
 
-	for_each_child_of_node(of_find_node_by_path("/cpus"), np)
-		if (of_get_property(np, "operating-points", NULL))
-			break;
+	cpu_dev = get_cpu_device(0);
+	if (!cpu_dev) {
+		pr_err("failed to get highbank cpufreq device\n");
+		return -ENODEV;
+	}
 
+	np = of_node_get(cpu_dev->of_node);
 	if (!np) {
 		pr_err("failed to find highbank cpufreq node\n");
 		return -ENOENT;
 	}
 
-	cpu_dev = get_cpu_device(0);
-	if (!cpu_dev) {
-		pr_err("failed to get highbank cpufreq device\n");
-		ret = -ENODEV;
-		goto out_put_node;
-	}
-
-	cpu_dev->of_node = np;
-
 	cpu_clk = clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
 		ret = PTR_ERR(cpu_clk);

View File

@@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init (
 
 	pr_debug("acpi_cpufreq_cpu_init\n");
 
-	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return (-ENOMEM);
@@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init (
 	}
 
 	/* alloc freq_table */
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+	data->freq_table = kmalloc(sizeof(*data->freq_table) *
 	                           (data->acpi_data.state_count + 1),
 	                           GFP_KERNEL);
 	if (!data->freq_table) {
@@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.init		= acpi_cpufreq_cpu_init,
 	.exit		= acpi_cpufreq_cpu_exit,
 	.name		= "acpi-cpufreq",
-	.owner		= THIS_MODULE,
 	.attr		= acpi_cpufreq_attr,
 };

View File

@@ -221,14 +221,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 
 	cpu_dev = &pdev->dev;
 
-	np = of_find_node_by_path("/cpus/cpu@0");
+	np = of_node_get(cpu_dev->of_node);
 	if (!np) {
 		dev_err(cpu_dev, "failed to find cpu0 node\n");
 		return -ENOENT;
 	}
 
-	cpu_dev->of_node = np;
-
 	arm_clk = devm_clk_get(cpu_dev, "arm");
 	pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
 	pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");

View File

@@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.name		= "intel_pstate",
-	.owner		= THIS_MODULE,
 };
 
 static int __initdata no_load;

View File

@@ -14,7 +14,7 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/cpufreq.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <asm/proc-fns.h>
@@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
 	.init	= kirkwood_cpufreq_cpu_init,
 	.exit	= kirkwood_cpufreq_cpu_exit,
 	.name	= "kirkwood-cpufreq",
-	.owner	= THIS_MODULE,
 	.attr	= kirkwood_cpufreq_attr,
 };
@@ -175,9 +174,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 	if (IS_ERR(priv.base))
 		return PTR_ERR(priv.base);
 
-	np = of_find_node_by_path("/cpus/cpu@0");
-	if (!np)
+	np = of_cpu_device_node_get(0);
+	if (!np) {
+		dev_err(&pdev->dev, "failed to get cpu device node\n");
 		return -ENODEV;
+	}
 
 	priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
 	if (IS_ERR(priv.cpu_clk)) {

View File

@@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = {
 	.init	= longhaul_cpu_init,
 	.exit	= longhaul_cpu_exit,
 	.name	= "longhaul",
-	.owner	= THIS_MODULE,
 	.attr	= longhaul_attr,
 };

View File

@@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = {
 	.get		= longrun_get,
 	.init		= longrun_cpu_init,
 	.name		= "longrun",
-	.owner		= THIS_MODULE,
 };
 
 static const struct x86_cpu_id longrun_ids[] = {

View File

@@ -158,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = {
 };
 
 static struct cpufreq_driver loongson2_cpufreq_driver = {
-	.owner = THIS_MODULE,
 	.name = "loongson2",
 	.init = loongson2_cpufreq_cpu_init,
 	.verify = loongson2_cpufreq_verify,

View File

@@ -24,7 +24,7 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 
 #define DBG(fmt...) pr_debug(fmt)
@@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver maple_cpufreq_driver = {
 	.name		= "maple",
-	.owner		= THIS_MODULE,
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= maple_cpufreq_cpu_init,
 	.verify		= maple_cpufreq_verify,
@@ -201,7 +200,6 @@ static struct cpufreq_driver maple_cpufreq_driver = {
 
 static int __init maple_cpufreq_init(void)
 {
-	struct device_node *cpus;
 	struct device_node *cpunode;
 	unsigned int psize;
 	unsigned long max_freq;
@@ -217,24 +215,11 @@ static int __init maple_cpufreq_init(void)
 	    !of_machine_is_compatible("Momentum,Apache"))
 		return 0;
 
-	cpus = of_find_node_by_path("/cpus");
-	if (cpus == NULL) {
-		DBG("No /cpus node !\n");
-		return -ENODEV;
-	}
-
 	/* Get first CPU node */
-	for (cpunode = NULL;
-	     (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
-		const u32 *reg = of_get_property(cpunode, "reg", NULL);
-		if (reg == NULL || (*reg) != 0)
-			continue;
-		if (!strcmp(cpunode->type, "cpu"))
-			break;
-	}
+	cpunode = of_cpu_device_node_get(0);
 	if (cpunode == NULL) {
 		printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
-		goto bail_cpus;
+		goto bail_noprops;
 	}
 
 	/* Check 970FX for now */
@@ -290,14 +275,11 @@ static int __init maple_cpufreq_init(void)
 	rc = cpufreq_register_driver(&maple_cpufreq_driver);
 
 	of_node_put(cpunode);
-	of_node_put(cpus);
 
 	return rc;
 
 bail_noprops:
 	of_node_put(cpunode);
-bail_cpus:
-	of_node_put(cpus);
 
 	return rc;
 }

View File

@ -1,51 +0,0 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include "mperf.h"
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* Called via smp_call_function_single(), on the target CPU */
static void read_measured_perf_ctrs(void *_cur)
{
struct aperfmperf *am = _cur;
get_aperfmperf(am);
}
/*
* Return the measured active (C0) frequency on this CPU since last call
* to this function.
* Input: cpu number
* Return: Average CPU frequency in terms of max frequency (zero on error)
*
* We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
* over a period of time, while CPU is in C0 state.
* IA32_MPERF counts at the rate of max advertised frequency
* IA32_APERF counts at the rate of actual CPU frequency
* Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
* no meaning should be associated with absolute values of these MSRs.
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
unsigned int cpu)
{
struct aperfmperf perf;
unsigned long ratio;
unsigned int retval;
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
return 0;
ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
per_cpu(acfreq_old_perf, cpu) = perf;
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
return retval;
}
EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
MODULE_LICENSE("GPL");
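
The file deleted above held the APERF/MPERF based measured-performance helper; the ->getavg hook it served is also dropped from struct cpufreq_driver later in this diff, so nothing uses it any more. As a reminder of what it computed, here is a minimal user-space sketch of the same ratio arithmetic, using made-up counter samples rather than real MSR reads:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical samples: two reads of IA32_APERF/IA32_MPERF taken some time
 * apart while the CPU stayed in C0.  Only the ratio of the deltas is
 * architecturally meaningful, never the absolute counter values.
 */
int main(void)
{
        uint64_t aperf_old = 1000000, aperf_new = 1800000;  /* actual-freq counter */
        uint64_t mperf_old = 1000000, mperf_new = 2000000;  /* max-freq counter */
        unsigned int max_freq_khz = 2400000;                /* cpuinfo.max_freq */

        uint64_t da = aperf_new - aperf_old;
        uint64_t dm = mperf_new - mperf_old;

        /* average C0 frequency = max_freq * (delta APERF / delta MPERF) */
        unsigned int avg_khz = (unsigned int)(max_freq_khz * da / dm);

        printf("measured average frequency: %u kHz\n", avg_khz); /* 1920000 */
        return 0;
}
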

View File

@ -1,9 +0,0 @@
/*
* (c) 2010 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
unsigned int cpu);

View File

@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.exit = cpufreq_p4_cpu_exit, .exit = cpufreq_p4_cpu_exit,
.get = cpufreq_p4_get, .get = cpufreq_p4_get,
.name = "p4-clockmod", .name = "p4-clockmod",
.owner = THIS_MODULE,
.attr = p4clockmod_attr, .attr = p4clockmod_attr,
}; };

View File

@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
static struct cpufreq_driver pas_cpufreq_driver = { static struct cpufreq_driver pas_cpufreq_driver = {
.name = "pas-cpufreq", .name = "pas-cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init, .init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit, .exit = pas_cpufreq_cpu_exit,

View File

@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
.init = pcc_cpufreq_cpu_init, .init = pcc_cpufreq_cpu_init,
.exit = pcc_cpufreq_cpu_exit, .exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq", .name = "pcc-cpufreq",
.owner = THIS_MODULE,
}; };
static int __init pcc_cpufreq_init(void) static int __init pcc_cpufreq_init(void)

View File

@ -25,6 +25,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/of_device.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/irq.h> #include <asm/irq.h>
@ -477,7 +478,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.flags = CPUFREQ_PM_NO_WARN, .flags = CPUFREQ_PM_NO_WARN,
.attr = pmac_cpu_freqs_attr, .attr = pmac_cpu_freqs_attr,
.name = "powermac", .name = "powermac",
.owner = THIS_MODULE,
}; };
@ -649,8 +649,8 @@ static int __init pmac_cpufreq_setup(void)
if (strstr(cmd_line, "nocpufreq")) if (strstr(cmd_line, "nocpufreq"))
return 0; return 0;
/* Assume only one CPU */ /* Get first CPU node */
cpunode = of_find_node_by_type(NULL, "cpu"); cpunode = of_cpu_device_node_get(0);
if (!cpunode) if (!cpunode)
goto out; goto out;

View File

@ -22,6 +22,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/of_device.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/irq.h> #include <asm/irq.h>
@ -371,7 +372,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver g5_cpufreq_driver = { static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac", .name = "powermac",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init, .init = g5_cpufreq_cpu_init,
.verify = g5_cpufreq_verify, .verify = g5_cpufreq_verify,
@ -383,9 +383,8 @@ static struct cpufreq_driver g5_cpufreq_driver = {
#ifdef CONFIG_PMAC_SMU #ifdef CONFIG_PMAC_SMU
static int __init g5_neo2_cpufreq_init(struct device_node *cpus) static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
{ {
struct device_node *cpunode;
unsigned int psize, ssize; unsigned int psize, ssize;
unsigned long max_freq; unsigned long max_freq;
char *freq_method, *volt_method; char *freq_method, *volt_method;
@ -405,20 +404,6 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
else else
return -ENODEV; return -ENODEV;
/* Get first CPU node */
for (cpunode = NULL;
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
if (reg == NULL || (*reg) != 0)
continue;
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
return -ENODEV;
}
/* Check 970FX for now */ /* Check 970FX for now */
valp = of_get_property(cpunode, "cpu-version", NULL); valp = of_get_property(cpunode, "cpu-version", NULL);
if (!valp) { if (!valp) {
@ -447,9 +432,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
if (!shdr) if (!shdr)
goto bail_noprops; goto bail_noprops;
g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
ssize = (shdr->len * sizeof(u32)) - ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
sizeof(struct smu_sdbp_header); g5_fvt_count = ssize / sizeof(*g5_fvt_table);
g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
g5_fvt_cur = 0; g5_fvt_cur = 0;
/* Sanity checking */ /* Sanity checking */
@ -537,9 +521,9 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
#endif /* CONFIG_PMAC_SMU */ #endif /* CONFIG_PMAC_SMU */
static int __init g5_pm72_cpufreq_init(struct device_node *cpus) static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
{ {
struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL; struct device_node *cpuid = NULL, *hwclock = NULL;
const u8 *eeprom = NULL; const u8 *eeprom = NULL;
const u32 *valp; const u32 *valp;
u64 max_freq, min_freq, ih, il; u64 max_freq, min_freq, ih, il;
@ -548,17 +532,6 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
" RackMac3,1...\n"); " RackMac3,1...\n");
/* Get first CPU node */
for (cpunode = NULL;
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
return -ENODEV;
}
/* Lookup the cpuid eeprom node */ /* Lookup the cpuid eeprom node */
cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
if (cpuid != NULL) if (cpuid != NULL)
@ -718,25 +691,25 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
static int __init g5_cpufreq_init(void) static int __init g5_cpufreq_init(void)
{ {
struct device_node *cpus; struct device_node *cpunode;
int rc = 0; int rc = 0;
cpus = of_find_node_by_path("/cpus"); /* Get first CPU node */
if (cpus == NULL) { cpunode = of_cpu_device_node_get(0);
DBG("No /cpus node !\n"); if (cpunode == NULL) {
pr_err("cpufreq: Can't find any CPU node\n");
return -ENODEV; return -ENODEV;
} }
if (of_machine_is_compatible("PowerMac7,2") || if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1")) of_machine_is_compatible("RackMac3,1"))
rc = g5_pm72_cpufreq_init(cpus); rc = g5_pm72_cpufreq_init(cpunode);
#ifdef CONFIG_PMAC_SMU #ifdef CONFIG_PMAC_SMU
else else
rc = g5_neo2_cpufreq_init(cpus); rc = g5_neo2_cpufreq_init(cpunode);
#endif /* CONFIG_PMAC_SMU */ #endif /* CONFIG_PMAC_SMU */
of_node_put(cpus);
return rc; return rc;
} }

View File

@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit, .exit = powernow_k6_cpu_exit,
.get = powernow_k6_get, .get = powernow_k6_get,
.name = "powernow-k6", .name = "powernow-k6",
.owner = THIS_MODULE,
.attr = powernow_k6_attr, .attr = powernow_k6_attr,
}; };

View File

@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst)
unsigned int speed; unsigned int speed;
u8 fid, vid; u8 fid, vid;
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL); (number_scales + 1)), GFP_KERNEL);
if (!powernow_table) if (!powernow_table)
return -ENOMEM; return -ENOMEM;
@ -309,8 +309,7 @@ static int powernow_acpi_init(void)
goto err0; goto err0;
} }
acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
GFP_KERNEL);
if (!acpi_processor_perf) { if (!acpi_processor_perf) {
retval = -ENOMEM; retval = -ENOMEM;
goto err0; goto err0;
@ -346,7 +345,7 @@ static int powernow_acpi_init(void)
goto err2; goto err2;
} }
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL); (number_scales + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
retval = -ENOMEM; retval = -ENOMEM;
@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
"relevant to this CPU).\n", "relevant to this CPU).\n",
psb->numpst); psb->numpst);
p += sizeof(struct psb_s); p += sizeof(*psb);
pst = (struct pst_s *) p; pst = (struct pst_s *) p;
@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid)
(maxfid == pst->maxfid) && (maxfid == pst->maxfid) &&
(startvid == pst->startvid)) { (startvid == pst->startvid)) {
print_pst_entry(pst, j); print_pst_entry(pst, j);
p = (char *)pst + sizeof(struct pst_s); p = (char *)pst + sizeof(*pst);
ret = get_ranges(p); ret = get_ranges(p);
return ret; return ret;
} else { } else {
unsigned int k; unsigned int k;
p = (char *)pst + sizeof(struct pst_s); p = (char *)pst + sizeof(*pst);
for (k = 0; k < number_scales; k++) for (k = 0; k < number_scales; k++)
p += 2; p += 2;
} }
@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init, .init = powernow_cpu_init,
.exit = powernow_cpu_exit, .exit = powernow_cpu_exit,
.name = "powernow-k7", .name = "powernow-k7",
.owner = THIS_MODULE,
.attr = powernow_table_attr, .attr = powernow_table_attr,
}; };

View File

@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
if (check_pst_table(data, pst, maxvid)) if (check_pst_table(data, pst, maxvid))
return -EINVAL; return -EINVAL;
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) powernow_table = kmalloc((sizeof(*powernow_table)
* (data->numps + 1)), GFP_KERNEL); * (data->numps + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
} }
/* fill in data->powernow_table */ /* fill in data->powernow_table */
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) powernow_table = kmalloc((sizeof(*powernow_table)
* (data->acpi_data.state_count + 1)), GFP_KERNEL); * (data->acpi_data.state_count + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
pr_debug("powernow_table memory alloc failure\n"); pr_debug("powernow_table memory alloc failure\n");
@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc) if (rc)
return -ENODEV; return -ENODEV;
data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) { if (!data) {
printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
return -ENOMEM; return -ENOMEM;
@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit, .exit = powernowk8_cpu_exit,
.get = powernowk8_get, .get = powernowk8_get,
.name = "powernow-k8", .name = "powernow-k8",
.owner = THIS_MODULE,
.attr = powernow_k8_attr, .attr = powernow_k8_attr,
}; };

View File

@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = {
static struct cpufreq_driver ppc_corenet_cpufreq_driver = { static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq", .name = "ppc_cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init, .init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit), .exit = __exit_p(corenet_cpufreq_cpu_exit),

View File

@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.init = cbe_cpufreq_cpu_init, .init = cbe_cpufreq_cpu_init,
.exit = cbe_cpufreq_cpu_exit, .exit = cbe_cpufreq_cpu_exit,
.name = "cbe-cpufreq", .name = "cbe-cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
}; };

View File

@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return ret; return ret;
} }
static __init void pxa_cpufreq_init_voltages(void) static void __init pxa_cpufreq_init_voltages(void)
{ {
vcc_core = regulator_get(NULL, "vcc_core"); vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) { if (IS_ERR(vcc_core)) {
@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return 0; return 0;
} }
static __init void pxa_cpufreq_init_voltages(void) { } static void __init pxa_cpufreq_init_voltages(void) { }
#endif #endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table, static void find_freq_tables(struct cpufreq_frequency_table **freq_table,

View File

@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
policy->cur = policy->min = policy->max; policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310()) if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); ret = setup_freqs_table(policy, pxa300_freqs,
ARRAY_SIZE(pxa300_freqs));
if (cpu_is_pxa320()) if (cpu_is_pxa320())
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); ret = setup_freqs_table(policy, pxa320_freqs,
ARRAY_SIZE(pxa320_freqs));
if (ret) { if (ret) {
pr_err("failed to setup frequency table\n"); pr_err("failed to setup frequency table\n");

View File

@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = {
}; };
static struct cpufreq_driver s3c2416_cpufreq_driver = { static struct cpufreq_driver s3c2416_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0, .flags = 0,
.verify = s3c2416_cpufreq_verify_speed, .verify = s3c2416_cpufreq_verify_speed,
.target = s3c2416_cpufreq_set_target, .target = s3c2416_cpufreq_set_target,

View File

@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
return 0; return 0;
} }
static __init int s3c_cpufreq_initclks(void) static int __init s3c_cpufreq_initclks(void)
{ {
_clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll"); _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
_clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal"); _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
/* Copy the board information so that each board can make this /* Copy the board information so that each board can make this
* initdata. */ * initdata. */
ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL); ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) { if (ours == NULL) {
printk(KERN_ERR "%s: no memory\n", __func__); printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM; return -ENOMEM;
@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void)
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++; size++;
ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL); ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) { if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__); printk(KERN_ERR "%s: no memory for tables\n", __func__);
return -ENOMEM; return -ENOMEM;
@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
struct cpufreq_frequency_table *vals; struct cpufreq_frequency_table *vals;
unsigned int size; unsigned int size;
size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1); size = sizeof(*vals) * (plls_no + 1);
vals = kmalloc(size, GFP_KERNEL); vals = kmalloc(size, GFP_KERNEL);
if (vals) { if (vals) {

View File

@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
} }
static struct cpufreq_driver s3c64xx_cpufreq_driver = { static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0, .flags = 0,
.verify = s3c64xx_cpufreq_verify_speed, .verify = s3c64xx_cpufreq_verify_speed,
.target = s3c64xx_cpufreq_set_target, .target = s3c64xx_cpufreq_set_target,

View File

@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.init = sc520_freq_cpu_init, .init = sc520_freq_cpu_init,
.exit = sc520_freq_cpu_exit, .exit = sc520_freq_cpu_exit,
.name = "sc520_freq", .name = "sc520_freq",
.owner = THIS_MODULE,
.attr = sc520_freq_attr, .attr = sc520_freq_attr,
}; };

View File

@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = {
}; };
static struct cpufreq_driver sh_cpufreq_driver = { static struct cpufreq_driver sh_cpufreq_driver = {
.owner = THIS_MODULE,
.name = "sh", .name = "sh",
.get = sh_cpufreq_get, .get = sh_cpufreq_get,
.target = sh_cpufreq_target, .target = sh_cpufreq_target,

View File

@ -351,12 +351,11 @@ static int __init us2e_freq_init(void)
struct cpufreq_driver *driver; struct cpufreq_driver *driver;
ret = -ENOMEM; ret = -ENOMEM;
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver) if (!driver)
goto err_out; goto err_out;
us2e_freq_table = kzalloc( us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
GFP_KERNEL); GFP_KERNEL);
if (!us2e_freq_table) if (!us2e_freq_table)
goto err_out; goto err_out;
@ -366,7 +365,6 @@ static int __init us2e_freq_init(void)
driver->target = us2e_freq_target; driver->target = us2e_freq_target;
driver->get = us2e_freq_get; driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit; driver->exit = us2e_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-IIe"); strcpy(driver->name, "UltraSPARC-IIe");
cpufreq_us2e_driver = driver; cpufreq_us2e_driver = driver;

View File

@ -212,12 +212,11 @@ static int __init us3_freq_init(void)
struct cpufreq_driver *driver; struct cpufreq_driver *driver;
ret = -ENOMEM; ret = -ENOMEM;
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver) if (!driver)
goto err_out; goto err_out;
us3_freq_table = kzalloc( us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
GFP_KERNEL); GFP_KERNEL);
if (!us3_freq_table) if (!us3_freq_table)
goto err_out; goto err_out;
@ -227,7 +226,6 @@ static int __init us3_freq_init(void)
driver->target = us3_freq_target; driver->target = us3_freq_target;
driver->get = us3_freq_get; driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit; driver->exit = us3_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III"); strcpy(driver->name, "UltraSPARC-III");
cpufreq_us3_driver = driver; cpufreq_us3_driver = driver;

View File

@ -18,7 +18,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h> #include <linux/of_device.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/types.h> #include <linux/types.h>
@ -223,7 +223,7 @@ static int spear_cpufreq_driver_init(void)
const __be32 *val; const __be32 *val;
int cnt, i, ret; int cnt, i, ret;
np = of_find_node_by_path("/cpus/cpu@0"); np = of_cpu_device_node_get(0);
if (!np) { if (!np) {
pr_err("No cpu node found"); pr_err("No cpu node found");
return -ENODEV; return -ENODEV;

View File

@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = {
.target = centrino_target, .target = centrino_target,
.get = get_cur_freq, .get = get_cur_freq,
.attr = centrino_attr, .attr = centrino_attr,
.owner = THIS_MODULE,
}; };
/* /*

View File

@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init, .init = speedstep_cpu_init,
.exit = speedstep_cpu_exit, .exit = speedstep_cpu_exit,
.get = speedstep_get, .get = speedstep_get,
.owner = THIS_MODULE,
.attr = speedstep_attr, .attr = speedstep_attr,
}; };

View File

@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = {
.exit = speedstep_cpu_exit, .exit = speedstep_cpu_exit,
.get = speedstep_get, .get = speedstep_get,
.resume = speedstep_resume, .resume = speedstep_resume,
.owner = THIS_MODULE,
.attr = speedstep_attr, .attr = speedstep_attr,
}; };

View File

@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run /* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway. * -- anything else wouldn't make sense on this platform, anyway.
*/ */
int ucv2_verify_speed(struct cpufreq_policy *policy) static int ucv2_verify_speed(struct cpufreq_policy *policy)
{ {
if (policy->cpu) if (policy->cpu)
return -EINVAL; return -EINVAL;

View File

@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
@ -230,6 +231,100 @@ const void *of_get_property(const struct device_node *np, const char *name,
} }
EXPORT_SYMBOL(of_get_property); EXPORT_SYMBOL(of_get_property);
/*
* arch_match_cpu_phys_id - Match the given logical CPU and physical id
*
* @cpu: logical cpu index of a core/thread
* @phys_id: physical identifier of a core/thread
*
* CPU logical to physical index mapping is architecture specific.
* However this __weak function provides a default match of physical
* id to logical cpu index. phys_id provided here is usually values read
* from the device tree which must match the hardware internal registers.
*
* Returns true if the physical identifier and the logical cpu index
* correspond to the same core/thread, false otherwise.
*/
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return (u32)phys_id == cpu;
}
/**
* Checks if the given "prop_name" property holds the physical id of the
* core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
* NULL, local thread number within the core is returned in it.
*/
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
const char *prop_name, int cpu, unsigned int *thread)
{
const __be32 *cell;
int ac, prop_len, tid;
u64 hwid;
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
if (!cell)
return false;
prop_len /= sizeof(*cell);
for (tid = 0; tid < prop_len; tid++) {
hwid = of_read_number(cell, ac);
if (arch_match_cpu_phys_id(cpu, hwid)) {
if (thread)
*thread = tid;
return true;
}
cell += ac;
}
return false;
}
/**
* of_get_cpu_node - Get device node associated with the given logical CPU
*
* @cpu: CPU number(logical index) for which device node is required
* @thread: if not NULL, local thread number within the physical core is
* returned
*
* The main purpose of this function is to retrieve the device node for the
* given logical CPU index. It should be used to initialize the of_node in
* cpu device. Once of_node in cpu device is populated, all the further
* references can use that instead.
*
* CPU logical to physical index mapping is architecture specific and is built
* before booting secondary cores. This function uses arch_match_cpu_phys_id
* which can be overridden by architecture specific implementation.
*
* Returns a node pointer for the logical cpu if found, else NULL.
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun, *cpus;
cpus = of_find_node_by_path("/cpus");
if (!cpus) {
pr_warn("Missing cpus node, bailing out\n");
return NULL;
}
for_each_child_of_node(cpus, cpun) {
if (of_node_cmp(cpun->type, "cpu"))
continue;
/* Check for non-standard "ibm,ppc-interrupt-server#s" property
* for thread ids on PowerPC. If it doesn't exist fallback to
* standard "reg" property.
*/
if (IS_ENABLED(CONFIG_PPC) &&
__of_find_n_match_cpu_property(cpun,
"ibm,ppc-interrupt-server#s", cpu, thread))
return cpun;
if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
return cpun;
}
return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
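
The new of_get_cpu_node() is what lets the cpufreq drivers above stop open-coding the /cpus walk: matching a "reg" (or PowerPC interrupt-server) value against a logical index is delegated to arch_match_cpu_phys_id(), which architectures may override. A rough consumer-side sketch of the intended call pattern (hypothetical helper, not taken from this commit):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>

/* Hypothetical helper: read the clock-frequency property of logical CPU 0. */
static int example_cpu0_clock(u32 *freq_hz)
{
        struct device_node *np;
        int ret;

        /* Prefer the of_node already attached to the cpu device ... */
        np = of_cpu_device_node_get(0);
        if (!np)
                /* ... and fall back to a DT lookup early in boot. */
                np = of_get_cpu_node(0, NULL);
        if (!np)
                return -ENODEV;

        ret = of_property_read_u32(np, "clock-frequency", freq_hz);
        of_node_put(np);
        return ret;
}

of_cpu_device_node_get() (added to <linux/of_device.h> later in this diff) is the preferred path once the cpu devices exist, since it only takes a reference on the of_node already populated in the cpu device.
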
/** Checks if the given "compat" string matches one of the strings in /** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property * the device's "compatible" property
*/ */

View File

@ -28,6 +28,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num); extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu); extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu); extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr); extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr); extern void cpu_remove_dev_attr(struct device_attribute *attr);

View File

@ -11,71 +11,36 @@
#ifndef _LINUX_CPUFREQ_H #ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H #define _LINUX_CPUFREQ_H
#include <asm/cputime.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/threads.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/div64.h> #include <linux/completion.h>
#include <linux/kobject.h>
#define CPUFREQ_NAME_LEN 16 #include <linux/notifier.h>
/* Print length for names. Extra 1 space for accommodating '\n' in prints */ #include <linux/sysfs.h>
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
/********************************************************************* /*********************************************************************
* CPUFREQ NOTIFIER INTERFACE * * CPUFREQ INTERFACE *
*********************************************************************/ *********************************************************************/
/*
#define CPUFREQ_TRANSITION_NOTIFIER (0) * Frequency values here are CPU kHz
#define CPUFREQ_POLICY_NOTIFIER (1) *
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
extern void disable_cpufreq(void);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline void disable_cpufreq(void) { }
#endif /* CONFIG_CPU_FREQ */
/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Frequency values here are CPU kHz so that hardware which doesn't run
* with some frequencies can complain without having to guess what per
* cent / per mille means.
* Maximum transition latency is in nanoseconds - if it's unknown, * Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used. * CPUFREQ_ETERNAL shall be used.
*/ */
#define CPUFREQ_ETERNAL (-1)
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
struct cpufreq_governor; struct cpufreq_governor;
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ struct cpufreq_freqs {
extern struct kobject *cpufreq_global_kobject; unsigned int cpu; /* cpu nr */
int cpufreq_get_global_kobject(void); unsigned int old;
void cpufreq_put_global_kobject(void); unsigned int new;
int cpufreq_sysfs_create_file(const struct attribute *attr); u8 flags; /* flags of cpufreq_driver, see below. */
void cpufreq_sysfs_remove_file(const struct attribute *attr); };
#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo { struct cpufreq_cpuinfo {
unsigned int max_freq; unsigned int max_freq;
unsigned int min_freq; unsigned int min_freq;
@ -117,111 +82,59 @@ struct cpufreq_policy {
struct cpufreq_real_policy user_policy; struct cpufreq_real_policy user_policy;
struct list_head policy_list;
struct kobject kobj; struct kobject kobj;
struct completion kobj_unregister; struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */ int transition_ongoing; /* Tracks transition status */
}; };
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
/* Only for ACPI */ /* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
static inline bool policy_is_shared(struct cpufreq_policy *policy) static inline bool policy_is_shared(struct cpufreq_policy *policy)
{ {
return cpumask_weight(policy->cpus) > 1; return cpumask_weight(policy->cpus) > 1;
} }
/******************** cpufreq transition notifiers *******************/ /* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
int cpufreq_get_global_kobject(void);
void cpufreq_put_global_kobject(void);
int cpufreq_sysfs_create_file(const struct attribute *attr);
void cpufreq_sysfs_remove_file(const struct attribute *attr);
#define CPUFREQ_PRECHANGE (0) #ifdef CONFIG_CPU_FREQ
#define CPUFREQ_POSTCHANGE (1) unsigned int cpufreq_get(unsigned int cpu);
#define CPUFREQ_RESUMECHANGE (8) unsigned int cpufreq_quick_get(unsigned int cpu);
#define CPUFREQ_SUSPENDCHANGE (9) unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);
struct cpufreq_freqs { u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
unsigned int cpu; /* cpu nr */ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
unsigned int old; int cpufreq_update_policy(unsigned int cpu);
unsigned int new; bool have_governor_per_policy(void);
u8 flags; /* flags of cpufreq_driver, see below. */ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
}; #else
static inline unsigned int cpufreq_get(unsigned int cpu)
/**
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
* safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
{ {
#if BITS_PER_LONG == 32 return 0;
}
u64 result = ((u64) old) * ((u64) mult); static inline unsigned int cpufreq_quick_get(unsigned int cpu)
do_div(result, div); {
return (unsigned long) result; return 0;
}
#elif BITS_PER_LONG == 64 static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
unsigned long result = old * ((u64) mult); return 0;
result /= div; }
return result; static inline void disable_cpufreq(void) { }
#endif #endif
};
/*********************************************************************
* CPUFREQ GOVERNORS *
*********************************************************************/
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
will fallback to performance governor */
struct list_head governor_list;
struct module *owner;
};
/*
* Pass a target to the cpufreq driver.
*/
extern int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
unsigned int cpu);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
/********************************************************************* /*********************************************************************
* CPUFREQ DRIVER INTERFACE * * CPUFREQ DRIVER INTERFACE *
@ -230,76 +143,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
struct freq_attr;
struct cpufreq_driver {
struct module *owner;
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this sysfs directories of governor
* would be created in cpu/cpu<num>/cpufreq/ directory and so they can
* use the same governor with different tunables for different clusters.
*/
bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
int (*verify) (struct cpufreq_policy *policy);
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
int (*target) (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
/* optional */
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
/* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
{
if (policy->min < min)
policy->min = min;
if (policy->max < min)
policy->max = min;
if (policy->min > max)
policy->min = max;
if (policy->max > max)
policy->max = max;
if (policy->min > policy->max)
policy->min = policy->max;
return;
}
struct freq_attr { struct freq_attr {
struct attribute attr; struct attribute attr;
ssize_t (*show)(struct cpufreq_policy *, char *); ssize_t (*show)(struct cpufreq_policy *, char *);
@ -334,52 +177,181 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct global_attr _name = \ static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name) __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *data); struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this sysfs directories of governor
* would be created in cpu/cpu<num>/cpufreq/ directory and so they can
* use the same governor with different tunables for different clusters.
*/
bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
int (*verify) (struct cpufreq_policy *policy);
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
int (*target) (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
/* optional */
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
/* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
const char *cpufreq_get_current_driver(void); const char *cpufreq_get_current_driver(void);
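
Every driver hunk above drops ".owner = THIS_MODULE" because the field no longer exists in struct cpufreq_driver; registration is otherwise unchanged. A skeletal single-frequency driver against this interface would now look roughly like the sketch below (all names and the fixed 800 MHz value are hypothetical), with the verify callback showing the intended pairing with cpufreq_verify_within_limits() defined just after this point:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

#define EXAMPLE_FREQ_KHZ 800000        /* hypothetical fixed frequency */

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        policy->cpuinfo.min_freq = EXAMPLE_FREQ_KHZ;
        policy->cpuinfo.max_freq = EXAMPLE_FREQ_KHZ;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        policy->min = policy->max = policy->cur = EXAMPLE_FREQ_KHZ;
        return 0;
}

static int example_cpufreq_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
}

static int example_cpufreq_target(struct cpufreq_policy *policy,
                                  unsigned int target_freq,
                                  unsigned int relation)
{
        return 0;        /* single frequency: nothing to switch */
}

static unsigned int example_cpufreq_get(unsigned int cpu)
{
        return EXAMPLE_FREQ_KHZ;
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name   = "example",        /* note: no .owner = THIS_MODULE any more */
        .init   = example_cpufreq_init,
        .verify = example_cpufreq_verify,
        .target = example_cpufreq_target,
        .get    = example_cpufreq_get,
};

static int __init example_cpufreq_register(void)
{
        return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_register);
MODULE_LICENSE("GPL");
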
/********************************************************************* static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
* CPUFREQ 2.6. INTERFACE * unsigned int min, unsigned int max)
*********************************************************************/
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#ifdef CONFIG_CPU_FREQ
/*
* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
*/
unsigned int cpufreq_get(unsigned int cpu);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{ {
return 0; if (policy->min < min)
policy->min = min;
if (policy->max < min)
policy->max = min;
if (policy->min > max)
policy->min = max;
if (policy->max > max)
policy->max = max;
if (policy->min > policy->max)
policy->min = policy->max;
return;
} }
#endif
/*
* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
*/
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
#else
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
#endif
/********************************************************************* /*********************************************************************
* CPUFREQ DEFAULT GOVERNOR * * CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/ *********************************************************************/
#define CPUFREQ_TRANSITION_NOTIFIER (0)
#define CPUFREQ_POLICY_NOTIFIER (1)
/* Transition notifiers */
#define CPUFREQ_PRECHANGE (0)
#define CPUFREQ_POSTCHANGE (1)
#define CPUFREQ_RESUMECHANGE (8)
#define CPUFREQ_SUSPENDCHANGE (9)
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
#endif /* !CONFIG_CPU_FREQ */
/**
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
* safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
{
#if BITS_PER_LONG == 32
u64 result = ((u64) old) * ((u64) mult);
do_div(result, div);
return (unsigned long) result;
#elif BITS_PER_LONG == 64
unsigned long result = old * ((u64) mult);
result /= div;
return result;
#endif
}
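
cpufreq_scale() is only relocated here, not changed. Its classic use is rescaling a calibration value such as loops_per_jiffy around a frequency transition; a small worked example with made-up numbers:

#include <linux/cpufreq.h>

/* Hypothetical transition: 500 MHz -> 1 GHz; rescale a calibration value. */
static unsigned long example_rescale_lpj(unsigned long loops_per_jiffy)
{
        unsigned int old_khz = 500000, new_khz = 1000000;

        /* new = old * mult / div, done in 64 bits on 32-bit machines */
        return cpufreq_scale(loops_per_jiffy, old_khz, new_khz);
}
/* e.g. 2489344 loops/jiffy at 500 MHz becomes 4978688 at 1 GHz */
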
/*********************************************************************
* CPUFREQ GOVERNORS *
*********************************************************************/
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Governor Events */
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
will fallback to performance governor */
struct list_head governor_list;
struct module *owner;
};
/* Pass a target to the cpufreq driver */
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
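
The governor definitions are likewise only regrouped; note that struct cpufreq_governor keeps its .owner field even though struct cpufreq_driver lost its own. A minimal "always run at policy->max" governor sketch against this interface (hypothetical, not part of this commit):

#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_governor(struct cpufreq_policy *policy, unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                /* pin the policy to its current maximum */
                __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
                break;
        default:
                break;
        }
        return 0;
}

static struct cpufreq_governor example_gov = {
        .name     = "example_max",
        .governor = example_governor,
        .owner    = THIS_MODULE,        /* governors still carry an owner */
};

static int __init example_gov_init(void)
{
        return cpufreq_register_governor(&example_gov);
}

static void __exit example_gov_exit(void)
{
        cpufreq_unregister_governor(&example_gov);
}

module_init(example_gov_init);
module_exit(example_gov_exit);
MODULE_LICENSE("GPL");
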
/* CPUFREQ DEFAULT GOVERNOR */
/* /*
* Performance governor is fallback governor if any other gov failed to auto * Performance governor is fallback governor if any other gov failed to auto
* load due to latency restrictions * load due to latency restrictions
@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int relation, unsigned int relation,
unsigned int *index); unsigned int *index);
/* the following 3 functions are for cpufreq core use only */ void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */ /* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu); unsigned int cpu);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#endif /* _LINUX_CPUFREQ_H */ #endif /* _LINUX_CPUFREQ_H */

View File

@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node, extern const void *of_get_property(const struct device_node *node,
const char *name, const char *name,
int *lenp); int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \ #define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next) for (pp = dn->properties; pp != NULL; pp = pp->next)
@ -459,6 +460,12 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL; return NULL;
} }
static inline struct device_node *of_get_cpu_node(int cpu,
unsigned int *thread)
{
return NULL;
}
static inline int of_property_read_u64(const struct device_node *np, static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value) const char *propname, u64 *out_value)
{ {

View File

@ -1,6 +1,7 @@
#ifndef _LINUX_OF_DEVICE_H #ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H #define _LINUX_OF_DEVICE_H
#include <linux/cpu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */ #include <linux/of_platform.h> /* temporary until merge */
@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
of_node_put(dev->of_node); of_node_put(dev->of_node);
} }
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return NULL;
return of_node_get(cpu_dev->of_node);
}
#else /* CONFIG_OF */ #else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev, static inline int of_driver_match_device(struct device *dev,
@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
{ {
return NULL; return NULL;
} }
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
#endif /* CONFIG_OF */ #endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */ #endif /* _LINUX_OF_DEVICE_H */