Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-12-02 18:06:40 +07:00
e8d05276f2
commit 2f7021a8 ("cpufreq: protect 'policy->cpus' from offlining during __gov_queue_work()") caused a regression in CPU hotplug, because it led to a deadlock between the cpufreq governor worker thread and the CPU hotplug writer task. The lockdep splat corresponding to this deadlock is shown below:

[ 60.277396] ======================================================
[ 60.277400] [ INFO: possible circular locking dependency detected ]
[ 60.277407] 3.10.0-rc7-dbg-01385-g241fd04-dirty #1744 Not tainted
[ 60.277411] -------------------------------------------------------
[ 60.277417] bash/2225 is trying to acquire lock:
[ 60.277422]  ((&(&j_cdbs->work)->work)){+.+...}, at: [<ffffffff810621b5>] flush_work+0x5/0x280
[ 60.277444] but task is already holding lock:
[ 60.277449]  (cpu_hotplug.lock){+.+.+.}, at: [<ffffffff81042d8b>] cpu_hotplug_begin+0x2b/0x60
[ 60.277465] which lock already depends on the new lock.
[ 60.277472] the existing dependency chain (in reverse order) is:
[ 60.277477] -> #2 (cpu_hotplug.lock){+.+.+.}:
[ 60.277490]    [<ffffffff810ac6d4>] lock_acquire+0xa4/0x200
[ 60.277503]    [<ffffffff815b6157>] mutex_lock_nested+0x67/0x410
[ 60.277514]    [<ffffffff81042cbc>] get_online_cpus+0x3c/0x60
[ 60.277522]    [<ffffffff814b842a>] gov_queue_work+0x2a/0xb0
[ 60.277532]    [<ffffffff814b7891>] cs_dbs_timer+0xc1/0xe0
[ 60.277543]    [<ffffffff8106302d>] process_one_work+0x1cd/0x6a0
[ 60.277552]    [<ffffffff81063d31>] worker_thread+0x121/0x3a0
[ 60.277560]    [<ffffffff8106ae2b>] kthread+0xdb/0xe0
[ 60.277569]    [<ffffffff815bb96c>] ret_from_fork+0x7c/0xb0
[ 60.277580] -> #1 (&j_cdbs->timer_mutex){+.+...}:
[ 60.277592]    [<ffffffff810ac6d4>] lock_acquire+0xa4/0x200
[ 60.277600]    [<ffffffff815b6157>] mutex_lock_nested+0x67/0x410
[ 60.277608]    [<ffffffff814b785d>] cs_dbs_timer+0x8d/0xe0
[ 60.277616]    [<ffffffff8106302d>] process_one_work+0x1cd/0x6a0
[ 60.277624]    [<ffffffff81063d31>] worker_thread+0x121/0x3a0
[ 60.277633]    [<ffffffff8106ae2b>] kthread+0xdb/0xe0
[ 60.277640]    [<ffffffff815bb96c>] ret_from_fork+0x7c/0xb0
[ 60.277649] -> #0 ((&(&j_cdbs->work)->work)){+.+...}:
[ 60.277661]    [<ffffffff810ab826>] __lock_acquire+0x1766/0x1d30
[ 60.277669]    [<ffffffff810ac6d4>] lock_acquire+0xa4/0x200
[ 60.277677]    [<ffffffff810621ed>] flush_work+0x3d/0x280
[ 60.277685]    [<ffffffff81062d8a>] __cancel_work_timer+0x8a/0x120
[ 60.277693]    [<ffffffff81062e53>] cancel_delayed_work_sync+0x13/0x20
[ 60.277701]    [<ffffffff814b89d9>] cpufreq_governor_dbs+0x529/0x6f0
[ 60.277709]    [<ffffffff814b76a7>] cs_cpufreq_governor_dbs+0x17/0x20
[ 60.277719]    [<ffffffff814b5df8>] __cpufreq_governor+0x48/0x100
[ 60.277728]    [<ffffffff814b6b80>] __cpufreq_remove_dev.isra.14+0x80/0x3c0
[ 60.277737]    [<ffffffff815adc0d>] cpufreq_cpu_callback+0x38/0x4c
[ 60.277747]    [<ffffffff81071a4d>] notifier_call_chain+0x5d/0x110
[ 60.277759]    [<ffffffff81071b0e>] __raw_notifier_call_chain+0xe/0x10
[ 60.277768]    [<ffffffff815a0a68>] _cpu_down+0x88/0x330
[ 60.277779]    [<ffffffff815a0d46>] cpu_down+0x36/0x50
[ 60.277788]    [<ffffffff815a2748>] store_online+0x98/0xd0
[ 60.277796]    [<ffffffff81452a28>] dev_attr_store+0x18/0x30
[ 60.277806]    [<ffffffff811d9edb>] sysfs_write_file+0xdb/0x150
[ 60.277818]    [<ffffffff8116806d>] vfs_write+0xbd/0x1f0
[ 60.277826]    [<ffffffff811686fc>] SyS_write+0x4c/0xa0
[ 60.277834]    [<ffffffff815bbbbe>] tracesys+0xd0/0xd5
[ 60.277842] other info that might help us debug this:
[ 60.277848] Chain exists of: (&(&j_cdbs->work)->work) --> &j_cdbs->timer_mutex --> cpu_hotplug.lock
[ 60.277864] Possible unsafe locking scenario:
[ 60.277869]        CPU0                    CPU1
[ 60.277873]        ----                    ----
[ 60.277877]   lock(cpu_hotplug.lock);
[ 60.277885]                           lock(&j_cdbs->timer_mutex);
[ 60.277892]                           lock(cpu_hotplug.lock);
[ 60.277900]   lock((&(&j_cdbs->work)->work));
[ 60.277907] *** DEADLOCK ***
[ 60.277915] 6 locks held by bash/2225:
[ 60.277919] #0:  (sb_writers#6){.+.+.+}, at: [<ffffffff81168173>] vfs_write+0x1c3/0x1f0
[ 60.277937] #1:  (&buffer->mutex){+.+.+.}, at: [<ffffffff811d9e3c>] sysfs_write_file+0x3c/0x150
[ 60.277954] #2:  (s_active#61){.+.+.+}, at: [<ffffffff811d9ec3>] sysfs_write_file+0xc3/0x150
[ 60.277972] #3:  (x86_cpu_hotplug_driver_mutex){+.+...}, at: [<ffffffff81024cf7>] cpu_hotplug_driver_lock+0x17/0x20
[ 60.277990] #4:  (cpu_add_remove_lock){+.+.+.}, at: [<ffffffff815a0d32>] cpu_down+0x22/0x50
[ 60.278007] #5:  (cpu_hotplug.lock){+.+.+.}, at: [<ffffffff81042d8b>] cpu_hotplug_begin+0x2b/0x60
[ 60.278023] stack backtrace:
[ 60.278031] CPU: 3 PID: 2225 Comm: bash Not tainted 3.10.0-rc7-dbg-01385-g241fd04-dirty #1744
[ 60.278037] Hardware name: Acer Aspire 5741G /Aspire 5741G, BIOS V1.20 02/08/2011
[ 60.278042]  ffffffff8204e110 ffff88014df6b9f8 ffffffff815b3d90 ffff88014df6ba38
[ 60.278055]  ffffffff815b0a8d ffff880150ed3f60 ffff880150ed4770 3871c4002c8980b2
[ 60.278068]  ffff880150ed4748 ffff880150ed4770 ffff880150ed3f60 ffff88014df6bb00
[ 60.278081] Call Trace:
[ 60.278091]  [<ffffffff815b3d90>] dump_stack+0x19/0x1b
[ 60.278101]  [<ffffffff815b0a8d>] print_circular_bug+0x2b6/0x2c5
[ 60.278111]  [<ffffffff810ab826>] __lock_acquire+0x1766/0x1d30
[ 60.278123]  [<ffffffff81067e08>] ? __kernel_text_address+0x58/0x80
[ 60.278134]  [<ffffffff810ac6d4>] lock_acquire+0xa4/0x200
[ 60.278142]  [<ffffffff810621b5>] ? flush_work+0x5/0x280
[ 60.278151]  [<ffffffff810621ed>] flush_work+0x3d/0x280
[ 60.278159]  [<ffffffff810621b5>] ? flush_work+0x5/0x280
[ 60.278169]  [<ffffffff810a9b14>] ? mark_held_locks+0x94/0x140
[ 60.278178]  [<ffffffff81062d77>] ? __cancel_work_timer+0x77/0x120
[ 60.278188]  [<ffffffff810a9cbd>] ? trace_hardirqs_on_caller+0xfd/0x1c0
[ 60.278196]  [<ffffffff81062d8a>] __cancel_work_timer+0x8a/0x120
[ 60.278206]  [<ffffffff81062e53>] cancel_delayed_work_sync+0x13/0x20
[ 60.278214]  [<ffffffff814b89d9>] cpufreq_governor_dbs+0x529/0x6f0
[ 60.278225]  [<ffffffff814b76a7>] cs_cpufreq_governor_dbs+0x17/0x20
[ 60.278234]  [<ffffffff814b5df8>] __cpufreq_governor+0x48/0x100
[ 60.278244]  [<ffffffff814b6b80>] __cpufreq_remove_dev.isra.14+0x80/0x3c0
[ 60.278255]  [<ffffffff815adc0d>] cpufreq_cpu_callback+0x38/0x4c
[ 60.278265]  [<ffffffff81071a4d>] notifier_call_chain+0x5d/0x110
[ 60.278275]  [<ffffffff81071b0e>] __raw_notifier_call_chain+0xe/0x10
[ 60.278284]  [<ffffffff815a0a68>] _cpu_down+0x88/0x330
[ 60.278292]  [<ffffffff81024cf7>] ? cpu_hotplug_driver_lock+0x17/0x20
[ 60.278302]  [<ffffffff815a0d46>] cpu_down+0x36/0x50
[ 60.278311]  [<ffffffff815a2748>] store_online+0x98/0xd0
[ 60.278320]  [<ffffffff81452a28>] dev_attr_store+0x18/0x30
[ 60.278329]  [<ffffffff811d9edb>] sysfs_write_file+0xdb/0x150
[ 60.278337]  [<ffffffff8116806d>] vfs_write+0xbd/0x1f0
[ 60.278347]  [<ffffffff81185950>] ? fget_light+0x320/0x4b0
[ 60.278355]  [<ffffffff811686fc>] SyS_write+0x4c/0xa0
[ 60.278364]  [<ffffffff815bbbbe>] tracesys+0xd0/0xd5
[ 60.280582] smpboot: CPU 1 is now offline
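The splat is the classic ABBA inversion: the governor work item takes j_cdbs->timer_mutex and then cpu_hotplug.lock (via get_online_cpus() in gov_queue_work()), while the hotplug writer holds cpu_hotplug.lock and then, through cancel_delayed_work_sync(), waits for that very work item. Below is a minimal userspace sketch of the pattern, for illustration only: the two pthread mutexes are hypothetical stand-ins for the kernel locks, and the writer's second lock stands in for the wait in flush_work().

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ cpu_hotplug.lock */
static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;  /* ~ j_cdbs->timer_mutex */

/* ~ the governor work item: cs_dbs_timer() -> gov_queue_work() -> get_online_cpus() */
static void *governor_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&timer_mutex);
        sleep(1);                          /* widen the race window */
        puts("worker: waiting for hotplug_lock");
        pthread_mutex_lock(&hotplug_lock); /* blocks: the writer already holds it */
        pthread_mutex_unlock(&hotplug_lock);
        pthread_mutex_unlock(&timer_mutex);
        return NULL;
}

/* ~ the hotplug writer: cpu_down() -> GOV_STOP -> cancel_delayed_work_sync() */
static void *hotplug_writer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&hotplug_lock); /* ~ cpu_hotplug_begin() */
        sleep(1);
        puts("writer: waiting for the work item");
        pthread_mutex_lock(&timer_mutex);  /* blocks: the worker holds it -> deadlock */
        pthread_mutex_unlock(&timer_mutex);
        pthread_mutex_unlock(&hotplug_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker, writer;

        pthread_create(&worker, NULL, governor_worker, NULL);
        pthread_create(&writer, NULL, hotplug_writer, NULL);
        pthread_join(worker, NULL);        /* never returns: circular wait */
        pthread_join(writer, NULL);
        return 0;
}

Build with "cc -pthread"; both threads hang exactly as CPU0 and CPU1 do in the scenario table above.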
The intention of that commit was to avoid warnings during CPU hotplug, which indicated that offline CPUs were getting IPIs from the cpufreq governor's work items. But the real root cause of that problem was commit a66b2e5 (cpufreq: Preserve sysfs files across suspend/resume), because it totally skipped all the cpufreq callbacks during CPU hotplug in the suspend/resume path, and hence never actually shut down the cpufreq governor's worker threads during CPU offline in the suspend/resume path.

Reflecting back, the reason we never suspected that commit as the root cause earlier was that the original issue was reported with just the halt command, and nobody had brought suspend/resume into the equation. The reason for _that_, in turn, is that halt/shutdown used to be done by disabling non-boot CPUs while tasks were frozen, just like suspend/resume... but commit cf7df378a (reboot: migrate shutdown/reboot to boot cpu), which came in around the same time, changed that logic: shutdown/halt no longer takes CPUs offline. Thus the test cases for reproducing the bug were vastly different, and we went totally off the trail.

Overall, it was one hell of a confusion, with so many commits affecting each other and also affecting the symptoms of the problem in subtle ways. Finally, now that the original problematic commit (a66b2e5) has been completely reverted, revert this intermediate fix too (2f7021a8) to fix the CPU hotplug deadlock. Phew!

Reported-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Tested-by: Peter Wu <lekensteyn@gmail.com>
Cc: 3.10+ <stable@vger.kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
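For context, the trigger visible in the backtrace (sysfs_write_file -> store_online -> cpu_down) is simply offlining a CPU from a shell while the conservative governor has a work item in flight, i.e. something like:

    echo 0 > /sys/devices/system/cpu/cpu1/online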
387 lines
10 KiB
C
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
        if (have_governor_per_policy())
                return dbs_data->cdata->attr_group_gov_pol;
        else
                return dbs_data->cdata->attr_group_gov_sys;
}

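/*
 * Evaluate the load since the last sample for every CPU in the policy and
 * hand the maximum to the governor's gov_check_cpu() callback: for each CPU,
 * take the wall-time and idle-time deltas (optionally counting nice time as
 * idle) and compute load = 100 * (wall - idle) / wall; ondemand additionally
 * multiplies by the average frequency to get the load in frequency terms.
 */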
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        struct cpufreq_policy *policy;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;

        if (dbs_data->cdata->governor == GOV_ONDEMAND)
                ignore_nice = od_tuners->ignore_nice;
        else
                ignore_nice = cs_tuners->ignore_nice;

        policy = cdbs->cur_policy;

        /* Get Absolute Load (in terms of freq for ondemand gov) */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_common_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
                int io_busy = 0;

                j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

                /*
                 * For the purpose of ondemand, waiting for disk IO is
                 * an indication that you're performance critical, and
                 * not that the system is actually idle. So do not add
                 * the iowait time to the cpu idle time.
                 */
                if (dbs_data->cdata->governor == GOV_ONDEMAND)
                        io_busy = od_tuners->io_is_busy;
                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = (unsigned int)
                        (cur_wall_time - j_cdbs->prev_cpu_wall);
                j_cdbs->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int)
                        (cur_idle_time - j_cdbs->prev_cpu_idle);
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
                                        cdbs->prev_cpu_nice;
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies for 32 bit sys
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        cdbs->prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;

                if (dbs_data->cdata->governor == GOV_ONDEMAND) {
                        int freq_avg = __cpufreq_driver_getavg(policy, j);
                        if (freq_avg <= 0)
                                freq_avg = policy->cur;

                        load *= freq_avg;
                }

                if (load > max_load)
                        max_load = load;
        }

        dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

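/*
 * Queue the governor's deferrable work on a given CPU. As the commit message
 * above explains, the reverted commit 2f7021a8 had wrapped the queueing loop
 * in gov_queue_work() below in get_online_cpus()/put_online_cpus(); taking
 * cpu_hotplug.lock from the governor work item is what closed the circular
 * lock chain in the lockdep splat, so the revert drops that again.
 */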
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
{
        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

        mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}

void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                unsigned int delay, bool all_cpus)
{
        int i;

        if (!all_cpus) {
                __gov_queue_work(smp_processor_id(), dbs_data, delay);
        } else {
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
        }
}
EXPORT_SYMBOL_GPL(gov_queue_work);

static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
{
        struct cpu_dbs_common_info *cdbs;
        int i;

        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
                cancel_delayed_work_sync(&cdbs->work);
        }
}

/* Return true if we need to evaluate the cpu load again, false otherwise */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
                unsigned int sampling_rate)
{
        if (policy_is_shared(cdbs->cur_policy)) {
                ktime_t time_now = ktime_get();
                s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

                /* Do nothing if we recently have sampled */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
                        cdbs->time_stamp = time_now;
        }

        return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
{
        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
                cs_tuners->sampling_rate = sampling_rate;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;
                od_tuners->sampling_rate = sampling_rate;
        }
}

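/*
 * Common entry point for the ondemand and conservative governors,
 * dispatching on the governor event: POLICY_INIT/POLICY_EXIT create and
 * tear down the shared dbs_data and its sysfs group, GOV_START initializes
 * per-CPU state and queues the sampling work, GOV_STOP cancels that work,
 * and GOV_LIMITS clamps the current frequency to the new policy bounds.
 */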
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                struct common_dbs_data *cdata, unsigned int event)
{
        struct dbs_data *dbs_data;
        struct od_cpu_dbs_info_s *od_dbs_info = NULL;
        struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
        struct od_ops *od_ops = NULL;
        struct od_dbs_tuners *od_tuners = NULL;
        struct cs_dbs_tuners *cs_tuners = NULL;
        struct cpu_dbs_common_info *cpu_cdbs;
        unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
        int io_busy = 0;
        int rc;

        if (have_governor_per_policy())
                dbs_data = policy->governor_data;
        else
                dbs_data = cdata->gdbs_data;

        WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(dbs_data);
                } else if (dbs_data) {
                        dbs_data->usage_count++;
                        policy->governor_data = dbs_data;
                        return 0;
                }

                dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
                if (!dbs_data) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                dbs_data->cdata = cdata;
                dbs_data->usage_count = 1;
                rc = cdata->init(dbs_data);
                if (rc) {
                        pr_err("%s: POLICY_INIT: init() failed\n", __func__);
                        kfree(dbs_data);
                        return rc;
                }

                if (!have_governor_per_policy())
                        WARN_ON(cpufreq_get_global_kobject());

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr(dbs_data));
                if (rc) {
                        cdata->exit(dbs_data);
                        kfree(dbs_data);
                        return rc;
                }

                policy->governor_data = dbs_data;

                /* policy latency is in nS. Convert it to uS first */
                latency = policy->cpuinfo.transition_latency / 1000;
                if (latency == 0)
                        latency = 1;

                /* Bring kernel and HW constraints together */
                dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                MIN_LATENCY_MULTIPLIER * latency);
                set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));

                if ((cdata->governor == GOV_CONSERVATIVE) &&
                                (!policy->governor->initialized)) {
                        struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

                        cpufreq_register_notifier(cs_ops->notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                if (!have_governor_per_policy())
                        cdata->gdbs_data = dbs_data;

                return 0;
        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--dbs_data->usage_count) {
                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr(dbs_data));

                        if (!have_governor_per_policy())
                                cpufreq_put_global_kobject();

                        if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
                                (policy->governor->initialized == 1)) {
                                struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

                                cpufreq_unregister_notifier(cs_ops->notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                        }

                        cdata->exit(dbs_data);
                        kfree(dbs_data);
                        cdata->gdbs_data = NULL;
                }

                policy->governor_data = NULL;
                return 0;
        }

        cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                cs_tuners = dbs_data->tuners;
                cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice;
        } else {
                od_tuners = dbs_data->tuners;
                od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice;
                od_ops = dbs_data->cdata->gov_ops;
                io_busy = od_tuners->io_is_busy;
        }

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!policy->cur)
                        return -EINVAL;

                mutex_lock(&dbs_data->mutex);

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_common_info *j_cdbs =
                                dbs_data->cdata->get_cpu_cdbs(j);

                        j_cdbs->cpu = j;
                        j_cdbs->cur_policy = policy;
                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
                                        &j_cdbs->prev_cpu_wall, io_busy);
                        if (ignore_nice)
                                j_cdbs->prev_cpu_nice =
                                        kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        mutex_init(&j_cdbs->timer_mutex);
                        INIT_DEFERRABLE_WORK(&j_cdbs->work,
                                        dbs_data->cdata->gov_dbs_timer);
                }

                /*
                 * conservative does not implement micro like ondemand
                 * governor, thus we are bound to jiffies/HZ
                 */
                if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                        cs_dbs_info->down_skip = 0;
                        cs_dbs_info->enable = 1;
                        cs_dbs_info->requested_freq = policy->cur;
                } else {
                        od_dbs_info->rate_mult = 1;
                        od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
                        od_ops->powersave_bias_init_cpu(cpu);
                }

                mutex_unlock(&dbs_data->mutex);

                /* Initiate timer time stamp */
                cpu_cdbs->time_stamp = ktime_get();

                gov_queue_work(dbs_data, policy,
                                delay_for_sampling_rate(sampling_rate), true);
                break;

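        /*
         * This is the path at the tail of the lockdep splat above: it runs
         * from the hotplug notifier with cpu_hotplug.lock held, and
         * gov_cancel_work() -> cancel_delayed_work_sync() has to wait for
         * any in-flight governor work item to finish.
         */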
        case CPUFREQ_GOV_STOP:
                if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
                        cs_dbs_info->enable = 0;

                gov_cancel_work(dbs_data, policy);

                mutex_lock(&dbs_data->mutex);
                mutex_destroy(&cpu_cdbs->timer_mutex);
                cpu_cdbs->cur_policy = NULL;

                mutex_unlock(&dbs_data->mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&cpu_cdbs->timer_mutex);
                if (policy->max < cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                dbs_check_cpu(dbs_data, cpu);
                mutex_unlock(&cpu_cdbs->timer_mutex);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);