author      Viresh Kumar <viresh.kumar@linaro.org>  2013-02-27 06:54:03 (GMT)
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2013-03-31 23:11:35 (GMT)
commit      031299b3be30f3ecab110fff8faad85af70e1797 (patch)
tree        3e823df16db43dffbf14b7895811059e22427a1e /drivers/cpufreq/cpufreq_governor.c
parent      9d44592018e617abf62a5f6a5d92a04aa07e7625 (diff)
download    linux-fsl-qoriq-031299b3be30f3ecab110fff8faad85af70e1797.tar.xz
cpufreq: governors: Avoid unnecessary per cpu timer interrupts
The following patch introduced per-CPU timers (works) for the ondemand and conservative governors:

    commit 2abfa876f1117b0ab45f191fb1f82c41b1cbc8fe
    Author: Rickard Andersson <rickard.andersson@stericsson.com>
    Date:   Thu Dec 27 14:55:38 2012 +0000

        cpufreq: handle SW coordinated CPUs

This causes additional, unnecessary timer interrupts on all CPUs when the load has recently been evaluated by any other CPU. That is, once CPU x has evaluated the load, no other CPU needs to evaluate it again until the next sampling_rate interval has elapsed. Some code is already present to avoid the duplicate evaluation, but timer interrupts still fire on all CPUs. A better way to avoid them is to modify the delays of all CPUs in the policy (policy->cpus) whenever any one CPU has evaluated the load. This patch makes that change, along with some related code cleanup.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
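The matching caller-side change lands in cpufreq_ondemand.c and cpufreq_conservative.c (part of the same commit, outside this diffstat). As an illustration of the intended usage pattern, here is a minimal sketch of a governor timer callback built around gov_queue_work(). The callback name example_dbs_timer and the hard-coded sampling_rate are assumptions made for the sketch; need_load_eval(), dbs_check_cpu(), delay_for_sampling_rate(), gov_queue_work() and the struct cpu_dbs_common_info fields are real symbols from cpufreq_governor.c/.h:

#include "cpufreq_governor.h"

/* Illustrative sketch only; not part of this diff. */
static void example_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_common_info *cdbs = container_of(work,
                        struct cpu_dbs_common_info, work.work);
        struct dbs_data *dbs_data = cdbs->cur_policy->governor_data;
        unsigned int sampling_rate = 20000;     /* really read from the governor tunables */
        bool modify_all = true;

        mutex_lock(&cdbs->timer_mutex);

        if (!need_load_eval(cdbs, sampling_rate)) {
                /* A policy sibling sampled recently; only re-arm ourselves. */
                modify_all = false;
        } else {
                /* This CPU owns the sample: evaluate the load once. */
                dbs_check_cpu(dbs_data, cdbs->cpu);
        }

        /*
         * Re-arm the pending work of every CPU in the policy, or of
         * just this CPU.  __gov_queue_work() uses mod_delayed_work_on(),
         * which moves an already-pending timer instead of queueing a
         * second one; that is what removes the redundant per-CPU
         * timer interrupts.
         */
        gov_queue_work(dbs_data, cdbs->cur_policy,
                       delay_for_sampling_rate(sampling_rate), modify_all);

        mutex_unlock(&cdbs->timer_mutex);
}

The key point is that when the sampling CPU pushes every sibling's delay back by a full sampling period, the siblings' timers simply never fire early, rather than firing and then discovering there is nothing to do.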
Diffstat (limited to 'drivers/cpufreq/cpufreq_governor.c')
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 26fbb72..326f0c2 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -178,20 +178,38 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 }
 EXPORT_SYMBOL_GPL(dbs_check_cpu);
 
-static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
-                unsigned int sampling_rate)
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+                unsigned int delay)
 {
-        int delay = delay_for_sampling_rate(sampling_rate);
         struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-        schedule_delayed_work_on(cpu, &cdbs->work, delay);
+        mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
 }
 
-static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+                unsigned int delay, bool all_cpus)
 {
-        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+        int i;
+
+        if (!all_cpus) {
+                __gov_queue_work(smp_processor_id(), dbs_data, delay);
+        } else {
+                for_each_cpu(i, policy->cpus)
+                        __gov_queue_work(i, dbs_data, delay);
+        }
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+                struct cpufreq_policy *policy)
+{
+        struct cpu_dbs_common_info *cdbs;
+        int i;
 
-        cancel_delayed_work_sync(&cdbs->work);
+        for_each_cpu(i, policy->cpus) {
+                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+                cancel_delayed_work_sync(&cdbs->work);
+        }
 }
 
 /* Will return if we need to evaluate cpu load again or not */
@@ -380,16 +398,15 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 /* Initiate timer time stamp */
                 cpu_cdbs->time_stamp = ktime_get();
 
-                for_each_cpu(j, policy->cpus)
-                        dbs_timer_init(dbs_data, j, sampling_rate);
+                gov_queue_work(dbs_data, policy,
+                                delay_for_sampling_rate(sampling_rate), true);
                 break;
 
         case CPUFREQ_GOV_STOP:
                 if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
                         cs_dbs_info->enable = 0;
 
-                for_each_cpu(j, policy->cpus)
-                        dbs_timer_exit(dbs_data, j);
+                gov_cancel_work(dbs_data, policy);
 
                 mutex_lock(&dbs_data->mutex);
                 mutex_destroy(&cpu_cdbs->timer_mutex);