-rw-r--r--  Documentation/cpu-freq/governors.txt          |  11
-rw-r--r--  arch/arm/mach-s5pv210/cpufreq.c               |   3
-rw-r--r--  arch/arm/mach-s5pv310/cpufreq.c               |   3
-rw-r--r--  arch/arm/plat-s3c24xx/cpu-freq.c              |   2
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c     |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c     |   3
-rw-r--r--  drivers/cpufreq/cpufreq.c                     |   2
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c        | 123
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c            | 122
-rw-r--r--  include/linux/cpufreq.h                       |  11
11 files changed, 36 insertions, 248 deletions
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 737988f..e74d0a2 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -158,6 +158,17 @@ intensive calculation on your laptop that you do not care how long it
takes to complete as you can 'nice' it and prevent it from taking part
in the deciding process of whether to increase your CPU frequency.
+sampling_down_factor: this parameter controls the rate at which the
+kernel makes a decision on when to decrease the frequency while running
+at top speed. When set to 1 (the default) decisions to reevaluate load
+are made at the same interval regardless of current clock speed. But
+when set to greater than 1 (e.g. 100) it acts as a multiplier for the
+scheduling interval for reevaluating load when the CPU is at its top
+speed due to high load. This improves performance by reducing the overhead
+of load evaluation and helping the CPU stay at its top speed when truly
+busy, rather than shifting back and forth in speed. This tunable has no
+effect on behavior at lower speeds/lower CPU loads.
+
2.5 Conservative
----------------
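
The documentation added above describes sampling_down_factor as a multiplier on the load-evaluation interval that only applies while the CPU runs at top speed under load. A minimal C sketch of that behaviour, modelled on the rate_mult handling visible in the ondemand hunks later in this diff (the function and parameter names here are illustrative, not kernel API):

/*
 * Illustrative model only: at top speed under sustained load the
 * sampling interval is stretched by sampling_down_factor; at lower
 * speeds or lower loads the factor has no effect, matching the
 * documentation text above.
 */
unsigned int next_sample_delay_us(unsigned int sampling_rate_us,
				  unsigned int sampling_down_factor,
				  int at_top_speed_under_load)
{
	unsigned int rate_mult = at_top_speed_under_load
					? sampling_down_factor : 1;

	return sampling_rate_us * rate_mult;
}
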
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index a6f2292..22046e2 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -390,8 +390,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy,
- pm_message_t pmsg)
+static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c
index b04cbc7..7c08ad7 100644
--- a/arch/arm/mach-s5pv310/cpufreq.c
+++ b/arch/arm/mach-s5pv310/cpufreq.c
@@ -458,8 +458,7 @@ static int s5pv310_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_PM
-static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
- pm_message_t pmsg)
+static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index 25a8fc7..eea75ff 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -433,7 +433,7 @@ static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
suspend_pll.index = __raw_readl(S3C2410_MPLLCON);
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 415ca6d..04af5f4 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np)
return offset;
}
-static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
{
/* Ok, this could be made a bit smarter, but let's be robust for now. We
* always force a speed change to high speed before sleep, to make sure
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4a5a42b..755a31e 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -315,8 +315,6 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
input.count = 4;
input.pointer = in_params;
- input.count = 4;
- input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c567dec..1ae4133 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -630,8 +630,7 @@ static void print_basics(struct powernow_k8_data *data)
data->powernow_table[j].frequency/1000);
} else {
printk(KERN_INFO PFX
- " %d : fid 0x%x (%d MHz), vid 0x%x\n",
- j,
+ "fid 0x%x (%d MHz), vid 0x%x\n",
data->powernow_table[j].index & 0xff,
data->powernow_table[j].frequency/1000,
data->powernow_table[j].index >> 8);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09..0f17ad8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
goto out;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+ ret = cpufreq_driver->suspend(cpu_policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8..33b56e5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &sampling_down_factor_old.attr,
- &up_threshold_old.attr,
- &down_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &freq_step_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
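
A note on the locking change running through the conservative hunks above (and the ondemand hunks below): every store_*() tunable handler drops dbs_mutex, and the mutex comment is narrowed to governor start/stop. A standalone userspace model of the resulting rule, with a pthread mutex standing in for the kernel mutex and illustrative names:

#include <pthread.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int dbs_enable;               /* start/stop refcount: mutex-protected */
static unsigned int sampling_down_factor = 1; /* tunable: plain word-sized store */

void governor_start(void)
{
	pthread_mutex_lock(&dbs_mutex);
	dbs_enable++;                 /* still serialized across CPUs */
	pthread_mutex_unlock(&dbs_mutex);
}

void store_sampling_down_factor(unsigned int input)
{
	/* single store; timer readers may see the old or the new value */
	sampling_down_factor = input;
}
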
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85e..891360e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.io_is_busy = !!input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->rate_mult = 1;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
return count;
}
dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.powersave_bias = input;
ondemand_powersave_bias_init();
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &up_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &powersave_bias_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
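
The do_dbs_timer() hunk above reorders the delay computation: the jiffy-aligned sampling delay is only computed when a normal sample is actually being scheduled, and the powersave-bias sub-sample path now explicitly reschedules with freq_lo_jiffies. A compact standalone sketch of the resulting selection logic (illustrative names, not the driver code):

enum { DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE };

int pick_delay(int sample_type, int freq_lo, int aligned_sampling_delay,
	       int freq_hi_jiffies, int freq_lo_jiffies)
{
	if (sample_type == DBS_NORMAL_SAMPLE) {
		if (freq_lo)
			return freq_hi_jiffies;	/* run at freq_hi, sub-sample next */
		return aligned_sampling_delay;	/* regular sampling interval */
	}
	return freq_lo_jiffies;			/* DBS_SUB_SAMPLE: run at freq_lo */
}
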
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c3e9de8..9343dd3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -230,7 +230,7 @@ struct cpufreq_driver {
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
- int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
+ int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
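
With the pm_message_t argument removed from the ->suspend hook of struct cpufreq_driver, a driver implementation after this patch would look roughly like the sketch below (the driver name and body are hypothetical, not taken from this diff):

#include <linux/cpufreq.h>

static int example_cpufreq_suspend(struct cpufreq_policy *policy)
{
	/* e.g. switch to a known-safe frequency before sleep */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name    = "example",
	.suspend = example_cpufreq_suspend,
	/* .init, .target, .resume, ... */
};
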
@@ -281,19 +281,10 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)
-#define cpufreq_freq_attr_ro_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
#define cpufreq_freq_attr_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
-#define cpufreq_freq_attr_rw_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-
struct global_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,