From 0d1857a1b987ef874aeb4b99c9c385fa5645bf61 Mon Sep 17 00:00:00 2001 From: Nathan Zimmer Date: Fri, 22 Feb 2013 16:24:34 +0000 Subject: cpufreq: Convert the cpufreq_driver_lock to a rwlock This eliminates the contention I am seeing in __cpufreq_cpu_get. It also nicely stages the lock to be replaced by the rcu. Signed-off-by: Nathan Zimmer Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b02824d..c5996fe 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); /* This one keeps track of the previously set governor of a removed CPU */ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); #endif -static DEFINE_SPINLOCK(cpufreq_driver_lock); +static DEFINE_RWLOCK(cpufreq_driver_lock); /* * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure @@ -137,7 +137,7 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) goto err_out; /* get the cpufreq driver */ - spin_lock_irqsave(&cpufreq_driver_lock, flags); + read_lock_irqsave(&cpufreq_driver_lock, flags); if (!cpufreq_driver) goto err_out_unlock; @@ -155,13 +155,13 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) if (!sysfs && !kobject_get(&data->kobj)) goto err_out_put_module; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); return data; err_out_put_module: module_put(cpufreq_driver->owner); err_out_unlock: - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); err_out: return NULL; } @@ -266,9 +266,9 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); - spin_lock_irqsave(&cpufreq_driver_lock, flags); + read_lock_irqsave(&cpufreq_driver_lock, flags); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); switch (state) { @@ -765,12 +765,12 @@ static int cpufreq_add_dev_interface(unsigned int cpu, goto err_out_kobj_put; } - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) { per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(cpufreq_policy_cpu, j) = policy->cpu; } - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); ret = cpufreq_add_dev_symlink(cpu, policy); if (ret) @@ -813,12 +813,12 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, lock_policy_rwsem_write(sibling); - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); cpumask_set_cpu(cpu, policy->cpus); per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; per_cpu(cpufreq_cpu_data, cpu) = policy; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); unlock_policy_rwsem_write(sibling); @@ -871,15 +871,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) #ifdef CONFIG_HOTPLUG_CPU /* Check if this cpu was hot-unplugged earlier and has siblings */ - spin_lock_irqsave(&cpufreq_driver_lock, flags); + read_lock_irqsave(&cpufreq_driver_lock, flags); for_each_online_cpu(sibling) { struct cpufreq_policy *cp = 
per_cpu(cpufreq_cpu_data, sibling); if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); return cpufreq_add_policy_cpu(cpu, sibling, dev); } } - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif #endif @@ -952,10 +952,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) return 0; err_out_unregister: - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = NULL; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); kobject_put(&policy->kobj); wait_for_completion(&policy->kobj_unregister); @@ -1008,12 +1008,12 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif pr_debug("%s: unregistering CPU %u\n", __func__, cpu); - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); per_cpu(cpufreq_cpu_data, cpu) = NULL; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); if (!data) { pr_debug("%s: No cpu_data found\n", __func__); @@ -1047,9 +1047,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif WARN_ON(lock_policy_rwsem_write(cpu)); cpumask_set_cpu(cpu, data->cpus); - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); per_cpu(cpufreq_cpu_data, cpu) = data; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); unlock_policy_rwsem_write(cpu); @@ -1848,13 +1848,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); return -EBUSY; } cpufreq_driver = driver_data; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); ret = subsys_interface_register(&cpufreq_interface); if (ret) @@ -1886,9 +1886,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_null_driver: - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); @@ -1914,9 +1914,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) subsys_interface_unregister(&cpufreq_interface); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); - spin_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); return 0; } -- cgit v0.10.2 From 7bd353a995d9049262661d85811d6109140582a3 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 27 Mar 2013 15:58:57 +0000 Subject: cpufreq: Add per policy governor-init/exit infrastructure Currently, 
there can't be multiple instances of a single governor_type. If we have a multi-package system, where we have multiple instances of struct policy (one per package), we can't have multiple instances of the same governor, i.e. we can't have multiple instances of the ondemand governor for multiple packages.

The governor's directory in sysfs is created at /sys/devices/system/cpu/cpufreq/governor-name/, which again reflects that there can be only one instance of a governor_type in the system. This is a bottleneck for multi-cluster systems, where we want different packages to use the same governor type, but with different tunables.

This patch provides the infrastructure for that. Because governor resources must now be allocated dynamically, they have to be set up when a policy is created and released when it goes away, hence the new CPUFREQ_GOV_POLICY_INIT/EXIT events.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c5996fe..08df7a1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1070,6 +1070,8 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
+		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
 		lock_policy_rwsem_read(cpu);
 		kobj = &data->kobj;
 		cmp = &data->kobj_unregister;
@@ -1651,7 +1653,7 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy)
 {
-	int ret = 0;
+	int ret = 0, failed = 1;
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max);
@@ -1705,18 +1707,31 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			pr_debug("governor switch\n");
 			/* end old governor */
-			if (data->governor)
+			if (data->governor) {
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				__cpufreq_governor(data,
+						CPUFREQ_GOV_POLICY_EXIT);
+			}
 			/* start new governor */
 			data->governor = policy->governor;
-			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
+				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+					failed = 0;
+				else
+					__cpufreq_governor(data,
+						CPUFREQ_GOV_POLICY_EXIT);
+			}
+
+			if (failed) {
 				/* new governor failed, so re-start old one */
 				pr_debug("starting governor %s failed\n", data->governor->name);
 				if (old_gov) {
 					data->governor = old_gov;
 					__cpufreq_governor(data,
+							CPUFREQ_GOV_POLICY_INIT);
+					__cpufreq_governor(data,
 							CPUFREQ_GOV_START);
 				}
 				ret = -EINVAL;

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a22944c..b7393b5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -106,6 +106,7 @@ struct cpufreq_policy {
 				 * governors are used */
 	unsigned int policy; /* see above */
 	struct cpufreq_governor *governor; /* see below */
+	void *governor_data;
 	struct work_struct update; /* if update_policy() needs to be
 				 * called, but you're in IRQ context */
@@ -178,9 +179,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu
  *			CPUFREQ GOVERNORS			*
  *********************************************************************/
-#define CPUFREQ_GOV_START 1
-#define CPUFREQ_GOV_STOP 2
-#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_START 1
+#define CPUFREQ_GOV_STOP 2
+#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_POLICY_INIT 4
+#define CPUFREQ_GOV_POLICY_EXIT 5
 struct cpufreq_governor {
 	char name[CPUFREQ_NAME_LEN];
-- cgit v0.10.2
From 4d5dcc4211f9def4281eafb54b8ed483862e8135 Mon Sep 17 00:00:00 2001
From: Viresh
Kumar Date: Wed, 27 Mar 2013 15:58:58 +0000 Subject: cpufreq: governor: Implement per policy instances of governors Currently, there can't be multiple instances of single governor_type. If we have a multi-package system, where we have multiple instances of struct policy (per package), we can't have multiple instances of same governor. i.e. We can't have multiple instances of ondemand governor for multiple packages. Governors directory in sysfs is created at /sys/devices/system/cpu/cpufreq/ governor-name/. Which again reflects that there can be only one instance of a governor_type in the system. This is a bottleneck for multicluster system, where we want different packages to use same governor type, but with different tunables. This patch uses the infrastructure provided by earlier patch and implements init/exit routines for ondemand and conservative governors. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 08df7a1..85963fc 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -128,6 +128,11 @@ void disable_cpufreq(void) static LIST_HEAD(cpufreq_governor_list); static DEFINE_MUTEX(cpufreq_governor_mutex); +bool have_governor_per_policy(void) +{ + return cpufreq_driver->have_governor_per_policy; +} + static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) { struct cpufreq_policy *data; @@ -1546,10 +1551,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, policy->cpu, event); ret = policy->governor->governor(policy, event); - if (event == CPUFREQ_GOV_START) - policy->governor->initialized++; - else if (event == CPUFREQ_GOV_STOP) - policy->governor->initialized--; + if (!ret) { + if (event == CPUFREQ_GOV_POLICY_INIT) + policy->governor->initialized++; + else if (event == CPUFREQ_GOV_POLICY_EXIT) + policy->governor->initialized--; + } /* we keep one module reference alive for each CPU governed by this CPU */ diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 4fd0006..98b4946 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -31,17 +32,8 @@ #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) -static struct dbs_data cs_dbs_data; static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); -static struct cs_dbs_tuners cs_tuners = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 0, - .freq_step = 5, -}; - /* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency Every sampling_rate * @@ -55,24 +47,26 @@ static void cs_check_cpu(int cpu, unsigned int load) { struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + struct dbs_data *dbs_data = policy->governor_data; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int freq_target; /* * break out if we 'cannot' reduce the speed as the user might * want freq_step to be zero */ - if (cs_tuners.freq_step == 0) + if (cs_tuners->freq_step == 0) return; /* Check for frequency increase */ - if (load > cs_tuners.up_threshold) { + if (load > cs_tuners->up_threshold) { dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ if 
(dbs_info->requested_freq == policy->max) return; - freq_target = (cs_tuners.freq_step * policy->max) / 100; + freq_target = (cs_tuners->freq_step * policy->max) / 100; /* max freq cannot be less than 100. But who knows.... */ if (unlikely(freq_target == 0)) @@ -92,8 +86,8 @@ static void cs_check_cpu(int cpu, unsigned int load) * support the current CPU usage without triggering the up policy. To be * safe, we focus 10 points under the threshold. */ - if (load < (cs_tuners.down_threshold - 10)) { - freq_target = (cs_tuners.freq_step * policy->max) / 100; + if (load < (cs_tuners->down_threshold - 10)) { + freq_target = (cs_tuners->freq_step * policy->max) / 100; dbs_info->requested_freq -= freq_target; if (dbs_info->requested_freq < policy->min) @@ -119,11 +113,13 @@ static void cs_dbs_timer(struct work_struct *work) unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); + struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + int delay = delay_for_sampling_rate(cs_tuners->sampling_rate); mutex_lock(&core_dbs_info->cdbs.timer_mutex); - if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate)) - dbs_check_cpu(&cs_dbs_data, cpu); + if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate)) + dbs_check_cpu(dbs_data, cpu); schedule_delayed_work_on(smp_processor_id(), dw, delay); mutex_unlock(&core_dbs_info->cdbs.timer_mutex); @@ -154,16 +150,12 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, } /************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate); -} +static struct common_dbs_data cs_dbs_cdata; -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) +static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, + const char *buf, size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); @@ -171,13 +163,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - cs_tuners.sampling_down_factor = input; + cs_tuners->sampling_down_factor = input; return count; } -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); @@ -185,43 +178,46 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); + cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate); return count; } -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); - if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) + if 
(ret != 1 || input > 100 || input <= cs_tuners->down_threshold) return -EINVAL; - cs_tuners.up_threshold = input; + cs_tuners->up_threshold = input; return count; } -static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || - input >= cs_tuners.up_threshold) + input >= cs_tuners->up_threshold) return -EINVAL; - cs_tuners.down_threshold = input; + cs_tuners->down_threshold = input; return count; } -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input, j; int ret; @@ -232,10 +228,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == cs_tuners.ignore_nice) /* nothing to do */ + if (input == cs_tuners->ignore_nice) /* nothing to do */ return count; - cs_tuners.ignore_nice = input; + cs_tuners->ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -243,16 +239,17 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall); - if (cs_tuners.ignore_nice) + if (cs_tuners->ignore_nice) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; } -static ssize_t store_freq_step(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); @@ -267,43 +264,88 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, * no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ - cs_tuners.freq_step = input; + cs_tuners->freq_step = input; return count; } -show_one(cs, sampling_rate, sampling_rate); -show_one(cs, sampling_down_factor, sampling_down_factor); -show_one(cs, up_threshold, up_threshold); -show_one(cs, down_threshold, down_threshold); -show_one(cs, ignore_nice_load, ignore_nice); -show_one(cs, freq_step, freq_step); - -define_one_global_rw(sampling_rate); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(up_threshold); -define_one_global_rw(down_threshold); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(freq_step); -define_one_global_ro(sampling_rate_min); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - &freq_step.attr, +show_store_one(cs, sampling_rate); +show_store_one(cs, sampling_down_factor); +show_store_one(cs, up_threshold); +show_store_one(cs, down_threshold); +show_store_one(cs, ignore_nice); +show_store_one(cs, freq_step); +declare_show_sampling_rate_min(cs); + +gov_sys_pol_attr_rw(sampling_rate); +gov_sys_pol_attr_rw(sampling_down_factor); 
+gov_sys_pol_attr_rw(up_threshold); +gov_sys_pol_attr_rw(down_threshold); +gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(freq_step); +gov_sys_pol_attr_ro(sampling_rate_min); + +static struct attribute *dbs_attributes_gov_sys[] = { + &sampling_rate_min_gov_sys.attr, + &sampling_rate_gov_sys.attr, + &sampling_down_factor_gov_sys.attr, + &up_threshold_gov_sys.attr, + &down_threshold_gov_sys.attr, + &ignore_nice_gov_sys.attr, + &freq_step_gov_sys.attr, NULL }; -static struct attribute_group cs_attr_group = { - .attrs = dbs_attributes, +static struct attribute_group cs_attr_group_gov_sys = { + .attrs = dbs_attributes_gov_sys, + .name = "conservative", +}; + +static struct attribute *dbs_attributes_gov_pol[] = { + &sampling_rate_min_gov_pol.attr, + &sampling_rate_gov_pol.attr, + &sampling_down_factor_gov_pol.attr, + &up_threshold_gov_pol.attr, + &down_threshold_gov_pol.attr, + &ignore_nice_gov_pol.attr, + &freq_step_gov_pol.attr, + NULL +}; + +static struct attribute_group cs_attr_group_gov_pol = { + .attrs = dbs_attributes_gov_pol, .name = "conservative", }; /************************** sysfs end ************************/ +static int cs_init(struct dbs_data *dbs_data) +{ + struct cs_dbs_tuners *tuners; + + tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL); + if (!tuners) { + pr_err("%s: kzalloc failed\n", __func__); + return -ENOMEM; + } + + tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; + tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; + tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; + tuners->ignore_nice = 0; + tuners->freq_step = 5; + + dbs_data->tuners = tuners; + dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); + mutex_init(&dbs_data->mutex); + return 0; +} + +static void cs_exit(struct dbs_data *dbs_data) +{ + kfree(dbs_data->tuners); +} + define_get_cpu_dbs_routines(cs_cpu_dbs_info); static struct notifier_block cs_cpufreq_notifier_block = { @@ -314,21 +356,23 @@ static struct cs_ops cs_ops = { .notifier_block = &cs_cpufreq_notifier_block, }; -static struct dbs_data cs_dbs_data = { +static struct common_dbs_data cs_dbs_cdata = { .governor = GOV_CONSERVATIVE, - .attr_group = &cs_attr_group, - .tuners = &cs_tuners, + .attr_group_gov_sys = &cs_attr_group_gov_sys, + .attr_group_gov_pol = &cs_attr_group_gov_pol, .get_cpu_cdbs = get_cpu_cdbs, .get_cpu_dbs_info_s = get_cpu_dbs_info_s, .gov_dbs_timer = cs_dbs_timer, .gov_check_cpu = cs_check_cpu, .gov_ops = &cs_ops, + .init = cs_init, + .exit = cs_exit, }; static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { - return cpufreq_governor_dbs(&cs_dbs_data, policy, event); + return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event); } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE @@ -343,7 +387,6 @@ struct cpufreq_governor cpufreq_gov_conservative = { static int __init cpufreq_gov_dbs_init(void) { - mutex_init(&cs_dbs_data.mutex); return cpufreq_register_governor(&cpufreq_gov_conservative); } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 5a76086..26fbb72 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -22,12 +22,29 @@ #include #include #include +#include #include #include #include #include "cpufreq_governor.h" +static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) +{ + if (have_governor_per_policy()) + return &policy->kobj; + else + return cpufreq_global_kobject; +} + +static struct attribute_group *get_sysfs_attr(struct dbs_data 
*dbs_data) +{ + if (have_governor_per_policy()) + return dbs_data->cdata->attr_group_gov_pol; + else + return dbs_data->cdata->attr_group_gov_sys; +} + static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; @@ -65,7 +82,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time); void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) { - struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); + struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; struct cpufreq_policy *policy; @@ -73,7 +90,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) unsigned int ignore_nice; unsigned int j; - if (dbs_data->governor == GOV_ONDEMAND) + if (dbs_data->cdata->governor == GOV_ONDEMAND) ignore_nice = od_tuners->ignore_nice; else ignore_nice = cs_tuners->ignore_nice; @@ -87,7 +104,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) unsigned int idle_time, wall_time, iowait_time; unsigned int load; - j_cdbs = dbs_data->get_cpu_cdbs(j); + j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); @@ -117,9 +134,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) idle_time += jiffies_to_usecs(cur_nice_jiffies); } - if (dbs_data->governor == GOV_ONDEMAND) { + if (dbs_data->cdata->governor == GOV_ONDEMAND) { struct od_cpu_dbs_info_s *od_j_dbs_info = - dbs_data->get_cpu_dbs_info_s(cpu); + dbs_data->cdata->get_cpu_dbs_info_s(cpu); cur_iowait_time = get_cpu_iowait_time_us(j, &cur_wall_time); @@ -145,7 +162,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) load = 100 * (wall_time - idle_time) / wall_time; - if (dbs_data->governor == GOV_ONDEMAND) { + if (dbs_data->cdata->governor == GOV_ONDEMAND) { int freq_avg = __cpufreq_driver_getavg(policy, j); if (freq_avg <= 0) freq_avg = policy->cur; @@ -157,7 +174,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) max_load = load; } - dbs_data->gov_check_cpu(cpu, max_load); + dbs_data->cdata->gov_check_cpu(cpu, max_load); } EXPORT_SYMBOL_GPL(dbs_check_cpu); @@ -165,14 +182,14 @@ static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu, unsigned int sampling_rate) { int delay = delay_for_sampling_rate(sampling_rate); - struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); + struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); schedule_delayed_work_on(cpu, &cdbs->work, delay); } static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu) { - struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); + struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); cancel_delayed_work_sync(&cdbs->work); } @@ -196,31 +213,128 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, } EXPORT_SYMBOL_GPL(need_load_eval); -int cpufreq_governor_dbs(struct dbs_data *dbs_data, - struct cpufreq_policy *policy, unsigned int event) +static void set_sampling_rate(struct dbs_data *dbs_data, + unsigned int sampling_rate) +{ + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + cs_tuners->sampling_rate = sampling_rate; + } else { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + od_tuners->sampling_rate = sampling_rate; + } +} + +int cpufreq_governor_dbs(struct cpufreq_policy *policy, + struct common_dbs_data *cdata, unsigned int event) { + struct dbs_data *dbs_data; struct od_cpu_dbs_info_s *od_dbs_info = NULL; struct cs_cpu_dbs_info_s 
*cs_dbs_info = NULL; - struct cs_ops *cs_ops = NULL; struct od_ops *od_ops = NULL; - struct od_dbs_tuners *od_tuners = dbs_data->tuners; - struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + struct od_dbs_tuners *od_tuners = NULL; + struct cs_dbs_tuners *cs_tuners = NULL; struct cpu_dbs_common_info *cpu_cdbs; - unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; + unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; int rc; - cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); + if (have_governor_per_policy()) + dbs_data = policy->governor_data; + else + dbs_data = cdata->gdbs_data; + + WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)); + + switch (event) { + case CPUFREQ_GOV_POLICY_INIT: + if (have_governor_per_policy()) { + WARN_ON(dbs_data); + } else if (dbs_data) { + policy->governor_data = dbs_data; + return 0; + } + + dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); + if (!dbs_data) { + pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); + return -ENOMEM; + } + + dbs_data->cdata = cdata; + rc = cdata->init(dbs_data); + if (rc) { + pr_err("%s: POLICY_INIT: init() failed\n", __func__); + kfree(dbs_data); + return rc; + } + + rc = sysfs_create_group(get_governor_parent_kobj(policy), + get_sysfs_attr(dbs_data)); + if (rc) { + cdata->exit(dbs_data); + kfree(dbs_data); + return rc; + } + + policy->governor_data = dbs_data; + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + /* Bring kernel and HW constraints together */ + dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, + latency * LATENCY_MULTIPLIER)); + + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; + + cpufreq_register_notifier(cs_ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + if (!have_governor_per_policy()) + cdata->gdbs_data = dbs_data; + + return 0; + case CPUFREQ_GOV_POLICY_EXIT: + if ((policy->governor->initialized == 1) || + have_governor_per_policy()) { + sysfs_remove_group(get_governor_parent_kobj(policy), + get_sysfs_attr(dbs_data)); + + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; + + cpufreq_unregister_notifier(cs_ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + cdata->exit(dbs_data); + kfree(dbs_data); + cdata->gdbs_data = NULL; + } - if (dbs_data->governor == GOV_CONSERVATIVE) { - cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); - sampling_rate = &cs_tuners->sampling_rate; + policy->governor_data = NULL; + return 0; + } + + cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); + + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + cs_tuners = dbs_data->tuners; + cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); + sampling_rate = cs_tuners->sampling_rate; ignore_nice = cs_tuners->ignore_nice; - cs_ops = dbs_data->gov_ops; } else { - od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); - sampling_rate = &od_tuners->sampling_rate; + od_tuners = dbs_data->tuners; + od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); + sampling_rate = od_tuners->sampling_rate; ignore_nice = od_tuners->ignore_nice; - od_ops = dbs_data->gov_ops; + od_ops = dbs_data->cdata->gov_ops; } switch (event) { @@ -232,7 +346,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data, for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs = - 
dbs_data->get_cpu_cdbs(j); + dbs_data->cdata->get_cpu_cdbs(j); j_cdbs->cpu = j; j_cdbs->cur_policy = policy; @@ -244,69 +358,34 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data, mutex_init(&j_cdbs->timer_mutex); INIT_DEFERRABLE_WORK(&j_cdbs->work, - dbs_data->gov_dbs_timer); - } - - if (!policy->governor->initialized) { - rc = sysfs_create_group(cpufreq_global_kobject, - dbs_data->attr_group); - if (rc) { - mutex_unlock(&dbs_data->mutex); - return rc; - } + dbs_data->cdata->gov_dbs_timer); } /* * conservative does not implement micro like ondemand * governor, thus we are bound to jiffes/HZ */ - if (dbs_data->governor == GOV_CONSERVATIVE) { + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; cs_dbs_info->requested_freq = policy->cur; - - if (!policy->governor->initialized) { - cpufreq_register_notifier(cs_ops->notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - dbs_data->min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); - } } else { od_dbs_info->rate_mult = 1; od_dbs_info->sample_type = OD_NORMAL_SAMPLE; od_ops->powersave_bias_init_cpu(cpu); - - if (!policy->governor->initialized) - od_tuners->io_is_busy = od_ops->io_busy(); } - if (policy->governor->initialized) - goto unlock; - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - /* Bring kernel and HW constraints together */ - dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - *sampling_rate = max(dbs_data->min_sampling_rate, latency * - LATENCY_MULTIPLIER); -unlock: mutex_unlock(&dbs_data->mutex); /* Initiate timer time stamp */ cpu_cdbs->time_stamp = ktime_get(); for_each_cpu(j, policy->cpus) - dbs_timer_init(dbs_data, j, *sampling_rate); + dbs_timer_init(dbs_data, j, sampling_rate); break; case CPUFREQ_GOV_STOP: - if (dbs_data->governor == GOV_CONSERVATIVE) + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) cs_dbs_info->enable = 0; for_each_cpu(j, policy->cpus) @@ -315,13 +394,6 @@ unlock: mutex_lock(&dbs_data->mutex); mutex_destroy(&cpu_cdbs->timer_mutex); - if (policy->governor->initialized == 1) { - sysfs_remove_group(cpufreq_global_kobject, - dbs_data->attr_group); - if (dbs_data->governor == GOV_CONSERVATIVE) - cpufreq_unregister_notifier(cs_ops->notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } mutex_unlock(&dbs_data->mutex); break; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01..c83cabf 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -40,14 +40,75 @@ /* Ondemand Sampling types */ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; -/* Macro creating sysfs show routines */ -#define show_one(_gov, file_name, object) \ -static ssize_t show_##file_name \ +/* + * Macro for creating governors sysfs routines + * + * - gov_sys: One governor instance per whole system + * - gov_pol: One governor instance per policy + */ + +/* Create attributes */ +#define gov_sys_attr_ro(_name) \ +static struct global_attr _name##_gov_sys = \ +__ATTR(_name, 0444, show_##_name##_gov_sys, NULL) + +#define gov_sys_attr_rw(_name) \ +static struct global_attr _name##_gov_sys = \ +__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys) + +#define gov_pol_attr_ro(_name) \ +static struct freq_attr _name##_gov_pol = \ +__ATTR(_name, 0444, show_##_name##_gov_pol, NULL) + +#define gov_pol_attr_rw(_name) \ +static struct freq_attr 
_name##_gov_pol = \ +__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol) + +#define gov_sys_pol_attr_rw(_name) \ + gov_sys_attr_rw(_name); \ + gov_pol_attr_rw(_name) + +#define gov_sys_pol_attr_ro(_name) \ + gov_sys_attr_ro(_name); \ + gov_pol_attr_ro(_name) + +/* Create show/store routines */ +#define show_one(_gov, file_name) \ +static ssize_t show_##file_name##_gov_sys \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ - return sprintf(buf, "%u\n", _gov##_tuners.object); \ + struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \ + return sprintf(buf, "%u\n", tuners->file_name); \ +} \ + \ +static ssize_t show_##file_name##_gov_pol \ +(struct cpufreq_policy *policy, char *buf) \ +{ \ + struct dbs_data *dbs_data = policy->governor_data; \ + struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \ + return sprintf(buf, "%u\n", tuners->file_name); \ +} + +#define store_one(_gov, file_name) \ +static ssize_t store_##file_name##_gov_sys \ +(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \ +{ \ + struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \ + return store_##file_name(dbs_data, buf, count); \ +} \ + \ +static ssize_t store_##file_name##_gov_pol \ +(struct cpufreq_policy *policy, const char *buf, size_t count) \ +{ \ + struct dbs_data *dbs_data = policy->governor_data; \ + return store_##file_name(dbs_data, buf, count); \ } +#define show_store_one(_gov, file_name) \ +show_one(_gov, file_name); \ +store_one(_gov, file_name) + +/* create helper routines */ #define define_get_cpu_dbs_routines(_dbs_info) \ static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ { \ @@ -103,7 +164,7 @@ struct cs_cpu_dbs_info_s { unsigned int enable:1; }; -/* Governers sysfs tunables */ +/* Per policy Governers sysfs tunables */ struct od_dbs_tuners { unsigned int ignore_nice; unsigned int sampling_rate; @@ -123,31 +184,42 @@ struct cs_dbs_tuners { unsigned int freq_step; }; -/* Per Governer data */ -struct dbs_data { +/* Common Governer data across policies */ +struct dbs_data; +struct common_dbs_data { /* Common across governors */ #define GOV_ONDEMAND 0 #define GOV_CONSERVATIVE 1 int governor; - unsigned int min_sampling_rate; - struct attribute_group *attr_group; - void *tuners; + struct attribute_group *attr_group_gov_sys; /* one governor - system */ + struct attribute_group *attr_group_gov_pol; /* one governor - policy */ - /* dbs_mutex protects dbs_enable in governor start/stop */ - struct mutex mutex; + /* Common data for platforms that don't set have_governor_per_policy */ + struct dbs_data *gdbs_data; struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); void *(*get_cpu_dbs_info_s)(int cpu); void (*gov_dbs_timer)(struct work_struct *work); void (*gov_check_cpu)(int cpu, unsigned int load); + int (*init)(struct dbs_data *dbs_data); + void (*exit)(struct dbs_data *dbs_data); /* Governor specific ops, see below */ void *gov_ops; }; +/* Governer Per policy data */ +struct dbs_data { + struct common_dbs_data *cdata; + unsigned int min_sampling_rate; + void *tuners; + + /* dbs_mutex protects dbs_enable in governor start/stop */ + struct mutex mutex; +}; + /* Governor specific ops, will be passed to dbs_data->gov_ops */ struct od_ops { - int (*io_busy)(void); void (*powersave_bias_init_cpu)(int cpu); unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation); @@ -169,10 +241,25 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate) return delay; } 
+#define declare_show_sampling_rate_min(_gov) \ +static ssize_t show_sampling_rate_min_gov_sys \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \ + return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ +} \ + \ +static ssize_t show_sampling_rate_min_gov_pol \ +(struct cpufreq_policy *policy, char *buf) \ +{ \ + struct dbs_data *dbs_data = policy->governor_data; \ + return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ +} + u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); -int cpufreq_governor_dbs(struct dbs_data *dbs_data, - struct cpufreq_policy *policy, unsigned int event); +int cpufreq_governor_dbs(struct cpufreq_policy *policy, + struct common_dbs_data *cdata, unsigned int event); #endif /* _CPUFREQ_GOVERNER_H */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index f3eb26c..15e80ee 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -37,22 +38,12 @@ #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) -static struct dbs_data od_dbs_data; static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND static struct cpufreq_governor cpufreq_gov_ondemand; #endif -static struct od_dbs_tuners od_tuners = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD - - DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, - .powersave_bias = 0, -}; - static void ondemand_powersave_bias_init_cpu(int cpu) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); @@ -98,6 +89,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, unsigned int jiffies_total, jiffies_hi, jiffies_lo; struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); + struct dbs_data *dbs_data = policy->governor_data; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; if (!dbs_info->freq_table) { dbs_info->freq_lo = 0; @@ -108,7 +101,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, relation, &index); freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * od_tuners.powersave_bias / 1000; + freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; /* Find freq bounds for freq_avg in freq_table */ @@ -127,7 +120,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, dbs_info->freq_lo_jiffies = 0; return freq_lo; } - jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); + jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate); jiffies_hi = (freq_avg - freq_lo) * jiffies_total; jiffies_hi += ((freq_hi - freq_lo) / 2); jiffies_hi /= (freq_hi - freq_lo); @@ -148,12 +141,15 @@ static void ondemand_powersave_bias_init(void) static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) { - if (od_tuners.powersave_bias) + struct dbs_data *dbs_data = p->governor_data; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + + if (od_tuners->powersave_bias) freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); else if (p->cur == 
p->max) return; - __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? + __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ? CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); } @@ -170,15 +166,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + struct dbs_data *dbs_data = policy->governor_data; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; dbs_info->freq_lo = 0; /* Check for frequency increase */ - if (load_freq > od_tuners.up_threshold * policy->cur) { + if (load_freq > od_tuners->up_threshold * policy->cur) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = - od_tuners.sampling_down_factor; + od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); return; } @@ -193,9 +191,10 @@ static void od_check_cpu(int cpu, unsigned int load_freq) * support the current CPU usage without triggering the up policy. To be * safe, we focus 10 points under the threshold. */ - if (load_freq < od_tuners.adj_up_threshold * policy->cur) { + if (load_freq < od_tuners->adj_up_threshold + * policy->cur) { unsigned int freq_next; - freq_next = load_freq / od_tuners.adj_up_threshold; + freq_next = load_freq / od_tuners->adj_up_threshold; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; @@ -203,7 +202,7 @@ static void od_check_cpu(int cpu, unsigned int load_freq) if (freq_next < policy->min) freq_next = policy->min; - if (!od_tuners.powersave_bias) { + if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } else { @@ -223,12 +222,14 @@ static void od_dbs_timer(struct work_struct *work) unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; int delay, sample_type = core_dbs_info->sample_type; bool eval_load; mutex_lock(&core_dbs_info->cdbs.timer_mutex); eval_load = need_load_eval(&core_dbs_info->cdbs, - od_tuners.sampling_rate); + od_tuners->sampling_rate); /* Common NORMAL_SAMPLE setup */ core_dbs_info->sample_type = OD_NORMAL_SAMPLE; @@ -240,13 +241,13 @@ static void od_dbs_timer(struct work_struct *work) CPUFREQ_RELATION_H); } else { if (eval_load) - dbs_check_cpu(&od_dbs_data, cpu); + dbs_check_cpu(dbs_data, cpu); if (core_dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ core_dbs_info->sample_type = OD_SUB_SAMPLE; delay = core_dbs_info->freq_hi_jiffies; } else { - delay = delay_for_sampling_rate(od_tuners.sampling_rate + delay = delay_for_sampling_rate(od_tuners->sampling_rate * core_dbs_info->rate_mult); } } @@ -256,12 +257,7 @@ static void od_dbs_timer(struct work_struct *work) } /************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); -} +static struct common_dbs_data od_dbs_cdata; /** * update_sampling_rate - update sampling rate effective immediately if needed. @@ -276,12 +272,14 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj, * reducing the sampling rate, we need to make the new value effective * immediately. 
*/ -static void update_sampling_rate(unsigned int new_rate) +static void update_sampling_rate(struct dbs_data *dbs_data, + unsigned int new_rate) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; int cpu; - od_tuners.sampling_rate = new_rate = max(new_rate, - od_dbs_data.min_sampling_rate); + od_tuners->sampling_rate = new_rate = max(new_rate, + dbs_data->min_sampling_rate); for_each_online_cpu(cpu) { struct cpufreq_policy *policy; @@ -322,34 +320,37 @@ static void update_sampling_rate(unsigned int new_rate) } } -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf, + size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - update_sampling_rate(input); + + update_sampling_rate(dbs_data, input); return count; } -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - od_tuners.io_is_busy = !!input; + od_tuners->io_is_busy = !!input; return count; } -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); @@ -359,23 +360,24 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, return -EINVAL; } /* Calculate the new adj_up_threshold */ - od_tuners.adj_up_threshold += input; - od_tuners.adj_up_threshold -= od_tuners.up_threshold; + od_tuners->adj_up_threshold += input; + od_tuners->adj_up_threshold -= od_tuners->up_threshold; - od_tuners.up_threshold = input; + od_tuners->up_threshold = input; return count; } -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, const char *buf, size_t count) +static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, + const char *buf, size_t count) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input, j; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - od_tuners.sampling_down_factor = input; + od_tuners->sampling_down_factor = input; /* Reset down sampling multiplier in case it was active */ for_each_online_cpu(j) { @@ -386,9 +388,10 @@ static ssize_t store_sampling_down_factor(struct kobject *a, return count; } -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; int ret; @@ -401,10 +404,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == od_tuners.ignore_nice) { /* nothing to do */ + if (input == od_tuners->ignore_nice) { /* nothing to do */ return count; } - od_tuners.ignore_nice = input; + od_tuners->ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -412,7 +415,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, 
struct attribute *b, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall); - if (od_tuners.ignore_nice) + if (od_tuners->ignore_nice) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -420,9 +423,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, - const char *buf, size_t count) +static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf, + size_t count) { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; int ret; ret = sscanf(buf, "%u", &input); @@ -433,68 +437,138 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, if (input > 1000) input = 1000; - od_tuners.powersave_bias = input; + od_tuners->powersave_bias = input; ondemand_powersave_bias_init(); return count; } -show_one(od, sampling_rate, sampling_rate); -show_one(od, io_is_busy, io_is_busy); -show_one(od, up_threshold, up_threshold); -show_one(od, sampling_down_factor, sampling_down_factor); -show_one(od, ignore_nice_load, ignore_nice); -show_one(od, powersave_bias, powersave_bias); - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(powersave_bias); -define_one_global_ro(sampling_rate_min); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &powersave_bias.attr, - &io_is_busy.attr, +show_store_one(od, sampling_rate); +show_store_one(od, io_is_busy); +show_store_one(od, up_threshold); +show_store_one(od, sampling_down_factor); +show_store_one(od, ignore_nice); +show_store_one(od, powersave_bias); +declare_show_sampling_rate_min(od); + +gov_sys_pol_attr_rw(sampling_rate); +gov_sys_pol_attr_rw(io_is_busy); +gov_sys_pol_attr_rw(up_threshold); +gov_sys_pol_attr_rw(sampling_down_factor); +gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(powersave_bias); +gov_sys_pol_attr_ro(sampling_rate_min); + +static struct attribute *dbs_attributes_gov_sys[] = { + &sampling_rate_min_gov_sys.attr, + &sampling_rate_gov_sys.attr, + &up_threshold_gov_sys.attr, + &sampling_down_factor_gov_sys.attr, + &ignore_nice_gov_sys.attr, + &powersave_bias_gov_sys.attr, + &io_is_busy_gov_sys.attr, NULL }; -static struct attribute_group od_attr_group = { - .attrs = dbs_attributes, +static struct attribute_group od_attr_group_gov_sys = { + .attrs = dbs_attributes_gov_sys, + .name = "ondemand", +}; + +static struct attribute *dbs_attributes_gov_pol[] = { + &sampling_rate_min_gov_pol.attr, + &sampling_rate_gov_pol.attr, + &up_threshold_gov_pol.attr, + &sampling_down_factor_gov_pol.attr, + &ignore_nice_gov_pol.attr, + &powersave_bias_gov_pol.attr, + &io_is_busy_gov_pol.attr, + NULL +}; + +static struct attribute_group od_attr_group_gov_pol = { + .attrs = dbs_attributes_gov_pol, .name = "ondemand", }; /************************** sysfs end ************************/ +static int od_init(struct dbs_data *dbs_data) +{ + struct od_dbs_tuners *tuners; + u64 idle_time; + int cpu; + + tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL); + if (!tuners) { + pr_err("%s: kzalloc failed\n", __func__); + return -ENOMEM; + } + + cpu = get_cpu(); + idle_time = get_cpu_idle_time_us(cpu, NULL); + 
put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD - + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In nohz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; + tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD - + DEF_FREQUENCY_DOWN_DIFFERENTIAL; + + /* For correct statistics, we need 10 ticks for each measure */ + dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); + } + + tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; + tuners->ignore_nice = 0; + tuners->powersave_bias = 0; + tuners->io_is_busy = should_io_be_busy(); + + dbs_data->tuners = tuners; + pr_info("%s: tuners %p\n", __func__, tuners); + mutex_init(&dbs_data->mutex); + return 0; +} + +static void od_exit(struct dbs_data *dbs_data) +{ + kfree(dbs_data->tuners); +} + define_get_cpu_dbs_routines(od_cpu_dbs_info); static struct od_ops od_ops = { - .io_busy = should_io_be_busy, .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, .powersave_bias_target = powersave_bias_target, .freq_increase = dbs_freq_increase, }; -static struct dbs_data od_dbs_data = { +static struct common_dbs_data od_dbs_cdata = { .governor = GOV_ONDEMAND, - .attr_group = &od_attr_group, - .tuners = &od_tuners, + .attr_group_gov_sys = &od_attr_group_gov_sys, + .attr_group_gov_pol = &od_attr_group_gov_pol, .get_cpu_cdbs = get_cpu_cdbs, .get_cpu_dbs_info_s = get_cpu_dbs_info_s, .gov_dbs_timer = od_dbs_timer, .gov_check_cpu = od_check_cpu, .gov_ops = &od_ops, + .init = od_init, + .exit = od_exit, }; static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { - return cpufreq_governor_dbs(&od_dbs_data, policy, event); + return cpufreq_governor_dbs(policy, &od_dbs_cdata, event); } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND @@ -509,29 +583,6 @@ struct cpufreq_governor cpufreq_gov_ondemand = { static int __init cpufreq_gov_dbs_init(void) { - u64 idle_time; - int cpu = get_cpu(); - - mutex_init(&od_dbs_data.mutex); - idle_time = get_cpu_idle_time_us(cpu, NULL); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD - - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In nohz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. - */ - od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * - jiffies_to_usecs(10); - } - return cpufreq_register_governor(&cpufreq_gov_ondemand); } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b7393b5..4bbc572 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -232,6 +232,13 @@ struct cpufreq_driver { struct module *owner; char name[CPUFREQ_NAME_LEN]; u8 flags; + /* + * This should be set by platforms having multiple clock-domains, i.e. + * supporting multiple policies. 
With this sysfs directories of governor
+ * would be created in cpu/cpu/cpufreq/ directory and so they can
+ * use the same governor with different tunables for different clusters.
+ */
+	bool have_governor_per_policy;

 	/* needed by all drivers */
 	int (*init) (struct cpufreq_policy *policy);
@@ -332,6 +339,7 @@ const char *cpufreq_get_current_driver(void);
  *********************************************************************/
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
 #ifdef CONFIG_CPU_FREQ
 /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
-- cgit v0.10.2

From 98104ee28f451024170a9dfb7bec31bfcb7e7c14 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Tue, 26 Feb 2013 15:07:24 +0530
Subject: cpufreq: governor: Set MIN_LATENCY_MULTIPLIER to 20

Currently MIN_LATENCY_MULTIPLIER is defined as 100, so on a system with a transition latency of 1 ms the minimum sampling time comes to around 100 ms. That is quite large if you want better performance from your system.

Redefine MIN_LATENCY_MULTIPLIER to 20 so that a 20 ms sampling rate can be supported on such platforms.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c83cabf..27b588a 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -34,7 +34,7 @@
  */
 #define MIN_SAMPLING_RATE_RATIO (2)
 #define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
+#define MIN_LATENCY_MULTIPLIER (20)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)

 /* Ondemand Sampling types */
-- cgit v0.10.2
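As an aside, the governor core applies this constant as a floor: min_sampling_rate = max(min_sampling_rate, MIN_LATENCY_MULTIPLIER * latency), where latency is the transition latency converted from nanoseconds to microseconds. A minimal sketch of that arithmetic for the 1 ms example from the changelog (illustrative userspace code, not part of the patch; constant names mirror cpufreq_governor.h):

#include <stdio.h>

#define LATENCY_MULTIPLIER		(1000)
#define OLD_MIN_LATENCY_MULTIPLIER	(100)
#define NEW_MIN_LATENCY_MULTIPLIER	(20)

int main(void)
{
	unsigned int transition_latency_ns = 1000000;		/* 1 ms, as in the changelog */
	unsigned int latency_us = transition_latency_ns / 1000;	/* governor core converts ns to us */

	/* minimum sampling period enforced on the ondemand/conservative tunable, in us */
	printf("floor with multiplier 100: %u us\n",
	       OLD_MIN_LATENCY_MULTIPLIER * latency_us);	/* 100000 us -> 100 ms */
	printf("floor with multiplier  20: %u us\n",
	       NEW_MIN_LATENCY_MULTIPLIER * latency_us);	/*  20000 us ->  20 ms */
	return 0;
}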
Wysocki diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 15e80ee..c90d345c 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -224,34 +224,32 @@ static void od_dbs_timer(struct work_struct *work) cpu); struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; - int delay, sample_type = core_dbs_info->sample_type; - bool eval_load; + int delay = 0, sample_type = core_dbs_info->sample_type; mutex_lock(&core_dbs_info->cdbs.timer_mutex); - eval_load = need_load_eval(&core_dbs_info->cdbs, - od_tuners->sampling_rate); + if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) + goto max_delay; /* Common NORMAL_SAMPLE setup */ core_dbs_info->sample_type = OD_NORMAL_SAMPLE; if (sample_type == OD_SUB_SAMPLE) { delay = core_dbs_info->freq_lo_jiffies; - if (eval_load) - __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, - core_dbs_info->freq_lo, - CPUFREQ_RELATION_H); + __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, + core_dbs_info->freq_lo, CPUFREQ_RELATION_H); } else { - if (eval_load) - dbs_check_cpu(dbs_data, cpu); + dbs_check_cpu(dbs_data, cpu); if (core_dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ core_dbs_info->sample_type = OD_SUB_SAMPLE; delay = core_dbs_info->freq_hi_jiffies; - } else { - delay = delay_for_sampling_rate(od_tuners->sampling_rate - * core_dbs_info->rate_mult); } } +max_delay: + if (!delay) + delay = delay_for_sampling_rate(od_tuners->sampling_rate + * core_dbs_info->rate_mult); + schedule_delayed_work_on(smp_processor_id(), dw, delay); mutex_unlock(&core_dbs_info->cdbs.timer_mutex); } -- cgit v0.10.2 From 031299b3be30f3ecab110fff8faad85af70e1797 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 27 Feb 2013 12:24:03 +0530 Subject: cpufreq: governors: Avoid unnecessary per cpu timer interrupts Following patch has introduced per cpu timers or works for ondemand and conservative governors. commit 2abfa876f1117b0ab45f191fb1f82c41b1cbc8fe Author: Rickard Andersson Date: Thu Dec 27 14:55:38 2012 +0000 cpufreq: handle SW coordinated CPUs This causes additional unnecessary interrupts on all cpus when the load is recently evaluated by any other cpu. i.e. When load is recently evaluated by cpu x, we don't really need any other cpu to evaluate this load again for the next sampling_rate time. Some sort of code is present to avoid that but we are still getting timer interrupts for all cpus. A good way of avoiding this would be to modify delays for all cpus (policy->cpus) whenever any cpu has evaluated load. This patch does this change and some related code cleanup. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 98b4946..6fe6050 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -107,7 +107,6 @@ static void cs_check_cpu(int cpu, unsigned int load) static void cs_dbs_timer(struct work_struct *work) { - struct delayed_work *dw = to_delayed_work(work); struct cs_cpu_dbs_info_s *dbs_info = container_of(work, struct cs_cpu_dbs_info_s, cdbs.work.work); unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; @@ -116,12 +115,15 @@ static void cs_dbs_timer(struct work_struct *work) struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; int delay = delay_for_sampling_rate(cs_tuners->sampling_rate); + bool modify_all = true; mutex_lock(&core_dbs_info->cdbs.timer_mutex); - if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate)) + if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate)) + modify_all = false; + else dbs_check_cpu(dbs_data, cpu); - schedule_delayed_work_on(smp_processor_id(), dw, delay); + gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); mutex_unlock(&core_dbs_info->cdbs.timer_mutex); } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 26fbb72..326f0c2 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -178,20 +178,38 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) } EXPORT_SYMBOL_GPL(dbs_check_cpu); -static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu, - unsigned int sampling_rate) +static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data, + unsigned int delay) { - int delay = delay_for_sampling_rate(sampling_rate); struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); - schedule_delayed_work_on(cpu, &cdbs->work, delay); + mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); } -static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu) +void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, + unsigned int delay, bool all_cpus) { - struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); + int i; + + if (!all_cpus) { + __gov_queue_work(smp_processor_id(), dbs_data, delay); + } else { + for_each_cpu(i, policy->cpus) + __gov_queue_work(i, dbs_data, delay); + } +} +EXPORT_SYMBOL_GPL(gov_queue_work); + +static inline void gov_cancel_work(struct dbs_data *dbs_data, + struct cpufreq_policy *policy) +{ + struct cpu_dbs_common_info *cdbs; + int i; - cancel_delayed_work_sync(&cdbs->work); + for_each_cpu(i, policy->cpus) { + cdbs = dbs_data->cdata->get_cpu_cdbs(i); + cancel_delayed_work_sync(&cdbs->work); + } } /* Will return if we need to evaluate cpu load again or not */ @@ -380,16 +398,15 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, /* Initiate timer time stamp */ cpu_cdbs->time_stamp = ktime_get(); - for_each_cpu(j, policy->cpus) - dbs_timer_init(dbs_data, j, sampling_rate); + gov_queue_work(dbs_data, policy, + delay_for_sampling_rate(sampling_rate), true); break; case CPUFREQ_GOV_STOP: if (dbs_data->cdata->governor == GOV_CONSERVATIVE) cs_dbs_info->enable = 0; - for_each_cpu(j, policy->cpus) - dbs_timer_exit(dbs_data, j); + gov_cancel_work(dbs_data, policy); mutex_lock(&dbs_data->mutex); mutex_destroy(&cpu_cdbs->timer_mutex); diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 27b588a..c9c269f 100644 
--- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -262,4 +262,6 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event); +void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, + unsigned int delay, bool all_cpus); #endif /* _CPUFREQ_GOVERNER_H */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index c90d345c..459f9ee 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -216,7 +216,6 @@ static void od_check_cpu(int cpu, unsigned int load_freq) static void od_dbs_timer(struct work_struct *work) { - struct delayed_work *dw = to_delayed_work(work); struct od_cpu_dbs_info_s *dbs_info = container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; @@ -225,10 +224,13 @@ static void od_dbs_timer(struct work_struct *work) struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; int delay = 0, sample_type = core_dbs_info->sample_type; + bool modify_all = true; mutex_lock(&core_dbs_info->cdbs.timer_mutex); - if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) + if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) { + modify_all = false; goto max_delay; + } /* Common NORMAL_SAMPLE setup */ core_dbs_info->sample_type = OD_NORMAL_SAMPLE; @@ -250,7 +252,7 @@ max_delay: delay = delay_for_sampling_rate(od_tuners->sampling_rate * core_dbs_info->rate_mult); - schedule_delayed_work_on(smp_processor_id(), dw, delay); + gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); mutex_unlock(&core_dbs_info->cdbs.timer_mutex); } @@ -310,8 +312,8 @@ static void update_sampling_rate(struct dbs_data *dbs_data, cancel_delayed_work_sync(&dbs_info->cdbs.work); mutex_lock(&dbs_info->cdbs.timer_mutex); - schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, - usecs_to_jiffies(new_rate)); + gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, + usecs_to_jiffies(new_rate), true); } mutex_unlock(&dbs_info->cdbs.timer_mutex); -- cgit v0.10.2 From ad529a9cd2306782907371830135a140e4572919 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 28 Feb 2013 05:38:01 +0000 Subject: cpufreq: conservative: Break out earlier on the lowest frequency If we're on the lowest frequency, no need to calculate new freq. Break out even earlier in this case. Signed-off-by: Namhyung Kim Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 6fe6050..302931e 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -87,18 +87,18 @@ static void cs_check_cpu(int cpu, unsigned int load) * safe, we focus 10 points under the threshold. 
*/ if (load < (cs_tuners->down_threshold - 10)) { - freq_target = (cs_tuners->freq_step * policy->max) / 100; - - dbs_info->requested_freq -= freq_target; - if (dbs_info->requested_freq < policy->min) - dbs_info->requested_freq = policy->min; - /* * if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; + freq_target = (cs_tuners->freq_step * policy->max) / 100; + + dbs_info->requested_freq -= freq_target; + if (dbs_info->requested_freq < policy->min) + dbs_info->requested_freq = policy->min; + __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_H); return; -- cgit v0.10.2 From fed573d5c91ab8f12801740cfac0567e33635b1e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 28 Feb 2013 05:38:02 +0000 Subject: cpufreq: conservative: Fix relation when decreasing frequency The relation should be CPUFREQ_RELATION_L to find optimal frequency when decreasing. Signed-off-by: Namhyung Kim Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 302931e..a4af9c3 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -100,7 +100,7 @@ static void cs_check_cpu(int cpu, unsigned int load) dbs_info->requested_freq = policy->min; __cpufreq_driver_target(policy, dbs_info->requested_freq, - CPUFREQ_RELATION_H); + CPUFREQ_RELATION_L); return; } } -- cgit v0.10.2 From 9366d84052e7c5b2eca804c08cfcd00b490f4de2 Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Thu, 28 Feb 2013 16:57:32 +0000 Subject: cpufreq: governors: Calculate iowait time only when necessary Currently we always calculate the CPU iowait time and add it to idle time. If we are in ondemand and we use io_is_busy, we re-calculate iowait time and we subtract it from idle time. With this patch iowait time is calculated only when necessary avoiding the double call to get_cpu_iowait_time_us. We use a parameter in function get_cpu_idle_time to distinguish when the iowait time will be added to idle time or not, without the need of keeping the prev_io_wait. Signed-off-by: Stratos Karafotis Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index a4af9c3..52f76b1 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -240,7 +240,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, struct cs_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->cdbs.prev_cpu_wall); + &dbs_info->cdbs.prev_cpu_wall, 0); if (cs_tuners->ignore_nice) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 326f0c2..443442d 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -67,13 +67,13 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) return cputime_to_usecs(idle_time); } -u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) { - u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? 
wall : NULL); if (idle_time == -1ULL) return get_cpu_idle_time_jiffy(cpu, wall); - else + else if (!io_busy) idle_time += get_cpu_iowait_time_us(cpu, wall); return idle_time; @@ -100,13 +100,22 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) /* Get Absolute Load (in terms of freq for ondemand gov) */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs; - u64 cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; + u64 cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; unsigned int load; + int io_busy = 0; j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + /* + * For the purpose of ondemand, waiting for disk IO is + * an indication that you're performance critical, and + * not that the system is actually idle. So do not add + * the iowait time to the cpu idle time. + */ + if (dbs_data->cdata->governor == GOV_ONDEMAND) + io_busy = od_tuners->io_is_busy; + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); wall_time = (unsigned int) (cur_wall_time - j_cdbs->prev_cpu_wall); @@ -134,29 +143,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) idle_time += jiffies_to_usecs(cur_nice_jiffies); } - if (dbs_data->cdata->governor == GOV_ONDEMAND) { - struct od_cpu_dbs_info_s *od_j_dbs_info = - dbs_data->cdata->get_cpu_dbs_info_s(cpu); - - cur_iowait_time = get_cpu_iowait_time_us(j, - &cur_wall_time); - if (cur_iowait_time == -1ULL) - cur_iowait_time = 0; - - iowait_time = (unsigned int) (cur_iowait_time - - od_j_dbs_info->prev_cpu_iowait); - od_j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - /* - * For the purpose of ondemand, waiting for disk IO is - * an indication that you're performance critical, and - * not that the system is actually idle. So subtract the - * iowait time from the cpu idle time. 
- */ - if (od_tuners->io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - } - if (unlikely(!wall_time || wall_time < idle_time)) continue; @@ -254,6 +240,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct cs_dbs_tuners *cs_tuners = NULL; struct cpu_dbs_common_info *cpu_cdbs; unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; + int io_busy = 0; int rc; if (have_governor_per_policy()) @@ -353,6 +340,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, sampling_rate = od_tuners->sampling_rate; ignore_nice = od_tuners->ignore_nice; od_ops = dbs_data->cdata->gov_ops; + io_busy = od_tuners->io_is_busy; } switch (event) { @@ -369,7 +357,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_cdbs->cpu = j; j_cdbs->cur_policy = policy; j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, - &j_cdbs->prev_cpu_wall); + &j_cdbs->prev_cpu_wall, io_busy); if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index c9c269f..513cc82 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -148,7 +148,6 @@ struct cpu_dbs_common_info { struct od_cpu_dbs_info_s { struct cpu_dbs_common_info cdbs; - u64 prev_cpu_iowait; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_jiffies; @@ -256,7 +255,7 @@ static ssize_t show_sampling_rate_min_gov_pol \ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ } -u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 459f9ee..1471478 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -339,11 +339,20 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf, struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; int ret; + unsigned int j; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; od_tuners->io_is_busy = !!input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + j); + dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); + } return count; } @@ -414,7 +423,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, struct od_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->cdbs.prev_cpu_wall); + &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); if (od_tuners->ignore_nice) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; -- cgit v0.10.2 From 7af1c0568d33339318c6710381921a3a3d40eebb Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Tue, 5 Mar 2013 22:06:29 +0000 Subject: cpufreq: conservative: Fix sampling_down_factor functionality sampling_down_factor tunable is unused since commit 8e677ce83bf41ba9c74e5b6d9ee60b07d4e5ed93 (4 years ago). This patch restores the original functionality and documents the tunable. Signed-off-by: Stratos Karafotis Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index c7a2eb8..4dfed30 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt @@ -191,6 +191,12 @@ governor but for the opposite direction. For example when set to its default value of '20' it means that if the CPU usage needs to be below 20% between samples to have the frequency decreased. +sampling_down_factor: similar functionality as in "ondemand" governor. +But in "conservative", it controls the rate at which the kernel makes +a decision on when to decrease the frequency while running in any +speed. Load for frequency increase is still evaluated every +sampling rate. + 3. The Governor Interface in the CPUfreq Core ============================================= diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 52f76b1..d746d6a 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -36,9 +36,9 @@ static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); /* * Every sampling_rate, we check, if current idle time is less than 20% - * (default), then we try to increase frequency Every sampling_rate * - * sampling_down_factor, we check, if current idle time is more than 80%, then - * we try to decrease frequency + * (default), then we try to increase frequency. Every sampling_rate * + * sampling_down_factor, we check, if current idle time is more than 80% + * (default), then we try to decrease frequency * * Any frequency increase takes it to the maximum frequency. Frequency reduction * happens at minimum steps of 5% (default) of maximum frequency @@ -81,6 +81,11 @@ static void cs_check_cpu(int cpu, unsigned int load) return; } + /* if sampling_down_factor is active break out early */ + if (++dbs_info->down_skip < cs_tuners->sampling_down_factor) + return; + dbs_info->down_skip = 0; + /* * The optimal frequency is the frequency that is the lowest that can * support the current CPU usage without triggering the up policy. To be -- cgit v0.10.2 From 27ed3cd2ebf4cd78b198be9758c538cdede36d8a Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Tue, 5 Mar 2013 22:06:40 +0000 Subject: cpufreq: conservative: Fix the logic in frequency decrease checking When we evaluate the CPU load for frequency decrease we have to compare the load against down_threshold. There is no need to subtract 10 points from down_threshold. Instead, we have to use the default down_threshold or user's selection unmodified. Signed-off-by: Stratos Karafotis Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index d746d6a..0d6a9e5 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -86,12 +86,8 @@ static void cs_check_cpu(int cpu, unsigned int load) return; dbs_info->down_skip = 0; - /* - * The optimal frequency is the frequency that is the lowest that can - * support the current CPU usage without triggering the up policy. To be - * safe, we focus 10 points under the threshold. 
- */ - if (load < (cs_tuners->down_threshold - 10)) { + /* Check for frequency decrease */ + if (load < cs_tuners->down_threshold) { /* * if we cannot reduce the frequency anymore, break out early */ -- cgit v0.10.2 From 5df6055939f295d723871d9781e73495b131b3d6 Mon Sep 17 00:00:00 2001 From: "jhbird.choi@samsung.com" Date: Mon, 18 Mar 2013 08:09:42 +0000 Subject: cpufreq: Fix unsigned variable being checked for negative value clk_round_rate() returns singed value which was assigned to an unsigned variable. So it can't be checked for negative. Signed-off-by: Jonghwan Choi Acked-by: Shawn Guo Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb..6bb88af 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -44,7 +44,8 @@ static int cpu0_set_target(struct cpufreq_policy *policy, { struct cpufreq_freqs freqs; struct opp *opp; - unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0; + unsigned long volt = 0, volt_old = 0, tol = 0; + long freq_Hz; unsigned int index, cpu; int ret; -- cgit v0.10.2 From cc721c4fdf742387f4e5a061fa8a7ab6fac453bf Mon Sep 17 00:00:00 2001 From: Silviu-Mihai Popescu Date: Fri, 22 Mar 2013 00:10:22 +0000 Subject: cpufreq: kirkwood: fix coccicheck warnings Convert all uses of devm_request_and_ioremap() to the newly introduced devm_ioremap_resource() which provides more consistent error handling. devm_ioremap_resource() provides its own error messages so all explicit error messages can be removed from the failure code paths. Signed-off-by: Silviu-Mihai Popescu Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index 0e83e3c..6052476 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -175,11 +175,9 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Cannot get memory resource\n"); return -ENODEV; } - priv.base = devm_request_and_ioremap(&pdev->dev, res); - if (!priv.base) { - dev_err(&pdev->dev, "Cannot ioremap\n"); - return -EADDRNOTAVAIL; - } + priv.base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv.base)) + return PTR_ERR(priv.base); np = of_find_node_by_path("/cpus/cpu@0"); if (!np) -- cgit v0.10.2 From 987658477216c3d8e56d1cfc8a17d2f930d2a632 Mon Sep 17 00:00:00 2001 From: Stratos Karafotis Date: Fri, 22 Mar 2013 08:03:17 +0000 Subject: cpufreq: conservative: Use an inline function to evaluate freq_target Use an inline function to evaluate freq_target to avoid duplicate code. Also, define a macro for the default frequency step. Signed-off-by: Stratos Karafotis Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0d6a9e5..0ceb2ef 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -29,11 +29,24 @@ /* Conservative governor macros */ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) +#define DEF_FREQUENCY_STEP (5) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); +static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners, + struct cpufreq_policy *policy) +{ + unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100; + + /* max freq cannot be less than 100. But who knows... 
*/ + if (unlikely(freq_target == 0)) + freq_target = DEF_FREQUENCY_STEP; + + return freq_target; +} + /* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency. Every sampling_rate * @@ -49,7 +62,6 @@ static void cs_check_cpu(int cpu, unsigned int load) struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; - unsigned int freq_target; /* * break out if we 'cannot' reduce the speed as the user might @@ -66,13 +78,7 @@ static void cs_check_cpu(int cpu, unsigned int load) if (dbs_info->requested_freq == policy->max) return; - freq_target = (cs_tuners->freq_step * policy->max) / 100; - - /* max freq cannot be less than 100. But who knows.... */ - if (unlikely(freq_target == 0)) - freq_target = 5; - - dbs_info->requested_freq += freq_target; + dbs_info->requested_freq += get_freq_target(cs_tuners, policy); if (dbs_info->requested_freq > policy->max) dbs_info->requested_freq = policy->max; @@ -94,9 +100,7 @@ static void cs_check_cpu(int cpu, unsigned int load) if (policy->cur == policy->min) return; - freq_target = (cs_tuners->freq_step * policy->max) / 100; - - dbs_info->requested_freq -= freq_target; + dbs_info->requested_freq -= get_freq_target(cs_tuners, policy); if (dbs_info->requested_freq < policy->min) dbs_info->requested_freq = policy->min; @@ -335,7 +339,7 @@ static int cs_init(struct dbs_data *dbs_data) tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; tuners->ignore_nice = 0; - tuners->freq_step = 5; + tuners->freq_step = DEF_FREQUENCY_STEP; dbs_data->tuners = tuners; dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * -- cgit v0.10.2 From b2a63431b49f4ef4df1ea5ddb799af48cfa349fc Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:41 +0000 Subject: cpufreq: cpu0: Fix mistake in Documentation example "clock-latency" is incorrectly written as "transition-latency" in an example present in Documentation of cpufreq-cpu0. Fix it. Signed-off-by: Viresh Kumar Acked-by: Shawn Guo Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt index 4416ccc..051f764 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt @@ -32,7 +32,7 @@ cpus { 396000 950000 198000 850000 >; - transition-latency = <61036>; /* two CLK32 periods */ + clock-latency = <61036>; /* two CLK32 periods */ }; cpu@1 { -- cgit v0.10.2 From 3a7818e62751a4dda40d6ea0bdb0022d56aee3c1 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:42 +0000 Subject: cpufreq: Documentation: Fix cpufreq_frequency_table name At few places in documentation cpufreq_frequency_table is written as cpufreq_freq_table. Fix these. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt index 72f70b1..7c7721d 100644 --- a/Documentation/cpu-freq/cpu-drivers.txt +++ b/Documentation/cpu-freq/cpu-drivers.txt @@ -184,10 +184,10 @@ the reference implementation in drivers/cpufreq/longrun.c As most cpufreq processors only allow for being set to a few specific frequencies, a "frequency table" with some functions might assist in some work of the processor driver. 
Such a "frequency table" consists -of an array of struct cpufreq_freq_table entries, with any value in +of an array of struct cpufreq_frequency_table entries, with any value in "index" you want to use, and the corresponding frequency in "frequency". At the end of the table, you need to add a -cpufreq_freq_table entry with frequency set to CPUFREQ_TABLE_END. And +cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. And if you want to skip one entry in the table, set the frequency to CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending order. -- cgit v0.10.2 From 746b3df98b2edc0e0844537a247e820ac6503031 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:43 +0000 Subject: cpufreq: cpufreq-cpu0: No need to check cpu number in init() It is not possible for init() to be called for any cpu other than cpu0. During bootup whatever cpu is used to boot system will be assigned as cpu0. And later on policy->cpu can only change if we hotunplug all cpus first and then hotplug them back in different order, which isn't possible (system requires atleast one cpu to be up always :)). Though I can see one situation where policy->cpu can be different then zero. - Hot-unplug cpu 0. - rmmod cpufreq-cpu0 module - insmod it back - hotplug cpu 0 again. Here, policy->cpu would be different. But the driver doesn't have any dependency on cpu0 as such. We don't mind which cpu of a system is policy->cpu and so this check is just not required. Remove it. Signed-off-by: Viresh Kumar Acked-by: Shawn Guo Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 6bb88af..4c5a4cf 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -130,9 +130,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy) { int ret; - if (policy->cpu != 0) - return -EINVAL; - ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); if (ret) { pr_err("invalid frequency table: %d\n", ret); -- cgit v0.10.2 From fd143b4d6fb763183ef6e46d1ab84a42c59079af Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:44 +0000 Subject: cpufreq: cpufreq-cpu0: Call CPUFREQ_POSTCHANGE notifier for failure cases too Currently we are simply returning from target() if we encounter some error after broadcasting CPUFREQ_PRECHANGE notifier. Which looks to be wrong as others might depend on POSTCHANGE notifier for their functioning. So, better broadcast CPUFREQ_POSTCHANGE notifier for these failure cases too, but with old frequency. Signed-off-by: Viresh Kumar Acked-by: Shawn Guo Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4c5a4cf..a7e51bd 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -77,7 +77,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy, if (IS_ERR(opp)) { rcu_read_unlock(); pr_err("failed to find OPP for %ld\n", freq_Hz); - return PTR_ERR(opp); + freqs.new = freqs.old; + ret = PTR_ERR(opp); + goto post_notify; } volt = opp_get_voltage(opp); rcu_read_unlock(); @@ -95,7 +97,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, if (ret) { pr_err("failed to scale voltage up: %d\n", ret); freqs.new = freqs.old; - return ret; + goto post_notify; } } @@ -104,7 +106,8 @@ static int cpu0_set_target(struct cpufreq_policy *policy, pr_err("failed to set clock rate: %d\n", ret); if (cpu_reg) regulator_set_voltage_tol(cpu_reg, volt_old, tol); - return ret; + freqs.new = freqs.old; + goto post_notify; } /* scaling down? scale voltage after frequency */ @@ -114,16 +117,16 @@ static int cpu0_set_target(struct cpufreq_policy *policy, pr_err("failed to scale voltage down: %d\n", ret); clk_set_rate(cpu_clk, freqs.old * 1000); freqs.new = freqs.old; - return ret; } } +post_notify: for_each_online_cpu(cpu) { freqs.cpu = cpu; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } - return 0; + return ret; } static int cpu0_cpufreq_init(struct cpufreq_policy *policy) -- cgit v0.10.2 From b43a7ffbf33be7e4d3b10b7714ee663ea2c52fe2 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Sun, 24 Mar 2013 11:56:43 +0530 Subject: cpufreq: Notify all policy->cpus in cpufreq_notify_transition() policy->cpus contains all online cpus that have single shared clock line. And their frequencies are always updated together. Many SMP system's cpufreq drivers take care of this in individual drivers but the best place for this code is in cpufreq core. This patch modifies cpufreq_notify_transition() to notify frequency change for all cpus in policy->cpus and hence updates all users of this API. Signed-off-by: Viresh Kumar Acked-by: Stephen Warren Tested-by: Stephen Warren Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 4729eaa..55eb870 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c @@ -90,7 +90,6 @@ static int davinci_target(struct cpufreq_policy *policy, freqs.old = davinci_getspeed(0); freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; - freqs.cpu = 0; if (freqs.old == freqs.new) return ret; @@ -102,7 +101,7 @@ static int davinci_target(struct cpufreq_policy *policy, if (ret) return -EINVAL; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* if moving to higher frequency, up the voltage beforehand */ if (pdata->set_voltage && freqs.new > freqs.old) { @@ -126,7 +125,7 @@ static int davinci_target(struct cpufreq_policy *policy, pdata->set_voltage(idx); out: - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c index d8c75c3..cfce5e3 100644 --- a/arch/arm/mach-imx/cpufreq.c +++ b/arch/arm/mach-imx/cpufreq.c @@ -87,13 +87,12 @@ static int mxc_set_target(struct cpufreq_policy *policy, freqs.old = clk_get_rate(cpu_clk) / 1000; freqs.new = freq_Hz / 1000; - freqs.cpu = 0; freqs.flags = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); ret = set_cpu_freq(freq_Hz); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c index 590c192..df863c3 100644 --- a/arch/arm/mach-integrator/cpu.c +++ b/arch/arm/mach-integrator/cpu.c @@ -123,14 +123,12 @@ static int integrator_set_target(struct cpufreq_policy *policy, vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); freqs.new = icst_hz(&cclk_params, vco) / 1000; - freqs.cpu = policy->cpu; - if (freqs.old == freqs.new) { set_cpus_allowed(current, cpus_allowed); return 0; } - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); cm_osc = __raw_readl(CM_OSC); @@ -151,7 +149,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, */ set_cpus_allowed(current, cpus_allowed); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 6a7aeab..f1ca4da 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c @@ -311,7 +311,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, new_freq_mem = pxa_freq_settings[idx].membus; freqs.old = policy->cur; freqs.new = new_freq_cpu; - freqs.cpu = policy->cpu; if (freq_debug) pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", @@ -327,7 +326,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, * you should add a notify client with any platform specific * Vcc changing capability */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* Calculate the next MDREFR. If we're slowing down the SDRAM clock * we need to preset the smaller DRI before the change. 
If we're @@ -382,7 +381,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, * you should add a notify client with any platform specific * SDRAM refresh timer adjustments */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); /* * Even if voltage setting fails, we don't report it, as the frequency diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c index b85b4ab..8c45b2b 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c @@ -184,7 +184,6 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = next->cpufreq_mhz * 1000; - freqs.cpu = policy->cpu; pr_debug("CPU frequency from %d MHz to %d MHz%s\n", freqs.old / 1000, freqs.new / 1000, @@ -193,14 +192,14 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, if (freqs.old == target_freq) return 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); __update_core_freq(next); __update_bus_freq(next); local_irq_restore(flags); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/arch/arm/mach-s3c24xx/cpufreq.c b/arch/arm/mach-s3c24xx/cpufreq.c index 5f181e7..3c0e78e 100644 --- a/arch/arm/mach-s3c24xx/cpufreq.c +++ b/arch/arm/mach-s3c24xx/cpufreq.c @@ -204,7 +204,6 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, freqs.old = cpu_cur.freq; freqs.new = cpu_new.freq; - freqs.freqs.cpu = 0; freqs.freqs.old = cpu_cur.freq.armclk / 1000; freqs.freqs.new = cpu_new.freq.armclk / 1000; @@ -218,9 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); /* start the frequency change */ - - if (policy) - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); /* If hclk is staying the same, then we do not need to * re-write the IO or the refresh timings whilst we are changing @@ -264,8 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, local_irq_restore(flags); /* notify everyone we've done this */ - if (policy) - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); s3c_freq_dbg("%s: finished\n", __func__); return 0; diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c index e8f4d1e..3268761 100644 --- a/arch/arm/mach-sa1100/cpu-sa1100.c +++ b/arch/arm/mach-sa1100/cpu-sa1100.c @@ -201,9 +201,8 @@ static int sa1100_target(struct cpufreq_policy *policy, freqs.old = cur; freqs.new = sa11x0_ppcr_to_freq(new_ppcr); - freqs.cpu = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (freqs.new > cur) sa1100_update_dram_timings(cur, freqs.new); @@ -213,7 +212,7 @@ static int sa1100_target(struct cpufreq_policy *policy, if (freqs.new < cur) sa1100_update_dram_timings(cur, freqs.new); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c index 48c45b0..38a7733 100644 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ b/arch/arm/mach-sa1100/cpu-sa1110.c @@ -258,7 +258,6 @@ 
static int sa1110_target(struct cpufreq_policy *policy, freqs.old = sa11x0_getspeed(0); freqs.new = sa11x0_ppcr_to_freq(ppcr); - freqs.cpu = 0; sdram_calculate_timing(&sd, freqs.new, sdram); @@ -279,7 +278,7 @@ static int sa1110_target(struct cpufreq_policy *policy, sd.mdcas[2] = 0xaaaaaaaa; #endif - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* * The clock could be going away for some time. Set the SDRAMs @@ -327,7 +326,7 @@ static int sa1110_target(struct cpufreq_policy *policy, */ sdram_update_refresh(freqs.new, sdram); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index e3d6e15..11ca730 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -106,7 +106,8 @@ out: return ret; } -static int tegra_update_cpu_speed(unsigned long rate) +static int tegra_update_cpu_speed(struct cpufreq_policy *policy, + unsigned long rate) { int ret = 0; struct cpufreq_freqs freqs; @@ -128,8 +129,7 @@ static int tegra_update_cpu_speed(unsigned long rate) else clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ - for_each_online_cpu(freqs.cpu) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); #ifdef CONFIG_CPU_FREQ_DEBUG printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", @@ -143,8 +143,7 @@ static int tegra_update_cpu_speed(unsigned long rate) return ret; } - for_each_online_cpu(freqs.cpu) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } @@ -181,7 +180,7 @@ static int tegra_target(struct cpufreq_policy *policy, target_cpu_speed[policy->cpu] = freq; - ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); + ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed()); out: mutex_unlock(&tegra_cpu_lock); @@ -193,10 +192,12 @@ static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, { mutex_lock(&tegra_cpu_lock); if (event == PM_SUSPEND_PREPARE) { + struct cpufreq_policy *policy = cpufreq_cpu_get(0); is_suspended = true; pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", freq_table[0].frequency); - tegra_update_cpu_speed(freq_table[0].frequency); + tegra_update_cpu_speed(policy, freq_table[0].frequency); + cpufreq_cpu_put(policy); } else if (event == PM_POST_SUSPEND) { is_suspended = false; } diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c index 18b7656..6544887 100644 --- a/arch/avr32/mach-at32ap/cpufreq.c +++ b/arch/avr32/mach-at32ap/cpufreq.c @@ -61,7 +61,6 @@ static int at32_set_target(struct cpufreq_policy *policy, freqs.old = at32_get_speed(0); freqs.new = (freq + 500) / 1000; - freqs.cpu = 0; freqs.flags = 0; if (!ref_freq) { @@ -69,7 +68,7 @@ static int at32_set_target(struct cpufreq_policy *policy, loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; } - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (freqs.old < freqs.new) boot_cpu_data.loops_per_jiffy = cpufreq_scale( loops_per_jiffy_ref, ref_freq, freqs.new); @@ -77,7 +76,7 @@ static int at32_set_target(struct cpufreq_policy *policy, if (freqs.new < freqs.old) boot_cpu_data.loops_per_jiffy = cpufreq_scale( loops_per_jiffy_ref, ref_freq, freqs.new); - cpufreq_notify_transition(&freqs, 
CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("cpufreq: set frequency %lu Hz\n", freq); diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c index d88bd31..995511e80 100644 --- a/arch/blackfin/mach-common/cpufreq.c +++ b/arch/blackfin/mach-common/cpufreq.c @@ -127,13 +127,13 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new) } #endif -static int bfin_target(struct cpufreq_policy *poli, +static int bfin_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { #ifndef CONFIG_BF60x unsigned int plldiv; #endif - unsigned int index, cpu; + unsigned int index; unsigned long cclk_hz; struct cpufreq_freqs freqs; static unsigned long lpj_ref; @@ -144,59 +144,48 @@ static int bfin_target(struct cpufreq_policy *poli, cycles_t cycles; #endif - for_each_online_cpu(cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, + relation, &index)) + return -EINVAL; - if (!policy) - continue; + cclk_hz = bfin_freq_table[index].frequency; - if (cpufreq_frequency_table_target(policy, bfin_freq_table, - target_freq, relation, &index)) - return -EINVAL; + freqs.old = bfin_getfreq_khz(0); + freqs.new = cclk_hz; - cclk_hz = bfin_freq_table[index].frequency; + pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", + cclk_hz, target_freq, freqs.old); - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - freqs.cpu = cpu; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - if (cpu == CPUFREQ_CPU) { + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); #ifndef CONFIG_BF60x - plldiv = (bfin_read_PLL_DIV() & SSEL) | - dpm_state_table[index].csel; - bfin_write_PLL_DIV(plldiv); + plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; + bfin_write_PLL_DIV(plldiv); #else - ret = cpu_set_cclk(cpu, freqs.new * 1000); - if (ret != 0) { - WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); - break; - } + ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); + if (ret != 0) { + WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); + return ret; + } #endif - on_each_cpu(bfin_adjust_core_timer, &index, 1); + on_each_cpu(bfin_adjust_core_timer, &index, 1); #if defined(CONFIG_CYCLES_CLOCKSOURCE) - cycles = get_cycles(); - SSYNC(); - cycles += 10; /* ~10 cycles we lose after get_cycles() */ - __bfin_cycles_off += - (cycles << __bfin_cycles_mod) - (cycles << index); - __bfin_cycles_mod = index; + cycles = get_cycles(); + SSYNC(); + cycles += 10; /* ~10 cycles we lose after get_cycles() */ + __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); + __bfin_cycles_mod = index; #endif - if (!lpj_ref_freq) { - lpj_ref = loops_per_jiffy; - lpj_ref_freq = freqs.old; - } - if (freqs.new != freqs.old) { - loops_per_jiffy = cpufreq_scale(lpj_ref, - lpj_ref_freq, freqs.new); - } - } - /* TODO: just test case for cycles clock source, remove later */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + if (!lpj_ref_freq) { + lpj_ref = loops_per_jiffy; + lpj_ref_freq = freqs.old; } + if (freqs.new != freqs.old) { + loops_per_jiffy = cpufreq_scale(lpj_ref, + lpj_ref_freq, freqs.new); + } + + /* TODO: just test case for cycles clock source, remove later */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("cpufreq: done\n"); return ret; diff --git 
a/arch/cris/arch-v32/mach-a3/cpufreq.c b/arch/cris/arch-v32/mach-a3/cpufreq.c index ee391ec..ee142c4 100644 --- a/arch/cris/arch-v32/mach-a3/cpufreq.c +++ b/arch/cris/arch-v32/mach-a3/cpufreq.c @@ -27,23 +27,17 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 200000 : 6000; } -static void cris_freq_set_cpu_state(unsigned int state) +static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) { - int i = 0; struct cpufreq_freqs freqs; reg_clkgen_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); -#ifdef CONFIG_SMP - for_each_present_cpu(i) -#endif - { - freqs.old = cris_freq_get_cpu_frequency(i); - freqs.new = cris_freq_table[state].frequency; - freqs.cpu = i; - } + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); + freqs.new = cris_freq_table[state].frequency; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); local_irq_disable(); @@ -57,7 +51,7 @@ static void cris_freq_set_cpu_state(unsigned int state) local_irq_enable(); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; static int cris_freq_verify(struct cpufreq_policy *policy) @@ -75,7 +69,7 @@ static int cris_freq_target(struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - cris_freq_set_cpu_state(newstate); + cris_freq_set_cpu_state(policy, newstate); return 0; } diff --git a/arch/cris/arch-v32/mach-fs/cpufreq.c b/arch/cris/arch-v32/mach-fs/cpufreq.c index d92cf70..1295223 100644 --- a/arch/cris/arch-v32/mach-fs/cpufreq.c +++ b/arch/cris/arch-v32/mach-fs/cpufreq.c @@ -27,20 +27,17 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) return clk_ctrl.pll ? 
200000 : 6000; } -static void cris_freq_set_cpu_state(unsigned int state) +static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) { - int i; struct cpufreq_freqs freqs; reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - for_each_possible_cpu(i) { - freqs.old = cris_freq_get_cpu_frequency(i); - freqs.new = cris_freq_table[state].frequency; - freqs.cpu = i; - } + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); + freqs.new = cris_freq_table[state].frequency; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); local_irq_disable(); @@ -54,7 +51,7 @@ static void cris_freq_set_cpu_state(unsigned int state) local_irq_enable(); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; static int cris_freq_verify(struct cpufreq_policy *policy) @@ -71,7 +68,7 @@ static int cris_freq_target(struct cpufreq_policy *policy, (policy, cris_freq_table, target_freq, relation, &newstate)) return -EINVAL; - cris_freq_set_cpu_state(newstate); + cris_freq_set_cpu_state(policy, newstate); return 0; } diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index f09b174..4700fef 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -137,7 +137,7 @@ migrate_end: static int processor_set_freq ( struct cpufreq_acpi_io *data, - unsigned int cpu, + struct cpufreq_policy *policy, int state) { int ret = 0; @@ -149,8 +149,8 @@ processor_set_freq ( pr_debug("processor_set_freq\n"); saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (smp_processor_id() != cpu) { + set_cpus_allowed_ptr(current, cpumask_of(policy->cpu)); + if (smp_processor_id() != policy->cpu) { retval = -EAGAIN; goto migrate_end; } @@ -170,12 +170,11 @@ processor_set_freq ( data->acpi_data.state, state); /* cpufreq frequency struct */ - cpufreq_freqs.cpu = cpu; cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; cpufreq_freqs.new = data->freq_table[state].frequency; /* notify cpufreq */ - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); /* * First we write the target state's 'control' value to the @@ -189,17 +188,20 @@ processor_set_freq ( ret = processor_set_pstate(value); if (ret) { unsigned int tmp = cpufreq_freqs.new; - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_POSTCHANGE); cpufreq_freqs.new = cpufreq_freqs.old; cpufreq_freqs.old = tmp; - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_POSTCHANGE); printk(KERN_WARNING "Transition failed with error %d\n", ret); retval = -ENODEV; goto migrate_end; } - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); data->acpi_data.state = state; @@ -240,7 +242,7 @@ acpi_cpufreq_target ( if (result) return (result); - result = processor_set_freq(data, policy->cpu, next_state); + result = processor_set_freq(data, policy, next_state); return (result); } diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c 
b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index 3237c52..bafda70 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -80,7 +80,6 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - freqs.cpu = cpu; freqs.old = loongson2_cpufreq_get(cpu); freqs.new = freq; freqs.flags = 0; @@ -89,7 +88,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); @@ -97,7 +96,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, clk_set_rate(cpuclk, freq); /* notifiers */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("cpufreq: set frequency %u kHz\n", freq); diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c index d4c39e3..718c6a3 100644 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c @@ -156,10 +156,9 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = cbe_freqs[cbe_pmode_new].frequency; - freqs.cpu = policy->cpu; mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("setting frequency for cpu %d to %d kHz, " \ "1/%d of max frequency\n", @@ -169,7 +168,7 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy, rc = set_pmode(policy->cpu, cbe_pmode_new); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&cbe_switch_mutex); return rc; diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 890f30e..be1e795 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c @@ -273,10 +273,9 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = pas_freqs[pas_astate_new].frequency; - freqs.cpu = policy->cpu; mutex_lock(&pas_switch_mutex); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", policy->cpu, @@ -288,7 +287,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, for_each_online_cpu(i) set_astate(i, pas_astate_new); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&pas_switch_mutex); ppc_proc_freq = freqs.new * 1000ul; diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 311b804..3104fad 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -335,7 +335,8 @@ static int pmu_set_cpu_speed(int low_speed) return 0; } -static int do_set_cpu_speed(int speed_mode, int notify) +static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, + int notify) { struct cpufreq_freqs freqs; unsigned long l3cr; @@ -343,13 +344,12 @@ static int do_set_cpu_speed(int speed_mode, int notify) freqs.old = cur_freq; freqs.new = (speed_mode == CPUFREQ_HIGH) 
? hi_freq : low_freq; - freqs.cpu = smp_processor_id(); if (freqs.old == freqs.new) return 0; if (notify) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (speed_mode == CPUFREQ_LOW && cpu_has_feature(CPU_FTR_L3CR)) { l3cr = _get_L3CR(); @@ -366,7 +366,7 @@ static int do_set_cpu_speed(int speed_mode, int notify) _set_L3CR(prev_l3cr); } if (notify) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; return 0; @@ -393,7 +393,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - rc = do_set_cpu_speed(newstate, 1); + rc = do_set_cpu_speed(policy, newstate, 1); ppc_proc_freq = cur_freq * 1000ul; return rc; @@ -442,7 +442,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) no_schedule = 1; sleep_freq = cur_freq; if (cur_freq == low_freq && !is_pmu_based) - do_set_cpu_speed(CPUFREQ_HIGH, 0); + do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); return 0; } @@ -458,7 +458,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) * is that we force a switch to whatever it was, which is * probably high speed due to our suspend() routine */ - do_set_cpu_speed(sleep_freq == low_freq ? + do_set_cpu_speed(policy, sleep_freq == low_freq ? CPUFREQ_LOW : CPUFREQ_HIGH, 0); ppc_proc_freq = cur_freq * 1000ul; diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 9650c60..7ba4234 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c @@ -339,11 +339,10 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; freqs.new = g5_cpu_freqs[newstate].frequency; - freqs.cpu = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); rc = g5_switch_freq(newstate); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&g5_switch_mutex); diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c index e68b45b..2c7bd94 100644 --- a/arch/sh/kernel/cpufreq.c +++ b/arch/sh/kernel/cpufreq.c @@ -69,15 +69,14 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); - freqs.cpu = cpu; freqs.old = sh_cpufreq_get(cpu); freqs.new = (freq + 500) / 1000; freqs.flags = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); clk_set_rate(cpuclk, freq); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); dev_dbg(dev, "set frequency %lu Hz\n", freq); diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c index 489fc15..abe963d 100644 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ b/arch/sparc/kernel/us2e_cpufreq.c @@ -248,8 +248,10 @@ static unsigned int us2e_freq_get(unsigned int cpu) return clock_tick / estar_to_divisor(estar); } -static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) +static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, + unsigned int index) { + unsigned int cpu = policy->cpu; unsigned long new_bits, new_freq; 
unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; @@ -272,14 +274,13 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) freqs.old = clock_tick / old_divisor; freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (old_divisor != divisor) us2e_transition(estar, new_bits, clock_tick * 1000, old_divisor, divisor); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); } @@ -295,7 +296,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, target_freq, relation, &new_index)) return -EINVAL; - us2e_set_cpu_divider_index(policy->cpu, new_index); + us2e_set_cpu_divider_index(policy, new_index); return 0; } @@ -335,7 +336,7 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us2e_driver) - us2e_set_cpu_divider_index(policy->cpu, 0); + us2e_set_cpu_divider_index(policy, 0); return 0; } diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c index eb1624b..7ceb9c8 100644 --- a/arch/sparc/kernel/us3_cpufreq.c +++ b/arch/sparc/kernel/us3_cpufreq.c @@ -96,8 +96,10 @@ static unsigned int us3_freq_get(unsigned int cpu) return ret; } -static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) +static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, + unsigned int index) { + unsigned int cpu = policy->cpu; unsigned long new_bits, new_freq, reg; cpumask_t cpus_allowed; struct cpufreq_freqs freqs; @@ -131,14 +133,13 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) freqs.old = get_current_freq(cpu, reg); freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); reg &= ~SAFARI_CFG_DIV_MASK; reg |= new_bits; write_safari_cfg(reg); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); set_cpus_allowed_ptr(current, &cpus_allowed); } @@ -156,7 +157,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, &new_index)) return -EINVAL; - us3_set_cpu_divider_index(policy->cpu, new_index); + us3_set_cpu_divider_index(policy, new_index); return 0; } @@ -192,7 +193,7 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) static int us3_freq_cpu_exit(struct cpufreq_policy *policy) { if (cpufreq_us3_driver) - us3_set_cpu_divider_index(policy->cpu, 0); + us3_set_cpu_divider_index(policy, 0); return 0; } diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c index 4a99f62..ba5a71c 100644 --- a/arch/unicore32/kernel/cpu-ucv2.c +++ b/arch/unicore32/kernel/cpu-ucv2.c @@ -52,15 +52,14 @@ static int ucv2_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; struct clk *mclk = clk_get(NULL, "MAIN_CLK"); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (!clk_set_rate(mclk, target_freq * 1000)) { freqs.old = cur; freqs.new = target_freq; - freqs.cpu = 0; } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 
57a8774..11b8b4b 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -423,7 +423,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, struct drv_cmd cmd; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ - unsigned int i; int result = 0; pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); @@ -486,10 +485,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); drv_write(&cmd); @@ -502,10 +498,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); perf->state = next_perf_state; out: diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index a7e51bd..6561853 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -46,7 +46,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, struct opp *opp; unsigned long volt = 0, volt_old = 0, tol = 0; long freq_Hz; - unsigned int index, cpu; + unsigned int index; int ret; ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, @@ -66,10 +66,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - for_each_online_cpu(cpu) { - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (cpu_reg) { rcu_read_lock(); @@ -121,10 +118,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, } post_notify: - for_each_online_cpu(cpu) { - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 13d311e..224a478 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -263,7 +263,6 @@ static int nforce2_target(struct cpufreq_policy *policy, freqs.old = nforce2_get(policy->cpu); freqs.new = target_fsb * fid * 100; - freqs.cpu = 0; /* Only one CPU on nForce2 platforms */ if (freqs.old == freqs.new) return 0; @@ -271,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy, pr_debug("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* Disable IRQs */ /* local_irq_save(flags); */ @@ -286,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy, /* Enable IRQs */ /* local_irq_restore(flags); */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 85963fc..0198cd0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -249,19 +249,9 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) #endif -/** - * cpufreq_notify_transition - call notifier chain and adjust_jiffies - * on frequency 
transition. - * - * This function calls the transition notifiers and the "adjust_jiffies" - * function. It is called twice on all CPU frequency changes that have - * external effects. - */ -void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) +void __cpufreq_notify_transition(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, unsigned int state) { - struct cpufreq_policy *policy; - unsigned long flags; - BUG_ON(irqs_disabled()); if (cpufreq_disabled()) @@ -271,10 +261,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); - read_lock_irqsave(&cpufreq_driver_lock, flags); - policy = per_cpu(cpufreq_cpu_data, freqs->cpu); - read_unlock_irqrestore(&cpufreq_driver_lock, flags); - switch (state) { case CPUFREQ_PRECHANGE: @@ -308,6 +294,20 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) break; } } +/** + * cpufreq_notify_transition - call notifier chain and adjust_jiffies + * on frequency transition. + * + * This function calls the transition notifiers and the "adjust_jiffies" + * function. It is called twice on all CPU frequency changes that have + * external effects. + */ +void cpufreq_notify_transition(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, unsigned int state) +{ + for_each_cpu(freqs->cpu, policy->cpus) + __cpufreq_notify_transition(policy, freqs, state); +} EXPORT_SYMBOL_GPL(cpufreq_notify_transition); @@ -1141,16 +1141,23 @@ static void handle_update(struct work_struct *work) static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) { + struct cpufreq_policy *policy; struct cpufreq_freqs freqs; + unsigned long flags; + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); - freqs.cpu = cpu; freqs.old = old_freq; freqs.new = new_freq; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + read_lock_irqsave(&cpufreq_driver_lock, flags); + policy = per_cpu(cpufreq_cpu_data, cpu); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); } diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 72f0c3e..7192a6d 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -55,8 +55,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy, return 0; /* pre-change notification */ - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* update armss clk frequency */ ret = clk_set_rate(armss_clk, freqs.new * 1000); @@ -68,8 +67,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy, } /* post change notification */ - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 3fffbe6..37380fb 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -104,7 +104,7 @@ static unsigned int eps_get(unsigned int cpu) } static int eps_set_state(struct eps_cpu_data *centaur, - unsigned int cpu, + struct 
cpufreq_policy *policy, u32 dest_state) { struct cpufreq_freqs freqs; @@ -112,10 +112,9 @@ static int eps_set_state(struct eps_cpu_data *centaur, int err = 0; int i; - freqs.old = eps_get(cpu); + freqs.old = eps_get(policy->cpu); freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* Wait while CPU is busy */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); @@ -162,7 +161,7 @@ postchange: current_multiplier); } #endif - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return err; } @@ -190,7 +189,7 @@ static int eps_target(struct cpufreq_policy *policy, /* Make frequency transition */ dest_state = centaur->freq_table[newstate].index & 0xffff; - ret = eps_set_state(centaur, cpu, dest_state); + ret = eps_set_state(centaur, policy, dest_state); if (ret) printk(KERN_ERR "eps: Timeout!\n"); return ret; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 960671f..658d860 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -117,15 +117,15 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) * There is no return value. */ -static void elanfreq_set_cpu_state(unsigned int state) +static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) { struct cpufreq_freqs freqs; freqs.old = elanfreq_get_cpu_frequency(0); freqs.new = elan_multiplier[state].clock; - freqs.cpu = 0; /* elanfreq.c is UP only driver */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", elan_multiplier[state].clock); @@ -161,7 +161,7 @@ static void elanfreq_set_cpu_state(unsigned int state) udelay(10000); local_irq_enable(); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; @@ -188,7 +188,7 @@ static int elanfreq_target(struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - elanfreq_set_cpu_state(newstate); + elanfreq_set_cpu_state(policy, newstate); return 0; } diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 78057a3..c0c4ce5 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -70,7 +70,6 @@ static int exynos_cpufreq_scale(unsigned int target_freq) freqs.old = policy->cur; freqs.new = target_freq; - freqs.cpu = policy->cpu; if (freqs.new == freqs.old) goto out; @@ -105,8 +104,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq) } arm_volt = volt_table[index]; - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* When the new frequency is higher than current frequency */ if ((freqs.new > freqs.old) && !safe_arm_volt) { @@ -131,8 +129,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq) exynos_info->set_freq(old_index, index); - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); /* When the new frequency is lower than current frequency */ if ((freqs.new < freqs.old) || diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 456bee0..3dfc99b 100644 --- a/drivers/cpufreq/gx-suspmod.c 
+++ b/drivers/cpufreq/gx-suspmod.c @@ -251,14 +251,13 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, * set cpu speed in khz. **/ -static void gx_set_cpuspeed(unsigned int khz) +static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) { u8 suscfg, pmer1; unsigned int new_khz; unsigned long flags; struct cpufreq_freqs freqs; - freqs.cpu = 0; freqs.old = gx_get_cpuspeed(0); new_khz = gx_validate_speed(khz, &gx_params->on_duration, @@ -266,11 +265,9 @@ static void gx_set_cpuspeed(unsigned int khz) freqs.new = new_khz; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); - - if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ switch (gx_params->cs55x0->device) { @@ -317,7 +314,7 @@ static void gx_set_cpuspeed(unsigned int khz) gx_params->pci_suscfg = suscfg; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); @@ -397,7 +394,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy, tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); } - gx_set_cpuspeed(tmp_freq); + gx_set_cpuspeed(policy, tmp_freq); return 0; } diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 54e336d..b78bc35 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -50,7 +50,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; struct opp *opp; unsigned long freq_hz, volt, volt_old; - unsigned int index, cpu; + unsigned int index; int ret; ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, @@ -68,10 +68,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - for_each_online_cpu(cpu) { - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); rcu_read_lock(); opp = opp_find_freq_ceil(cpu_dev, &freq_hz); @@ -166,10 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, } } - for_each_online_cpu(cpu) { - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index 6052476..d36ea8d 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -55,7 +55,8 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) return kirkwood_freq_table[0].frequency; } -static void kirkwood_cpufreq_set_cpu_state(unsigned int index) +static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int index) { struct cpufreq_freqs freqs; unsigned int state = kirkwood_freq_table[index].index; @@ -63,9 +64,8 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index) freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); freqs.new = kirkwood_freq_table[index].frequency; - freqs.cpu = 0; /* Kirkwood is UP */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", kirkwood_freq_table[index].frequency); @@ -99,7 +99,7 @@ static void 
kirkwood_cpufreq_set_cpu_state(unsigned int index) local_irq_enable(); } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) @@ -117,7 +117,7 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, target_freq, relation, &index)) return -EINVAL; - kirkwood_cpufreq_set_cpu_state(index); + kirkwood_cpufreq_set_cpu_state(policy, index); return 0; } diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 1180d53..b448638 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -242,7 +242,8 @@ static void do_powersaver(int cx_address, unsigned int mults_index, * Sets a new clock ratio. */ -static void longhaul_setstate(unsigned int table_index) +static void longhaul_setstate(struct cpufreq_policy *policy, + unsigned int table_index) { unsigned int mults_index; int speed, mult; @@ -267,9 +268,8 @@ static void longhaul_setstate(unsigned int table_index) freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; - freqs.cpu = 0; /* longhaul.c is UP only driver */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); @@ -386,7 +386,7 @@ retry_loop: } } /* Report true CPU frequency */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) printk(KERN_INFO PFX "Warning: Timeout while waiting for " @@ -648,7 +648,7 @@ static int longhaul_target(struct cpufreq_policy *policy, return 0; if (!can_scale_voltage) - longhaul_setstate(table_index); + longhaul_setstate(policy, table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. 
Both @@ -663,7 +663,7 @@ static int longhaul_target(struct cpufreq_policy *policy, while (i != table_index) { vid = (longhaul_table[i].index >> 8) & 0x1f; if (vid != current_vid) { - longhaul_setstate(i); + longhaul_setstate(policy, i); current_vid = vid; msleep(200); } @@ -672,7 +672,7 @@ static int longhaul_target(struct cpufreq_policy *policy, else i--; } - longhaul_setstate(table_index); + longhaul_setstate(policy, table_index); } longhaul_index = table_index; return 0; @@ -998,15 +998,17 @@ static int __init longhaul_init(void) static void __exit longhaul_exit(void) { + struct cpufreq_policy *policy = cpufreq_cpu_get(0); int i; for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { - longhaul_setstate(i); + longhaul_setstate(policy, i); break; } } + cpufreq_cpu_put(policy); cpufreq_unregister_driver(&longhaul_driver); kfree(longhaul_table); } diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index d4c4989..cdd6291 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c @@ -158,11 +158,10 @@ static int maple_cpufreq_target(struct cpufreq_policy *policy, freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; freqs.new = maple_cpu_freqs[newstate].frequency; - freqs.cpu = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); rc = maple_scom_switch_freq(newstate); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&maple_switch_mutex); diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 9128c07..b610edd 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -88,16 +88,12 @@ static int omap_target(struct cpufreq_policy *policy, } freqs.old = omap_getspeed(policy->cpu); - freqs.cpu = policy->cpu; if (freqs.old == freqs.new && policy->cur == freqs.new) return ret; /* notifiers */ - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); freq = freqs.new * 1000; ret = clk_round_rate(mpu_clk, freq); @@ -157,10 +153,7 @@ static int omap_target(struct cpufreq_policy *policy, done: /* notifiers */ - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 827629c9..4b2e773 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -125,10 +125,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* run on each logical CPU, * see section 13.15.3 of IA32 Intel Architecture Software @@ -138,10 +135,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); /* notifiers */ - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 503996a..0de0008 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ 
b/drivers/cpufreq/pcc-cpufreq.c @@ -215,8 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, (pcch_virt_addr + pcc_cpu_data->input_offset)); freqs.new = target_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); @@ -237,7 +236,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, } iowrite16(0, &pcch_hdr->status); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); spin_unlock(&pcc_lock); diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index af23e0b..ea0222a 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -68,7 +68,8 @@ static int powernow_k6_get_cpu_multiplier(void) * * Tries to change the PowerNow! multiplier */ -static void powernow_k6_set_state(unsigned int best_i) +static void powernow_k6_set_state(struct cpufreq_policy *policy, + unsigned int best_i) { unsigned long outvalue = 0, invalue = 0; unsigned long msrval; @@ -81,9 +82,8 @@ static void powernow_k6_set_state(unsigned int best_i) freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); freqs.new = busfreq * clock_ratio[best_i].index; - freqs.cpu = 0; /* powernow-k6.c is UP only driver */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* we now need to transform best_i to the BVC format, see AMD#23446 */ @@ -98,7 +98,7 @@ static void powernow_k6_set_state(unsigned int best_i) msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return; } @@ -136,7 +136,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - powernow_k6_set_state(newstate); + powernow_k6_set_state(policy, newstate); return 0; } @@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) unsigned int i; for (i = 0; i < 8; i++) { if (i == max_multiplier) - powernow_k6_set_state(i); + powernow_k6_set_state(policy, i); } cpufreq_frequency_table_put_attr(policy->cpu); return 0; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 334cc2f..53888da 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -248,7 +248,7 @@ static void change_VID(int vid) } -static void change_speed(unsigned int index) +static void change_speed(struct cpufreq_policy *policy, unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; @@ -263,15 +263,13 @@ static void change_speed(unsigned int index) fid = powernow_table[index].index & 0xFF; vid = (powernow_table[index].index & 0xFF00) >> 8; - freqs.cpu = 0; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; freqs.new = powernow_table[index].frequency; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* Now do the magic poking into the MSRs. 
*/ @@ -292,7 +290,7 @@ static void change_speed(unsigned int index) if (have_a0 == 1) local_irq_enable(); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); } @@ -546,7 +544,7 @@ static int powernow_target(struct cpufreq_policy *policy, relation, &newstate)) return -EINVAL; - change_speed(newstate); + change_speed(policy, newstate); return 0; } diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index d13a136..52137a3 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -928,9 +928,10 @@ static int get_transition_latency(struct powernow_k8_data *data) static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) { + struct cpufreq_policy *policy; u32 fid = 0; u32 vid = 0; - int res, i; + int res; struct cpufreq_freqs freqs; pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); @@ -959,10 +960,10 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); - for_each_cpu(i, data->available_cores) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + policy = cpufreq_cpu_get(smp_processor_id()); + cpufreq_cpu_put(policy); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); res = transition_fid_vid(data, fid, vid); if (res) @@ -970,10 +971,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.new = find_khz_freq_from_fid(data->currfid); - for_each_cpu(i, data->available_cores) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return res; } diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index bcc053b..4f1881e 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -256,7 +256,6 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, goto out; } - freqs.cpu = 0; freqs.flags = 0; freqs.old = s3c_freq->is_dvs ? 
FREQ_DVS : clk_get_rate(s3c_freq->armclk) / 1000; @@ -274,7 +273,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, if (!to_dvs && freqs.old == freqs.new) goto out; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (to_dvs) { pr_debug("cpufreq: enter dvs\n"); @@ -287,7 +286,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); out: mutex_unlock(&cpufreq_lock); diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 6f9490b..27cacb5 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -84,7 +84,6 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, if (ret != 0) return ret; - freqs.cpu = 0; freqs.old = clk_get_rate(armclk) / 1000; freqs.new = s3c64xx_freq_table[i].frequency; freqs.flags = 0; @@ -95,7 +94,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); #ifdef CONFIG_REGULATOR if (vddarm && freqs.new > freqs.old) { @@ -117,7 +116,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, goto err; } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); #ifdef CONFIG_REGULATOR if (vddarm && freqs.new < freqs.old) { @@ -141,7 +140,7 @@ err_clk: if (clk_set_rate(armclk, freqs.old * 1000) < 0) pr_err("Failed to restore original clock rate\n"); err: - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index a484aae..5c77570 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -229,7 +229,6 @@ static int s5pv210_target(struct cpufreq_policy *policy, } freqs.new = s5pv210_freq_table[index].frequency; - freqs.cpu = 0; if (freqs.new == freqs.old) goto exit; @@ -256,7 +255,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, goto exit; } - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* Check if there need to change PLL */ if ((index == L0) || (priv_index == L0)) @@ -468,7 +467,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, } } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); if (freqs.new < freqs.old) { regulator_set_voltage(int_regulator, diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index e42e073..f740b13 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -53,7 +53,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) } } -static void sc520_freq_set_cpu_state(unsigned int state) +static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) { struct cpufreq_freqs freqs; @@ -61,9 +62,8 @@ static void sc520_freq_set_cpu_state(unsigned int state) freqs.old = sc520_freq_get_cpu_frequency(0); freqs.new = sc520_freq_table[state].frequency; - freqs.cpu = 0; /* AMD Elan is UP */ - 
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("attempting to set frequency to %i kHz\n", sc520_freq_table[state].frequency); @@ -75,7 +75,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) local_irq_enable(); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); }; static int sc520_freq_verify(struct cpufreq_policy *policy) @@ -93,7 +93,7 @@ static int sc520_freq_target(struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - sc520_freq_set_cpu_state(newstate); + sc520_freq_set_cpu_state(policy, newstate); return 0; } diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 7e4d773..156829f 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -121,7 +121,6 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, target_freq, relation, &index)) return -EINVAL; - freqs.cpu = policy->cpu; freqs.old = spear_cpufreq_get(0); newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; @@ -158,8 +157,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, freqs.new = newfreq / 1000; freqs.new /= mult; - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (mult == 2) ret = spear1340_set_cpu_rate(srcclk, newfreq); @@ -172,8 +170,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; } - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 3a953d5..3dbbcc3 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -457,7 +457,7 @@ static int centrino_target (struct cpufreq_policy *policy, unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; struct cpufreq_freqs freqs; int retval = 0; - unsigned int j, k, first_cpu, tmp; + unsigned int j, first_cpu, tmp; cpumask_var_t covered_cpus; if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) @@ -522,13 +522,8 @@ static int centrino_target (struct cpufreq_policy *policy, pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu(k, policy->cpus) { - if (!cpu_online(k)) - continue; - freqs.cpu = k; - cpufreq_notify_transition(&freqs, + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - } first_cpu = 0; /* all but 16 LSB are reserved, treat them with care */ @@ -544,12 +539,7 @@ static int centrino_target (struct cpufreq_policy *policy, cpumask_set_cpu(j, covered_cpus); } - for_each_cpu(k, policy->cpus) { - if (!cpu_online(k)) - continue; - freqs.cpu = k; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); if (unlikely(retval)) { /* @@ -565,12 +555,8 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu(j, policy->cpus) { - if (!cpu_online(j)) - continue; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + 
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); } retval = 0; diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index e29b59a..e2e5aa9 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -263,7 +263,6 @@ static int speedstep_target(struct cpufreq_policy *policy, { unsigned int newstate = 0, policy_cpu; struct cpufreq_freqs freqs; - int i; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) @@ -272,7 +271,6 @@ static int speedstep_target(struct cpufreq_policy *policy, policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); freqs.old = speedstep_get(policy_cpu); freqs.new = speedstep_freqs[newstate].frequency; - freqs.cpu = policy->cpu; pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); @@ -280,18 +278,12 @@ static int speedstep_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, true); - for_each_cpu(i, policy->cpus) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 6a457fc..f5a6b70 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -252,14 +252,13 @@ static int speedstep_target(struct cpufreq_policy *policy, freqs.old = speedstep_freqs[speedstep_get_state()].frequency; freqs.new = speedstep_freqs[newstate].frequency; - freqs.cpu = 0; /* speedstep.c is UP only driver */ if (freqs.old == freqs.new) return 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); speedstep_set_state(newstate); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 4bbc572..037d36a 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -278,8 +278,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); - +void cpufreq_notify_transition(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, unsigned int state); static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) { -- cgit v0.10.2 From e9f51837c97d199dfa06ef448797c584755837a8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:46 +0000 Subject: cpufreq: Don't check if cpu is online/offline for cpufreq callbacks cpufreq layer doesn't call cpufreq driver's callback for any offline CPU and so checking that isn't useful. Lets get rid of it. Signed-off-by: Viresh Kumar Acked-by: David S. Miller Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index bafda70..8488957 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -61,9 +61,6 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; unsigned int freq; - if (!cpu_online(cpu)) - return -ENODEV; - cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -109,9 +106,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) unsigned long rate; int ret; - if (!cpu_online(policy->cpu)) - return -ENODEV; - cpuclk = clk_get(NULL, "cpu_clk"); if (IS_ERR(cpuclk)) { printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c index 2c7bd94..0fdf64b 100644 --- a/arch/sh/kernel/cpufreq.c +++ b/arch/sh/kernel/cpufreq.c @@ -51,9 +51,6 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, struct device *dev; long freq; - if (!cpu_online(cpu)) - return -ENODEV; - cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -111,9 +108,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *freq_table; struct device *dev; - if (!cpu_online(cpu)) - return -ENODEV; - dev = get_cpu_device(cpu); cpuclk = clk_get(dev, "cpu_clk"); diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c index abe963d..306ae46 100644 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ b/arch/sparc/kernel/us2e_cpufreq.c @@ -234,9 +234,6 @@ static unsigned int us2e_freq_get(unsigned int cpu) cpumask_t cpus_allowed; unsigned long clock_tick, estar; - if (!cpu_online(cpu)) - return 0; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -257,9 +254,6 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, cpumask_t cpus_allowed; struct cpufreq_freqs freqs; - if (!cpu_online(cpu)) - return; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c index 7ceb9c8..c71ee14 100644 --- a/arch/sparc/kernel/us3_cpufreq.c +++ b/arch/sparc/kernel/us3_cpufreq.c @@ -82,9 +82,6 @@ static unsigned int us3_freq_get(unsigned int cpu) unsigned long reg; unsigned int ret; - if (!cpu_online(cpu)) - return 0; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -104,9 +101,6 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, cpumask_t cpus_allowed; struct cpufreq_freqs freqs; - if (!cpu_online(cpu)) - return; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(cpu)); diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 4b2e773..421ef37 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -58,8 +58,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) { u32 l, h; - if (!cpu_online(cpu) || - (newstate > DC_DISABLE) || (newstate == DC_RESV)) + if ((newstate > DC_DISABLE) || (newstate == DC_RESV)) return -EINVAL; rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 52137a3..b828efe 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1102,9 
+1102,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) struct init_on_cpu init_on_cpu; int rc; - if (!cpu_online(pol->cpu)) - return -ENODEV; - smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); if (rc) return -ENODEV; diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 3dbbcc3..618e6f4 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -481,10 +481,6 @@ static int centrino_target (struct cpufreq_policy *policy, for_each_cpu(j, policy->cpus) { int good_cpu; - /* cpufreq holds the hotplug lock, so we are safe here */ - if (!cpu_online(j)) - continue; - /* * Support for SMP systems. * Make sure we are running on CPU that wants to change freq -- cgit v0.10.2 From 8a00627a187dca212c43a511852f06dd2c94b9aa Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:47 +0000 Subject: cpufreq: drivers: don't check range of target freq in .target() Cpufreq core checks the range of target_freq before calling driver->target() and so we don't need to do it again. Remove it. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 55eb870..8fb0c2a 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c @@ -79,15 +79,6 @@ static int davinci_target(struct cpufreq_policy *policy, struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; struct clk *armclk = cpufreq.armclk; - /* - * Ensure desired rate is within allowed range. Some govenors - * (ondemand) will just pass target_freq=0 to get the minimum. - */ - if (target_freq < policy->cpuinfo.min_freq) - target_freq = policy->cpuinfo.min_freq; - if (target_freq > policy->cpuinfo.max_freq) - target_freq = policy->cpuinfo.max_freq; - freqs.old = davinci_getspeed(0); freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 7192a6d..15ed367 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -37,12 +37,6 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy, unsigned int idx; int ret; - /* scale the target frequency to one of the extremes supported */ - if (target_freq < policy->cpuinfo.min_freq) - target_freq = policy->cpuinfo.min_freq; - if (target_freq > policy->cpuinfo.max_freq) - target_freq = policy->cpuinfo.max_freq; - /* Lookup the next frequency */ if (cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &idx)) -- cgit v0.10.2 From eb2f50ff93f08c8001d0d7e1148948ba80989aaf Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:48 +0000 Subject: cpufreq: drivers: Remove unnecessary assignments of policy-> members Some assignments of policy-> min/max/cur/cpuinfo.min_freq/cpuinfo.max_freq aren't required as part of it is done by cpufreq driver or cpufreq core. Remove them. At some places we merge multiple lines together too. Signed-off-by: Viresh Kumar Acked-by: Sekhar Nori Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt index 7c7721d..a3585ea 100644 --- a/Documentation/cpu-freq/cpu-drivers.txt +++ b/Documentation/cpu-freq/cpu-drivers.txt @@ -108,8 +108,9 @@ policy->governor must contain the "default policy" for cpufreq_driver.target is called with these values. 
-For setting some of these values, the frequency table helpers might be -helpful. See the section 2 for more information on them. +For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the +frequency table helpers might be helpful. See the section 2 for more information +on them. SMP systems normally have same clock source for a group of cpus. For these the .init() would be called only once for the first online cpu. Here the .init() diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 8fb0c2a..7c2e943 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c @@ -137,21 +137,16 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) return result; } - policy->cur = policy->min = policy->max = davinci_getspeed(0); - - if (freq_table) { - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, - policy->cpu); - } else { - policy->cpuinfo.min_freq = policy->min; - policy->cpuinfo.max_freq = policy->max; + policy->cur = davinci_getspeed(0); + + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (result) { + pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", + __func__); + return result; } - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cur = davinci_getspeed(0); + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); /* * Time measurement across the target() function yields ~1500-1800us diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c index cfce5e3..387dc4c 100644 --- a/arch/arm/mach-imx/cpufreq.c +++ b/arch/arm/mach-imx/cpufreq.c @@ -144,14 +144,11 @@ static int mxc_cpufreq_init(struct cpufreq_policy *policy) imx_freq_table[i].frequency = CPUFREQ_TABLE_END; policy->cur = clk_get_rate(cpu_clk) / 1000; - policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min; - policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max; /* Manual states, that PLL stabilizes in two CLK32 periods */ policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ; ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table); - if (ret < 0) { printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n", __func__, ret); diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c index 0fdf64b..88c8fee 100644 --- a/arch/sh/kernel/cpufreq.c +++ b/arch/sh/kernel/cpufreq.c @@ -116,7 +116,7 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) return PTR_ERR(cpuclk); } - policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu); + policy->cur = sh_cpufreq_get(cpu); freq_table = cpuclk->nr_freqs ? 
cpuclk->freq_table : NULL; if (freq_table) { @@ -129,15 +129,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) dev_notice(dev, "no frequency table found, falling back " "to rate rounding.\n"); - policy->cpuinfo.min_freq = + policy->min = policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = + policy->max = policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; } - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 224a478..af1542d 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -359,12 +359,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) min_fsb = NFORCE2_MIN_FSB; /* cpuinfo and default policy values */ - policy->cpuinfo.min_freq = min_fsb * fid * 100; - policy->cpuinfo.max_freq = max_fsb * fid * 100; + policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; + policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->cur = nforce2_get(policy->cpu); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; return 0; } diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index b610edd..ad7549c 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -177,7 +177,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) goto fail_ck; } - policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); + policy->cur = omap_getspeed(policy->cpu); if (!freq_table) result = opp_init_cpufreq_table(mpu_dev, &freq_table); @@ -196,8 +196,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; policy->cur = omap_getspeed(policy->cpu); /* -- cgit v0.10.2 From 8a67f0ef2b684b34162d39e8f436fe39e9635a19 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Apr 2013 12:57:49 +0000 Subject: cpufreq: ARM big LITTLE: Add generic cpufreq driver and its DT glue big LITTLE is ARM's new Architecture focussing power/performance needs of modern world. More information about big LITTLE can be found here: http://www.arm.com/products/processors/technologies/biglittleprocessing.php http://lwn.net/Articles/481055/ In order to keep cpufreq support for all big LITTLE platforms simple/generic, this patch tries to add a generic cpufreq driver layer for all big LITTLE platforms. The driver is divided into two parts: - Core driver: Generic and shared across all big LITTLE SoC's - Glue drivers: Per platform drivers providing ops to the core driver This patch adds in a generic glue driver which would extract information from Device Tree. Future SoC's can either reuse the DT glue or write their own depending on the need. Signed-off-by: Sudeep KarkadaNagesha Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt new file mode 100644 index 0000000..0715695 --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt @@ -0,0 +1,65 @@ +Generic ARM big LITTLE cpufreq driver's DT glue +----------------------------------------------- + +This is DT specific glue layer for generic cpufreq driver for big LITTLE +systems. + +Both required and optional properties listed below must be defined +under node /cpus/cpu@x. Where x is the first cpu inside a cluster. + +FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster +must be present contiguously. Generic DT driver will check only node 'x' for +cpu:x. + +Required properties: +- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt + for details + +Optional properties: +- clock-latency: Specify the possible maximum transition latency for clock, + in unit of nanoseconds. + +Examples: + +cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "arm,cortex-a15"; + reg = <0>; + next-level-cache = <&L2>; + operating-points = < + /* kHz uV */ + 792000 1100000 + 396000 950000 + 198000 850000 + >; + clock-latency = <61036>; /* two CLK32 periods */ + }; + + cpu@1 { + compatible = "arm,cortex-a15"; + reg = <1>; + next-level-cache = <&L2>; + }; + + cpu@100 { + compatible = "arm,cortex-a7"; + reg = <100>; + next-level-cache = <&L2>; + operating-points = < + /* kHz uV */ + 792000 950000 + 396000 750000 + 198000 450000 + >; + clock-latency = <61036>; /* two CLK32 periods */ + }; + + cpu@101 { + compatible = "arm,cortex-a7"; + reg = <101>; + next-level-cache = <&L2>; + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index 74e58a4..be73983 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2206,6 +2206,17 @@ S: Maintained F: drivers/cpufreq/ F: include/linux/cpufreq.h +CPU FREQUENCY DRIVERS - ARM BIG LITTLE +M: Viresh Kumar +M: Sudeep KarkadaNagesha +L: cpufreq@vger.kernel.org +L: linux-pm@vger.kernel.org +W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php +S: Maintained +F: drivers/cpufreq/arm_big_little.h +F: drivers/cpufreq/arm_big_little.c +F: drivers/cpufreq/arm_big_little_dt.c + CPUID/MSR DRIVER M: "H. Peter Anvin" S: Maintained diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 030ddf6..87b7e48 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -2,6 +2,18 @@ # ARM CPU Frequency scaling drivers # +config ARM_BIG_LITTLE_CPUFREQ + tristate + depends on ARM_CPU_TOPOLOGY + +config ARM_DT_BL_CPUFREQ + tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" + select ARM_BIG_LITTLE_CPUFREQ + depends on OF && HAVE_CLK + help + This enables the Generic CPUfreq driver for ARM big.LITTLE platform. + This gets frequency tables from DT. + config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 863fd18..ba9a3e1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -44,6 +44,11 @@ obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o ################################################################################## # ARM SoC drivers +obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o +# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big +# LITTLE drivers, so that it is probed last. 
+obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o + obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c new file mode 100644 index 0000000..1d29d1a --- /dev/null +++ b/drivers/cpufreq/arm_big_little.c @@ -0,0 +1,282 @@ +/* + * ARM big.LITTLE Platforms CPUFreq support + * + * Copyright (C) 2013 ARM Ltd. + * Sudeep KarkadaNagesha + * + * Copyright (C) 2013 Linaro. + * Viresh Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arm_big_little.h" + +/* Currently we support only two clusters */ +#define MAX_CLUSTERS 2 + +static struct cpufreq_arm_bL_ops *arm_bL_ops; +static struct clk *clk[MAX_CLUSTERS]; +static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; +static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; + +static int cpu_to_cluster(int cpu) +{ + return topology_physical_package_id(cpu); +} + +static unsigned int bL_cpufreq_get(unsigned int cpu) +{ + u32 cur_cluster = cpu_to_cluster(cpu); + + return clk_get_rate(clk[cur_cluster]) / 1000; +} + +/* Validate policy frequency range */ +static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy) +{ + u32 cur_cluster = cpu_to_cluster(policy->cpu); + + return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); +} + +/* Set clock frequency */ +static int bL_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + struct cpufreq_freqs freqs; + u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; + int ret = 0; + + cur_cluster = cpu_to_cluster(policy->cpu); + + freqs.old = bL_cpufreq_get(policy->cpu); + + /* Determine valid target frequency using freq_table */ + cpufreq_frequency_table_target(policy, freq_table[cur_cluster], + target_freq, relation, &freq_tab_idx); + freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; + + freqs.cpu = policy->cpu; + + pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", + __func__, cpu, cur_cluster, freqs.old, target_freq, + freqs.new); + + if (freqs.old == freqs.new) + return 0; + + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); + if (ret) { + pr_err("clk_set_rate failed: %d\n", ret); + return ret; + } + + policy->cur = freqs.new; + + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return ret; +} + +static void put_cluster_clk_and_freq_table(struct device *cpu_dev) +{ + u32 cluster = cpu_to_cluster(cpu_dev->id); + + if (!atomic_dec_return(&cluster_usage[cluster])) { + clk_put(clk[cluster]); + opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); + } +} + +static int get_cluster_clk_and_freq_table(struct 
device *cpu_dev) +{ + u32 cluster = cpu_to_cluster(cpu_dev->id); + char name[14] = "cpu-cluster."; + int ret; + + if (atomic_inc_return(&cluster_usage[cluster]) != 1) + return 0; + + ret = arm_bL_ops->init_opp_table(cpu_dev); + if (ret) { + dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", + __func__, cpu_dev->id, ret); + goto atomic_dec; + } + + ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); + if (ret) { + dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", + __func__, cpu_dev->id, ret); + goto atomic_dec; + } + + name[12] = cluster + '0'; + clk[cluster] = clk_get_sys(name, NULL); + if (!IS_ERR(clk[cluster])) { + dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", + __func__, clk[cluster], freq_table[cluster], + cluster); + return 0; + } + + dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", + __func__, cpu_dev->id, cluster); + ret = PTR_ERR(clk[cluster]); + opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); + +atomic_dec: + atomic_dec(&cluster_usage[cluster]); + dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, + cluster); + return ret; +} + +/* Per-CPU initialization */ +static int bL_cpufreq_init(struct cpufreq_policy *policy) +{ + u32 cur_cluster = cpu_to_cluster(policy->cpu); + struct device *cpu_dev; + int ret; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + policy->cpu); + return -ENODEV; + } + + ret = get_cluster_clk_and_freq_table(cpu_dev); + if (ret) + return ret; + + ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]); + if (ret) { + dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", + policy->cpu, cur_cluster); + put_cluster_clk_and_freq_table(cpu_dev); + return ret; + } + + cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu); + + if (arm_bL_ops->get_transition_latency) + policy->cpuinfo.transition_latency = + arm_bL_ops->get_transition_latency(cpu_dev); + else + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + policy->cur = bL_cpufreq_get(policy->cpu); + + cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + + dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); + return 0; +} + +static int bL_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct device *cpu_dev; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + policy->cpu); + return -ENODEV; + } + + put_cluster_clk_and_freq_table(cpu_dev); + dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); + + return 0; +} + +/* Export freq_table to sysfs */ +static struct freq_attr *bL_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver bL_cpufreq_driver = { + .name = "arm-big-little", + .flags = CPUFREQ_STICKY, + .verify = bL_cpufreq_verify_policy, + .target = bL_cpufreq_set_target, + .get = bL_cpufreq_get, + .init = bL_cpufreq_init, + .exit = bL_cpufreq_exit, + .have_multiple_policies = true, + .attr = bL_cpufreq_attr, +}; + +int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) +{ + int ret; + + if (arm_bL_ops) { + pr_debug("%s: Already registered: %s, exiting\n", __func__, + arm_bL_ops->name); + return -EBUSY; + } + + if (!ops || !strlen(ops->name) || !ops->init_opp_table) { + pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); + return -ENODEV; + } + + arm_bL_ops = ops; + + ret = cpufreq_register_driver(&bL_cpufreq_driver); + if (ret) { + 
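+		/*
+		 * Registration failed: arm_bL_ops is cleared below so that a
+		 * later bL_cpufreq_register() attempt is not refused with
+		 * -EBUSY on behalf of a driver that never really registered.
+		 */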
pr_info("%s: Failed registering platform driver: %s, err: %d\n", + __func__, ops->name, ret); + arm_bL_ops = NULL; + } else { + pr_info("%s: Registered platform driver: %s\n", __func__, + ops->name); + } + + return ret; +} +EXPORT_SYMBOL_GPL(bL_cpufreq_register); + +void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) +{ + if (arm_bL_ops != ops) { + pr_err("%s: Registered with: %s, can't unregister, exiting\n", + __func__, arm_bL_ops->name); + return; + } + + cpufreq_unregister_driver(&bL_cpufreq_driver); + pr_info("%s: Un-registered platform driver: %s\n", __func__, + arm_bL_ops->name); + arm_bL_ops = NULL; +} +EXPORT_SYMBOL_GPL(bL_cpufreq_unregister); diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h new file mode 100644 index 0000000..70f18fc --- /dev/null +++ b/drivers/cpufreq/arm_big_little.h @@ -0,0 +1,40 @@ +/* + * ARM big.LITTLE platform's CPUFreq header file + * + * Copyright (C) 2013 ARM Ltd. + * Sudeep KarkadaNagesha + * + * Copyright (C) 2013 Linaro. + * Viresh Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef CPUFREQ_ARM_BIG_LITTLE_H +#define CPUFREQ_ARM_BIG_LITTLE_H + +#include +#include +#include + +struct cpufreq_arm_bL_ops { + char name[CPUFREQ_NAME_LEN]; + int (*get_transition_latency)(struct device *cpu_dev); + + /* + * This must set opp table for cpu_dev in a similar way as done by + * of_init_opp_table(). + */ + int (*init_opp_table)(struct device *cpu_dev); +}; + +int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); +void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); + +#endif /* CPUFREQ_ARM_BIG_LITTLE_H */ diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c new file mode 100644 index 0000000..452ff46 --- /dev/null +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -0,0 +1,92 @@ +/* + * Generic big.LITTLE CPUFreq Interface driver + * + * It provides necessary ops to arm_big_little cpufreq driver and gets + * Frequency information from Device Tree. Freq table in DT must be in KHz. + * + * Copyright (C) 2013 Linaro. + * Viresh Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
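+ *
+ * A cpu node this driver can consume is expected to look roughly like the
+ * following (hypothetical values; frequencies in kHz and voltages in uV as
+ * per the operating-points binding, clock-latency in ns):
+ *
+ *	cpu@0 {
+ *		compatible = "arm,cortex-a15";
+ *		reg = <0>;
+ *		operating-points = <1000000 1000000>, <500000 900000>;
+ *		clock-latency = <100000>;
+ *	};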
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include "arm_big_little.h" + +static int dt_init_opp_table(struct device *cpu_dev) +{ + struct device_node *np = NULL; + int count = 0, ret; + + for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + if (count++ != cpu_dev->id) + continue; + if (!of_get_property(np, "operating-points", NULL)) + return -ENODATA; + + cpu_dev->of_node = np; + + ret = of_init_opp_table(cpu_dev); + if (ret) + return ret; + + return 0; + } + + return -ENODEV; +} + +static int dt_get_transition_latency(struct device *cpu_dev) +{ + struct device_node *np = NULL; + u32 transition_latency = CPUFREQ_ETERNAL; + int count = 0; + + for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + if (count++ != cpu_dev->id) + continue; + + of_property_read_u32(np, "clock-latency", &transition_latency); + return 0; + } + + return -ENODEV; +} + +static struct cpufreq_arm_bL_ops dt_bL_ops = { + .name = "dt-bl", + .get_transition_latency = dt_get_transition_latency, + .init_opp_table = dt_init_opp_table, +}; + +static int generic_bL_init(void) +{ + return bL_cpufreq_register(&dt_bL_ops); +} +module_init(generic_bL_init); + +static void generic_bL_exit(void) +{ + return bL_cpufreq_unregister(&dt_bL_ops); +} +module_exit(generic_bL_exit); + +MODULE_AUTHOR("Viresh Kumar "); +MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From a0ea048a6f72fbe9083b52ba26cd642e23ca026c Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:09 +0000 Subject: cpufreq: ARM: Arrange drivers in alphabetical order Normally we keep drivers in alphabetical inside Kconfig and Makefile and over time this was broken for ARM cpufreq drivers. Fix it. Signed-off-by: Viresh Kumar Acked-by: Stephen Warren Tested-by: Stephen Warren Acked-by: Arnd Bergmann Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 87b7e48..25d866a 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -14,6 +14,64 @@ config ARM_DT_BL_CPUFREQ This enables the Generic CPUfreq driver for ARM big.LITTLE platform. This gets frequency tables from DT. +config ARM_EXYNOS_CPUFREQ + bool "SAMSUNG EXYNOS SoCs" + depends on ARCH_EXYNOS + default y + help + This adds the CPUFreq driver common part for Samsung + EXYNOS SoCs. + + If in doubt, say N. + +config ARM_EXYNOS4210_CPUFREQ + def_bool CPU_EXYNOS4210 + help + This adds the CPUFreq driver for Samsung EXYNOS4210 + SoC (S5PV310 or S5PC210). + +config ARM_EXYNOS4X12_CPUFREQ + def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412) + help + This adds the CPUFreq driver for Samsung EXYNOS4X12 + SoC (EXYNOS4212 or EXYNOS4412). + +config ARM_EXYNOS5250_CPUFREQ + def_bool SOC_EXYNOS5250 + help + This adds the CPUFreq driver for Samsung EXYNOS5250 + SoC. + +config ARM_HIGHBANK_CPUFREQ + tristate "Calxeda Highbank-based" + depends on ARCH_HIGHBANK + select CPU_FREQ_TABLE + select GENERIC_CPUFREQ_CPU0 + select PM_OPP + select REGULATOR + + default m + help + This adds the CPUFreq driver for Calxeda Highbank SoC + based boards. + + If in doubt, say N. + +config ARM_IMX6Q_CPUFREQ + tristate "Freescale i.MX6Q cpufreq support" + depends on SOC_IMX6Q + depends on REGULATOR_ANATOP + help + This adds cpufreq driver support for Freescale i.MX6Q SOC. + + If in doubt, say N. 
+ +config ARM_KIRKWOOD_CPUFREQ + def_bool ARCH_KIRKWOOD && OF + help + This adds the CPUFreq driver for Marvell Kirkwood + SoCs. + config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS @@ -61,67 +119,9 @@ config ARM_S5PV210_CPUFREQ If in doubt, say N. -config ARM_EXYNOS_CPUFREQ - bool "SAMSUNG EXYNOS SoCs" - depends on ARCH_EXYNOS - default y - help - This adds the CPUFreq driver common part for Samsung - EXYNOS SoCs. - - If in doubt, say N. - -config ARM_EXYNOS4210_CPUFREQ - def_bool CPU_EXYNOS4210 - help - This adds the CPUFreq driver for Samsung EXYNOS4210 - SoC (S5PV310 or S5PC210). - -config ARM_EXYNOS4X12_CPUFREQ - def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412) - help - This adds the CPUFreq driver for Samsung EXYNOS4X12 - SoC (EXYNOS4212 or EXYNOS4412). - -config ARM_EXYNOS5250_CPUFREQ - def_bool SOC_EXYNOS5250 - help - This adds the CPUFreq driver for Samsung EXYNOS5250 - SoC. - -config ARM_KIRKWOOD_CPUFREQ - def_bool ARCH_KIRKWOOD && OF - help - This adds the CPUFreq driver for Marvell Kirkwood - SoCs. - -config ARM_IMX6Q_CPUFREQ - tristate "Freescale i.MX6Q cpufreq support" - depends on SOC_IMX6Q - depends on REGULATOR_ANATOP - help - This adds cpufreq driver support for Freescale i.MX6Q SOC. - - If in doubt, say N. - config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR default y help This adds the CPUFreq driver support for SPEAr SOCs. - -config ARM_HIGHBANK_CPUFREQ - tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK - select CPU_FREQ_TABLE - select GENERIC_CPUFREQ_CPU0 - select PM_OPP - select REGULATOR - - default m - help - This adds the CPUFreq driver for Calxeda Highbank SoC - based boards. - - If in doubt, say N. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ba9a3e1..2c25504 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -50,18 +50,18 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o -obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o -obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o -obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o +obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o +obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o +obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o +obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o +obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o -obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o -obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o ################################################################################## # PowerPC platform drivers -- cgit v0.10.2 From ceff98e3338cc53e31491aa8db395dd66327b210 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:10 +0000 Subject: cpufreq: tegra: Move driver to drivers/cpufreq This patch moves cpufreq driver of ARM based tegra platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Stephen Warren Tested-by: Stephen Warren Acked-by: Arnd Bergmann Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index f6b46ae..09b578f 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -24,7 +24,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-tegra30.o endif obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o -obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o obj-$(CONFIG_TEGRA_PCI) += pcie.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-dt-tegra20.o diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c deleted file mode 100644 index 11ca730..0000000 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ /dev/null @@ -1,294 +0,0 @@ -/* - * arch/arm/mach-tegra/cpu-tegra.c - * - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Colin Cross - * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Frequency table index must be sequential starting at 0 */ -static struct cpufreq_frequency_table freq_table[] = { - { 0, 216000 }, - { 1, 312000 }, - { 2, 456000 }, - { 3, 608000 }, - { 4, 760000 }, - { 5, 816000 }, - { 6, 912000 }, - { 7, 1000000 }, - { 8, CPUFREQ_TABLE_END }, -}; - -#define NUM_CPUS 2 - -static struct clk *cpu_clk; -static struct clk *pll_x_clk; -static struct clk *pll_p_clk; -static struct clk *emc_clk; - -static unsigned long target_cpu_speed[NUM_CPUS]; -static DEFINE_MUTEX(tegra_cpu_lock); -static bool is_suspended; - -static int tegra_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int tegra_getspeed(unsigned int cpu) -{ - unsigned long rate; - - if (cpu >= NUM_CPUS) - return 0; - - rate = clk_get_rate(cpu_clk) / 1000; - return rate; -} - -static int tegra_cpu_clk_set_rate(unsigned long rate) -{ - int ret; - - /* - * Take an extra reference to the main pll so it doesn't turn - * off when we move the cpu off of it - */ - clk_prepare_enable(pll_x_clk); - - ret = clk_set_parent(cpu_clk, pll_p_clk); - if (ret) { - pr_err("Failed to switch cpu to clock pll_p\n"); - goto out; - } - - if (rate == clk_get_rate(pll_p_clk)) - goto out; - - ret = clk_set_rate(pll_x_clk, rate); - if (ret) { - pr_err("Failed to change pll_x to %lu\n", rate); - goto out; - } - - ret = clk_set_parent(cpu_clk, pll_x_clk); - if (ret) { - pr_err("Failed to switch cpu to clock pll_x\n"); - goto out; - } - -out: - clk_disable_unprepare(pll_x_clk); - return ret; -} - -static int tegra_update_cpu_speed(struct cpufreq_policy *policy, - unsigned long rate) -{ - int ret = 0; - struct cpufreq_freqs freqs; - - freqs.old = tegra_getspeed(0); - freqs.new = rate; - - if (freqs.old == freqs.new) - return ret; - - /* - * Vote on memory bus frequency based on cpu frequency - * This sets the minimum frequency, display or avp may request higher - */ - if (rate >= 816000) - clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ - else if (rate >= 456000) - clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ - 
else - clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - -#ifdef CONFIG_CPU_FREQ_DEBUG - printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", - freqs.old, freqs.new); -#endif - - ret = tegra_cpu_clk_set_rate(freqs.new * 1000); - if (ret) { - pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", - freqs.new); - return ret; - } - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static unsigned long tegra_cpu_highest_speed(void) -{ - unsigned long rate = 0; - int i; - - for_each_online_cpu(i) - rate = max(rate, target_cpu_speed[i]); - return rate; -} - -static int tegra_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int idx; - unsigned int freq; - int ret = 0; - - mutex_lock(&tegra_cpu_lock); - - if (is_suspended) { - ret = -EBUSY; - goto out; - } - - cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &idx); - - freq = freq_table[idx].frequency; - - target_cpu_speed[policy->cpu] = freq; - - ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed()); - -out: - mutex_unlock(&tegra_cpu_lock); - return ret; -} - -static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, - void *dummy) -{ - mutex_lock(&tegra_cpu_lock); - if (event == PM_SUSPEND_PREPARE) { - struct cpufreq_policy *policy = cpufreq_cpu_get(0); - is_suspended = true; - pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", - freq_table[0].frequency); - tegra_update_cpu_speed(policy, freq_table[0].frequency); - cpufreq_cpu_put(policy); - } else if (event == PM_POST_SUSPEND) { - is_suspended = false; - } - mutex_unlock(&tegra_cpu_lock); - - return NOTIFY_OK; -} - -static struct notifier_block tegra_cpu_pm_notifier = { - .notifier_call = tegra_pm_notify, -}; - -static int tegra_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu >= NUM_CPUS) - return -EINVAL; - - clk_prepare_enable(emc_clk); - clk_prepare_enable(cpu_clk); - - cpufreq_frequency_table_cpuinfo(policy, freq_table); - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - policy->cur = tegra_getspeed(policy->cpu); - target_cpu_speed[policy->cpu] = policy->cur; - - /* FIXME: what's the actual transition time? 
*/ - policy->cpuinfo.transition_latency = 300 * 1000; - - cpumask_copy(policy->cpus, cpu_possible_mask); - - if (policy->cpu == 0) - register_pm_notifier(&tegra_cpu_pm_notifier); - - return 0; -} - -static int tegra_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_cpuinfo(policy, freq_table); - clk_disable_unprepare(emc_clk); - return 0; -} - -static struct freq_attr *tegra_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver tegra_cpufreq_driver = { - .verify = tegra_verify_speed, - .target = tegra_target, - .get = tegra_getspeed, - .init = tegra_cpu_init, - .exit = tegra_cpu_exit, - .name = "tegra", - .attr = tegra_cpufreq_attr, -}; - -static int __init tegra_cpufreq_init(void) -{ - cpu_clk = clk_get_sys(NULL, "cpu"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - pll_x_clk = clk_get_sys(NULL, "pll_x"); - if (IS_ERR(pll_x_clk)) - return PTR_ERR(pll_x_clk); - - pll_p_clk = clk_get_sys(NULL, "pll_p_cclk"); - if (IS_ERR(pll_p_clk)) - return PTR_ERR(pll_p_clk); - - emc_clk = clk_get_sys("cpu", "emc"); - if (IS_ERR(emc_clk)) { - clk_put(cpu_clk); - return PTR_ERR(emc_clk); - } - - return cpufreq_register_driver(&tegra_cpufreq_driver); -} - -static void __exit tegra_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&tegra_cpufreq_driver); - clk_put(emc_clk); - clk_put(cpu_clk); -} - - -MODULE_AUTHOR("Colin Cross "); -MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); -MODULE_LICENSE("GPL"); -module_init(tegra_cpufreq_init); -module_exit(tegra_cpufreq_exit); diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 2c25504..c34094b 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o +obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c new file mode 100644 index 0000000..c74c0e1 --- /dev/null +++ b/drivers/cpufreq/tegra-cpufreq.c @@ -0,0 +1,292 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Colin Cross + * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
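+ *
+ * Frequency changes reparent the CPU clock to pll_p while pll_x is
+ * re-programmed and re-locked for the new rate, and a minimum-rate vote
+ * for the EMC (memory bus) clock is derived from the CPU frequency.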
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Frequency table index must be sequential starting at 0 */ +static struct cpufreq_frequency_table freq_table[] = { + { 0, 216000 }, + { 1, 312000 }, + { 2, 456000 }, + { 3, 608000 }, + { 4, 760000 }, + { 5, 816000 }, + { 6, 912000 }, + { 7, 1000000 }, + { 8, CPUFREQ_TABLE_END }, +}; + +#define NUM_CPUS 2 + +static struct clk *cpu_clk; +static struct clk *pll_x_clk; +static struct clk *pll_p_clk; +static struct clk *emc_clk; + +static unsigned long target_cpu_speed[NUM_CPUS]; +static DEFINE_MUTEX(tegra_cpu_lock); +static bool is_suspended; + +static int tegra_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static unsigned int tegra_getspeed(unsigned int cpu) +{ + unsigned long rate; + + if (cpu >= NUM_CPUS) + return 0; + + rate = clk_get_rate(cpu_clk) / 1000; + return rate; +} + +static int tegra_cpu_clk_set_rate(unsigned long rate) +{ + int ret; + + /* + * Take an extra reference to the main pll so it doesn't turn + * off when we move the cpu off of it + */ + clk_prepare_enable(pll_x_clk); + + ret = clk_set_parent(cpu_clk, pll_p_clk); + if (ret) { + pr_err("Failed to switch cpu to clock pll_p\n"); + goto out; + } + + if (rate == clk_get_rate(pll_p_clk)) + goto out; + + ret = clk_set_rate(pll_x_clk, rate); + if (ret) { + pr_err("Failed to change pll_x to %lu\n", rate); + goto out; + } + + ret = clk_set_parent(cpu_clk, pll_x_clk); + if (ret) { + pr_err("Failed to switch cpu to clock pll_x\n"); + goto out; + } + +out: + clk_disable_unprepare(pll_x_clk); + return ret; +} + +static int tegra_update_cpu_speed(struct cpufreq_policy *policy, + unsigned long rate) +{ + int ret = 0; + struct cpufreq_freqs freqs; + + freqs.old = tegra_getspeed(0); + freqs.new = rate; + + if (freqs.old == freqs.new) + return ret; + + /* + * Vote on memory bus frequency based on cpu frequency + * This sets the minimum frequency, display or avp may request higher + */ + if (rate >= 816000) + clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ + else if (rate >= 456000) + clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ + else + clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + +#ifdef CONFIG_CPU_FREQ_DEBUG + printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", + freqs.old, freqs.new); +#endif + + ret = tegra_cpu_clk_set_rate(freqs.new * 1000); + if (ret) { + pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", + freqs.new); + return ret; + } + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static unsigned long tegra_cpu_highest_speed(void) +{ + unsigned long rate = 0; + int i; + + for_each_online_cpu(i) + rate = max(rate, target_cpu_speed[i]); + return rate; +} + +static int tegra_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int idx; + unsigned int freq; + int ret = 0; + + mutex_lock(&tegra_cpu_lock); + + if (is_suspended) { + ret = -EBUSY; + goto out; + } + + cpufreq_frequency_table_target(policy, freq_table, target_freq, + relation, &idx); + + freq = freq_table[idx].frequency; + + target_cpu_speed[policy->cpu] = freq; + + ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed()); + +out: + mutex_unlock(&tegra_cpu_lock); + return ret; +} + +static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, + void 
*dummy) +{ + mutex_lock(&tegra_cpu_lock); + if (event == PM_SUSPEND_PREPARE) { + struct cpufreq_policy *policy = cpufreq_cpu_get(0); + is_suspended = true; + pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", + freq_table[0].frequency); + tegra_update_cpu_speed(policy, freq_table[0].frequency); + cpufreq_cpu_put(policy); + } else if (event == PM_POST_SUSPEND) { + is_suspended = false; + } + mutex_unlock(&tegra_cpu_lock); + + return NOTIFY_OK; +} + +static struct notifier_block tegra_cpu_pm_notifier = { + .notifier_call = tegra_pm_notify, +}; + +static int tegra_cpu_init(struct cpufreq_policy *policy) +{ + if (policy->cpu >= NUM_CPUS) + return -EINVAL; + + clk_prepare_enable(emc_clk); + clk_prepare_enable(cpu_clk); + + cpufreq_frequency_table_cpuinfo(policy, freq_table); + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + policy->cur = tegra_getspeed(policy->cpu); + target_cpu_speed[policy->cpu] = policy->cur; + + /* FIXME: what's the actual transition time? */ + policy->cpuinfo.transition_latency = 300 * 1000; + + cpumask_copy(policy->cpus, cpu_possible_mask); + + if (policy->cpu == 0) + register_pm_notifier(&tegra_cpu_pm_notifier); + + return 0; +} + +static int tegra_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_cpuinfo(policy, freq_table); + clk_disable_unprepare(emc_clk); + return 0; +} + +static struct freq_attr *tegra_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver tegra_cpufreq_driver = { + .verify = tegra_verify_speed, + .target = tegra_target, + .get = tegra_getspeed, + .init = tegra_cpu_init, + .exit = tegra_cpu_exit, + .name = "tegra", + .attr = tegra_cpufreq_attr, +}; + +static int __init tegra_cpufreq_init(void) +{ + cpu_clk = clk_get_sys(NULL, "cpu"); + if (IS_ERR(cpu_clk)) + return PTR_ERR(cpu_clk); + + pll_x_clk = clk_get_sys(NULL, "pll_x"); + if (IS_ERR(pll_x_clk)) + return PTR_ERR(pll_x_clk); + + pll_p_clk = clk_get_sys(NULL, "pll_p_cclk"); + if (IS_ERR(pll_p_clk)) + return PTR_ERR(pll_p_clk); + + emc_clk = clk_get_sys("cpu", "emc"); + if (IS_ERR(emc_clk)) { + clk_put(cpu_clk); + return PTR_ERR(emc_clk); + } + + return cpufreq_register_driver(&tegra_cpufreq_driver); +} + +static void __exit tegra_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&tegra_cpufreq_driver); + clk_put(emc_clk); + clk_put(cpu_clk); +} + + +MODULE_AUTHOR("Colin Cross "); +MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); +MODULE_LICENSE("GPL"); +module_init(tegra_cpufreq_init); +module_exit(tegra_cpufreq_exit); -- cgit v0.10.2 From 8a7b1227e303d7e927214ee0f260041aef44bb58 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:11 +0000 Subject: cpufreq: davinci: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of ARM based davinci platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Sekhar Nori Acked-by: Arnd Bergmann Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile index fb5c1aa..dd1ffcc 100644 --- a/arch/arm/mach-davinci/Makefile +++ b/arch/arm/mach-davinci/Makefile @@ -37,7 +37,6 @@ obj-$(CONFIG_MACH_MITYOMAPL138) += board-mityomapl138.o obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o # Power Management -obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_SUSPEND) += pm.o sleep.o obj-$(CONFIG_HAVE_CLK) += pm_domain.o diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c deleted file mode 100644 index 7c2e943..0000000 --- a/arch/arm/mach-davinci/cpufreq.c +++ /dev/null @@ -1,233 +0,0 @@ -/* - * CPU frequency scaling for DaVinci - * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows: - * - * Copyright (C) 2005 Nokia Corporation - * Written by Tony Lindgren - * - * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Updated to support OMAP3 - * Rajendra Nayak - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "clock.h" - -struct davinci_cpufreq { - struct device *dev; - struct clk *armclk; - struct clk *asyncclk; - unsigned long asyncrate; -}; -static struct davinci_cpufreq cpufreq; - -static int davinci_verify_speed(struct cpufreq_policy *policy) -{ - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct cpufreq_frequency_table *freq_table = pdata->freq_table; - struct clk *armclk = cpufreq.armclk; - - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; - policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int davinci_getspeed(unsigned int cpu) -{ - if (cpu) - return 0; - - return clk_get_rate(cpufreq.armclk) / 1000; -} - -static int davinci_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - int ret = 0; - unsigned int idx; - struct cpufreq_freqs freqs; - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct clk *armclk = cpufreq.armclk; - - freqs.old = davinci_getspeed(0); - freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; - - if (freqs.old == freqs.new) - return ret; - - dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); - - ret = cpufreq_frequency_table_target(policy, pdata->freq_table, - freqs.new, relation, &idx); - if (ret) - return -EINVAL; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - /* if moving to higher frequency, up the voltage beforehand */ - if (pdata->set_voltage && freqs.new > freqs.old) { - ret = pdata->set_voltage(idx); - if (ret) - goto out; - } - - ret = clk_set_rate(armclk, idx); - if (ret) - goto out; - - if (cpufreq.asyncclk) { - ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); - if (ret) - goto out; - } - - /* if moving to lower freq, 
lower the voltage after lowering freq */ - if (pdata->set_voltage && freqs.new < freqs.old) - pdata->set_voltage(idx); - -out: - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return ret; -} - -static int davinci_cpu_init(struct cpufreq_policy *policy) -{ - int result = 0; - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct cpufreq_frequency_table *freq_table = pdata->freq_table; - - if (policy->cpu != 0) - return -EINVAL; - - /* Finish platform specific initialization */ - if (pdata->init) { - result = pdata->init(); - if (result) - return result; - } - - policy->cur = davinci_getspeed(0); - - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (result) { - pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", - __func__); - return result; - } - - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - - /* - * Time measurement across the target() function yields ~1500-1800us - * time taken with no drivers on notification list. - * Setting the latency to 2000 us to accommodate addition of drivers - * to pre/post change notification list. - */ - policy->cpuinfo.transition_latency = 2000 * 1000; - return 0; -} - -static int davinci_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *davinci_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver davinci_driver = { - .flags = CPUFREQ_STICKY, - .verify = davinci_verify_speed, - .target = davinci_target, - .get = davinci_getspeed, - .init = davinci_cpu_init, - .exit = davinci_cpu_exit, - .name = "davinci", - .attr = davinci_cpufreq_attr, -}; - -static int __init davinci_cpufreq_probe(struct platform_device *pdev) -{ - struct davinci_cpufreq_config *pdata = pdev->dev.platform_data; - struct clk *asyncclk; - - if (!pdata) - return -EINVAL; - if (!pdata->freq_table) - return -EINVAL; - - cpufreq.dev = &pdev->dev; - - cpufreq.armclk = clk_get(NULL, "arm"); - if (IS_ERR(cpufreq.armclk)) { - dev_err(cpufreq.dev, "Unable to get ARM clock\n"); - return PTR_ERR(cpufreq.armclk); - } - - asyncclk = clk_get(cpufreq.dev, "async"); - if (!IS_ERR(asyncclk)) { - cpufreq.asyncclk = asyncclk; - cpufreq.asyncrate = clk_get_rate(asyncclk); - } - - return cpufreq_register_driver(&davinci_driver); -} - -static int __exit davinci_cpufreq_remove(struct platform_device *pdev) -{ - clk_put(cpufreq.armclk); - - if (cpufreq.asyncclk) - clk_put(cpufreq.asyncclk); - - return cpufreq_unregister_driver(&davinci_driver); -} - -static struct platform_driver davinci_cpufreq_driver = { - .driver = { - .name = "cpufreq-davinci", - .owner = THIS_MODULE, - }, - .remove = __exit_p(davinci_cpufreq_remove), -}; - -int __init davinci_cpufreq_init(void) -{ - return platform_driver_probe(&davinci_cpufreq_driver, - davinci_cpufreq_probe); -} - diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c34094b..c0e8921 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. 
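# (Only one cpufreq driver can be registered at a time; for built-in drivers
# link order decides which big LITTLE glue gets the chance to register first.)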
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o +obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c new file mode 100644 index 0000000..c33c76c --- /dev/null +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -0,0 +1,231 @@ +/* + * CPU frequency scaling for DaVinci + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * + * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows: + * + * Copyright (C) 2005 Nokia Corporation + * Written by Tony Lindgren + * + * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King + * + * Copyright (C) 2007-2008 Texas Instruments, Inc. + * Updated to support OMAP3 + * Rajendra Nayak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct davinci_cpufreq { + struct device *dev; + struct clk *armclk; + struct clk *asyncclk; + unsigned long asyncrate; +}; +static struct davinci_cpufreq cpufreq; + +static int davinci_verify_speed(struct cpufreq_policy *policy) +{ + struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; + struct cpufreq_frequency_table *freq_table = pdata->freq_table; + struct clk *armclk = cpufreq.armclk; + + if (freq_table) + return cpufreq_frequency_table_verify(policy, freq_table); + + if (policy->cpu) + return -EINVAL; + + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; + policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static unsigned int davinci_getspeed(unsigned int cpu) +{ + if (cpu) + return 0; + + return clk_get_rate(cpufreq.armclk) / 1000; +} + +static int davinci_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + int ret = 0; + unsigned int idx; + struct cpufreq_freqs freqs; + struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; + struct clk *armclk = cpufreq.armclk; + + freqs.old = davinci_getspeed(0); + freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; + + if (freqs.old == freqs.new) + return ret; + + dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); + + ret = cpufreq_frequency_table_target(policy, pdata->freq_table, + freqs.new, relation, &idx); + if (ret) + return -EINVAL; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + /* if moving to higher frequency, up the voltage beforehand */ + if (pdata->set_voltage && freqs.new > freqs.old) { + ret = pdata->set_voltage(idx); + if (ret) + goto out; + } + + ret = clk_set_rate(armclk, idx); + if (ret) + goto out; + + if (cpufreq.asyncclk) { + ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); + if (ret) + goto out; + } + + /* if moving to lower freq, lower the voltage after lowering freq */ + if (pdata->set_voltage && freqs.new < freqs.old) + pdata->set_voltage(idx); + +out: + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return ret; +} + +static int 
davinci_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; + struct cpufreq_frequency_table *freq_table = pdata->freq_table; + + if (policy->cpu != 0) + return -EINVAL; + + /* Finish platform specific initialization */ + if (pdata->init) { + result = pdata->init(); + if (result) + return result; + } + + policy->cur = davinci_getspeed(0); + + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (result) { + pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", + __func__); + return result; + } + + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + + /* + * Time measurement across the target() function yields ~1500-1800us + * time taken with no drivers on notification list. + * Setting the latency to 2000 us to accommodate addition of drivers + * to pre/post change notification list. + */ + policy->cpuinfo.transition_latency = 2000 * 1000; + return 0; +} + +static int davinci_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static struct freq_attr *davinci_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver davinci_driver = { + .flags = CPUFREQ_STICKY, + .verify = davinci_verify_speed, + .target = davinci_target, + .get = davinci_getspeed, + .init = davinci_cpu_init, + .exit = davinci_cpu_exit, + .name = "davinci", + .attr = davinci_cpufreq_attr, +}; + +static int __init davinci_cpufreq_probe(struct platform_device *pdev) +{ + struct davinci_cpufreq_config *pdata = pdev->dev.platform_data; + struct clk *asyncclk; + + if (!pdata) + return -EINVAL; + if (!pdata->freq_table) + return -EINVAL; + + cpufreq.dev = &pdev->dev; + + cpufreq.armclk = clk_get(NULL, "arm"); + if (IS_ERR(cpufreq.armclk)) { + dev_err(cpufreq.dev, "Unable to get ARM clock\n"); + return PTR_ERR(cpufreq.armclk); + } + + asyncclk = clk_get(cpufreq.dev, "async"); + if (!IS_ERR(asyncclk)) { + cpufreq.asyncclk = asyncclk; + cpufreq.asyncrate = clk_get_rate(asyncclk); + } + + return cpufreq_register_driver(&davinci_driver); +} + +static int __exit davinci_cpufreq_remove(struct platform_device *pdev) +{ + clk_put(cpufreq.armclk); + + if (cpufreq.asyncclk) + clk_put(cpufreq.asyncclk); + + return cpufreq_unregister_driver(&davinci_driver); +} + +static struct platform_driver davinci_cpufreq_driver = { + .driver = { + .name = "cpufreq-davinci", + .owner = THIS_MODULE, + }, + .remove = __exit_p(davinci_cpufreq_remove), +}; + +int __init davinci_cpufreq_init(void) +{ + return platform_driver_probe(&davinci_cpufreq_driver, + davinci_cpufreq_probe); +} + -- cgit v0.10.2 From adde904b445eedf7ad6a8451a416c8080472c4c7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:12 +0000 Subject: cpufreq: pxa3xx: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of ARM based pxa3xx platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Eric Miao Acked-by: Arnd Bergmann Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 12c5005..929e700 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_PM) += pm.o sleep.o standby.o ifeq ($(CONFIG_CPU_FREQ),y) obj-$(CONFIG_PXA25x) += cpufreq-pxa2xx.o obj-$(CONFIG_PXA27x) += cpufreq-pxa2xx.o -obj-$(CONFIG_PXA3xx) += cpufreq-pxa3xx.o endif # Generic drivers that other drivers may depend upon diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c deleted file mode 100644 index 8c45b2b..0000000 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ /dev/null @@ -1,257 +0,0 @@ -/* - * linux/arch/arm/mach-pxa/cpufreq-pxa3xx.c - * - * Copyright (C) 2008 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "generic.h" - -#define HSS_104M (0) -#define HSS_156M (1) -#define HSS_208M (2) -#define HSS_312M (3) - -#define SMCFS_78M (0) -#define SMCFS_104M (2) -#define SMCFS_208M (5) - -#define SFLFS_104M (0) -#define SFLFS_156M (1) -#define SFLFS_208M (2) -#define SFLFS_312M (3) - -#define XSPCLK_156M (0) -#define XSPCLK_NONE (3) - -#define DMCFS_26M (0) -#define DMCFS_260M (3) - -struct pxa3xx_freq_info { - unsigned int cpufreq_mhz; - unsigned int core_xl : 5; - unsigned int core_xn : 3; - unsigned int hss : 2; - unsigned int dmcfs : 2; - unsigned int smcfs : 3; - unsigned int sflfs : 2; - unsigned int df_clkdiv : 3; - - int vcc_core; /* in mV */ - int vcc_sram; /* in mV */ -}; - -#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \ -{ \ - .cpufreq_mhz = cpufreq, \ - .core_xl = _xl, \ - .core_xn = _xn, \ - .hss = HSS_##_hss##M, \ - .dmcfs = DMCFS_##_dmc##M, \ - .smcfs = SMCFS_##_smc##M, \ - .sflfs = SFLFS_##_sfl##M, \ - .df_clkdiv = _dfi, \ - .vcc_core = vcore, \ - .vcc_sram = vsram, \ -} - -static struct pxa3xx_freq_info pxa300_freqs[] = { - /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ - OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ - OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ - OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ - OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ -}; - -static struct pxa3xx_freq_info pxa320_freqs[] = { - /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ - OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ - OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ - OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ - OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ - OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */ -}; - -static unsigned int pxa3xx_freqs_num; -static struct pxa3xx_freq_info *pxa3xx_freqs; -static struct cpufreq_frequency_table *pxa3xx_freqs_table; - -static int setup_freqs_table(struct cpufreq_policy *policy, - struct pxa3xx_freq_info *freqs, int num) -{ - struct cpufreq_frequency_table *table; - int i; - - table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL); - if (table == NULL) - return -ENOMEM; - - for (i = 0; i < num; i++) { - table[i].index = i; - table[i].frequency = freqs[i].cpufreq_mhz * 1000; - } - table[num].index = i; - table[num].frequency = CPUFREQ_TABLE_END; - - 
pxa3xx_freqs = freqs; - pxa3xx_freqs_num = num; - pxa3xx_freqs_table = table; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static void __update_core_freq(struct pxa3xx_freq_info *info) -{ - uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK; - uint32_t accr = ACCR; - uint32_t xclkcfg; - - accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK); - accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl); - - /* No clock until core PLL is re-locked */ - accr |= ACCR_XSPCLK(XSPCLK_NONE); - - xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */ - - ACCR = accr; - __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg)); - - while ((ACSR & mask) != (accr & mask)) - cpu_relax(); -} - -static void __update_bus_freq(struct pxa3xx_freq_info *info) -{ - uint32_t mask; - uint32_t accr = ACCR; - - mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK | - ACCR_DMCFS_MASK; - - accr &= ~mask; - accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) | - ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs); - - ACCR = accr; - - while ((ACSR & mask) != (accr & mask)) - cpu_relax(); -} - -static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); -} - -static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) -{ - return pxa3xx_get_clk_frequency_khz(0); -} - -static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct pxa3xx_freq_info *next; - struct cpufreq_freqs freqs; - unsigned long flags; - int idx; - - if (policy->cpu != 0) - return -EINVAL; - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, - target_freq, relation, &idx)) - return -EINVAL; - - next = &pxa3xx_freqs[idx]; - - freqs.old = policy->cur; - freqs.new = next->cpufreq_mhz * 1000; - - pr_debug("CPU frequency from %d MHz to %d MHz%s\n", - freqs.old / 1000, freqs.new / 1000, - (freqs.old == freqs.new) ? " (skipped)" : ""); - - if (freqs.old == target_freq) - return 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - local_irq_save(flags); - __update_core_freq(next); - __update_bus_freq(next); - local_irq_restore(flags); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) -{ - int ret = -EINVAL; - - /* set default policy and cpuinfo */ - policy->cpuinfo.min_freq = 104000; - policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 
806000 : 624000; - policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->max = pxa3xx_get_clk_frequency_khz(0); - policy->cur = policy->min = policy->max; - - if (cpu_is_pxa300() || cpu_is_pxa310()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); - - if (cpu_is_pxa320()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); - - if (ret) { - pr_err("failed to setup frequency table\n"); - return ret; - } - - pr_info("CPUFREQ support for PXA3xx initialized\n"); - return 0; -} - -static struct cpufreq_driver pxa3xx_cpufreq_driver = { - .verify = pxa3xx_cpufreq_verify, - .target = pxa3xx_cpufreq_set, - .init = pxa3xx_cpufreq_init, - .get = pxa3xx_cpufreq_get, - .name = "pxa3xx-cpufreq", -}; - -static int __init cpufreq_init(void) -{ - if (cpu_is_pxa3xx()) - return cpufreq_register_driver(&pxa3xx_cpufreq_driver); - - return 0; -} -module_init(cpufreq_init); - -static void __exit cpufreq_exit(void) -{ - cpufreq_unregister_driver(&pxa3xx_cpufreq_driver); -} -module_exit(cpufreq_exit); - -MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx"); -MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h new file mode 100644 index 0000000..665542e --- /dev/null +++ b/arch/arm/mach-pxa/include/mach/generic.h @@ -0,0 +1 @@ +#include "../../generic.h" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c0e8921..3458bf2 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o +obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c new file mode 100644 index 0000000..15d60f8 --- /dev/null +++ b/drivers/cpufreq/pxa3xx-cpufreq.c @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2008 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
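+ *
+ * Frequency changes rewrite the core (XL/XN) and bus (SMCFS/SFLFS/HSS/DMCFS)
+ * fields of ACCR and busy-wait until ACSR reports the new settings; the
+ * vcc_core/vcc_sram values in the operating-point tables are informational
+ * only, no regulator is touched by this driver.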
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define HSS_104M (0) +#define HSS_156M (1) +#define HSS_208M (2) +#define HSS_312M (3) + +#define SMCFS_78M (0) +#define SMCFS_104M (2) +#define SMCFS_208M (5) + +#define SFLFS_104M (0) +#define SFLFS_156M (1) +#define SFLFS_208M (2) +#define SFLFS_312M (3) + +#define XSPCLK_156M (0) +#define XSPCLK_NONE (3) + +#define DMCFS_26M (0) +#define DMCFS_260M (3) + +struct pxa3xx_freq_info { + unsigned int cpufreq_mhz; + unsigned int core_xl : 5; + unsigned int core_xn : 3; + unsigned int hss : 2; + unsigned int dmcfs : 2; + unsigned int smcfs : 3; + unsigned int sflfs : 2; + unsigned int df_clkdiv : 3; + + int vcc_core; /* in mV */ + int vcc_sram; /* in mV */ +}; + +#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \ +{ \ + .cpufreq_mhz = cpufreq, \ + .core_xl = _xl, \ + .core_xn = _xn, \ + .hss = HSS_##_hss##M, \ + .dmcfs = DMCFS_##_dmc##M, \ + .smcfs = SMCFS_##_smc##M, \ + .sflfs = SFLFS_##_sfl##M, \ + .df_clkdiv = _dfi, \ + .vcc_core = vcore, \ + .vcc_sram = vsram, \ +} + +static struct pxa3xx_freq_info pxa300_freqs[] = { + /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ + OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ + OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ + OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ + OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ +}; + +static struct pxa3xx_freq_info pxa320_freqs[] = { + /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ + OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ + OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ + OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ + OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ + OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */ +}; + +static unsigned int pxa3xx_freqs_num; +static struct pxa3xx_freq_info *pxa3xx_freqs; +static struct cpufreq_frequency_table *pxa3xx_freqs_table; + +static int setup_freqs_table(struct cpufreq_policy *policy, + struct pxa3xx_freq_info *freqs, int num) +{ + struct cpufreq_frequency_table *table; + int i; + + table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL); + if (table == NULL) + return -ENOMEM; + + for (i = 0; i < num; i++) { + table[i].index = i; + table[i].frequency = freqs[i].cpufreq_mhz * 1000; + } + table[num].index = i; + table[num].frequency = CPUFREQ_TABLE_END; + + pxa3xx_freqs = freqs; + pxa3xx_freqs_num = num; + pxa3xx_freqs_table = table; + + return cpufreq_frequency_table_cpuinfo(policy, table); +} + +static void __update_core_freq(struct pxa3xx_freq_info *info) +{ + uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK; + uint32_t accr = ACCR; + uint32_t xclkcfg; + + accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK); + accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl); + + /* No clock until core PLL is re-locked */ + accr |= ACCR_XSPCLK(XSPCLK_NONE); + + xclkcfg = (info->core_xn == 2) ? 
0x3 : 0x2; /* turbo bit */ + + ACCR = accr; + __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg)); + + while ((ACSR & mask) != (accr & mask)) + cpu_relax(); +} + +static void __update_bus_freq(struct pxa3xx_freq_info *info) +{ + uint32_t mask; + uint32_t accr = ACCR; + + mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK | + ACCR_DMCFS_MASK; + + accr &= ~mask; + accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) | + ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs); + + ACCR = accr; + + while ((ACSR & mask) != (accr & mask)) + cpu_relax(); +} + +static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); +} + +static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) +{ + return pxa3xx_get_clk_frequency_khz(0); +} + +static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct pxa3xx_freq_info *next; + struct cpufreq_freqs freqs; + unsigned long flags; + int idx; + + if (policy->cpu != 0) + return -EINVAL; + + /* Lookup the next frequency */ + if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, + target_freq, relation, &idx)) + return -EINVAL; + + next = &pxa3xx_freqs[idx]; + + freqs.old = policy->cur; + freqs.new = next->cpufreq_mhz * 1000; + + pr_debug("CPU frequency from %d MHz to %d MHz%s\n", + freqs.old / 1000, freqs.new / 1000, + (freqs.old == freqs.new) ? " (skipped)" : ""); + + if (freqs.old == target_freq) + return 0; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + local_irq_save(flags); + __update_core_freq(next); + __update_bus_freq(next); + local_irq_restore(flags); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret = -EINVAL; + + /* set default policy and cpuinfo */ + policy->cpuinfo.min_freq = 104000; + policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000; + policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ + policy->max = pxa3xx_get_clk_frequency_khz(0); + policy->cur = policy->min = policy->max; + + if (cpu_is_pxa300() || cpu_is_pxa310()) + ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); + + if (cpu_is_pxa320()) + ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); + + if (ret) { + pr_err("failed to setup frequency table\n"); + return ret; + } + + pr_info("CPUFREQ support for PXA3xx initialized\n"); + return 0; +} + +static struct cpufreq_driver pxa3xx_cpufreq_driver = { + .verify = pxa3xx_cpufreq_verify, + .target = pxa3xx_cpufreq_set, + .init = pxa3xx_cpufreq_init, + .get = pxa3xx_cpufreq_get, + .name = "pxa3xx-cpufreq", +}; + +static int __init cpufreq_init(void) +{ + if (cpu_is_pxa3xx()) + return cpufreq_register_driver(&pxa3xx_cpufreq_driver); + + return 0; +} +module_init(cpufreq_init); + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&pxa3xx_cpufreq_driver); +} +module_exit(cpufreq_exit); + +MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From cffc96eb4f91ba0f974b352a28b15f84950bd776 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:13 +0000 Subject: cpufreq: pxa2xx: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of ARM based pxa2xx platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Eric Miao Acked-by: Arnd Bergmann Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 13b7394..76eb836 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2177,13 +2177,6 @@ config CPU_FREQ_INTEGRATOR If in doubt, say Y. -config CPU_FREQ_PXA - bool - depends on CPU_FREQ && ARCH_PXA && PXA25x - default y - select CPU_FREQ_DEFAULT_GOV_USERSPACE - select CPU_FREQ_TABLE - config CPU_FREQ_S3C bool help diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 929e700..648867a 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -7,11 +7,6 @@ obj-y += clock.o devices.o generic.o irq.o \ time.o reset.o obj-$(CONFIG_PM) += pm.o sleep.o standby.o -ifeq ($(CONFIG_CPU_FREQ),y) -obj-$(CONFIG_PXA25x) += cpufreq-pxa2xx.o -obj-$(CONFIG_PXA27x) += cpufreq-pxa2xx.o -endif - # Generic drivers that other drivers may depend upon # SoC-specific code diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c deleted file mode 100644 index f1ca4da..0000000 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ /dev/null @@ -1,493 +0,0 @@ -/* - * linux/arch/arm/mach-pxa/cpufreq-pxa2xx.c - * - * Copyright (C) 2002,2003 Intrinsyc Software - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * History: - * 31-Jul-2002 : Initial version [FB] - * 29-Jan-2003 : added PXA255 support [FB] - * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) - * - * Note: - * This driver may change the memory bus clock rate, but will not do any - * platform specific access timing changes... for example if you have flash - * memory connected to CS0, you will need to register a platform specific - * notifier which will adjust the memory access strobes to maintain a - * minimum strobe width. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#ifdef DEBUG -static unsigned int freq_debug; -module_param(freq_debug, uint, 0); -MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0"); -#else -#define freq_debug 0 -#endif - -static struct regulator *vcc_core; - -static unsigned int pxa27x_maxfreq; -module_param(pxa27x_maxfreq, uint, 0); -MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" - "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); - -typedef struct { - unsigned int khz; - unsigned int membus; - unsigned int cccr; - unsigned int div2; - unsigned int cclkcfg; - int vmin; - int vmax; -} pxa_freqs_t; - -/* Define the refresh period in mSec for the SDRAM and the number of rows */ -#define SDRAM_TREF 64 /* standard 64ms SDRAM */ -static unsigned int sdram_rows; - -#define CCLKCFG_TURBO 0x1 -#define CCLKCFG_FCS 0x2 -#define CCLKCFG_HALFTURBO 0x4 -#define CCLKCFG_FASTBUS 0x8 -#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) -#define MDREFR_DRI_MASK 0xFFF - -#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3) -#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3) - -/* - * PXA255 definitions - */ -/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ -#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS - -static pxa_freqs_t pxa255_run_freqs[] = -{ - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ - {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */ - {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */ - {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */ - {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */ - {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */ -}; - -/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ -static pxa_freqs_t pxa255_turbo_freqs[] = -{ - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ - {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */ - {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */ - {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */ - {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */ -}; - -#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) -#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) - -static struct cpufreq_frequency_table - pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1]; -static struct cpufreq_frequency_table - pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1]; - -static unsigned int pxa255_turbo_table; -module_param(pxa255_turbo_table, uint, 0); -MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)"); - -/* - * PXA270 definitions - * - * For the PXA27x: - * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG. 
- * - * A = 0 => memory controller clock from table 3-7, - * A = 1 => memory controller clock = system bus clock - * Run mode frequency = 13 MHz * L - * Turbo mode frequency = 13 MHz * L * N - * System bus frequency = 13 MHz * L / (B + 1) - * - * In CCCR: - * A = 1 - * L = 16 oscillator to run mode ratio - * 2N = 6 2 * (turbo mode to run mode ratio) - * - * In CCLKCFG: - * B = 1 Fast bus mode - * HT = 0 Half-Turbo mode - * T = 1 Turbo mode - * - * For now, just support some of the combinations in table 3-7 of - * PXA27x Processor Family Developer's Manual to simplify frequency - * change sequences. - */ -#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L) -#define CCLKCFG2(B, HT, T) \ - (CCLKCFG_FCS | \ - ((B) ? CCLKCFG_FASTBUS : 0) | \ - ((HT) ? CCLKCFG_HALFTURBO : 0) | \ - ((T) ? CCLKCFG_TURBO : 0)) - -static pxa_freqs_t pxa27x_freqs[] = { - {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, - {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, - {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, - {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 }, - {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 }, - {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 }, - {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 } -}; - -#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs) -static struct cpufreq_frequency_table - pxa27x_freq_table[NUM_PXA27x_FREQS+1]; - -extern unsigned get_clk_frequency_khz(int info); - -#ifdef CONFIG_REGULATOR - -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) -{ - int ret = 0; - int vmin, vmax; - - if (!cpu_is_pxa27x()) - return 0; - - vmin = pxa_freq->vmin; - vmax = pxa_freq->vmax; - if ((vmin == -1) || (vmax == -1)) - return 0; - - ret = regulator_set_voltage(vcc_core, vmin, vmax); - if (ret) - pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n", - vmin, vmax); - return ret; -} - -static __init void pxa_cpufreq_init_voltages(void) -{ - vcc_core = regulator_get(NULL, "vcc_core"); - if (IS_ERR(vcc_core)) { - pr_info("cpufreq: Didn't find vcc_core regulator\n"); - vcc_core = NULL; - } else { - pr_info("cpufreq: Found vcc_core regulator\n"); - } -} -#else -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) -{ - return 0; -} - -static __init void pxa_cpufreq_init_voltages(void) { } -#endif - -static void find_freq_tables(struct cpufreq_frequency_table **freq_table, - pxa_freqs_t **pxa_freqs) -{ - if (cpu_is_pxa25x()) { - if (!pxa255_turbo_table) { - *pxa_freqs = pxa255_run_freqs; - *freq_table = pxa255_run_freq_table; - } else { - *pxa_freqs = pxa255_turbo_freqs; - *freq_table = pxa255_turbo_freq_table; - } - } - if (cpu_is_pxa27x()) { - *pxa_freqs = pxa27x_freqs; - *freq_table = pxa27x_freq_table; - } -} - -static void pxa27x_guess_max_freq(void) -{ - if (!pxa27x_maxfreq) { - pxa27x_maxfreq = 416000; - printk(KERN_INFO "PXA CPU 27x max frequency not defined " - "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", - pxa27x_maxfreq); - } else { - pxa27x_maxfreq *= 1000; - } -} - -static void init_sdram_rows(void) -{ - uint32_t mdcnfg = __raw_readl(MDCNFG); - unsigned int drac2 = 0, drac0 = 0; - - if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3)) - drac2 = MDCNFG_DRAC2(mdcnfg); - - if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1)) - drac0 = MDCNFG_DRAC0(mdcnfg); - - sdram_rows = 1 << (11 + max(drac0, drac2)); -} - -static u32 mdrefr_dri(unsigned int freq) -{ - 
u32 interval = freq * SDRAM_TREF / sdram_rows; - - return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; -} - -/* find a valid frequency point */ -static int pxa_verify_policy(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freqs; - int ret; - - find_freq_tables(&pxa_freqs_table, &pxa_freqs); - ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); - - if (freq_debug) - pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", - policy->min, policy->max); - - return ret; -} - -static unsigned int pxa_cpufreq_get(unsigned int cpu) -{ - return get_clk_frequency_khz(0); -} - -static int pxa_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freq_settings; - struct cpufreq_freqs freqs; - unsigned int idx; - unsigned long flags; - unsigned int new_freq_cpu, new_freq_mem; - unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; - int ret = 0; - - /* Get the current policy */ - find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa_freqs_table, - target_freq, relation, &idx)) { - return -EINVAL; - } - - new_freq_cpu = pxa_freq_settings[idx].khz; - new_freq_mem = pxa_freq_settings[idx].membus; - freqs.old = policy->cur; - freqs.new = new_freq_cpu; - - if (freq_debug) - pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", - freqs.new / 1000, (pxa_freq_settings[idx].div2) ? - (new_freq_mem / 2000) : (new_freq_mem / 1000)); - - if (vcc_core && freqs.new > freqs.old) - ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - if (ret) - return ret; - /* - * Tell everyone what we're about to do... - * you should add a notify client with any platform specific - * Vcc changing capability - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - /* Calculate the next MDREFR. If we're slowing down the SDRAM clock - * we need to preset the smaller DRI before the change. If we're - * speeding up we need to set the larger DRI value after the change. - */ - preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR); - if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) { - preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK); - preset_mdrefr |= mdrefr_dri(new_freq_mem); - } - postset_mdrefr = - (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem); - - /* If we're dividing the memory clock by two for the SDRAM clock, this - * must be set prior to the change. Clearing the divide must be done - * after the change. - */ - if (pxa_freq_settings[idx].div2) { - preset_mdrefr |= MDREFR_DB2_MASK; - postset_mdrefr |= MDREFR_DB2_MASK; - } else { - postset_mdrefr &= ~MDREFR_DB2_MASK; - } - - local_irq_save(flags); - - /* Set new the CCCR and prepare CCLKCFG */ - CCCR = pxa_freq_settings[idx].cccr; - cclkcfg = pxa_freq_settings[idx].cclkcfg; - - asm volatile(" \n\ - ldr r4, [%1] /* load MDREFR */ \n\ - b 2f \n\ - .align 5 \n\ -1: \n\ - str %3, [%1] /* preset the MDREFR */ \n\ - mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\ - str %4, [%1] /* postset the MDREFR */ \n\ - \n\ - b 3f \n\ -2: b 1b \n\ -3: nop \n\ - " - : "=&r" (unused) - : "r" (MDREFR), "r" (cclkcfg), - "r" (preset_mdrefr), "r" (postset_mdrefr) - : "r4", "r5"); - local_irq_restore(flags); - - /* - * Tell everyone what we've just done... 
- * you should add a notify client with any platform specific - * SDRAM refresh timer adjustments - */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - /* - * Even if voltage setting fails, we don't report it, as the frequency - * change succeeded. The voltage reduction is not a critical failure, - * only power savings will suffer from this. - * - * Note: if the voltage change fails, and a return value is returned, a - * bug is triggered (seems a deadlock). Should anybody find out where, - * the "return 0" should become a "return ret". - */ - if (vcc_core && freqs.new < freqs.old) - ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - - return 0; -} - -static int pxa_cpufreq_init(struct cpufreq_policy *policy) -{ - int i; - unsigned int freq; - struct cpufreq_frequency_table *pxa255_freq_table; - pxa_freqs_t *pxa255_freqs; - - /* try to guess pxa27x cpu */ - if (cpu_is_pxa27x()) - pxa27x_guess_max_freq(); - - pxa_cpufreq_init_voltages(); - - init_sdram_rows(); - - /* set default policy and cpuinfo */ - policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->cur = get_clk_frequency_khz(0); /* current freq */ - policy->min = policy->max = policy->cur; - - /* Generate pxa25x the run cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { - pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; - pxa255_run_freq_table[i].index = i; - } - pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; - - /* Generate pxa25x the turbo cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { - pxa255_turbo_freq_table[i].frequency = - pxa255_turbo_freqs[i].khz; - pxa255_turbo_freq_table[i].index = i; - } - pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; - - pxa255_turbo_table = !!pxa255_turbo_table; - - /* Generate the pxa27x cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA27x_FREQS; i++) { - freq = pxa27x_freqs[i].khz; - if (freq > pxa27x_maxfreq) - break; - pxa27x_freq_table[i].frequency = freq; - pxa27x_freq_table[i].index = i; - } - pxa27x_freq_table[i].index = i; - pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; - - /* - * Set the policy's minimum and maximum frequencies from the tables - * just constructed. This sets cpuinfo.mxx_freq, min and max. - */ - if (cpu_is_pxa25x()) { - find_freq_tables(&pxa255_freq_table, &pxa255_freqs); - pr_info("PXA255 cpufreq using %s frequency table\n", - pxa255_turbo_table ? 
"turbo" : "run"); - cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); - } - else if (cpu_is_pxa27x()) - cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); - - printk(KERN_INFO "PXA CPU frequency change support initialized\n"); - - return 0; -} - -static struct cpufreq_driver pxa_cpufreq_driver = { - .verify = pxa_verify_policy, - .target = pxa_set_target, - .init = pxa_cpufreq_init, - .get = pxa_cpufreq_get, - .name = "PXA2xx", -}; - -static int __init pxa_cpu_init(void) -{ - int ret = -ENODEV; - if (cpu_is_pxa25x() || cpu_is_pxa27x()) - ret = cpufreq_register_driver(&pxa_cpufreq_driver); - return ret; -} - -static void __exit pxa_cpu_exit(void) -{ - cpufreq_unregister_driver(&pxa_cpufreq_driver); -} - - -MODULE_AUTHOR("Intrinsyc Software Inc."); -MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture"); -MODULE_LICENSE("GPL"); -module_init(pxa_cpu_init); -module_exit(pxa_cpu_exit); diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 3458bf2..aa766fb 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -59,6 +59,8 @@ obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o +obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o +obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c new file mode 100644 index 0000000..fe4c55b --- /dev/null +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2002,2003 Intrinsyc Software + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * History: + * 31-Jul-2002 : Initial version [FB] + * 29-Jan-2003 : added PXA255 support [FB] + * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) + * + * Note: + * This driver may change the memory bus clock rate, but will not do any + * platform specific access timing changes... for example if you have flash + * memory connected to CS0, you will need to register a platform specific + * notifier which will adjust the memory access strobes to maintain a + * minimum strobe width. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef DEBUG +static unsigned int freq_debug; +module_param(freq_debug, uint, 0); +MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0"); +#else +#define freq_debug 0 +#endif + +static struct regulator *vcc_core; + +static unsigned int pxa27x_maxfreq; +module_param(pxa27x_maxfreq, uint, 0); +MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" + "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); + +typedef struct { + unsigned int khz; + unsigned int membus; + unsigned int cccr; + unsigned int div2; + unsigned int cclkcfg; + int vmin; + int vmax; +} pxa_freqs_t; + +/* Define the refresh period in mSec for the SDRAM and the number of rows */ +#define SDRAM_TREF 64 /* standard 64ms SDRAM */ +static unsigned int sdram_rows; + +#define CCLKCFG_TURBO 0x1 +#define CCLKCFG_FCS 0x2 +#define CCLKCFG_HALFTURBO 0x4 +#define CCLKCFG_FASTBUS 0x8 +#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) +#define MDREFR_DRI_MASK 0xFFF + +#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3) +#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3) + +/* + * PXA255 definitions + */ +/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ +#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS + +static pxa_freqs_t pxa255_run_freqs[] = +{ + /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ + { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ + {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */ + {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */ + {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */ + {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */ + {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */ +}; + +/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ +static pxa_freqs_t pxa255_turbo_freqs[] = +{ + /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ + { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ + {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */ + {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */ + {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */ + {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */ +}; + +#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) +#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) + +static struct cpufreq_frequency_table + pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1]; +static struct cpufreq_frequency_table + pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1]; + +static unsigned int pxa255_turbo_table; +module_param(pxa255_turbo_table, uint, 0); +MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)"); + +/* + * PXA270 definitions + * + * For the PXA27x: + * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG. 
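 *
 * (Worked numbers for the example values given below, added as an
 *  illustration rather than a quote from the manual: L = 16 and 2N = 6
 *  mean N = 3, so run mode = 13 MHz * 16 = 208 MHz and turbo mode =
 *  13 MHz * 16 * 3 = 624 MHz -- the PXA27x_CCCR(1, 16, 6) entry in
 *  pxa27x_freqs[] further down encodes exactly this 624000 kHz point.)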
+ * + * A = 0 => memory controller clock from table 3-7, + * A = 1 => memory controller clock = system bus clock + * Run mode frequency = 13 MHz * L + * Turbo mode frequency = 13 MHz * L * N + * System bus frequency = 13 MHz * L / (B + 1) + * + * In CCCR: + * A = 1 + * L = 16 oscillator to run mode ratio + * 2N = 6 2 * (turbo mode to run mode ratio) + * + * In CCLKCFG: + * B = 1 Fast bus mode + * HT = 0 Half-Turbo mode + * T = 1 Turbo mode + * + * For now, just support some of the combinations in table 3-7 of + * PXA27x Processor Family Developer's Manual to simplify frequency + * change sequences. + */ +#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L) +#define CCLKCFG2(B, HT, T) \ + (CCLKCFG_FCS | \ + ((B) ? CCLKCFG_FASTBUS : 0) | \ + ((HT) ? CCLKCFG_HALFTURBO : 0) | \ + ((T) ? CCLKCFG_TURBO : 0)) + +static pxa_freqs_t pxa27x_freqs[] = { + {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, + {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, + {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, + {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 }, + {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 }, + {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 }, + {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 } +}; + +#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs) +static struct cpufreq_frequency_table + pxa27x_freq_table[NUM_PXA27x_FREQS+1]; + +extern unsigned get_clk_frequency_khz(int info); + +#ifdef CONFIG_REGULATOR + +static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) +{ + int ret = 0; + int vmin, vmax; + + if (!cpu_is_pxa27x()) + return 0; + + vmin = pxa_freq->vmin; + vmax = pxa_freq->vmax; + if ((vmin == -1) || (vmax == -1)) + return 0; + + ret = regulator_set_voltage(vcc_core, vmin, vmax); + if (ret) + pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n", + vmin, vmax); + return ret; +} + +static __init void pxa_cpufreq_init_voltages(void) +{ + vcc_core = regulator_get(NULL, "vcc_core"); + if (IS_ERR(vcc_core)) { + pr_info("cpufreq: Didn't find vcc_core regulator\n"); + vcc_core = NULL; + } else { + pr_info("cpufreq: Found vcc_core regulator\n"); + } +} +#else +static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) +{ + return 0; +} + +static __init void pxa_cpufreq_init_voltages(void) { } +#endif + +static void find_freq_tables(struct cpufreq_frequency_table **freq_table, + pxa_freqs_t **pxa_freqs) +{ + if (cpu_is_pxa25x()) { + if (!pxa255_turbo_table) { + *pxa_freqs = pxa255_run_freqs; + *freq_table = pxa255_run_freq_table; + } else { + *pxa_freqs = pxa255_turbo_freqs; + *freq_table = pxa255_turbo_freq_table; + } + } + if (cpu_is_pxa27x()) { + *pxa_freqs = pxa27x_freqs; + *freq_table = pxa27x_freq_table; + } +} + +static void pxa27x_guess_max_freq(void) +{ + if (!pxa27x_maxfreq) { + pxa27x_maxfreq = 416000; + printk(KERN_INFO "PXA CPU 27x max frequency not defined " + "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", + pxa27x_maxfreq); + } else { + pxa27x_maxfreq *= 1000; + } +} + +static void init_sdram_rows(void) +{ + uint32_t mdcnfg = __raw_readl(MDCNFG); + unsigned int drac2 = 0, drac0 = 0; + + if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3)) + drac2 = MDCNFG_DRAC2(mdcnfg); + + if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1)) + drac0 = MDCNFG_DRAC0(mdcnfg); + + sdram_rows = 1 << (11 + max(drac0, drac2)); +} + +static u32 mdrefr_dri(unsigned int freq) +{ + 
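	/*
	 * Illustrative note, not part of the original file: SDRAM_TREF is
	 * the 64 ms whole-array refresh period and sdram_rows the number of
	 * rows, so "interval" below is the number of memory-bus clocks
	 * between two row refreshes, and MDREFR.DRI is that interval in
	 * units of 32 clocks.  Assuming 4096 rows and a 99500 kHz memory
	 * bus:
	 *
	 *	interval = 99500 * 64 / 4096 = 1554 clocks
	 *	DRI      = 1554 / 32         = 48
	 *
	 * On PXA27x the code subtracts 31 first, which yields a slightly
	 * smaller DRI, i.e. a slightly more frequent refresh.
	 */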
u32 interval = freq * SDRAM_TREF / sdram_rows; + + return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; +} + +/* find a valid frequency point */ +static int pxa_verify_policy(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pxa_freqs_table; + pxa_freqs_t *pxa_freqs; + int ret; + + find_freq_tables(&pxa_freqs_table, &pxa_freqs); + ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); + + if (freq_debug) + pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", + policy->min, policy->max); + + return ret; +} + +static unsigned int pxa_cpufreq_get(unsigned int cpu) +{ + return get_clk_frequency_khz(0); +} + +static int pxa_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_frequency_table *pxa_freqs_table; + pxa_freqs_t *pxa_freq_settings; + struct cpufreq_freqs freqs; + unsigned int idx; + unsigned long flags; + unsigned int new_freq_cpu, new_freq_mem; + unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; + int ret = 0; + + /* Get the current policy */ + find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); + + /* Lookup the next frequency */ + if (cpufreq_frequency_table_target(policy, pxa_freqs_table, + target_freq, relation, &idx)) { + return -EINVAL; + } + + new_freq_cpu = pxa_freq_settings[idx].khz; + new_freq_mem = pxa_freq_settings[idx].membus; + freqs.old = policy->cur; + freqs.new = new_freq_cpu; + + if (freq_debug) + pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", + freqs.new / 1000, (pxa_freq_settings[idx].div2) ? + (new_freq_mem / 2000) : (new_freq_mem / 1000)); + + if (vcc_core && freqs.new > freqs.old) + ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); + if (ret) + return ret; + /* + * Tell everyone what we're about to do... + * you should add a notify client with any platform specific + * Vcc changing capability + */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + /* Calculate the next MDREFR. If we're slowing down the SDRAM clock + * we need to preset the smaller DRI before the change. If we're + * speeding up we need to set the larger DRI value after the change. + */ + preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR); + if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) { + preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK); + preset_mdrefr |= mdrefr_dri(new_freq_mem); + } + postset_mdrefr = + (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem); + + /* If we're dividing the memory clock by two for the SDRAM clock, this + * must be set prior to the change. Clearing the divide must be done + * after the change. + */ + if (pxa_freq_settings[idx].div2) { + preset_mdrefr |= MDREFR_DB2_MASK; + postset_mdrefr |= MDREFR_DB2_MASK; + } else { + postset_mdrefr &= ~MDREFR_DB2_MASK; + } + + local_irq_save(flags); + + /* Set new the CCCR and prepare CCLKCFG */ + CCCR = pxa_freq_settings[idx].cccr; + cclkcfg = pxa_freq_settings[idx].cclkcfg; + + asm volatile(" \n\ + ldr r4, [%1] /* load MDREFR */ \n\ + b 2f \n\ + .align 5 \n\ +1: \n\ + str %3, [%1] /* preset the MDREFR */ \n\ + mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\ + str %4, [%1] /* postset the MDREFR */ \n\ + \n\ + b 3f \n\ +2: b 1b \n\ +3: nop \n\ + " + : "=&r" (unused) + : "r" (MDREFR), "r" (cclkcfg), + "r" (preset_mdrefr), "r" (postset_mdrefr) + : "r4", "r5"); + local_irq_restore(flags); + + /* + * Tell everyone what we've just done... 
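 *
 * (Illustrative sketch only -- the helper names are made up and this is
 *  not part of the driver: a board that must re-tune its SDRAM refresh
 *  timer in step with these transitions, as the next sentence suggests,
 *  could register a transition notifier from its init code roughly as
 *
 *	static int board_freq_notifier(struct notifier_block *nb,
 *				       unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *f = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			board_adjust_sdram_refresh(f->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block board_freq_nb = {
 *		.notifier_call = board_freq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&board_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 *  with board_adjust_sdram_refresh() being the hypothetical board hook.)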
+ * you should add a notify client with any platform specific + * SDRAM refresh timer adjustments + */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + /* + * Even if voltage setting fails, we don't report it, as the frequency + * change succeeded. The voltage reduction is not a critical failure, + * only power savings will suffer from this. + * + * Note: if the voltage change fails, and a return value is returned, a + * bug is triggered (seems a deadlock). Should anybody find out where, + * the "return 0" should become a "return ret". + */ + if (vcc_core && freqs.new < freqs.old) + ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); + + return 0; +} + +static int pxa_cpufreq_init(struct cpufreq_policy *policy) +{ + int i; + unsigned int freq; + struct cpufreq_frequency_table *pxa255_freq_table; + pxa_freqs_t *pxa255_freqs; + + /* try to guess pxa27x cpu */ + if (cpu_is_pxa27x()) + pxa27x_guess_max_freq(); + + pxa_cpufreq_init_voltages(); + + init_sdram_rows(); + + /* set default policy and cpuinfo */ + policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ + policy->cur = get_clk_frequency_khz(0); /* current freq */ + policy->min = policy->max = policy->cur; + + /* Generate pxa25x the run cpufreq_frequency_table struct */ + for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { + pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; + pxa255_run_freq_table[i].index = i; + } + pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; + + /* Generate pxa25x the turbo cpufreq_frequency_table struct */ + for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { + pxa255_turbo_freq_table[i].frequency = + pxa255_turbo_freqs[i].khz; + pxa255_turbo_freq_table[i].index = i; + } + pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; + + pxa255_turbo_table = !!pxa255_turbo_table; + + /* Generate the pxa27x cpufreq_frequency_table struct */ + for (i = 0; i < NUM_PXA27x_FREQS; i++) { + freq = pxa27x_freqs[i].khz; + if (freq > pxa27x_maxfreq) + break; + pxa27x_freq_table[i].frequency = freq; + pxa27x_freq_table[i].index = i; + } + pxa27x_freq_table[i].index = i; + pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; + + /* + * Set the policy's minimum and maximum frequencies from the tables + * just constructed. This sets cpuinfo.mxx_freq, min and max. + */ + if (cpu_is_pxa25x()) { + find_freq_tables(&pxa255_freq_table, &pxa255_freqs); + pr_info("PXA255 cpufreq using %s frequency table\n", + pxa255_turbo_table ? 
"turbo" : "run"); + cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); + } + else if (cpu_is_pxa27x()) + cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); + + printk(KERN_INFO "PXA CPU frequency change support initialized\n"); + + return 0; +} + +static struct cpufreq_driver pxa_cpufreq_driver = { + .verify = pxa_verify_policy, + .target = pxa_set_target, + .init = pxa_cpufreq_init, + .get = pxa_cpufreq_get, + .name = "PXA2xx", +}; + +static int __init pxa_cpu_init(void) +{ + int ret = -ENODEV; + if (cpu_is_pxa25x() || cpu_is_pxa27x()) + ret = cpufreq_register_driver(&pxa_cpufreq_driver); + return ret; +} + +static void __exit pxa_cpu_exit(void) +{ + cpufreq_unregister_driver(&pxa_cpufreq_driver); +} + + +MODULE_AUTHOR("Intrinsyc Software Inc."); +MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture"); +MODULE_LICENSE("GPL"); +module_init(pxa_cpu_init); +module_exit(pxa_cpu_exit); -- cgit v0.10.2 From b7e614c8bf5c898b172d7dfed9853fdda35be5cc Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:14 +0000 Subject: cpufreq: integrator: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of ARM based integrator platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Arnd Bergmann Acked-by: Linus Walleij Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 76eb836..c3563f6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2166,17 +2166,6 @@ config CPU_FREQ_SA1100 config CPU_FREQ_SA1110 bool -config CPU_FREQ_INTEGRATOR - tristate "CPUfreq driver for ARM Integrator CPUs" - depends on ARCH_INTEGRATOR && CPU_FREQ - default y - help - This enables the CPUfreq driver for ARM Integrator CPUs. - - For details, take a look at . - - If in doubt, say Y. - config CPU_FREQ_S3C bool help diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile index 5521d18..d14d6b7 100644 --- a/arch/arm/mach-integrator/Makefile +++ b/arch/arm/mach-integrator/Makefile @@ -9,5 +9,4 @@ obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o obj-$(CONFIG_PCI) += pci_v3.o pci.o -obj-$(CONFIG_CPU_FREQ_INTEGRATOR) += cpu.o obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c deleted file mode 100644 index df863c3..0000000 --- a/arch/arm/mach-integrator/cpu.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * linux/arch/arm/mach-integrator/cpu.c - * - * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * CPU support functions - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static struct cpufreq_driver integrator_driver; - -#define CM_ID __io_address(INTEGRATOR_HDR_ID) -#define CM_OSC __io_address(INTEGRATOR_HDR_OSC) -#define CM_STAT __io_address(INTEGRATOR_HDR_STAT) -#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK) - -static const struct icst_params lclk_params = { - .ref = 24000000, - .vco_max = ICST525_VCO_MAX_5V, - .vco_min = ICST525_VCO_MIN, - .vd_min = 8, - .vd_max = 132, - .rd_min = 24, - .rd_max = 24, - .s2div = icst525_s2div, - .idx2s = icst525_idx2s, -}; - -static const struct icst_params cclk_params = { - .ref = 24000000, - .vco_max = ICST525_VCO_MAX_5V, - .vco_min = ICST525_VCO_MIN, - .vd_min = 12, - .vd_max = 160, - .rd_min = 24, - .rd_max = 24, - .s2div = icst525_s2div, - .idx2s = icst525_idx2s, -}; - -/* - * Validate the speed policy. - */ -static int integrator_verify_policy(struct cpufreq_policy *policy) -{ - struct icst_vco vco; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); - policy->max = icst_hz(&cclk_params, vco) / 1000; - - vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); - policy->min = icst_hz(&cclk_params, vco) / 1000; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - return 0; -} - - -static int integrator_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - cpumask_t cpus_allowed; - int cpu = policy->cpu; - struct icst_vco vco; - struct cpufreq_freqs freqs; - u_int cm_osc; - - /* - * Save this threads cpus_allowed mask. - */ - cpus_allowed = current->cpus_allowed; - - /* - * Bind to the specified CPU. When this call returns, - * we should be running on the right CPU. - */ - set_cpus_allowed(current, cpumask_of_cpu(cpu)); - BUG_ON(cpu != smp_processor_id()); - - /* get current setting */ - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - vco.s = (cm_osc >> 8) & 7; - } else if (machine_is_cintegrator()) { - vco.s = 1; - } - vco.v = cm_osc & 255; - vco.r = 22; - freqs.old = icst_hz(&cclk_params, vco) / 1000; - - /* icst_hz_to_vco rounds down -- so we need the next - * larger freq in case of CPUFREQ_RELATION_L. - */ - if (relation == CPUFREQ_RELATION_L) - target_freq += 999; - if (target_freq > policy->max) - target_freq = policy->max; - vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); - freqs.new = icst_hz(&cclk_params, vco) / 1000; - - if (freqs.old == freqs.new) { - set_cpus_allowed(current, cpus_allowed); - return 0; - } - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - cm_osc &= 0xfffff800; - cm_osc |= vco.s << 8; - } else if (machine_is_cintegrator()) { - cm_osc &= 0xffffff00; - } - cm_osc |= vco.v; - - __raw_writel(0xa05f, CM_LOCK); - __raw_writel(cm_osc, CM_OSC); - __raw_writel(0, CM_LOCK); - - /* - * Restore the CPUs allowed mask. 
- */ - set_cpus_allowed(current, cpus_allowed); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static unsigned int integrator_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned int current_freq; - u_int cm_osc; - struct icst_vco vco; - - cpus_allowed = current->cpus_allowed; - - set_cpus_allowed(current, cpumask_of_cpu(cpu)); - BUG_ON(cpu != smp_processor_id()); - - /* detect memory etc. */ - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - vco.s = (cm_osc >> 8) & 7; - } else { - vco.s = 1; - } - vco.v = cm_osc & 255; - vco.r = 22; - - current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ - - set_cpus_allowed(current, cpus_allowed); - - return current_freq; -} - -static int integrator_cpufreq_init(struct cpufreq_policy *policy) -{ - - /* set default policy and cpuinfo */ - policy->cpuinfo.max_freq = 160000; - policy->cpuinfo.min_freq = 12000; - policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ - policy->cur = policy->min = policy->max = integrator_get(policy->cpu); - - return 0; -} - -static struct cpufreq_driver integrator_driver = { - .verify = integrator_verify_policy, - .target = integrator_set_target, - .get = integrator_get, - .init = integrator_cpufreq_init, - .name = "integrator", -}; - -static int __init integrator_cpu_init(void) -{ - return cpufreq_register_driver(&integrator_driver); -} - -static void __exit integrator_cpu_exit(void) -{ - cpufreq_unregister_driver(&integrator_driver); -} - -MODULE_AUTHOR ("Russell M. King"); -MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); -MODULE_LICENSE ("GPL"); - -module_init(integrator_cpu_init); -module_exit(integrator_cpu_exit); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 25d866a..97f208d 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -66,6 +66,14 @@ config ARM_IMX6Q_CPUFREQ If in doubt, say N. +config ARM_INTEGRATOR + tristate "CPUfreq driver for ARM Integrator CPUs" + depends on ARCH_INTEGRATOR + default y + help + This enables the CPUfreq driver for ARM Integrator CPUs. + If in doubt, say Y. + config ARM_KIRKWOOD_CPUFREQ def_bool ARCH_KIRKWOOD && OF help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index aa766fb..8d58016 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o +obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c new file mode 100644 index 0000000..f7c99df --- /dev/null +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * CPU support functions + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static struct cpufreq_driver integrator_driver; + +#define CM_ID __io_address(INTEGRATOR_HDR_ID) +#define CM_OSC __io_address(INTEGRATOR_HDR_OSC) +#define CM_STAT __io_address(INTEGRATOR_HDR_STAT) +#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK) + +static const struct icst_params lclk_params = { + .ref = 24000000, + .vco_max = ICST525_VCO_MAX_5V, + .vco_min = ICST525_VCO_MIN, + .vd_min = 8, + .vd_max = 132, + .rd_min = 24, + .rd_max = 24, + .s2div = icst525_s2div, + .idx2s = icst525_idx2s, +}; + +static const struct icst_params cclk_params = { + .ref = 24000000, + .vco_max = ICST525_VCO_MAX_5V, + .vco_min = ICST525_VCO_MIN, + .vd_min = 12, + .vd_max = 160, + .rd_min = 24, + .rd_max = 24, + .s2div = icst525_s2div, + .idx2s = icst525_idx2s, +}; + +/* + * Validate the speed policy. + */ +static int integrator_verify_policy(struct cpufreq_policy *policy) +{ + struct icst_vco vco; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); + policy->max = icst_hz(&cclk_params, vco) / 1000; + + vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); + policy->min = icst_hz(&cclk_params, vco) / 1000; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + return 0; +} + + +static int integrator_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + cpumask_t cpus_allowed; + int cpu = policy->cpu; + struct icst_vco vco; + struct cpufreq_freqs freqs; + u_int cm_osc; + + /* + * Save this threads cpus_allowed mask. + */ + cpus_allowed = current->cpus_allowed; + + /* + * Bind to the specified CPU. When this call returns, + * we should be running on the right CPU. + */ + set_cpus_allowed(current, cpumask_of_cpu(cpu)); + BUG_ON(cpu != smp_processor_id()); + + /* get current setting */ + cm_osc = __raw_readl(CM_OSC); + + if (machine_is_integrator()) { + vco.s = (cm_osc >> 8) & 7; + } else if (machine_is_cintegrator()) { + vco.s = 1; + } + vco.v = cm_osc & 255; + vco.r = 22; + freqs.old = icst_hz(&cclk_params, vco) / 1000; + + /* icst_hz_to_vco rounds down -- so we need the next + * larger freq in case of CPUFREQ_RELATION_L. + */ + if (relation == CPUFREQ_RELATION_L) + target_freq += 999; + if (target_freq > policy->max) + target_freq = policy->max; + vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); + freqs.new = icst_hz(&cclk_params, vco) / 1000; + + if (freqs.old == freqs.new) { + set_cpus_allowed(current, cpus_allowed); + return 0; + } + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + cm_osc = __raw_readl(CM_OSC); + + if (machine_is_integrator()) { + cm_osc &= 0xfffff800; + cm_osc |= vco.s << 8; + } else if (machine_is_cintegrator()) { + cm_osc &= 0xffffff00; + } + cm_osc |= vco.v; + + __raw_writel(0xa05f, CM_LOCK); + __raw_writel(cm_osc, CM_OSC); + __raw_writel(0, CM_LOCK); + + /* + * Restore the CPUs allowed mask. 
+ */ + set_cpus_allowed(current, cpus_allowed); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static unsigned int integrator_get(unsigned int cpu) +{ + cpumask_t cpus_allowed; + unsigned int current_freq; + u_int cm_osc; + struct icst_vco vco; + + cpus_allowed = current->cpus_allowed; + + set_cpus_allowed(current, cpumask_of_cpu(cpu)); + BUG_ON(cpu != smp_processor_id()); + + /* detect memory etc. */ + cm_osc = __raw_readl(CM_OSC); + + if (machine_is_integrator()) { + vco.s = (cm_osc >> 8) & 7; + } else { + vco.s = 1; + } + vco.v = cm_osc & 255; + vco.r = 22; + + current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ + + set_cpus_allowed(current, cpus_allowed); + + return current_freq; +} + +static int integrator_cpufreq_init(struct cpufreq_policy *policy) +{ + + /* set default policy and cpuinfo */ + policy->cpuinfo.max_freq = 160000; + policy->cpuinfo.min_freq = 12000; + policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ + policy->cur = policy->min = policy->max = integrator_get(policy->cpu); + + return 0; +} + +static struct cpufreq_driver integrator_driver = { + .verify = integrator_verify_policy, + .target = integrator_set_target, + .get = integrator_get, + .init = integrator_cpufreq_init, + .name = "integrator", +}; + +static int __init integrator_cpu_init(void) +{ + return cpufreq_register_driver(&integrator_driver); +} + +static void __exit integrator_cpu_exit(void) +{ + cpufreq_unregister_driver(&integrator_driver); +} + +MODULE_AUTHOR ("Russell M. King"); +MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); +MODULE_LICENSE ("GPL"); + +module_init(integrator_cpu_init); +module_exit(integrator_cpu_exit); -- cgit v0.10.2 From 59a2e613d07fbd592ff711c87458eabcf9c98902 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:16 +0000 Subject: cpufreq: sa11x0: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of ARM based sa11x0 platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c3563f6..940b13f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2150,7 +2150,6 @@ endmenu menu "CPU Power Management" if ARCH_HAS_CPUFREQ - source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX @@ -2160,12 +2159,6 @@ config CPU_FREQ_IMX help This enables the CPUfreq driver for i.MX CPUs. -config CPU_FREQ_SA1100 - bool - -config CPU_FREQ_SA1110 - bool - config CPU_FREQ_S3C bool help diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index ca14dbd..04f9784 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -4,7 +4,7 @@ menu "SA11x0 Implementations" config SA1100_ASSABET bool "Assabet" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ help Say Y here if you are using the Intel(R) StrongARM(R) SA-1110 Microprocessor Development Board (also known as the Assabet). @@ -20,7 +20,7 @@ config ASSABET_NEPONSET config SA1100_CERF bool "CerfBoard" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ help The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued). 
More information is available at: @@ -47,7 +47,7 @@ endchoice config SA1100_COLLIE bool "Sharp Zaurus SL5500" - # FIXME: select CPU_FREQ_SA11x0 + # FIXME: select ARM_SA11x0_CPUFREQ select SHARP_LOCOMO select SHARP_PARAM select SHARP_SCOOP @@ -56,7 +56,7 @@ config SA1100_COLLIE config SA1100_H3100 bool "Compaq iPAQ H3100" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select HTC_EGPIO help Say Y here if you intend to run this kernel on the Compaq iPAQ @@ -67,7 +67,7 @@ config SA1100_H3100 config SA1100_H3600 bool "Compaq iPAQ H3600/H3700" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select HTC_EGPIO help Say Y here if you intend to run this kernel on the Compaq iPAQ @@ -78,7 +78,7 @@ config SA1100_H3600 config SA1100_BADGE4 bool "HP Labs BadgePAD 4" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ select SA1111 help Say Y here if you want to build a kernel for the HP Laboratories @@ -86,7 +86,7 @@ config SA1100_BADGE4 config SA1100_JORNADA720 bool "HP Jornada 720" - # FIXME: select CPU_FREQ_SA11x0 + # FIXME: select ARM_SA11x0_CPUFREQ select SA1111 help Say Y here if you want to build a kernel for the HP Jornada 720 @@ -105,14 +105,14 @@ config SA1100_JORNADA720_SSP config SA1100_HACKKIT bool "HackKit Core CPU Board" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here to support the HackKit Core CPU Board ; config SA1100_LART bool "LART" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here if you are using the Linux Advanced Radio Terminal (also known as the LART). See for @@ -120,7 +120,7 @@ config SA1100_LART config SA1100_NANOENGINE bool "nanoEngine" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select PCI select PCI_NANOENGINE help @@ -130,7 +130,7 @@ config SA1100_NANOENGINE config SA1100_PLEB bool "PLEB" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here if you are using version 1 of the Portable Linux Embedded Board (also known as PLEB). @@ -139,7 +139,7 @@ config SA1100_PLEB config SA1100_SHANNON bool "Shannon" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help The Shannon (also known as a Tuxscreen, and also as a IS2630) was a limited edition webphone produced by Philips. The Shannon is a SA1100 @@ -148,7 +148,7 @@ config SA1100_SHANNON config SA1100_SIMPAD bool "Simpad" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ help The SIEMENS webpad SIMpad is based on the StrongARM 1110. There are two different versions CL4 and SL4. 
CL4 has 32MB RAM and 16MB diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile index 1aed9e7..2732eef 100644 --- a/arch/arm/mach-sa1100/Makefile +++ b/arch/arm/mach-sa1100/Makefile @@ -8,9 +8,6 @@ obj-m := obj-n := obj- := -obj-$(CONFIG_CPU_FREQ_SA1100) += cpu-sa1100.o -obj-$(CONFIG_CPU_FREQ_SA1110) += cpu-sa1110.o - # Specific board support obj-$(CONFIG_SA1100_ASSABET) += assabet.o obj-$(CONFIG_ASSABET_NEPONSET) += neponset.o diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c deleted file mode 100644 index 3268761..0000000 --- a/arch/arm/mach-sa1100/cpu-sa1100.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * cpu-sa1100.c: clock scaling for the SA1100 - * - * Copyright (C) 2000 2001, The Delft University of Technology - * - * Authors: - * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version - * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl): - * - major rewrite for linux-2.3.99 - * - rewritten for the more generic power management scheme in - * linux-2.4.5-rmk1 - * - * This software has been developed while working on the LART - * computing board (http://www.lartmaker.nl/), which is - * sponsored by the Mobile Multi-media Communications - * (http://www.mobimedia.org/) and Ubiquitous Communications - * (http://www.ubicom.tudelft.nl/) projects. - * - * The authors can be reached at: - * - * Erik Mouw - * Information and Communication Theory Group - * Faculty of Information Technology and Systems - * Delft University of Technology - * P.O. Box 5031 - * 2600 GA Delft - * The Netherlands - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * - * Theory of operations - * ==================== - * - * Clock scaling can be used to lower the power consumption of the CPU - * core. This will give you a somewhat longer running time. - * - * The SA-1100 has a single register to change the core clock speed: - * - * PPCR 0x90020014 PLL config - * - * However, the DRAM timings are closely related to the core clock - * speed, so we need to change these, too. The used registers are: - * - * MDCNFG 0xA0000000 DRAM config - * MDCAS0 0xA0000004 Access waveform - * MDCAS1 0xA0000008 Access waveform - * MDCAS2 0xA000000C Access waveform - * - * Care must be taken to change the DRAM parameters the correct way, - * because otherwise the DRAM becomes unusable and the kernel will - * crash. - * - * The simple solution to avoid a kernel crash is to put the actual - * clock change in ROM and jump to that code from the kernel. The main - * disadvantage is that the ROM has to be modified, which is not - * possible on all SA-1100 platforms. Another disadvantage is that - * jumping to ROM makes clock switching unnecessary complicated. - * - * The idea behind this driver is that the memory configuration can be - * changed while running from DRAM (even with interrupts turned on!) 
- * as long as all re-configuration steps yield a valid DRAM - * configuration. The advantages are clear: it will run on all SA-1100 - * platforms, and the code is very simple. - * - * If you really want to understand what is going on in - * sa1100_update_dram_timings(), you'll have to read sections 8.2, - * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor - * Developers Manual" (available for free from Intel). - * - */ - -#include -#include -#include -#include -#include - -#include - -#include - -#include "generic.h" - -struct sa1100_dram_regs { - int speed; - u32 mdcnfg; - u32 mdcas0; - u32 mdcas1; - u32 mdcas2; -}; - - -static struct cpufreq_driver sa1100_driver; - -static struct sa1100_dram_regs sa1100_dram_settings[] = { - /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */ - { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */ - { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */ - { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */ - {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */ - {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */ - {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */ - {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */ - {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */ - {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */ - {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */ - {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */ - {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */ - {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */ - {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */ - {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */ - {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */ - { 0, 0, 0, 0, 0 } /* last entry */ -}; - -static void sa1100_update_dram_timings(int current_speed, int new_speed) -{ - struct sa1100_dram_regs *settings = sa1100_dram_settings; - - /* find speed */ - while (settings->speed != 0) { - if (new_speed == settings->speed) - break; - - settings++; - } - - if (settings->speed == 0) { - panic("%s: couldn't find dram setting for speed %d\n", - __func__, new_speed); - } - - /* No risk, no fun: run with interrupts on! */ - if (new_speed > current_speed) { - /* We're going FASTER, so first relax the memory - * timings before changing the core frequency - */ - - /* Half the memory access clock */ - MDCNFG |= MDCNFG_CDB2; - - /* The order of these statements IS important, keep 8 - * pulses!! - */ - MDCAS2 = settings->mdcas2; - MDCAS1 = settings->mdcas1; - MDCAS0 = settings->mdcas0; - MDCNFG = settings->mdcnfg; - } else { - /* We're going SLOWER: first decrease the core - * frequency and then tighten the memory settings. - */ - - /* Half the memory access clock */ - MDCNFG |= MDCNFG_CDB2; - - /* The order of these statements IS important, keep 8 - * pulses!! 
- */ - MDCAS0 = settings->mdcas0; - MDCAS1 = settings->mdcas1; - MDCAS2 = settings->mdcas2; - MDCNFG = settings->mdcnfg; - } -} - -static int sa1100_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cur = sa11x0_getspeed(0); - unsigned int new_ppcr; - struct cpufreq_freqs freqs; - - new_ppcr = sa11x0_freq_to_ppcr(target_freq); - switch (relation) { - case CPUFREQ_RELATION_L: - if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) - new_ppcr--; - break; - case CPUFREQ_RELATION_H: - if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) - new_ppcr--; - break; - } - - freqs.old = cur; - freqs.new = sa11x0_ppcr_to_freq(new_ppcr); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - if (freqs.new > cur) - sa1100_update_dram_timings(cur, freqs.new); - - PPCR = new_ppcr; - - if (freqs.new < cur) - sa1100_update_dram_timings(cur, freqs.new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init sa1100_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1100_target, - .get = sa11x0_getspeed, - .init = sa1100_cpu_init, - .name = "sa1100", -}; - -static int __init sa1100_dram_init(void) -{ - if (cpu_is_sa1100()) - return cpufreq_register_driver(&sa1100_driver); - else - return -ENODEV; -} - -arch_initcall(sa1100_dram_init); diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c deleted file mode 100644 index 38a7733..0000000 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ /dev/null @@ -1,407 +0,0 @@ -/* - * linux/arch/arm/mach-sa1100/cpu-sa1110.c - * - * Copyright (C) 2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Note: there are two erratas that apply to the SA1110 here: - * 7 - SDRAM auto-power-up failure (rev A0) - * 13 - Corruption of internal register reads/writes following - * SDRAM reads (rev A0, B0, B1) - * - * We ignore rev. A0 and B0 devices; I don't think they're worth supporting. 
- * - * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "generic.h" - -#undef DEBUG - -struct sdram_params { - const char name[20]; - u_char rows; /* bits */ - u_char cas_latency; /* cycles */ - u_char tck; /* clock cycle time (ns) */ - u_char trcd; /* activate to r/w (ns) */ - u_char trp; /* precharge to activate (ns) */ - u_char twr; /* write recovery time (ns) */ - u_short refresh; /* refresh time for array (us) */ -}; - -struct sdram_info { - u_int mdcnfg; - u_int mdrefr; - u_int mdcas[3]; -}; - -static struct sdram_params sdram_tbl[] __initdata = { - { /* Toshiba TC59SM716 CL2 */ - .name = "TC59SM716-CL2", - .rows = 12, - .tck = 10, - .trcd = 20, - .trp = 20, - .twr = 10, - .refresh = 64000, - .cas_latency = 2, - }, { /* Toshiba TC59SM716 CL3 */ - .name = "TC59SM716-CL3", - .rows = 12, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung K4S641632D TC75 */ - .name = "K4S641632D", - .rows = 14, - .tck = 9, - .trcd = 27, - .trp = 20, - .twr = 9, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung K4S281632B-1H */ - .name = "K4S281632B-1H", - .rows = 12, - .tck = 10, - .trp = 20, - .twr = 10, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung KM416S4030CT */ - .name = "KM416S4030CT", - .rows = 13, - .tck = 8, - .trcd = 24, /* 3 CLKs */ - .trp = 24, /* 3 CLKs */ - .twr = 16, /* Trdl: 2 CLKs */ - .refresh = 64000, - .cas_latency = 3, - }, { /* Winbond W982516AH75L CL3 */ - .name = "W982516AH75L", - .rows = 16, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, { /* Micron MT48LC8M16A2TG-75 */ - .name = "MT48LC8M16A2TG-75", - .rows = 12, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, -}; - -static struct sdram_params sdram_params; - -/* - * Given a period in ns and frequency in khz, calculate the number of - * cycles of frequency in period. Note that we round up to the next - * cycle, even if we are only slightly over. - */ -static inline u_int ns_to_cycles(u_int ns, u_int khz) -{ - return (ns * khz + 999999) / 1000000; -} - -/* - * Create the MDCAS register bit pattern. - */ -static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) -{ - u_int shift; - - rcd = 2 * rcd - 1; - shift = delayed + 1 + rcd; - - mdcas[0] = (1 << rcd) - 1; - mdcas[0] |= 0x55555555 << shift; - mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1); -} - -static void -sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz, - struct sdram_params *sdram) -{ - u_int mem_khz, sd_khz, trp, twr; - - mem_khz = cpu_khz / 2; - sd_khz = mem_khz; - - /* - * If SDCLK would invalidate the SDRAM timings, - * run SDCLK at half speed. - * - * CPU steppings prior to B2 must either run the memory at - * half speed or use delayed read latching (errata 13). 
- */ - if ((ns_to_cycles(sdram->tck, sd_khz) > 1) || - (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000)) - sd_khz /= 2; - - sd->mdcnfg = MDCNFG & 0x007f007f; - - twr = ns_to_cycles(sdram->twr, mem_khz); - - /* trp should always be >1 */ - trp = ns_to_cycles(sdram->trp, mem_khz) - 1; - if (trp < 1) - trp = 1; - - sd->mdcnfg |= trp << 8; - sd->mdcnfg |= trp << 24; - sd->mdcnfg |= sdram->cas_latency << 12; - sd->mdcnfg |= sdram->cas_latency << 28; - sd->mdcnfg |= twr << 14; - sd->mdcnfg |= twr << 30; - - sd->mdrefr = MDREFR & 0xffbffff0; - sd->mdrefr |= 7; - - if (sd_khz != mem_khz) - sd->mdrefr |= MDREFR_K1DB2; - - /* initial number of '1's in MDCAS + 1 */ - set_mdcas(sd->mdcas, sd_khz >= 62000, - ns_to_cycles(sdram->trcd, mem_khz)); - -#ifdef DEBUG - printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n", - sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1], - sd->mdcas[2]); -#endif -} - -/* - * Set the SDRAM refresh rate. - */ -static inline void sdram_set_refresh(u_int dri) -{ - MDREFR = (MDREFR & 0xffff000f) | (dri << 4); - (void) MDREFR; -} - -/* - * Update the refresh period. We do this such that we always refresh - * the SDRAMs within their permissible period. The refresh period is - * always a multiple of the memory clock (fixed at cpu_clock / 2). - * - * FIXME: we don't currently take account of burst accesses here, - * but neither do Intels DM nor Angel. - */ -static void -sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) -{ - u_int ns_row = (sdram->refresh * 1000) >> sdram->rows; - u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32; - -#ifdef DEBUG - mdelay(250); - printk(KERN_DEBUG "new dri value = %d\n", dri); -#endif - - sdram_set_refresh(dri); -} - -/* - * Ok, set the CPU frequency. - */ -static int sa1110_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct sdram_params *sdram = &sdram_params; - struct cpufreq_freqs freqs; - struct sdram_info sd; - unsigned long flags; - unsigned int ppcr, unused; - - switch (relation) { - case CPUFREQ_RELATION_L: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (sa11x0_ppcr_to_freq(ppcr) > policy->max) - ppcr--; - break; - case CPUFREQ_RELATION_H: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) - ppcr--; - break; - default: - return -EINVAL; - } - - freqs.old = sa11x0_getspeed(0); - freqs.new = sa11x0_ppcr_to_freq(ppcr); - - sdram_calculate_timing(&sd, freqs.new, sdram); - -#if 0 - /* - * These values are wrong according to the SA1110 documentation - * and errata, but they seem to work. Need to get a storage - * scope on to the SDRAM signals to work out why. - */ - if (policy->max < 147500) { - sd.mdrefr |= MDREFR_K1DB2; - sd.mdcas[0] = 0xaaaaaa7f; - } else { - sd.mdrefr &= ~MDREFR_K1DB2; - sd.mdcas[0] = 0xaaaaaa9f; - } - sd.mdcas[1] = 0xaaaaaaaa; - sd.mdcas[2] = 0xaaaaaaaa; -#endif - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - /* - * The clock could be going away for some time. Set the SDRAMs - * to refresh rapidly (every 64 memory clock cycles). To get - * through the whole array, we need to wait 262144 mclk cycles. - * We wait 20ms to be safe. - */ - sdram_set_refresh(2); - if (!irqs_disabled()) - msleep(20); - else - mdelay(20); - - /* - * Reprogram the DRAM timings with interrupts disabled, and - * ensure that we are doing this within a complete cache line. 
- * This means that we won't access SDRAM for the duration of - * the programming. - */ - local_irq_save(flags); - asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); - udelay(10); - __asm__ __volatile__("\n\ - b 2f \n\ - .align 5 \n\ -1: str %3, [%1, #0] @ MDCNFG \n\ - str %4, [%1, #28] @ MDREFR \n\ - str %5, [%1, #4] @ MDCAS0 \n\ - str %6, [%1, #8] @ MDCAS1 \n\ - str %7, [%1, #12] @ MDCAS2 \n\ - str %8, [%2, #0] @ PPCR \n\ - ldr %0, [%1, #0] \n\ - b 3f \n\ -2: b 1b \n\ -3: nop \n\ - nop" - : "=&r" (unused) - : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg), - "r" (sd.mdrefr), "r" (sd.mdcas[0]), - "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr)); - local_irq_restore(flags); - - /* - * Now, return the SDRAM refresh back to normal. - */ - sdram_update_refresh(freqs.new, sdram); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init sa1110_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -/* sa1110_driver needs __refdata because it must remain after init registers - * it with cpufreq_register_driver() */ -static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1110_target, - .get = sa11x0_getspeed, - .init = sa1110_cpu_init, - .name = "sa1110", -}; - -static struct sdram_params *sa1110_find_sdram(const char *name) -{ - struct sdram_params *sdram; - - for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl); - sdram++) - if (strcmp(name, sdram->name) == 0) - return sdram; - - return NULL; -} - -static char sdram_name[16]; - -static int __init sa1110_clk_init(void) -{ - struct sdram_params *sdram; - const char *name = sdram_name; - - if (!cpu_is_sa1110()) - return -ENODEV; - - if (!name[0]) { - if (machine_is_assabet()) - name = "TC59SM716-CL3"; - if (machine_is_pt_system3()) - name = "K4S641632D"; - if (machine_is_h3100()) - name = "KM416S4030CT"; - if (machine_is_jornada720()) - name = "K4S281632B-1H"; - if (machine_is_nanoengine()) - name = "MT48LC8M16A2TG-75"; - } - - sdram = sa1110_find_sdram(name); - if (sdram) { - printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d" - " twr: %d refresh: %d cas_latency: %d\n", - sdram->tck, sdram->trcd, sdram->trp, - sdram->twr, sdram->refresh, sdram->cas_latency); - - memcpy(&sdram_params, sdram, sizeof(sdram_params)); - - return cpufreq_register_driver(&sa1110_driver); - } - - return 0; -} - -module_param_string(sdram, sdram_name, sizeof(sdram_name), 0); -arch_initcall(sa1110_clk_init); diff --git a/arch/arm/mach-sa1100/include/mach/generic.h b/arch/arm/mach-sa1100/include/mach/generic.h new file mode 100644 index 0000000..665542e --- /dev/null +++ b/arch/arm/mach-sa1100/include/mach/generic.h @@ -0,0 +1 @@ +#include "../../generic.h" diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 97f208d..09da6a3 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -127,6 +127,12 @@ config ARM_S5PV210_CPUFREQ If in doubt, say N. 
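To put numbers on the refresh formula in sdram_update_refresh() above, take the Toshiba TC59SM716 entry from sdram_tbl[] (refresh = 64000 us, rows = 12, i.e. 4096 rows) with the core at 206400 kHz, so the memory clock is 103200 kHz. This is only a worked instance of the code shown, at one of the supported operating points:

	ns_row = (64000 * 1000) >> 12 = 15625 ns per row
	ns_to_cycles(15625, 103200) = (15625 * 103200 + 999999) / 1000000 = 1613 memory cycles
	dri = 1613 / 32 = 50

sdram_set_refresh(50) then programs MDREFR for one refresh roughly every 50 * 32 = 1600 memory cycles (about 15.5 us), which walks all 4096 rows in about 63.5 ms, just inside the 64 ms array refresh budget; the truncating divide by 32 rounds the interval down, so rows end up refreshed slightly more often than strictly required.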
+config ARM_SA1100_CPUFREQ + bool + +config ARM_SA1110_CPUFREQ + bool + config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8d58016..8b21016 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -66,6 +66,8 @@ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o +obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o +obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c new file mode 100644 index 0000000..cff18e8 --- /dev/null +++ b/drivers/cpufreq/sa1100-cpufreq.c @@ -0,0 +1,247 @@ +/* + * cpu-sa1100.c: clock scaling for the SA1100 + * + * Copyright (C) 2000 2001, The Delft University of Technology + * + * Authors: + * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version + * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl): + * - major rewrite for linux-2.3.99 + * - rewritten for the more generic power management scheme in + * linux-2.4.5-rmk1 + * + * This software has been developed while working on the LART + * computing board (http://www.lartmaker.nl/), which is + * sponsored by the Mobile Multi-media Communications + * (http://www.mobimedia.org/) and Ubiquitous Communications + * (http://www.ubicom.tudelft.nl/) projects. + * + * The authors can be reached at: + * + * Erik Mouw + * Information and Communication Theory Group + * Faculty of Information Technology and Systems + * Delft University of Technology + * P.O. Box 5031 + * 2600 GA Delft + * The Netherlands + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * Theory of operations + * ==================== + * + * Clock scaling can be used to lower the power consumption of the CPU + * core. This will give you a somewhat longer running time. + * + * The SA-1100 has a single register to change the core clock speed: + * + * PPCR 0x90020014 PLL config + * + * However, the DRAM timings are closely related to the core clock + * speed, so we need to change these, too. The used registers are: + * + * MDCNFG 0xA0000000 DRAM config + * MDCAS0 0xA0000004 Access waveform + * MDCAS1 0xA0000008 Access waveform + * MDCAS2 0xA000000C Access waveform + * + * Care must be taken to change the DRAM parameters the correct way, + * because otherwise the DRAM becomes unusable and the kernel will + * crash. + * + * The simple solution to avoid a kernel crash is to put the actual + * clock change in ROM and jump to that code from the kernel. 
The main + * disadvantage is that the ROM has to be modified, which is not + * possible on all SA-1100 platforms. Another disadvantage is that + * jumping to ROM makes clock switching unnecessary complicated. + * + * The idea behind this driver is that the memory configuration can be + * changed while running from DRAM (even with interrupts turned on!) + * as long as all re-configuration steps yield a valid DRAM + * configuration. The advantages are clear: it will run on all SA-1100 + * platforms, and the code is very simple. + * + * If you really want to understand what is going on in + * sa1100_update_dram_timings(), you'll have to read sections 8.2, + * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor + * Developers Manual" (available for free from Intel). + * + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +struct sa1100_dram_regs { + int speed; + u32 mdcnfg; + u32 mdcas0; + u32 mdcas1; + u32 mdcas2; +}; + + +static struct cpufreq_driver sa1100_driver; + +static struct sa1100_dram_regs sa1100_dram_settings[] = { + /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */ + { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */ + { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */ + { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */ + {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */ + {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */ + {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */ + {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */ + {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */ + {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */ + {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */ + {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */ + {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */ + {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */ + {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */ + {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */ + {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */ + { 0, 0, 0, 0, 0 } /* last entry */ +}; + +static void sa1100_update_dram_timings(int current_speed, int new_speed) +{ + struct sa1100_dram_regs *settings = sa1100_dram_settings; + + /* find speed */ + while (settings->speed != 0) { + if (new_speed == settings->speed) + break; + + settings++; + } + + if (settings->speed == 0) { + panic("%s: couldn't find dram setting for speed %d\n", + __func__, new_speed); + } + + /* No risk, no fun: run with interrupts on! */ + if (new_speed > current_speed) { + /* We're going FASTER, so first relax the memory + * timings before changing the core frequency + */ + + /* Half the memory access clock */ + MDCNFG |= MDCNFG_CDB2; + + /* The order of these statements IS important, keep 8 + * pulses!! + */ + MDCAS2 = settings->mdcas2; + MDCAS1 = settings->mdcas1; + MDCAS0 = settings->mdcas0; + MDCNFG = settings->mdcnfg; + } else { + /* We're going SLOWER: first decrease the core + * frequency and then tighten the memory settings. + */ + + /* Half the memory access clock */ + MDCNFG |= MDCNFG_CDB2; + + /* The order of these statements IS important, keep 8 + * pulses!! 
+ */ + MDCAS0 = settings->mdcas0; + MDCAS1 = settings->mdcas1; + MDCAS2 = settings->mdcas2; + MDCNFG = settings->mdcnfg; + } +} + +static int sa1100_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int cur = sa11x0_getspeed(0); + unsigned int new_ppcr; + struct cpufreq_freqs freqs; + + new_ppcr = sa11x0_freq_to_ppcr(target_freq); + switch (relation) { + case CPUFREQ_RELATION_L: + if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) + new_ppcr--; + break; + case CPUFREQ_RELATION_H: + if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && + (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) + new_ppcr--; + break; + } + + freqs.old = cur; + freqs.new = sa11x0_ppcr_to_freq(new_ppcr); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + if (freqs.new > cur) + sa1100_update_dram_timings(cur, freqs.new); + + PPCR = new_ppcr; + + if (freqs.new < cur) + sa1100_update_dram_timings(cur, freqs.new); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static int __init sa1100_cpu_init(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + policy->cur = policy->min = policy->max = sa11x0_getspeed(0); + policy->cpuinfo.min_freq = 59000; + policy->cpuinfo.max_freq = 287000; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + return 0; +} + +static struct cpufreq_driver sa1100_driver __refdata = { + .flags = CPUFREQ_STICKY, + .verify = sa11x0_verify_speed, + .target = sa1100_target, + .get = sa11x0_getspeed, + .init = sa1100_cpu_init, + .name = "sa1100", +}; + +static int __init sa1100_dram_init(void) +{ + if (cpu_is_sa1100()) + return cpufreq_register_driver(&sa1100_driver); + else + return -ENODEV; +} + +arch_initcall(sa1100_dram_init); diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c new file mode 100644 index 0000000..39c90b6 --- /dev/null +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -0,0 +1,406 @@ +/* + * linux/arch/arm/mach-sa1100/cpu-sa1110.c + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Note: there are two erratas that apply to the SA1110 here: + * 7 - SDRAM auto-power-up failure (rev A0) + * 13 - Corruption of internal register reads/writes following + * SDRAM reads (rev A0, B0, B1) + * + * We ignore rev. A0 and B0 devices; I don't think they're worth supporting. 
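A concrete example of the relation handling in sa1100_target(), using the round-up mapping sketched earlier (so the exact numbers are illustrative, not taken from the real sa11x0 helpers): if user space requests 150000 kHz on a policy spanning 59000..206400 kHz, sa11x0_freq_to_ppcr() lands on the 162200 kHz step. CPUFREQ_RELATION_L keeps 162200 kHz, since it is still within policy->max; CPUFREQ_RELATION_H steps back to 147500 kHz, because 162200 kHz overshoots the request and the next lower step is still above policy->min.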
+ * + * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#undef DEBUG + +struct sdram_params { + const char name[20]; + u_char rows; /* bits */ + u_char cas_latency; /* cycles */ + u_char tck; /* clock cycle time (ns) */ + u_char trcd; /* activate to r/w (ns) */ + u_char trp; /* precharge to activate (ns) */ + u_char twr; /* write recovery time (ns) */ + u_short refresh; /* refresh time for array (us) */ +}; + +struct sdram_info { + u_int mdcnfg; + u_int mdrefr; + u_int mdcas[3]; +}; + +static struct sdram_params sdram_tbl[] __initdata = { + { /* Toshiba TC59SM716 CL2 */ + .name = "TC59SM716-CL2", + .rows = 12, + .tck = 10, + .trcd = 20, + .trp = 20, + .twr = 10, + .refresh = 64000, + .cas_latency = 2, + }, { /* Toshiba TC59SM716 CL3 */ + .name = "TC59SM716-CL3", + .rows = 12, + .tck = 8, + .trcd = 20, + .trp = 20, + .twr = 8, + .refresh = 64000, + .cas_latency = 3, + }, { /* Samsung K4S641632D TC75 */ + .name = "K4S641632D", + .rows = 14, + .tck = 9, + .trcd = 27, + .trp = 20, + .twr = 9, + .refresh = 64000, + .cas_latency = 3, + }, { /* Samsung K4S281632B-1H */ + .name = "K4S281632B-1H", + .rows = 12, + .tck = 10, + .trp = 20, + .twr = 10, + .refresh = 64000, + .cas_latency = 3, + }, { /* Samsung KM416S4030CT */ + .name = "KM416S4030CT", + .rows = 13, + .tck = 8, + .trcd = 24, /* 3 CLKs */ + .trp = 24, /* 3 CLKs */ + .twr = 16, /* Trdl: 2 CLKs */ + .refresh = 64000, + .cas_latency = 3, + }, { /* Winbond W982516AH75L CL3 */ + .name = "W982516AH75L", + .rows = 16, + .tck = 8, + .trcd = 20, + .trp = 20, + .twr = 8, + .refresh = 64000, + .cas_latency = 3, + }, { /* Micron MT48LC8M16A2TG-75 */ + .name = "MT48LC8M16A2TG-75", + .rows = 12, + .tck = 8, + .trcd = 20, + .trp = 20, + .twr = 8, + .refresh = 64000, + .cas_latency = 3, + }, +}; + +static struct sdram_params sdram_params; + +/* + * Given a period in ns and frequency in khz, calculate the number of + * cycles of frequency in period. Note that we round up to the next + * cycle, even if we are only slightly over. + */ +static inline u_int ns_to_cycles(u_int ns, u_int khz) +{ + return (ns * khz + 999999) / 1000000; +} + +/* + * Create the MDCAS register bit pattern. + */ +static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) +{ + u_int shift; + + rcd = 2 * rcd - 1; + shift = delayed + 1 + rcd; + + mdcas[0] = (1 << rcd) - 1; + mdcas[0] |= 0x55555555 << shift; + mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1); +} + +static void +sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz, + struct sdram_params *sdram) +{ + u_int mem_khz, sd_khz, trp, twr; + + mem_khz = cpu_khz / 2; + sd_khz = mem_khz; + + /* + * If SDCLK would invalidate the SDRAM timings, + * run SDCLK at half speed. + * + * CPU steppings prior to B2 must either run the memory at + * half speed or use delayed read latching (errata 13). 
+ */ + if ((ns_to_cycles(sdram->tck, sd_khz) > 1) || + (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000)) + sd_khz /= 2; + + sd->mdcnfg = MDCNFG & 0x007f007f; + + twr = ns_to_cycles(sdram->twr, mem_khz); + + /* trp should always be >1 */ + trp = ns_to_cycles(sdram->trp, mem_khz) - 1; + if (trp < 1) + trp = 1; + + sd->mdcnfg |= trp << 8; + sd->mdcnfg |= trp << 24; + sd->mdcnfg |= sdram->cas_latency << 12; + sd->mdcnfg |= sdram->cas_latency << 28; + sd->mdcnfg |= twr << 14; + sd->mdcnfg |= twr << 30; + + sd->mdrefr = MDREFR & 0xffbffff0; + sd->mdrefr |= 7; + + if (sd_khz != mem_khz) + sd->mdrefr |= MDREFR_K1DB2; + + /* initial number of '1's in MDCAS + 1 */ + set_mdcas(sd->mdcas, sd_khz >= 62000, + ns_to_cycles(sdram->trcd, mem_khz)); + +#ifdef DEBUG + printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n", + sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1], + sd->mdcas[2]); +#endif +} + +/* + * Set the SDRAM refresh rate. + */ +static inline void sdram_set_refresh(u_int dri) +{ + MDREFR = (MDREFR & 0xffff000f) | (dri << 4); + (void) MDREFR; +} + +/* + * Update the refresh period. We do this such that we always refresh + * the SDRAMs within their permissible period. The refresh period is + * always a multiple of the memory clock (fixed at cpu_clock / 2). + * + * FIXME: we don't currently take account of burst accesses here, + * but neither do Intels DM nor Angel. + */ +static void +sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) +{ + u_int ns_row = (sdram->refresh * 1000) >> sdram->rows; + u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32; + +#ifdef DEBUG + mdelay(250); + printk(KERN_DEBUG "new dri value = %d\n", dri); +#endif + + sdram_set_refresh(dri); +} + +/* + * Ok, set the CPU frequency. + */ +static int sa1110_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct sdram_params *sdram = &sdram_params; + struct cpufreq_freqs freqs; + struct sdram_info sd; + unsigned long flags; + unsigned int ppcr, unused; + + switch (relation) { + case CPUFREQ_RELATION_L: + ppcr = sa11x0_freq_to_ppcr(target_freq); + if (sa11x0_ppcr_to_freq(ppcr) > policy->max) + ppcr--; + break; + case CPUFREQ_RELATION_H: + ppcr = sa11x0_freq_to_ppcr(target_freq); + if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && + (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) + ppcr--; + break; + default: + return -EINVAL; + } + + freqs.old = sa11x0_getspeed(0); + freqs.new = sa11x0_ppcr_to_freq(ppcr); + + sdram_calculate_timing(&sd, freqs.new, sdram); + +#if 0 + /* + * These values are wrong according to the SA1110 documentation + * and errata, but they seem to work. Need to get a storage + * scope on to the SDRAM signals to work out why. + */ + if (policy->max < 147500) { + sd.mdrefr |= MDREFR_K1DB2; + sd.mdcas[0] = 0xaaaaaa7f; + } else { + sd.mdrefr &= ~MDREFR_K1DB2; + sd.mdcas[0] = 0xaaaaaa9f; + } + sd.mdcas[1] = 0xaaaaaaaa; + sd.mdcas[2] = 0xaaaaaaaa; +#endif + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + /* + * The clock could be going away for some time. Set the SDRAMs + * to refresh rapidly (every 64 memory clock cycles). To get + * through the whole array, we need to wait 262144 mclk cycles. + * We wait 20ms to be safe. + */ + sdram_set_refresh(2); + if (!irqs_disabled()) + msleep(20); + else + mdelay(20); + + /* + * Reprogram the DRAM timings with interrupts disabled, and + * ensure that we are doing this within a complete cache line. 
+ * This means that we won't access SDRAM for the duration of + * the programming. + */ + local_irq_save(flags); + asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); + udelay(10); + __asm__ __volatile__("\n\ + b 2f \n\ + .align 5 \n\ +1: str %3, [%1, #0] @ MDCNFG \n\ + str %4, [%1, #28] @ MDREFR \n\ + str %5, [%1, #4] @ MDCAS0 \n\ + str %6, [%1, #8] @ MDCAS1 \n\ + str %7, [%1, #12] @ MDCAS2 \n\ + str %8, [%2, #0] @ PPCR \n\ + ldr %0, [%1, #0] \n\ + b 3f \n\ +2: b 1b \n\ +3: nop \n\ + nop" + : "=&r" (unused) + : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg), + "r" (sd.mdrefr), "r" (sd.mdcas[0]), + "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr)); + local_irq_restore(flags); + + /* + * Now, return the SDRAM refresh back to normal. + */ + sdram_update_refresh(freqs.new, sdram); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static int __init sa1110_cpu_init(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + policy->cur = policy->min = policy->max = sa11x0_getspeed(0); + policy->cpuinfo.min_freq = 59000; + policy->cpuinfo.max_freq = 287000; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + return 0; +} + +/* sa1110_driver needs __refdata because it must remain after init registers + * it with cpufreq_register_driver() */ +static struct cpufreq_driver sa1110_driver __refdata = { + .flags = CPUFREQ_STICKY, + .verify = sa11x0_verify_speed, + .target = sa1110_target, + .get = sa11x0_getspeed, + .init = sa1110_cpu_init, + .name = "sa1110", +}; + +static struct sdram_params *sa1110_find_sdram(const char *name) +{ + struct sdram_params *sdram; + + for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl); + sdram++) + if (strcmp(name, sdram->name) == 0) + return sdram; + + return NULL; +} + +static char sdram_name[16]; + +static int __init sa1110_clk_init(void) +{ + struct sdram_params *sdram; + const char *name = sdram_name; + + if (!cpu_is_sa1110()) + return -ENODEV; + + if (!name[0]) { + if (machine_is_assabet()) + name = "TC59SM716-CL3"; + if (machine_is_pt_system3()) + name = "K4S641632D"; + if (machine_is_h3100()) + name = "KM416S4030CT"; + if (machine_is_jornada720()) + name = "K4S281632B-1H"; + if (machine_is_nanoengine()) + name = "MT48LC8M16A2TG-75"; + } + + sdram = sa1110_find_sdram(name); + if (sdram) { + printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d" + " twr: %d refresh: %d cas_latency: %d\n", + sdram->tck, sdram->trcd, sdram->trp, + sdram->twr, sdram->refresh, sdram->cas_latency); + + memcpy(&sdram_params, sdram, sizeof(sdram_params)); + + return cpufreq_register_driver(&sa1110_driver); + } + + return 0; +} + +module_param_string(sdram, sdram_name, sizeof(sdram_name), 0); +arch_initcall(sa1110_clk_init); -- cgit v0.10.2 From 81c720c90e6fbda5a1f53f932035de899f27adb6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:17 +0000 Subject: cpufreq: AVR32: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of AVR32 based at32ap platform to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Hans-Christian Egtvedt Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index c1a868d..22c4030 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -250,20 +250,7 @@ config ARCH_SUSPEND_POSSIBLE def_bool y menu "CPU Frequency scaling" - source "drivers/cpufreq/Kconfig" - -config CPU_FREQ_AT32AP - bool "CPU frequency driver for AT32AP" - depends on CPU_FREQ && PLATFORM_AT32AP - default n - help - This enables the CPU frequency driver for AT32AP processors. - - For details, take a look in . - - If in doubt, say N. - endmenu endmenu diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index f4025db..d5aff36 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index c76a49b..4abcf43 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 2d8ab08..18f3fa0 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig index b189e0c..06e389c 100644 --- a/arch/avr32/configs/atngw100_mrmt_defconfig +++ b/arch/avr32/configs/atngw100_mrmt_defconfig @@ -23,7 +23,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index 2e4de42..2518a13 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig index fad3cd2..245ef6b 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -29,7 +29,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git 
a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index 2998623..fa6cbac 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index a582465..bbd5131 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -25,7 +25,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index 57a79df..c1cd726 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 1a49bd8..754ae56 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 206a1b6..58589d8 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index 0421498..57788a4 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index 82f24eb..ba7c31e 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -31,7 +31,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 1bee51f..0a8bfdc 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig @@ -24,7 
+24,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile index 514c9a9..fc09ec4 100644 --- a/arch/avr32/mach-at32ap/Makefile +++ b/arch/avr32/mach-at32ap/Makefile @@ -1,7 +1,6 @@ obj-y += pdc.o clock.o intc.o extint.o pio.o hsmc.o obj-y += hmatrix.o obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o -obj-$(CONFIG_CPU_FREQ_AT32AP) += cpufreq.o obj-$(CONFIG_PM) += pm.o ifeq ($(CONFIG_PM_DEBUG),y) diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c deleted file mode 100644 index 6544887..0000000 --- a/arch/avr32/mach-at32ap/cpufreq.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (C) 2004-2007 Atmel Corporation - * - * Based on MIPS implementation arch/mips/kernel/time.c - * Copyright 2001 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/*#define DEBUG*/ - -#include -#include -#include -#include -#include -#include -#include -#include - -static struct clk *cpuclk; - -static int at32_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int at32_get_speed(unsigned int cpu) -{ - /* No SMP support */ - if (cpu) - return 0; - return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000); -} - -static unsigned int ref_freq; -static unsigned long loops_per_jiffy_ref; - -static int at32_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_freqs freqs; - long freq; - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - /* Check if policy->min <= new_freq <= policy->max */ - if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = at32_get_speed(0); - freqs.new = (freq + 500) / 1000; - freqs.flags = 0; - - if (!ref_freq) { - ref_freq = freqs.old; - loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; - } - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - if (freqs.old < freqs.new) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - clk_set_rate(cpuclk, freq); - if (freqs.new < freqs.old) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %lu Hz\n", freq); - - return 0; -} - -static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpuclk = clk_get(NULL, "cpu"); - if (IS_ERR(cpuclk)) { - pr_debug("cpufreq: could not get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - policy->cpuinfo.transition_latency = 0; - policy->cur = at32_get_speed(0); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - - printk("cpufreq: AT32AP 
CPU frequency driver\n"); - - return 0; -} - -static struct cpufreq_driver at32_driver = { - .name = "at32ap", - .owner = THIS_MODULE, - .init = at32_cpufreq_driver_init, - .verify = at32_verify_speed, - .target = at32_set_target, - .get = at32_get_speed, - .flags = CPUFREQ_STICKY, -}; - -static int __init at32_cpufreq_init(void) -{ - return cpufreq_register_driver(&at32_driver); -} -late_initcall(at32_cpufreq_init); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index cbcb21e..d7ce6cc 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -205,6 +205,19 @@ depends on ARM source "drivers/cpufreq/Kconfig.arm" endmenu +menu "AVR32 CPU frequency scaling drivers" +depends on AVR32 + +config AVR32_AT32AP_CPUFREQ + bool "CPU frequency driver for AT32AP" + depends on PLATFORM_AT32AP + default n + help + This enables the CPU frequency driver for AT32AP processors. + If in doubt, say N. + +endmenu + menu "PowerPC CPU frequency scaling drivers" depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8b21016..2701aca 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -74,3 +74,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o ################################################################################## # PowerPC platform drivers obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o + +################################################################################## +# Other platform drivers +obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c new file mode 100644 index 0000000..6544887 --- /dev/null +++ b/drivers/cpufreq/at32ap-cpufreq.c @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2004-2007 Atmel Corporation + * + * Based on MIPS implementation arch/mips/kernel/time.c + * Copyright 2001 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct clk *cpuclk; + +static int at32_verify_speed(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static unsigned int at32_get_speed(unsigned int cpu) +{ + /* No SMP support */ + if (cpu) + return 0; + return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000); +} + +static unsigned int ref_freq; +static unsigned long loops_per_jiffy_ref; + +static int at32_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_freqs freqs; + long freq; + + /* Convert target_freq from kHz to Hz */ + freq = clk_round_rate(cpuclk, target_freq * 1000); + + /* Check if policy->min <= new_freq <= policy->max */ + if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) + return -EINVAL; + + pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); + + freqs.old = at32_get_speed(0); + freqs.new = (freq + 500) / 1000; + freqs.flags = 0; + + if (!ref_freq) { + ref_freq = freqs.old; + loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; + } + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + if (freqs.old < freqs.new) + boot_cpu_data.loops_per_jiffy = cpufreq_scale( + loops_per_jiffy_ref, ref_freq, freqs.new); + clk_set_rate(cpuclk, freq); + if (freqs.new < freqs.old) + boot_cpu_data.loops_per_jiffy = cpufreq_scale( + loops_per_jiffy_ref, ref_freq, freqs.new); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + pr_debug("cpufreq: set frequency %lu Hz\n", freq); + + return 0; +} + +static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + + cpuclk = clk_get(NULL, "cpu"); + if (IS_ERR(cpuclk)) { + pr_debug("cpufreq: could not get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; + policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; + policy->cpuinfo.transition_latency = 0; + policy->cur = at32_get_speed(0); + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + + printk("cpufreq: AT32AP CPU frequency driver\n"); + + return 0; +} + +static struct cpufreq_driver at32_driver = { + .name = "at32ap", + .owner = THIS_MODULE, + .init = at32_cpufreq_driver_init, + .verify = at32_verify_speed, + .target = at32_set_target, + .get = at32_get_speed, + .flags = CPUFREQ_STICKY, +}; + +static int __init at32_cpufreq_init(void) +{ + return cpufreq_register_driver(&at32_driver); +} +late_initcall(at32_cpufreq_init); -- cgit v0.10.2 From d5cc9901ca05a816bd05b69699a1ba5358871ad5 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:18 +0000 Subject: cpufreq: blackfin: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of BLACKFIN architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Steven Miao Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile index 75f0ba2..675466d 100644 --- a/arch/blackfin/mach-common/Makefile +++ b/arch/blackfin/mach-common/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_PM) += pm.o ifneq ($(CONFIG_BF60x),y) obj-$(CONFIG_PM) += dpmc_modes.o endif -obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c deleted file mode 100644 index 995511e80..0000000 --- a/arch/blackfin/mach-common/cpufreq.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Blackfin core clock scaling - * - * Copyright 2008-2011 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/* this is the table of CCLK frequencies, in Hz */ -/* .index is the entry in the auxiliary dpm_state_table[] */ -static struct cpufreq_frequency_table bfin_freq_table[] = { - { - .frequency = CPUFREQ_TABLE_END, - .index = 0, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 1, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 2, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 0, - }, -}; - -static struct bfin_dpm_state { - unsigned int csel; /* system clock divider */ - unsigned int tscale; /* change the divider on the core timer interrupt */ -} dpm_state_table[3]; - -#if defined(CONFIG_CYCLES_CLOCKSOURCE) -/* - * normalized to maximum frequency offset for CYCLES, - * used in time-ts cycles clock source, but could be used - * somewhere also. - */ -unsigned long long __bfin_cycles_off; -unsigned int __bfin_cycles_mod; -#endif - -/**************************************************************************/ -static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) -{ - - unsigned long csel, min_cclk; - int index; - - /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ -#if ANOMALY_05000273 || ANOMALY_05000274 || \ - (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \ - && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) - min_cclk = sclk * 2; -#else - min_cclk = sclk; -#endif - -#ifndef CONFIG_BF60x - csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); -#else - csel = bfin_read32(CGU0_DIV) & 0x1F; -#endif - - for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) { - bfin_freq_table[index].frequency = cclk >> index; -#ifndef CONFIG_BF60x - dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ -#else - dpm_state_table[index].csel = csel; -#endif - dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1; - - pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", - bfin_freq_table[index].frequency, - dpm_state_table[index].csel, - dpm_state_table[index].tscale); - } - return; -} - -static void bfin_adjust_core_timer(void *info) -{ - unsigned int tscale; - unsigned int index = *(unsigned int *)info; - - /* we have to adjust the core timer, because it is using cclk */ - tscale = dpm_state_table[index].tscale; - bfin_write_TSCALE(tscale); - return; -} - -static unsigned int bfin_getfreq_khz(unsigned int cpu) -{ - /* Both CoreA/B have the same core clock */ - return get_cclk() / 1000; -} - -#ifdef CONFIG_BF60x -unsigned long cpu_set_cclk(int cpu, unsigned long new) -{ - struct clk *clk; - int ret; - - clk = clk_get(NULL, "CCLK"); - if (IS_ERR(clk)) - return -ENODEV; - - ret = clk_set_rate(clk, new); - 
clk_put(clk); - return ret; -} -#endif - -static int bfin_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ -#ifndef CONFIG_BF60x - unsigned int plldiv; -#endif - unsigned int index; - unsigned long cclk_hz; - struct cpufreq_freqs freqs; - static unsigned long lpj_ref; - static unsigned int lpj_ref_freq; - int ret = 0; - -#if defined(CONFIG_CYCLES_CLOCKSOURCE) - cycles_t cycles; -#endif - - if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, - relation, &index)) - return -EINVAL; - - cclk_hz = bfin_freq_table[index].frequency; - - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); -#ifndef CONFIG_BF60x - plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; - bfin_write_PLL_DIV(plldiv); -#else - ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); - if (ret != 0) { - WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); - return ret; - } -#endif - on_each_cpu(bfin_adjust_core_timer, &index, 1); -#if defined(CONFIG_CYCLES_CLOCKSOURCE) - cycles = get_cycles(); - SSYNC(); - cycles += 10; /* ~10 cycles we lose after get_cycles() */ - __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); - __bfin_cycles_mod = index; -#endif - if (!lpj_ref_freq) { - lpj_ref = loops_per_jiffy; - lpj_ref_freq = freqs.old; - } - if (freqs.new != freqs.old) { - loops_per_jiffy = cpufreq_scale(lpj_ref, - lpj_ref_freq, freqs.new); - } - - /* TODO: just test case for cycles clock source, remove later */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: done\n"); - return ret; -} - -static int bfin_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, bfin_freq_table); -} - -static int __bfin_cpu_init(struct cpufreq_policy *policy) -{ - - unsigned long cclk, sclk; - - cclk = get_cclk() / 1000; - sclk = get_sclk() / 1000; - - if (policy->cpu == CPUFREQ_CPU) - bfin_init_tables(cclk, sclk); - - policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ - - policy->cur = cclk; - cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); -} - -static struct freq_attr *bfin_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver bfin_driver = { - .verify = bfin_verify_speed, - .target = bfin_target, - .get = bfin_getfreq_khz, - .init = __bfin_cpu_init, - .name = "bfin cpufreq", - .owner = THIS_MODULE, - .attr = bfin_freq_attr, -}; - -static int __init bfin_cpu_init(void) -{ - return cpufreq_register_driver(&bfin_driver); -} - -static void __exit bfin_cpu_exit(void) -{ - cpufreq_unregister_driver(&bfin_driver); -} - -MODULE_AUTHOR("Michael Hennerich "); -MODULE_DESCRIPTION("cpufreq driver for Blackfin"); -MODULE_LICENSE("GPL"); - -module_init(bfin_cpu_init); -module_exit(bfin_cpu_exit); diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 2701aca..6f8fec3 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -78,3 +78,4 @@ obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o ################################################################################## # Other platform drivers obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o +obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o diff --git 
a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c new file mode 100644 index 0000000..995511e80 --- /dev/null +++ b/drivers/cpufreq/blackfin-cpufreq.c @@ -0,0 +1,247 @@ +/* + * Blackfin core clock scaling + * + * Copyright 2008-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* this is the table of CCLK frequencies, in Hz */ +/* .index is the entry in the auxiliary dpm_state_table[] */ +static struct cpufreq_frequency_table bfin_freq_table[] = { + { + .frequency = CPUFREQ_TABLE_END, + .index = 0, + }, + { + .frequency = CPUFREQ_TABLE_END, + .index = 1, + }, + { + .frequency = CPUFREQ_TABLE_END, + .index = 2, + }, + { + .frequency = CPUFREQ_TABLE_END, + .index = 0, + }, +}; + +static struct bfin_dpm_state { + unsigned int csel; /* system clock divider */ + unsigned int tscale; /* change the divider on the core timer interrupt */ +} dpm_state_table[3]; + +#if defined(CONFIG_CYCLES_CLOCKSOURCE) +/* + * normalized to maximum frequency offset for CYCLES, + * used in time-ts cycles clock source, but could be used + * somewhere also. + */ +unsigned long long __bfin_cycles_off; +unsigned int __bfin_cycles_mod; +#endif + +/**************************************************************************/ +static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) +{ + + unsigned long csel, min_cclk; + int index; + + /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ +#if ANOMALY_05000273 || ANOMALY_05000274 || \ + (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \ + && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) + min_cclk = sclk * 2; +#else + min_cclk = sclk; +#endif + +#ifndef CONFIG_BF60x + csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); +#else + csel = bfin_read32(CGU0_DIV) & 0x1F; +#endif + + for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) { + bfin_freq_table[index].frequency = cclk >> index; +#ifndef CONFIG_BF60x + dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ +#else + dpm_state_table[index].csel = csel; +#endif + dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1; + + pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", + bfin_freq_table[index].frequency, + dpm_state_table[index].csel, + dpm_state_table[index].tscale); + } + return; +} + +static void bfin_adjust_core_timer(void *info) +{ + unsigned int tscale; + unsigned int index = *(unsigned int *)info; + + /* we have to adjust the core timer, because it is using cclk */ + tscale = dpm_state_table[index].tscale; + bfin_write_TSCALE(tscale); + return; +} + +static unsigned int bfin_getfreq_khz(unsigned int cpu) +{ + /* Both CoreA/B have the same core clock */ + return get_cclk() / 1000; +} + +#ifdef CONFIG_BF60x +unsigned long cpu_set_cclk(int cpu, unsigned long new) +{ + struct clk *clk; + int ret; + + clk = clk_get(NULL, "CCLK"); + if (IS_ERR(clk)) + return -ENODEV; + + ret = clk_set_rate(clk, new); + clk_put(clk); + return ret; +} +#endif + +static int bfin_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ +#ifndef CONFIG_BF60x + unsigned int plldiv; +#endif + unsigned int index; + unsigned long cclk_hz; + struct cpufreq_freqs freqs; + static unsigned long lpj_ref; + static unsigned int lpj_ref_freq; + int ret = 0; + +#if defined(CONFIG_CYCLES_CLOCKSOURCE) + cycles_t cycles; +#endif + + if (cpufreq_frequency_table_target(policy, 
bfin_freq_table, target_freq, + relation, &index)) + return -EINVAL; + + cclk_hz = bfin_freq_table[index].frequency; + + freqs.old = bfin_getfreq_khz(0); + freqs.new = cclk_hz; + + pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", + cclk_hz, target_freq, freqs.old); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); +#ifndef CONFIG_BF60x + plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; + bfin_write_PLL_DIV(plldiv); +#else + ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); + if (ret != 0) { + WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); + return ret; + } +#endif + on_each_cpu(bfin_adjust_core_timer, &index, 1); +#if defined(CONFIG_CYCLES_CLOCKSOURCE) + cycles = get_cycles(); + SSYNC(); + cycles += 10; /* ~10 cycles we lose after get_cycles() */ + __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); + __bfin_cycles_mod = index; +#endif + if (!lpj_ref_freq) { + lpj_ref = loops_per_jiffy; + lpj_ref_freq = freqs.old; + } + if (freqs.new != freqs.old) { + loops_per_jiffy = cpufreq_scale(lpj_ref, + lpj_ref_freq, freqs.new); + } + + /* TODO: just test case for cycles clock source, remove later */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + pr_debug("cpufreq: done\n"); + return ret; +} + +static int bfin_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, bfin_freq_table); +} + +static int __bfin_cpu_init(struct cpufreq_policy *policy) +{ + + unsigned long cclk, sclk; + + cclk = get_cclk() / 1000; + sclk = get_sclk() / 1000; + + if (policy->cpu == CPUFREQ_CPU) + bfin_init_tables(cclk, sclk); + + policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ + + policy->cur = cclk; + cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); + return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); +} + +static struct freq_attr *bfin_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver bfin_driver = { + .verify = bfin_verify_speed, + .target = bfin_target, + .get = bfin_getfreq_khz, + .init = __bfin_cpu_init, + .name = "bfin cpufreq", + .owner = THIS_MODULE, + .attr = bfin_freq_attr, +}; + +static int __init bfin_cpu_init(void) +{ + return cpufreq_register_driver(&bfin_driver); +} + +static void __exit bfin_cpu_exit(void) +{ + cpufreq_unregister_driver(&bfin_driver); +} + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("cpufreq driver for Blackfin"); +MODULE_LICENSE("GPL"); + +module_init(bfin_cpu_init); +module_exit(bfin_cpu_exit); -- cgit v0.10.2 From 8e8aa95a2b0b2b0184b3b72b324a6145362720bd Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:19 +0000 Subject: cpufreq: cris: move cpufreq driver to drivers/cpufreq This patch moves cpufreq drivers of CRIS architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Jesper Nilsson Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile index d366e08..18a2271 100644 --- a/arch/cris/arch-v32/mach-a3/Makefile +++ b/arch/cris/arch-v32/mach-a3/Makefile @@ -3,7 +3,6 @@ # obj-y := dma.o pinmux.o io.o arbiter.o -obj-$(CONFIG_CPU_FREQ) += cpufreq.o clean: diff --git a/arch/cris/arch-v32/mach-a3/cpufreq.c b/arch/cris/arch-v32/mach-a3/cpufreq.c deleted file mode 100644 index ee142c4..0000000 --- a/arch/cris/arch-v32/mach-a3/cpufreq.c +++ /dev/null @@ -1,146 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data); - -static struct notifier_block cris_sdram_freq_notifier_block = { - .notifier_call = cris_sdram_freq_notifier -}; - -static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, -}; - -static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) -{ - reg_clkgen_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - return clk_ctrl.pll ? 200000 : 6000; -} - -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) -{ - struct cpufreq_freqs freqs; - reg_clkgen_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - local_irq_disable(); - - /* Even though we may be SMP they will share the same clock - * so all settings are made on CPU0. */ - if (cris_freq_table[state].frequency == 200000) - clk_ctrl.pll = 1; - else - clk_ctrl.pll = 0; - REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); - - local_irq_enable(); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, cris_freq_table, - target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - - return 0; -} - -static int cris_freq_cpu_init(struct cpufreq_policy *policy) -{ - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver cris_freq_driver = { - .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, - .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, - .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, -}; - -static int __init cris_freq_init(void) -{ - int ret; - ret = cpufreq_register_driver(&cris_freq_driver); - cpufreq_register_notifier(&cris_sdram_freq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - return ret; -} - -static int 
-cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - int i; - struct cpufreq_freqs *freqs = data; - if (val == CPUFREQ_PRECHANGE) { - reg_ddr2_rw_cfg cfg = - REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg); - cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46); - - if (freqs->new == 200000) - for (i = 0; i < 50000; i++); - REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); - } - return 0; -} - - -module_init(cris_freq_init); diff --git a/arch/cris/arch-v32/mach-fs/Makefile b/arch/cris/arch-v32/mach-fs/Makefile index d366e08..18a2271 100644 --- a/arch/cris/arch-v32/mach-fs/Makefile +++ b/arch/cris/arch-v32/mach-fs/Makefile @@ -3,7 +3,6 @@ # obj-y := dma.o pinmux.o io.o arbiter.o -obj-$(CONFIG_CPU_FREQ) += cpufreq.o clean: diff --git a/arch/cris/arch-v32/mach-fs/cpufreq.c b/arch/cris/arch-v32/mach-fs/cpufreq.c deleted file mode 100644 index 1295223..0000000 --- a/arch/cris/arch-v32/mach-fs/cpufreq.c +++ /dev/null @@ -1,142 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data); - -static struct notifier_block cris_sdram_freq_notifier_block = { - .notifier_call = cris_sdram_freq_notifier -}; - -static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, -}; - -static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) -{ - reg_config_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - return clk_ctrl.pll ? 200000 : 6000; -} - -static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, - unsigned int state) -{ - struct cpufreq_freqs freqs; - reg_config_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - - freqs.old = cris_freq_get_cpu_frequency(policy->cpu); - freqs.new = cris_freq_table[state].frequency; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - local_irq_disable(); - - /* Even though we may be SMP they will share the same clock - * so all settings are made on CPU0. 
*/ - if (cris_freq_table[state].frequency == 200000) - clk_ctrl.pll = 1; - else - clk_ctrl.pll = 0; - REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl); - - local_irq_enable(); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target - (policy, cris_freq_table, target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(policy, newstate); - - return 0; -} - -static int cris_freq_cpu_init(struct cpufreq_policy *policy) -{ - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver cris_freq_driver = { - .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, - .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, - .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, -}; - -static int __init cris_freq_init(void) -{ - int ret; - ret = cpufreq_register_driver(&cris_freq_driver); - cpufreq_register_notifier(&cris_sdram_freq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - return ret; -} - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - int i; - struct cpufreq_freqs *freqs = data; - if (val == CPUFREQ_PRECHANGE) { - reg_bif_core_rw_sdram_timing timing = - REG_RD(bif_core, regi_bif_core, rw_sdram_timing); - timing.cpd = (freqs->new == 200000 ? 
0 : 1); - - if (freqs->new == 200000) - for (i = 0; i < 50000; i++) ; - REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); - } - return 0; -} - -module_init(cris_freq_init); diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 6f8fec3..2d6dbf3 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -79,3 +79,5 @@ obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o # Other platform drivers obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o +obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o +obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c new file mode 100644 index 0000000..ee142c4 --- /dev/null +++ b/drivers/cpufreq/cris-artpec3-cpufreq.c @@ -0,0 +1,146 @@ +#include +#include +#include +#include +#include +#include +#include + +static int +cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, + void *data); + +static struct notifier_block cris_sdram_freq_notifier_block = { + .notifier_call = cris_sdram_freq_notifier +}; + +static struct cpufreq_frequency_table cris_freq_table[] = { + {0x01, 6000}, + {0x02, 200000}, + {0, CPUFREQ_TABLE_END}, +}; + +static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) +{ + reg_clkgen_rw_clk_ctrl clk_ctrl; + clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); + return clk_ctrl.pll ? 200000 : 6000; +} + +static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) +{ + struct cpufreq_freqs freqs; + reg_clkgen_rw_clk_ctrl clk_ctrl; + clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); + + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); + freqs.new = cris_freq_table[state].frequency; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + local_irq_disable(); + + /* Even though we may be SMP they will share the same clock + * so all settings are made on CPU0. 
*/ + if (cris_freq_table[state].frequency == 200000) + clk_ctrl.pll = 1; + else + clk_ctrl.pll = 0; + REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); + + local_irq_enable(); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); +}; + +static int cris_freq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); +} + +static int cris_freq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, cris_freq_table, + target_freq, relation, &newstate)) + return -EINVAL; + + cris_freq_set_cpu_state(policy, newstate); + + return 0; +} + +static int cris_freq_cpu_init(struct cpufreq_policy *policy) +{ + int result; + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = 1000000; /* 1ms */ + policy->cur = cris_freq_get_cpu_frequency(0); + + result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); + if (result) + return (result); + + cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); + + return 0; +} + + +static int cris_freq_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + + +static struct freq_attr *cris_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver cris_freq_driver = { + .get = cris_freq_get_cpu_frequency, + .verify = cris_freq_verify, + .target = cris_freq_target, + .init = cris_freq_cpu_init, + .exit = cris_freq_cpu_exit, + .name = "cris_freq", + .owner = THIS_MODULE, + .attr = cris_freq_attr, +}; + +static int __init cris_freq_init(void) +{ + int ret; + ret = cpufreq_register_driver(&cris_freq_driver); + cpufreq_register_notifier(&cris_sdram_freq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + return ret; +} + +static int +cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + int i; + struct cpufreq_freqs *freqs = data; + if (val == CPUFREQ_PRECHANGE) { + reg_ddr2_rw_cfg cfg = + REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg); + cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46); + + if (freqs->new == 200000) + for (i = 0; i < 50000; i++); + REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); + } + return 0; +} + + +module_init(cris_freq_init); diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c new file mode 100644 index 0000000..1295223 --- /dev/null +++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c @@ -0,0 +1,142 @@ +#include +#include +#include +#include +#include +#include +#include + +static int +cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, + void *data); + +static struct notifier_block cris_sdram_freq_notifier_block = { + .notifier_call = cris_sdram_freq_notifier +}; + +static struct cpufreq_frequency_table cris_freq_table[] = { + {0x01, 6000}, + {0x02, 200000}, + {0, CPUFREQ_TABLE_END}, +}; + +static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) +{ + reg_config_rw_clk_ctrl clk_ctrl; + clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); + return clk_ctrl.pll ? 
200000 : 6000; +} + +static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, + unsigned int state) +{ + struct cpufreq_freqs freqs; + reg_config_rw_clk_ctrl clk_ctrl; + clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); + + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); + freqs.new = cris_freq_table[state].frequency; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + local_irq_disable(); + + /* Even though we may be SMP they will share the same clock + * so all settings are made on CPU0. */ + if (cris_freq_table[state].frequency == 200000) + clk_ctrl.pll = 1; + else + clk_ctrl.pll = 0; + REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl); + + local_irq_enable(); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); +}; + +static int cris_freq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); +} + +static int cris_freq_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target + (policy, cris_freq_table, target_freq, relation, &newstate)) + return -EINVAL; + + cris_freq_set_cpu_state(policy, newstate); + + return 0; +} + +static int cris_freq_cpu_init(struct cpufreq_policy *policy) +{ + int result; + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = 1000000; /* 1ms */ + policy->cur = cris_freq_get_cpu_frequency(0); + + result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); + if (result) + return (result); + + cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); + + return 0; +} + +static int cris_freq_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static struct freq_attr *cris_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver cris_freq_driver = { + .get = cris_freq_get_cpu_frequency, + .verify = cris_freq_verify, + .target = cris_freq_target, + .init = cris_freq_cpu_init, + .exit = cris_freq_cpu_exit, + .name = "cris_freq", + .owner = THIS_MODULE, + .attr = cris_freq_attr, +}; + +static int __init cris_freq_init(void) +{ + int ret; + ret = cpufreq_register_driver(&cris_freq_driver); + cpufreq_register_notifier(&cris_sdram_freq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + return ret; +} + +static int +cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + int i; + struct cpufreq_freqs *freqs = data; + if (val == CPUFREQ_PRECHANGE) { + reg_bif_core_rw_sdram_timing timing = + REG_RD(bif_core, regi_bif_core, rw_sdram_timing); + timing.cpd = (freqs->new == 200000 ? 0 : 1); + + if (freqs->new == 200000) + for (i = 0; i < 50000; i++) ; + REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); + } + return 0; +} + +module_init(cris_freq_init); -- cgit v0.10.2 From ab423e435f1eafdb9a071fe8a9942b2522d09d2d Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:20 +0000 Subject: cpufreq: ia64: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of IA64 architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Tony Luck Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9a02f71..152b5f2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -591,9 +591,9 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" if PM - -source "arch/ia64/kernel/cpufreq/Kconfig" - +menu "CPU Frequency scaling" +source "drivers/cpufreq/Kconfig" +endmenu endif endmenu diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index d959c84..20678a9 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o -obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig deleted file mode 100644 index 2d9d527..0000000 --- a/arch/ia64/kernel/cpufreq/Kconfig +++ /dev/null @@ -1,29 +0,0 @@ - -# -# CPU Frequency scaling -# - -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - -config IA64_ACPI_CPUFREQ - tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE - depends on ACPI_PROCESSOR - help - This driver adds a CPUFreq driver which utilizes the ACPI - Processor Performance States. - - For details, take a look at . - - If in doubt, say N. - -endif # CPU_FREQ - -endmenu - diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile deleted file mode 100644 index 4838f2a..0000000 --- a/arch/ia64/kernel/cpufreq/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o - diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c deleted file mode 100644 index 4700fef..0000000 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ /dev/null @@ -1,439 +0,0 @@ -/* - * arch/ia64/kernel/cpufreq/acpi-cpufreq.c - * This file provides the ACPI based P-state support. This - * module works with generic cpufreq infrastructure. 
Most of - * the code is based on i386 version - * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) - * - * Copyright (C) 2005 Intel Corp - * Venkatesh Pallipadi - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -MODULE_AUTHOR("Venkatesh Pallipadi"); -MODULE_DESCRIPTION("ACPI Processor P-States Driver"); -MODULE_LICENSE("GPL"); - - -struct cpufreq_acpi_io { - struct acpi_processor_performance acpi_data; - struct cpufreq_frequency_table *freq_table; - unsigned int resume; -}; - -static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; - -static struct cpufreq_driver acpi_cpufreq_driver; - - -static int -processor_set_pstate ( - u32 value) -{ - s64 retval; - - pr_debug("processor_set_pstate\n"); - - retval = ia64_pal_set_pstate((u64)value); - - if (retval) { - pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", - value, retval); - return -ENODEV; - } - return (int)retval; -} - - -static int -processor_get_pstate ( - u32 *value) -{ - u64 pstate_index = 0; - s64 retval; - - pr_debug("processor_get_pstate\n"); - - retval = ia64_pal_get_pstate(&pstate_index, - PAL_GET_PSTATE_TYPE_INSTANT); - *value = (u32) pstate_index; - - if (retval) - pr_debug("Failed to get current freq with " - "error 0x%lx, idx 0x%x\n", retval, *value); - - return (int)retval; -} - - -/* To be used only after data->acpi_data is initialized */ -static unsigned -extract_clock ( - struct cpufreq_acpi_io *data, - unsigned value, - unsigned int cpu) -{ - unsigned long i; - - pr_debug("extract_clock\n"); - - for (i = 0; i < data->acpi_data.state_count; i++) { - if (value == data->acpi_data.states[i].status) - return data->acpi_data.states[i].core_frequency; - } - return data->acpi_data.states[i-1].core_frequency; -} - - -static unsigned int -processor_get_freq ( - struct cpufreq_acpi_io *data, - unsigned int cpu) -{ - int ret = 0; - u32 value = 0; - cpumask_t saved_mask; - unsigned long clock_freq; - - pr_debug("processor_get_freq\n"); - - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (smp_processor_id() != cpu) - goto migrate_end; - - /* processor_get_pstate gets the instantaneous frequency */ - ret = processor_get_pstate(&value); - - if (ret) { - set_cpus_allowed_ptr(current, &saved_mask); - printk(KERN_WARNING "get performance failed with error %d\n", - ret); - ret = 0; - goto migrate_end; - } - clock_freq = extract_clock(data, value, cpu); - ret = (clock_freq*1000); - -migrate_end: - set_cpus_allowed_ptr(current, &saved_mask); - return ret; -} - - -static int -processor_set_freq ( - struct cpufreq_acpi_io *data, - struct cpufreq_policy *policy, - int state) -{ - int ret = 0; - u32 value = 0; - struct cpufreq_freqs cpufreq_freqs; - cpumask_t saved_mask; - int retval; - - pr_debug("processor_set_freq\n"); - - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(policy->cpu)); - if (smp_processor_id() != policy->cpu) { - retval = -EAGAIN; - goto migrate_end; - } - - if (state == data->acpi_data.state) { - if (unlikely(data->resume)) { - pr_debug("Called after resume, resetting to P%d\n", state); - data->resume = 0; - } else { - pr_debug("Already at target state (P%d)\n", state); - retval = 0; - goto migrate_end; - } - } - - pr_debug("Transitioning from P%d to P%d\n", - data->acpi_data.state, state); - - /* cpufreq frequency struct */ - cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; - cpufreq_freqs.new = data->freq_table[state].frequency; - - /* notify 
cpufreq */ - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); - - /* - * First we write the target state's 'control' value to the - * control_register. - */ - - value = (u32) data->acpi_data.states[state].control; - - pr_debug("Transitioning to state: 0x%08x\n", value); - - ret = processor_set_pstate(value); - if (ret) { - unsigned int tmp = cpufreq_freqs.new; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); - cpufreq_freqs.new = cpufreq_freqs.old; - cpufreq_freqs.old = tmp; - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_PRECHANGE); - cpufreq_notify_transition(policy, &cpufreq_freqs, - CPUFREQ_POSTCHANGE); - printk(KERN_WARNING "Transition failed with error %d\n", ret); - retval = -ENODEV; - goto migrate_end; - } - - cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); - - data->acpi_data.state = state; - - retval = 0; - -migrate_end: - set_cpus_allowed_ptr(current, &saved_mask); - return (retval); -} - - -static unsigned int -acpi_cpufreq_get ( - unsigned int cpu) -{ - struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - - pr_debug("acpi_cpufreq_get\n"); - - return processor_get_freq(data, cpu); -} - - -static int -acpi_cpufreq_target ( - struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - unsigned int next_state = 0; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_setpolicy\n"); - - result = cpufreq_frequency_table_target(policy, - data->freq_table, target_freq, relation, &next_state); - if (result) - return (result); - - result = processor_set_freq(data, policy, next_state); - - return (result); -} - - -static int -acpi_cpufreq_verify ( - struct cpufreq_policy *policy) -{ - unsigned int result = 0; - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_verify\n"); - - result = cpufreq_frequency_table_verify(policy, - data->freq_table); - - return (result); -} - - -static int -acpi_cpufreq_cpu_init ( - struct cpufreq_policy *policy) -{ - unsigned int i; - unsigned int cpu = policy->cpu; - struct cpufreq_acpi_io *data; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_cpu_init\n"); - - data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); - if (!data) - return (-ENOMEM); - - acpi_io_data[cpu] = data; - - result = acpi_processor_register_performance(&data->acpi_data, cpu); - - if (result) - goto err_free; - - /* capability check */ - if (data->acpi_data.state_count <= 1) { - pr_debug("No P-States\n"); - result = -ENODEV; - goto err_unreg; - } - - if ((data->acpi_data.control_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE)) { - pr_debug("Unsupported address space [%d, %d]\n", - (u32) (data->acpi_data.control_register.space_id), - (u32) (data->acpi_data.status_register.space_id)); - result = -ENODEV; - goto err_unreg; - } - - /* alloc freq_table */ - data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * - (data->acpi_data.state_count + 1), - GFP_KERNEL); - if (!data->freq_table) { - result = -ENOMEM; - goto err_unreg; - } - - /* detect transition latency */ - policy->cpuinfo.transition_latency = 0; - for (i=0; iacpi_data.state_count; i++) { - if ((data->acpi_data.states[i].transition_latency * 1000) > - policy->cpuinfo.transition_latency) { - policy->cpuinfo.transition_latency = - data->acpi_data.states[i].transition_latency * 1000; - } - } - policy->cur = 
processor_get_freq(data, policy->cpu); - - /* table init */ - for (i = 0; i <= data->acpi_data.state_count; i++) - { - data->freq_table[i].index = i; - if (i < data->acpi_data.state_count) { - data->freq_table[i].frequency = - data->acpi_data.states[i].core_frequency * 1000; - } else { - data->freq_table[i].frequency = CPUFREQ_TABLE_END; - } - } - - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); - if (result) { - goto err_freqfree; - } - - /* notify BIOS that we exist */ - acpi_processor_notify_smm(THIS_MODULE); - - printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " - "activated.\n", cpu); - - for (i = 0; i < data->acpi_data.state_count; i++) - pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", - (i == data->acpi_data.state?'*':' '), i, - (u32) data->acpi_data.states[i].core_frequency, - (u32) data->acpi_data.states[i].power, - (u32) data->acpi_data.states[i].transition_latency, - (u32) data->acpi_data.states[i].bus_master_latency, - (u32) data->acpi_data.states[i].status, - (u32) data->acpi_data.states[i].control); - - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - - /* the first call to ->target() should result in us actually - * writing something to the appropriate registers. */ - data->resume = 1; - - return (result); - - err_freqfree: - kfree(data->freq_table); - err_unreg: - acpi_processor_unregister_performance(&data->acpi_data, cpu); - err_free: - kfree(data); - acpi_io_data[cpu] = NULL; - - return (result); -} - - -static int -acpi_cpufreq_cpu_exit ( - struct cpufreq_policy *policy) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_cpu_exit\n"); - - if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); - acpi_io_data[policy->cpu] = NULL; - acpi_processor_unregister_performance(&data->acpi_data, - policy->cpu); - kfree(data); - } - - return (0); -} - - -static struct freq_attr* acpi_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - -static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, - .get = acpi_cpufreq_get, - .init = acpi_cpufreq_cpu_init, - .exit = acpi_cpufreq_cpu_exit, - .name = "acpi-cpufreq", - .owner = THIS_MODULE, - .attr = acpi_cpufreq_attr, -}; - - -static int __init -acpi_cpufreq_init (void) -{ - pr_debug("acpi_cpufreq_init\n"); - - return cpufreq_register_driver(&acpi_cpufreq_driver); -} - - -static void __exit -acpi_cpufreq_exit (void) -{ - pr_debug("acpi_cpufreq_exit\n"); - - cpufreq_unregister_driver(&acpi_cpufreq_driver); - return; -} - - -late_initcall(acpi_cpufreq_init); -module_exit(acpi_cpufreq_exit); - diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d7ce6cc..a2f1600 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -218,6 +218,23 @@ config AVR32_AT32AP_CPUFREQ endmenu +menu "CPUFreq processor drivers" +depends on IA64 + +config IA64_ACPI_CPUFREQ + tristate "ACPI Processor P-States driver" + select CPU_FREQ_TABLE + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + + For details, take a look at . + + If in doubt, say N. 
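For orientation, the freq_table built in acpi_cpufreq_cpu_init() above follows the standard cpufreq pattern: one entry per ACPI P-state, indexed in order, plus a final entry whose frequency is CPUFREQ_TABLE_END. A minimal sketch of the resulting layout is shown below; the three P-state frequencies are hypothetical and only illustrate the shape of the table that cpufreq_frequency_table_cpuinfo() later scans to derive policy->cpuinfo.min_freq and max_freq.

#include <linux/cpufreq.h>

/* Hypothetical example of the table shape built by acpi_cpufreq_cpu_init():
 * one entry per P-state (frequency in kHz) plus a CPUFREQ_TABLE_END sentinel.
 * The frequencies are illustrative, not taken from any real processor. */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .index = 0, .frequency = 1600000 },	/* P0: 1.6 GHz */
	{ .index = 1, .frequency = 1200000 },	/* P1: 1.2 GHz */
	{ .index = 2, .frequency =  800000 },	/* P2: 800 MHz */
	{ .index = 3, .frequency = CPUFREQ_TABLE_END },
};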
+ +endmenu + menu "PowerPC CPU frequency scaling drivers" depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 2d6dbf3..ab68133 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -81,3 +81,4 @@ obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o +obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c new file mode 100644 index 0000000..c0075db --- /dev/null +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -0,0 +1,438 @@ +/* + * This file provides the ACPI based P-state support. This + * module works with generic cpufreq infrastructure. Most of + * the code is based on i386 version + * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) + * + * Copyright (C) 2005 Intel Corp + * Venkatesh Pallipadi + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +MODULE_AUTHOR("Venkatesh Pallipadi"); +MODULE_DESCRIPTION("ACPI Processor P-States Driver"); +MODULE_LICENSE("GPL"); + + +struct cpufreq_acpi_io { + struct acpi_processor_performance acpi_data; + struct cpufreq_frequency_table *freq_table; + unsigned int resume; +}; + +static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; + +static struct cpufreq_driver acpi_cpufreq_driver; + + +static int +processor_set_pstate ( + u32 value) +{ + s64 retval; + + pr_debug("processor_set_pstate\n"); + + retval = ia64_pal_set_pstate((u64)value); + + if (retval) { + pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", + value, retval); + return -ENODEV; + } + return (int)retval; +} + + +static int +processor_get_pstate ( + u32 *value) +{ + u64 pstate_index = 0; + s64 retval; + + pr_debug("processor_get_pstate\n"); + + retval = ia64_pal_get_pstate(&pstate_index, + PAL_GET_PSTATE_TYPE_INSTANT); + *value = (u32) pstate_index; + + if (retval) + pr_debug("Failed to get current freq with " + "error 0x%lx, idx 0x%x\n", retval, *value); + + return (int)retval; +} + + +/* To be used only after data->acpi_data is initialized */ +static unsigned +extract_clock ( + struct cpufreq_acpi_io *data, + unsigned value, + unsigned int cpu) +{ + unsigned long i; + + pr_debug("extract_clock\n"); + + for (i = 0; i < data->acpi_data.state_count; i++) { + if (value == data->acpi_data.states[i].status) + return data->acpi_data.states[i].core_frequency; + } + return data->acpi_data.states[i-1].core_frequency; +} + + +static unsigned int +processor_get_freq ( + struct cpufreq_acpi_io *data, + unsigned int cpu) +{ + int ret = 0; + u32 value = 0; + cpumask_t saved_mask; + unsigned long clock_freq; + + pr_debug("processor_get_freq\n"); + + saved_mask = current->cpus_allowed; + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + if (smp_processor_id() != cpu) + goto migrate_end; + + /* processor_get_pstate gets the instantaneous frequency */ + ret = processor_get_pstate(&value); + + if (ret) { + set_cpus_allowed_ptr(current, &saved_mask); + printk(KERN_WARNING "get performance failed with error %d\n", + ret); + ret = 0; + goto migrate_end; + } + clock_freq = extract_clock(data, value, cpu); + ret = (clock_freq*1000); + +migrate_end: + set_cpus_allowed_ptr(current, &saved_mask); + return ret; +} + + +static int +processor_set_freq ( + struct cpufreq_acpi_io *data, + struct 
cpufreq_policy *policy, + int state) +{ + int ret = 0; + u32 value = 0; + struct cpufreq_freqs cpufreq_freqs; + cpumask_t saved_mask; + int retval; + + pr_debug("processor_set_freq\n"); + + saved_mask = current->cpus_allowed; + set_cpus_allowed_ptr(current, cpumask_of(policy->cpu)); + if (smp_processor_id() != policy->cpu) { + retval = -EAGAIN; + goto migrate_end; + } + + if (state == data->acpi_data.state) { + if (unlikely(data->resume)) { + pr_debug("Called after resume, resetting to P%d\n", state); + data->resume = 0; + } else { + pr_debug("Already at target state (P%d)\n", state); + retval = 0; + goto migrate_end; + } + } + + pr_debug("Transitioning from P%d to P%d\n", + data->acpi_data.state, state); + + /* cpufreq frequency struct */ + cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; + cpufreq_freqs.new = data->freq_table[state].frequency; + + /* notify cpufreq */ + cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); + + /* + * First we write the target state's 'control' value to the + * control_register. + */ + + value = (u32) data->acpi_data.states[state].control; + + pr_debug("Transitioning to state: 0x%08x\n", value); + + ret = processor_set_pstate(value); + if (ret) { + unsigned int tmp = cpufreq_freqs.new; + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_POSTCHANGE); + cpufreq_freqs.new = cpufreq_freqs.old; + cpufreq_freqs.old = tmp; + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &cpufreq_freqs, + CPUFREQ_POSTCHANGE); + printk(KERN_WARNING "Transition failed with error %d\n", ret); + retval = -ENODEV; + goto migrate_end; + } + + cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); + + data->acpi_data.state = state; + + retval = 0; + +migrate_end: + set_cpus_allowed_ptr(current, &saved_mask); + return (retval); +} + + +static unsigned int +acpi_cpufreq_get ( + unsigned int cpu) +{ + struct cpufreq_acpi_io *data = acpi_io_data[cpu]; + + pr_debug("acpi_cpufreq_get\n"); + + return processor_get_freq(data, cpu); +} + + +static int +acpi_cpufreq_target ( + struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; + unsigned int next_state = 0; + unsigned int result = 0; + + pr_debug("acpi_cpufreq_setpolicy\n"); + + result = cpufreq_frequency_table_target(policy, + data->freq_table, target_freq, relation, &next_state); + if (result) + return (result); + + result = processor_set_freq(data, policy, next_state); + + return (result); +} + + +static int +acpi_cpufreq_verify ( + struct cpufreq_policy *policy) +{ + unsigned int result = 0; + struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; + + pr_debug("acpi_cpufreq_verify\n"); + + result = cpufreq_frequency_table_verify(policy, + data->freq_table); + + return (result); +} + + +static int +acpi_cpufreq_cpu_init ( + struct cpufreq_policy *policy) +{ + unsigned int i; + unsigned int cpu = policy->cpu; + struct cpufreq_acpi_io *data; + unsigned int result = 0; + + pr_debug("acpi_cpufreq_cpu_init\n"); + + data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); + if (!data) + return (-ENOMEM); + + acpi_io_data[cpu] = data; + + result = acpi_processor_register_performance(&data->acpi_data, cpu); + + if (result) + goto err_free; + + /* capability check */ + if (data->acpi_data.state_count <= 1) { + pr_debug("No P-States\n"); + result = -ENODEV; + goto err_unreg; + } + + if ((data->acpi_data.control_register.space_id 
!= + ACPI_ADR_SPACE_FIXED_HARDWARE) || + (data->acpi_data.status_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE)) { + pr_debug("Unsupported address space [%d, %d]\n", + (u32) (data->acpi_data.control_register.space_id), + (u32) (data->acpi_data.status_register.space_id)); + result = -ENODEV; + goto err_unreg; + } + + /* alloc freq_table */ + data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * + (data->acpi_data.state_count + 1), + GFP_KERNEL); + if (!data->freq_table) { + result = -ENOMEM; + goto err_unreg; + } + + /* detect transition latency */ + policy->cpuinfo.transition_latency = 0; + for (i=0; iacpi_data.state_count; i++) { + if ((data->acpi_data.states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency) { + policy->cpuinfo.transition_latency = + data->acpi_data.states[i].transition_latency * 1000; + } + } + policy->cur = processor_get_freq(data, policy->cpu); + + /* table init */ + for (i = 0; i <= data->acpi_data.state_count; i++) + { + data->freq_table[i].index = i; + if (i < data->acpi_data.state_count) { + data->freq_table[i].frequency = + data->acpi_data.states[i].core_frequency * 1000; + } else { + data->freq_table[i].frequency = CPUFREQ_TABLE_END; + } + } + + result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + if (result) { + goto err_freqfree; + } + + /* notify BIOS that we exist */ + acpi_processor_notify_smm(THIS_MODULE); + + printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " + "activated.\n", cpu); + + for (i = 0; i < data->acpi_data.state_count; i++) + pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", + (i == data->acpi_data.state?'*':' '), i, + (u32) data->acpi_data.states[i].core_frequency, + (u32) data->acpi_data.states[i].power, + (u32) data->acpi_data.states[i].transition_latency, + (u32) data->acpi_data.states[i].bus_master_latency, + (u32) data->acpi_data.states[i].status, + (u32) data->acpi_data.states[i].control); + + cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); + + /* the first call to ->target() should result in us actually + * writing something to the appropriate registers. 
*/ + data->resume = 1; + + return (result); + + err_freqfree: + kfree(data->freq_table); + err_unreg: + acpi_processor_unregister_performance(&data->acpi_data, cpu); + err_free: + kfree(data); + acpi_io_data[cpu] = NULL; + + return (result); +} + + +static int +acpi_cpufreq_cpu_exit ( + struct cpufreq_policy *policy) +{ + struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; + + pr_debug("acpi_cpufreq_cpu_exit\n"); + + if (data) { + cpufreq_frequency_table_put_attr(policy->cpu); + acpi_io_data[policy->cpu] = NULL; + acpi_processor_unregister_performance(&data->acpi_data, + policy->cpu); + kfree(data); + } + + return (0); +} + + +static struct freq_attr* acpi_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + + +static struct cpufreq_driver acpi_cpufreq_driver = { + .verify = acpi_cpufreq_verify, + .target = acpi_cpufreq_target, + .get = acpi_cpufreq_get, + .init = acpi_cpufreq_cpu_init, + .exit = acpi_cpufreq_cpu_exit, + .name = "acpi-cpufreq", + .owner = THIS_MODULE, + .attr = acpi_cpufreq_attr, +}; + + +static int __init +acpi_cpufreq_init (void) +{ + pr_debug("acpi_cpufreq_init\n"); + + return cpufreq_register_driver(&acpi_cpufreq_driver); +} + + +static void __exit +acpi_cpufreq_exit (void) +{ + pr_debug("acpi_cpufreq_exit\n"); + + cpufreq_unregister_driver(&acpi_cpufreq_driver); + return; +} + + +late_initcall(acpi_cpufreq_init); +module_exit(acpi_cpufreq_exit); + -- cgit v0.10.2 From 7a9989356b23fa2c7731632d3b575c53c1ac8bce Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:21 +0000 Subject: cpufreq: mips: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of MIPS architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: John Crispin Signed-off-by: Rafael J. Wysocki diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21f..22e8417 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2539,7 +2539,14 @@ source "kernel/power/Kconfig" endmenu -source "arch/mips/kernel/cpufreq/Kconfig" +config MIPS_EXTERNAL_TIMER + bool + +if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER +menu "CPU Power Management" +source "drivers/cpufreq/Kconfig" +endmenu +endif source "net/Kconfig" diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f..c69ca65 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -92,8 +92,6 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o -obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ - obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig deleted file mode 100644 index 58c601e..0000000 --- a/arch/mips/kernel/cpufreq/Kconfig +++ /dev/null @@ -1,41 +0,0 @@ -# -# CPU Frequency scaling -# - -config MIPS_EXTERNAL_TIMER - bool - -config MIPS_CPUFREQ - bool - default y - depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER - -if MIPS_CPUFREQ - -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - -config LOONGSON2_CPUFREQ - tristate "Loongson2 CPUFreq Driver" - select CPU_FREQ_TABLE - depends on MIPS_CPUFREQ - help - This option adds a CPUFreq driver for loongson processors which - support software configurable cpu frequency. - - Loongson2F and it's successors support this feature. - - For details, take a look at . - - If in doubt, say N. 
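Several of the drivers consolidated in this series (the CRIS SDRAM retuning above, the Loongson2 udelay recalibration below) hang off the cpufreq transition notifier chain rather than the driver callbacks themselves. A minimal sketch of that pattern follows; the callback body and all names are hypothetical and only show the PRECHANGE/POSTCHANGE dispatch and the registration call.

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical transition-notifier sketch: react before and after a switch. */
static int example_freq_notifier(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_PRECHANGE)
		pr_debug("switching from %u kHz to %u kHz\n",
			 freqs->old, freqs->new);
	else if (val == CPUFREQ_POSTCHANGE)
		pr_debug("now running at %u kHz\n", freqs->new);

	return 0;
}

static struct notifier_block example_notifier_block = {
	.notifier_call = example_freq_notifier,
};

/* Registered once, typically from the driver's init path:
 * cpufreq_register_notifier(&example_notifier_block,
 *			     CPUFREQ_TRANSITION_NOTIFIER);
 */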
- -endif # CPU_FREQ - -endmenu - -endif # MIPS_CPUFREQ diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile deleted file mode 100644 index 05a5715..0000000 --- a/arch/mips/kernel/cpufreq/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Linux/MIPS cpufreq. -# - -obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c deleted file mode 100644 index 8488957..0000000 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Cpufreq driver for the loongson-2 processors - * - * The 2E revision of loongson processor not support this feature. - * - * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology - * Author: Yanhua, yanh@lemote.com - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include /* set_cpus_allowed() */ -#include -#include - -#include - -#include - -static uint nowait; - -static struct clk *cpuclk; - -static void (*saved_cpu_wait) (void); - -static int loongson2_cpu_freq_notifier(struct notifier_block *nb, - unsigned long val, void *data); - -static struct notifier_block loongson2_cpufreq_notifier_block = { - .notifier_call = loongson2_cpu_freq_notifier -}; - -static int loongson2_cpu_freq_notifier(struct notifier_block *nb, - unsigned long val, void *data) -{ - if (val == CPUFREQ_POSTCHANGE) - current_cpu_data.udelay_val = loops_per_jiffy; - - return 0; -} - -static unsigned int loongson2_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(cpuclk); -} - -/* - * Here we notify other drivers of the proposed change and the final change. 
- */ -static int loongson2_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cpu = policy->cpu; - unsigned int newstate = 0; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - unsigned int freq; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - if (cpufreq_frequency_table_target - (policy, &loongson2_clockmod_table[0], target_freq, relation, - &newstate)) - return -EINVAL; - - freq = - ((cpu_clock_freq / 1000) * - loongson2_clockmod_table[newstate].index) / 8; - if (freq < policy->min || freq > policy->max) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = loongson2_cpufreq_get(cpu); - freqs.new = freq; - freqs.flags = 0; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - /* setting the cpu frequency */ - clk_set_rate(cpuclk, freq); - - /* notifiers */ - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %u kHz\n", freq); - - return 0; -} - -static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - int i; - unsigned long rate; - int ret; - - cpuclk = clk_get(NULL, "cpu_clk"); - if (IS_ERR(cpuclk)) { - printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - rate = cpu_clock_freq / 1000; - if (!rate) { - clk_put(cpuclk); - return -EINVAL; - } - ret = clk_set_rate(cpuclk, rate); - if (ret) { - clk_put(cpuclk); - return ret; - } - - /* clock table init */ - for (i = 2; - (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); - i++) - loongson2_clockmod_table[i].frequency = (rate * i) / 8; - - policy->cur = loongson2_cpufreq_get(policy->cpu); - - cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], - policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - &loongson2_clockmod_table[0]); -} - -static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &loongson2_clockmod_table[0]); -} - -static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) -{ - clk_put(cpuclk); - return 0; -} - -static struct freq_attr *loongson2_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver loongson2_cpufreq_driver = { - .owner = THIS_MODULE, - .name = "loongson2", - .init = loongson2_cpufreq_cpu_init, - .verify = loongson2_cpufreq_verify, - .target = loongson2_cpufreq_target, - .get = loongson2_cpufreq_get, - .exit = loongson2_cpufreq_exit, - .attr = loongson2_table_attr, -}; - -static struct platform_device_id platform_device_ids[] = { - { - .name = "loongson2_cpufreq", - }, - {} -}; - -MODULE_DEVICE_TABLE(platform, platform_device_ids); - -static struct platform_driver platform_driver = { - .driver = { - .name = "loongson2_cpufreq", - .owner = THIS_MODULE, - }, - .id_table = platform_device_ids, -}; - -/* - * This is the simple version of Loongson-2 wait, Maybe we need do this in - * interrupt disabled context. 
- */ - -static DEFINE_SPINLOCK(loongson2_wait_lock); - -static void loongson2_cpu_wait(void) -{ - unsigned long flags; - u32 cpu_freq; - - spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ - spin_unlock_irqrestore(&loongson2_wait_lock, flags); -} - -static int __init cpufreq_init(void) -{ - int ret; - - /* Register platform stuff */ - ret = platform_driver_register(&platform_driver); - if (ret) - return ret; - - pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); - - cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - ret = cpufreq_register_driver(&loongson2_cpufreq_driver); - - if (!ret && !nowait) { - saved_cpu_wait = cpu_wait; - cpu_wait = loongson2_cpu_wait; - } - - return ret; -} - -static void __exit cpufreq_exit(void) -{ - if (!nowait && saved_cpu_wait) - cpu_wait = saved_cpu_wait; - cpufreq_unregister_driver(&loongson2_cpufreq_driver); - cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - platform_driver_unregister(&platform_driver); -} - -module_init(cpufreq_init); -module_exit(cpufreq_exit); - -module_param(nowait, uint, 0644); -MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait"); - -MODULE_AUTHOR("Yanhua "); -MODULE_DESCRIPTION("cpufreq driver for Loongson2F"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index a2f1600..5030df5 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -235,6 +235,24 @@ config IA64_ACPI_CPUFREQ endmenu +menu "MIPS CPUFreq processor drivers" +depends on MIPS + +config LOONGSON2_CPUFREQ + tristate "Loongson2 CPUFreq Driver" + select CPU_FREQ_TABLE + help + This option adds a CPUFreq driver for loongson processors which + support software configurable cpu frequency. + + Loongson2F and it's successors support this feature. + + For details, take a look at . + + If in doubt, say N. + +endmenu + menu "PowerPC CPU frequency scaling drivers" depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ab68133..9659cbb 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -82,3 +82,4 @@ obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o +obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c new file mode 100644 index 0000000..8488957 --- /dev/null +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -0,0 +1,248 @@ +/* + * Cpufreq driver for the loongson-2 processors + * + * The 2E revision of loongson processor not support this feature. + * + * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology + * Author: Yanhua, yanh@lemote.com + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include + +#include + +#include + +static uint nowait; + +static struct clk *cpuclk; + +static void (*saved_cpu_wait) (void); + +static int loongson2_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block loongson2_cpufreq_notifier_block = { + .notifier_call = loongson2_cpu_freq_notifier +}; + +static int loongson2_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + if (val == CPUFREQ_POSTCHANGE) + current_cpu_data.udelay_val = loops_per_jiffy; + + return 0; +} + +static unsigned int loongson2_cpufreq_get(unsigned int cpu) +{ + return clk_get_rate(cpuclk); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int loongson2_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int cpu = policy->cpu; + unsigned int newstate = 0; + cpumask_t cpus_allowed; + struct cpufreq_freqs freqs; + unsigned int freq; + + cpus_allowed = current->cpus_allowed; + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + if (cpufreq_frequency_table_target + (policy, &loongson2_clockmod_table[0], target_freq, relation, + &newstate)) + return -EINVAL; + + freq = + ((cpu_clock_freq / 1000) * + loongson2_clockmod_table[newstate].index) / 8; + if (freq < policy->min || freq > policy->max) + return -EINVAL; + + pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); + + freqs.old = loongson2_cpufreq_get(cpu); + freqs.new = freq; + freqs.flags = 0; + + if (freqs.new == freqs.old) + return 0; + + /* notifiers */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + set_cpus_allowed_ptr(current, &cpus_allowed); + + /* setting the cpu frequency */ + clk_set_rate(cpuclk, freq); + + /* notifiers */ + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + pr_debug("cpufreq: set frequency %u kHz\n", freq); + + return 0; +} + +static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int i; + unsigned long rate; + int ret; + + cpuclk = clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + rate = cpu_clock_freq / 1000; + if (!rate) { + clk_put(cpuclk); + return -EINVAL; + } + ret = clk_set_rate(cpuclk, rate); + if (ret) { + clk_put(cpuclk); + return ret; + } + + /* clock table init */ + for (i = 2; + (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); + i++) + loongson2_clockmod_table[i].frequency = (rate * i) / 8; + + policy->cur = loongson2_cpufreq_get(policy->cpu); + + cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], + policy->cpu); + + return cpufreq_frequency_table_cpuinfo(policy, + &loongson2_clockmod_table[0]); +} + +static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + &loongson2_clockmod_table[0]); +} + +static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) +{ + clk_put(cpuclk); + return 0; +} + +static struct freq_attr *loongson2_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver loongson2_cpufreq_driver = { + .owner = THIS_MODULE, + .name = "loongson2", + .init = loongson2_cpufreq_cpu_init, + .verify = loongson2_cpufreq_verify, + .target = loongson2_cpufreq_target, + .get = loongson2_cpufreq_get, + .exit = loongson2_cpufreq_exit, + .attr = 
loongson2_table_attr, +}; + +static struct platform_device_id platform_device_ids[] = { + { + .name = "loongson2_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "loongson2_cpufreq", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +/* + * This is the simple version of Loongson-2 wait, Maybe we need do this in + * interrupt disabled context. + */ + +static DEFINE_SPINLOCK(loongson2_wait_lock); + +static void loongson2_cpu_wait(void) +{ + unsigned long flags; + u32 cpu_freq; + + spin_lock_irqsave(&loongson2_wait_lock, flags); + cpu_freq = LOONGSON_CHIPCFG0; + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + spin_unlock_irqrestore(&loongson2_wait_lock, flags); +} + +static int __init cpufreq_init(void) +{ + int ret; + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); + + cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + ret = cpufreq_register_driver(&loongson2_cpufreq_driver); + + if (!ret && !nowait) { + saved_cpu_wait = cpu_wait; + cpu_wait = loongson2_cpu_wait; + } + + return ret; +} + +static void __exit cpufreq_exit(void) +{ + if (!nowait && saved_cpu_wait) + cpu_wait = saved_cpu_wait; + cpufreq_unregister_driver(&loongson2_cpufreq_driver); + cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait"); + +MODULE_AUTHOR("Yanhua "); +MODULE_DESCRIPTION("cpufreq driver for Loongson2F"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 7258267e56325d41f468eb650b6c23f697201645 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:22 +0000 Subject: cpufreq: sh: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of SUPERH architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Paul Mundt Signed-off-by: Rafael J. Wysocki diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5e85963..06e3163 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -624,25 +624,7 @@ config SH_CLK_CPG_LEGACY endmenu menu "CPU Frequency scaling" - source "drivers/cpufreq/Kconfig" - -config SH_CPU_FREQ - tristate "SuperH CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the cpufreq driver for SuperH. Any CPU that supports - clock rate rounding through the clock framework can use this - driver. While it will make the kernel slightly larger, this is - harmless for CPUs that don't support rate rounding. The driver - will also generate a notice in the boot log before disabling - itself if the CPU in question is not capable of rate rounding. - - For details, take a look at . - - If unsure, say N. 
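The SuperH driver moved below does not always have a fixed frequency table; it converts the requested kHz target to Hz, lets the clock framework round it, and rejects results outside the policy bounds. A minimal sketch of that conversion step, assuming an already-obtained cpuclk handle (the helper name is hypothetical):

#include <linux/clk.h>
#include <linux/cpufreq.h>

/* Hypothetical helper mirroring the rounding step in sh_cpufreq_target():
 * cpufreq works in kHz while the clk API works in Hz. */
static long example_round_target(struct clk *cpuclk,
				 struct cpufreq_policy *policy,
				 unsigned int target_khz)
{
	long freq_hz = clk_round_rate(cpuclk, target_khz * 1000);

	if (freq_hz < (policy->min * 1000) || freq_hz > (policy->max * 1000))
		return -EINVAL;

	return freq_hz;		/* suitable for passing to clk_set_rate() */
}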
- endmenu source "arch/sh/drivers/Kconfig" diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index f259b37..261c8bf 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_VSYSCALL) += vsyscall/ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c deleted file mode 100644 index 88c8fee..0000000 --- a/arch/sh/kernel/cpufreq.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * arch/sh/kernel/cpufreq.c - * - * cpufreq driver for the SuperH processors. - * - * Copyright (C) 2002 - 2012 Paul Mundt - * Copyright (C) 2002 M. R. Brown - * - * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c - * - * Copyright (C) 2004-2007 Atmel Corporation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#define pr_fmt(fmt) "cpufreq: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* set_cpus_allowed() */ -#include -#include -#include - -static DEFINE_PER_CPU(struct clk, sh_cpuclk); - -static unsigned int sh_cpufreq_get(unsigned int cpu) -{ - return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; -} - -/* - * Here we notify other drivers of the proposed change and the final change. - */ -static int sh_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - struct device *dev; - long freq; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - BUG_ON(smp_processor_id() != cpu); - - dev = get_cpu_device(cpu); - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; - - dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = sh_cpufreq_get(cpu); - freqs.new = (freq + 500) / 1000; - freqs.flags = 0; - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - set_cpus_allowed_ptr(current, &cpus_allowed); - clk_set_rate(cpuclk, freq); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - dev_dbg(dev, "set frequency %lu Hz\n", freq); - - return 0; -} - -static int sh_cpufreq_verify(struct cpufreq_policy *policy) -{ - struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); - struct cpufreq_frequency_table *freq_table; - - freq_table = cpuclk->nr_freqs ? 
cpuclk->freq_table : NULL; - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - return 0; -} - -static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - struct cpufreq_frequency_table *freq_table; - struct device *dev; - - dev = get_cpu_device(cpu); - - cpuclk = clk_get(dev, "cpu_clk"); - if (IS_ERR(cpuclk)) { - dev_err(dev, "couldn't get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - policy->cur = sh_cpufreq_get(cpu); - - freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; - if (freq_table) { - int result; - - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, cpu); - } else { - dev_notice(dev, "no frequency table found, falling back " - "to rate rounding.\n"); - - policy->min = policy->cpuinfo.min_freq = - (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->max = policy->cpuinfo.max_freq = - (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - } - - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - - dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " - "Maximum %u.%03u MHz.\n", - policy->min / 1000, policy->min % 1000, - policy->max / 1000, policy->max % 1000); - - return 0; -} - -static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - - cpufreq_frequency_table_put_attr(cpu); - clk_put(cpuclk); - - return 0; -} - -static struct freq_attr *sh_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver sh_cpufreq_driver = { - .owner = THIS_MODULE, - .name = "sh", - .get = sh_cpufreq_get, - .target = sh_cpufreq_target, - .verify = sh_cpufreq_verify, - .init = sh_cpufreq_cpu_init, - .exit = sh_cpufreq_cpu_exit, - .attr = sh_freq_attr, -}; - -static int __init sh_cpufreq_module_init(void) -{ - pr_notice("SuperH CPU frequency driver.\n"); - return cpufreq_register_driver(&sh_cpufreq_driver); -} - -static void __exit sh_cpufreq_module_exit(void) -{ - cpufreq_unregister_driver(&sh_cpufreq_driver); -} - -module_init(sh_cpufreq_module_init); -module_exit(sh_cpufreq_module_exit); - -MODULE_AUTHOR("Paul Mundt "); -MODULE_DESCRIPTION("cpufreq driver for SuperH"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 5030df5..602d5db 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -258,5 +258,23 @@ depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" endmenu +menu "SH CPU Frequency scaling" +depends on SUPERH +config SH_CPU_FREQ + tristate "SuperH CPU Frequency driver" + select CPU_FREQ_TABLE + help + This adds the cpufreq driver for SuperH. Any CPU that supports + clock rate rounding through the clock framework can use this + driver. While it will make the kernel slightly larger, this is + harmless for CPUs that don't support rate rounding. The driver + will also generate a notice in the boot log before disabling + itself if the CPU in question is not capable of rate rounding. + + For details, take a look at . + + If unsure, say N. 
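
Throughout the SuperH driver above, clock rates are converted from Hz to kHz by rounding to the nearest kilohertz, (rate + 500) / 1000, and when no frequency table is available the limits fall back to clk_round_rate() at 1 and ~0UL. A small, self-contained sketch of just the rounding helper; the sample rates below are invented, not real hardware values.

#include <stdio.h>

static unsigned int hz_to_khz(unsigned long hz)
{
	return (hz + 500) / 1000;		/* round to the nearest kHz */
}

int main(void)
{
	/* assumed example rates */
	unsigned long rates[] = { 132999600UL, 66500400UL, 1999UL };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%lu Hz -> %u kHz\n", rates[i], hz_to_khz(rates[i]));
	return 0;
}
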
+endmenu + endif endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 9659cbb..3930d2e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -83,3 +83,4 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c new file mode 100644 index 0000000..73adb64 --- /dev/null +++ b/drivers/cpufreq/sh-cpufreq.c @@ -0,0 +1,189 @@ +/* + * cpufreq driver for the SuperH processors. + * + * Copyright (C) 2002 - 2012 Paul Mundt + * Copyright (C) 2002 M. R. Brown + * + * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c + * + * Copyright (C) 2004-2007 Atmel Corporation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "cpufreq: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +static DEFINE_PER_CPU(struct clk, sh_cpuclk); + +static unsigned int sh_cpufreq_get(unsigned int cpu) +{ + return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sh_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int cpu = policy->cpu; + struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); + cpumask_t cpus_allowed; + struct cpufreq_freqs freqs; + struct device *dev; + long freq; + + cpus_allowed = current->cpus_allowed; + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + BUG_ON(smp_processor_id() != cpu); + + dev = get_cpu_device(cpu); + + /* Convert target_freq from kHz to Hz */ + freq = clk_round_rate(cpuclk, target_freq * 1000); + + if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) + return -EINVAL; + + dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); + + freqs.old = sh_cpufreq_get(cpu); + freqs.new = (freq + 500) / 1000; + freqs.flags = 0; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + set_cpus_allowed_ptr(current, &cpus_allowed); + clk_set_rate(cpuclk, freq); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + dev_dbg(dev, "set frequency %lu Hz\n", freq); + + return 0; +} + +static int sh_cpufreq_verify(struct cpufreq_policy *policy) +{ + struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); + struct cpufreq_frequency_table *freq_table; + + freq_table = cpuclk->nr_freqs ? 
cpuclk->freq_table : NULL; + if (freq_table) + return cpufreq_frequency_table_verify(policy, freq_table); + + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; + policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; + + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + return 0; +} + +static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); + struct cpufreq_frequency_table *freq_table; + struct device *dev; + + dev = get_cpu_device(cpu); + + cpuclk = clk_get(dev, "cpu_clk"); + if (IS_ERR(cpuclk)) { + dev_err(dev, "couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->cur = sh_cpufreq_get(cpu); + + freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; + if (freq_table) { + int result; + + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (!result) + cpufreq_frequency_table_get_attr(freq_table, cpu); + } else { + dev_notice(dev, "no frequency table found, falling back " + "to rate rounding.\n"); + + policy->min = policy->cpuinfo.min_freq = + (clk_round_rate(cpuclk, 1) + 500) / 1000; + policy->max = policy->cpuinfo.max_freq = + (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; + } + + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " + "Maximum %u.%03u MHz.\n", + policy->min / 1000, policy->min % 1000, + policy->max / 1000, policy->max % 1000); + + return 0; +} + +static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); + + cpufreq_frequency_table_put_attr(cpu); + clk_put(cpuclk); + + return 0; +} + +static struct freq_attr *sh_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver sh_cpufreq_driver = { + .owner = THIS_MODULE, + .name = "sh", + .get = sh_cpufreq_get, + .target = sh_cpufreq_target, + .verify = sh_cpufreq_verify, + .init = sh_cpufreq_cpu_init, + .exit = sh_cpufreq_cpu_exit, + .attr = sh_freq_attr, +}; + +static int __init sh_cpufreq_module_init(void) +{ + pr_notice("SuperH CPU frequency driver.\n"); + return cpufreq_register_driver(&sh_cpufreq_driver); +} + +static void __exit sh_cpufreq_module_exit(void) +{ + cpufreq_unregister_driver(&sh_cpufreq_driver); +} + +module_init(sh_cpufreq_module_init); +module_exit(sh_cpufreq_module_exit); + +MODULE_AUTHOR("Paul Mundt "); +MODULE_DESCRIPTION("cpufreq driver for SuperH"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 73cc9c8cac3f86ae4efebf001b68dcecc00b0017 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:23 +0000 Subject: cpufreq: unicore2: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of UNICORE-2 architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Guan Xuetao Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile index fa497e0..607a72f 100644 --- a/arch/unicore32/kernel/Makefile +++ b/arch/unicore32/kernel/Makefile @@ -9,7 +9,6 @@ obj-y += setup.o signal.o sys.o stacktrace.o traps.o obj-$(CONFIG_MODULES) += ksyms.o module.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_CPU_FREQ) += cpu-ucv2.o obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o # obj-y for architecture PKUnity v3 diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c deleted file mode 100644 index ba5a71c..0000000 --- a/arch/unicore32/kernel/cpu-ucv2.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II - * - * Code specific to PKUnity SoC and UniCore ISA - * - * Maintained by GUAN Xue-tao - * Copyright (C) 2001-2010 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include - -#include - -static struct cpufreq_driver ucv2_driver; - -/* make sure that only the "userspace" governor is run - * -- anything else wouldn't make sense on this platform, anyway. - */ -int ucv2_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - - return 0; -} - -static unsigned int ucv2_getspeed(unsigned int cpu) -{ - struct clk *mclk = clk_get(NULL, "MAIN_CLK"); - - if (cpu) - return 0; - return clk_get_rate(mclk)/1000; -} - -static int ucv2_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cur = ucv2_getspeed(0); - struct cpufreq_freqs freqs; - struct clk *mclk = clk_get(NULL, "MAIN_CLK"); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - if (!clk_set_rate(mclk, target_freq * 1000)) { - freqs.old = cur; - freqs.new = target_freq; - } - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init ucv2_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = ucv2_getspeed(0); - policy->min = policy->cpuinfo.min_freq = 250000; - policy->max = policy->cpuinfo.max_freq = 1000000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -static struct cpufreq_driver ucv2_driver = { - .flags = CPUFREQ_STICKY, - .verify = ucv2_verify_speed, - .target = ucv2_target, - .get = ucv2_getspeed, - .init = ucv2_cpu_init, - .name = "UniCore-II", -}; - -static int __init ucv2_cpufreq_init(void) -{ - return cpufreq_register_driver(&ucv2_driver); -} - -arch_initcall(ucv2_cpufreq_init); diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 3930d2e..18479b2 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -84,3 +84,4 @@ obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o +obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c new file mode 100644 index 0000000..12fc904 --- /dev/null +++ b/drivers/cpufreq/unicore2-cpufreq.c @@ -0,0 +1,92 @@ +/* + * clock scaling for the UniCore-II + * + * Code specific to PKUnity SoC and UniCore ISA + * + * 
Maintained by GUAN Xue-tao + * Copyright (C) 2001-2010 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include + +static struct cpufreq_driver ucv2_driver; + +/* make sure that only the "userspace" governor is run + * -- anything else wouldn't make sense on this platform, anyway. + */ +int ucv2_verify_speed(struct cpufreq_policy *policy) +{ + if (policy->cpu) + return -EINVAL; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); + + return 0; +} + +static unsigned int ucv2_getspeed(unsigned int cpu) +{ + struct clk *mclk = clk_get(NULL, "MAIN_CLK"); + + if (cpu) + return 0; + return clk_get_rate(mclk)/1000; +} + +static int ucv2_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int cur = ucv2_getspeed(0); + struct cpufreq_freqs freqs; + struct clk *mclk = clk_get(NULL, "MAIN_CLK"); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + if (!clk_set_rate(mclk, target_freq * 1000)) { + freqs.old = cur; + freqs.new = target_freq; + } + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static int __init ucv2_cpu_init(struct cpufreq_policy *policy) +{ + if (policy->cpu != 0) + return -EINVAL; + policy->cur = ucv2_getspeed(0); + policy->min = policy->cpuinfo.min_freq = 250000; + policy->max = policy->cpuinfo.max_freq = 1000000; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + return 0; +} + +static struct cpufreq_driver ucv2_driver = { + .flags = CPUFREQ_STICKY, + .verify = ucv2_verify_speed, + .target = ucv2_target, + .get = ucv2_getspeed, + .init = ucv2_cpu_init, + .name = "UniCore-II", +}; + +static int __init ucv2_cpufreq_init(void) +{ + return cpufreq_register_driver(&ucv2_driver); +} + +arch_initcall(ucv2_cpufreq_init); -- cgit v0.10.2 From 764295ae675bb252de3637e36a6602a9aed7de6b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 4 Apr 2013 12:54:24 +0000 Subject: cpufreq: sparc: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of SPARC architecture to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: David S. Miller Signed-off-by: Rafael J. Wysocki diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3d361f2..c85b761 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -254,29 +254,6 @@ config HOTPLUG_CPU if SPARC64 source "drivers/cpufreq/Kconfig" - -config US3_FREQ - tristate "UltraSPARC-III CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the CPUFreq driver for UltraSPARC-III processors. - - For details, take a look at . - - If in doubt, say N. - -config US2E_FREQ - tristate "UltraSPARC-IIe CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the CPUFreq driver for UltraSPARC-IIe processors. - - For details, take a look at . - - If in doubt, say N. 
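
Note that the UniCore-II target routine above issues its PRECHANGE notification before freqs is populated; the SuperH and SPARC drivers in this series order it the other way around, filling freqs.old and freqs.new first and sending POSTCHANGE only after the rate has actually been changed. A stubbed, userspace-only sketch of that ordering; notify() and set_rate() are invented stand-ins for cpufreq_notify_transition() and the clock call, and the frequencies are made up.

#include <stdio.h>

struct freqs_sketch { unsigned int old_khz, new_khz; };

static void notify(const char *phase, const struct freqs_sketch *f)
{
	printf("%s: %u kHz -> %u kHz\n", phase, f->old_khz, f->new_khz);
}

static int set_rate(unsigned int khz)
{
	(void)khz;
	return 0;				/* pretend the clock change succeeded */
}

int main(void)
{
	struct freqs_sketch f = { .old_khz = 1000000, .new_khz = 500000 };

	notify("PRECHANGE", &f);		/* freqs populated before notifying */
	if (set_rate(f.new_khz) == 0)
		notify("POSTCHANGE", &f);	/* only after the rate change */
	return 0;
}
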
- endif config US3_MC diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 6cf591b..5276fd4 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -102,9 +102,6 @@ obj-$(CONFIG_PCI_MSI) += pci_msi.o obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o -# sparc64 cpufreq -obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o -obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o obj-$(CONFIG_US3_MC) += chmc.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c deleted file mode 100644 index 306ae46..0000000 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ /dev/null @@ -1,408 +0,0 @@ -/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static struct cpufreq_driver *cpufreq_us2e_driver; - -struct us2e_freq_percpu_info { - struct cpufreq_frequency_table table[6]; -}; - -/* Indexed by cpu number. */ -static struct us2e_freq_percpu_info *us2e_freq_table; - -#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL -#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL - -/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled - * in the ESTAR mode control register. - */ -#define ESTAR_MODE_DIV_1 0x0000000000000000UL -#define ESTAR_MODE_DIV_2 0x0000000000000001UL -#define ESTAR_MODE_DIV_4 0x0000000000000003UL -#define ESTAR_MODE_DIV_6 0x0000000000000002UL -#define ESTAR_MODE_DIV_8 0x0000000000000004UL -#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL - -#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL -#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL -#define MCTRL0_REFR_COUNT_SHIFT 8 -#define MCTRL0_REFR_INTERVAL 7800 -#define MCTRL0_REFR_CLKS_P_CNT 64 - -static unsigned long read_hbreg(unsigned long addr) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=&r" (ret) - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); - return ret; -} - -static void write_hbreg(unsigned long addr, unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) - : "memory"); - if (addr == HBIRD_ESTAR_MODE_ADDR) { - /* Need to wait 16 clock cycles for the PLL to lock. 
*/ - udelay(1); - } -} - -static void self_refresh_ctl(int enable) -{ - unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (enable) - mctrl |= MCTRL0_SREFRESH_ENAB; - else - mctrl &= ~MCTRL0_SREFRESH_ENAB; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); -} - -static void frob_mem_refresh(int cpu_slowing_down, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long old_refr_count, refr_count, mctrl; - - refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); - refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); - - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) - >> MCTRL0_REFR_COUNT_SHIFT; - - mctrl &= ~MCTRL0_REFR_COUNT_MASK; - mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { - unsigned long usecs; - - /* We have to wait for both refresh counts (old - * and new) to go to zero. - */ - usecs = (MCTRL0_REFR_CLKS_P_CNT * - (refr_count + old_refr_count) * - 1000000UL * - old_divisor) / clock_tick; - udelay(usecs + 1UL); - } -} - -static void us2e_transition(unsigned long estar, unsigned long new_bits, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long flags; - - local_irq_save(flags); - - estar &= ~ESTAR_MODE_DIV_MASK; - - /* This is based upon the state transition diagram in the IIe manual. */ - if (old_divisor == 2 && divisor == 1) { - self_refresh_ctl(0); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - } else if (old_divisor == 1 && divisor == 2) { - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - self_refresh_ctl(1); - } else if (old_divisor == 1 && divisor > 2) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - 1, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor > 2 && divisor == 1) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - old_divisor, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor < divisor) { - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - } else if (old_divisor > divisor) { - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - } else { - BUG(); - } - - local_irq_restore(flags); -} - -static unsigned long index_to_estar_mode(unsigned int index) -{ - switch (index) { - case 0: - return ESTAR_MODE_DIV_1; - - case 1: - return ESTAR_MODE_DIV_2; - - case 2: - return ESTAR_MODE_DIV_4; - - case 3: - return ESTAR_MODE_DIV_6; - - case 4: - return ESTAR_MODE_DIV_8; - - default: - BUG(); - } -} - -static unsigned long index_to_divisor(unsigned int index) -{ - switch (index) { - case 0: - return 1; - - case 1: - return 2; - - case 2: - return 4; - - case 3: - return 6; - - case 4: - return 8; - - default: - BUG(); - } -} - -static unsigned long estar_to_divisor(unsigned long estar) -{ - unsigned long ret; - - switch (estar & ESTAR_MODE_DIV_MASK) { - case ESTAR_MODE_DIV_1: - ret = 1; - break; - case ESTAR_MODE_DIV_2: - ret = 2; - break; - case ESTAR_MODE_DIV_4: - ret = 4; - break; - case ESTAR_MODE_DIV_6: - ret = 6; - break; - case ESTAR_MODE_DIV_8: - ret = 8; - break; - default: - BUG(); - } - 
- return ret; -} - -static unsigned int us2e_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long clock_tick, estar; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - clock_tick = sparc64_get_clock_tick(cpu) / 1000; - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return clock_tick / estar_to_divisor(estar); -} - -static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, - unsigned int index) -{ - unsigned int cpu = policy->cpu; - unsigned long new_bits, new_freq; - unsigned long clock_tick, divisor, old_divisor, estar; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; - new_bits = index_to_estar_mode(index); - divisor = index_to_divisor(index); - new_freq /= divisor; - - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - old_divisor = estar_to_divisor(estar); - - freqs.old = clock_tick / old_divisor; - freqs.new = new_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - if (old_divisor != divisor) - us2e_transition(estar, new_bits, clock_tick * 1000, - old_divisor, divisor); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us2e_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us2e_freq_table[policy->cpu].table[0], - target_freq, relation, &new_index)) - return -EINVAL; - - us2e_set_cpu_divider_index(policy, new_index); - - return 0; -} - -static int us2e_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us2e_freq_table[policy->cpu].table[0]); -} - -static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us2e_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 4; - table[2].index = 3; - table[2].frequency = clock_tick / 6; - table[2].index = 4; - table[2].frequency = clock_tick / 8; - table[2].index = 5; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us2e_driver) - us2e_set_cpu_divider_index(policy, 0); - - return 0; -} - -static int __init us2e_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != spitfire) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == 0x17 && impl == 0x13) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us2e_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), - GFP_KERNEL); - if (!us2e_freq_table) - goto err_out; - - driver->init = us2e_freq_cpu_init; - driver->verify = us2e_freq_verify; - driver->target = 
us2e_freq_target; - driver->get = us2e_freq_get; - driver->exit = us2e_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-IIe"); - - cpufreq_us2e_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us2e_driver = NULL; - } - kfree(us2e_freq_table); - us2e_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us2e_freq_exit(void) -{ - if (cpufreq_us2e_driver) { - cpufreq_unregister_driver(cpufreq_us2e_driver); - kfree(cpufreq_us2e_driver); - cpufreq_us2e_driver = NULL; - kfree(us2e_freq_table); - us2e_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. Miller "); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); -MODULE_LICENSE("GPL"); - -module_init(us2e_freq_init); -module_exit(us2e_freq_exit); diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c deleted file mode 100644 index c71ee14..0000000 --- a/arch/sparc/kernel/us3_cpufreq.c +++ /dev/null @@ -1,269 +0,0 @@ -/* us3_cpufreq.c: UltraSPARC-III cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static struct cpufreq_driver *cpufreq_us3_driver; - -struct us3_freq_percpu_info { - struct cpufreq_frequency_table table[4]; -}; - -/* Indexed by cpu number. */ -static struct us3_freq_percpu_info *us3_freq_table; - -/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled - * in the Safari config register. - */ -#define SAFARI_CFG_DIV_1 0x0000000000000000UL -#define SAFARI_CFG_DIV_2 0x0000000040000000UL -#define SAFARI_CFG_DIV_32 0x0000000080000000UL -#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL - -static unsigned long read_safari_cfg(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=&r" (ret) - : "i" (ASI_SAFARI_CONFIG)); - return ret; -} - -static void write_safari_cfg(unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "i" (ASI_SAFARI_CONFIG) - : "memory"); -} - -static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) -{ - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - unsigned long ret; - - switch (safari_cfg & SAFARI_CFG_DIV_MASK) { - case SAFARI_CFG_DIV_1: - ret = clock_tick / 1; - break; - case SAFARI_CFG_DIV_2: - ret = clock_tick / 2; - break; - case SAFARI_CFG_DIV_32: - ret = clock_tick / 32; - break; - default: - BUG(); - } - - return ret; -} - -static unsigned int us3_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long reg; - unsigned int ret; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - reg = read_safari_cfg(); - ret = get_current_freq(cpu, reg); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return ret; -} - -static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, - unsigned int index) -{ - unsigned int cpu = policy->cpu; - unsigned long new_bits, new_freq, reg; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = sparc64_get_clock_tick(cpu) / 1000; - switch (index) { - 
case 0: - new_bits = SAFARI_CFG_DIV_1; - new_freq /= 1; - break; - case 1: - new_bits = SAFARI_CFG_DIV_2; - new_freq /= 2; - break; - case 2: - new_bits = SAFARI_CFG_DIV_32; - new_freq /= 32; - break; - - default: - BUG(); - } - - reg = read_safari_cfg(); - - freqs.old = get_current_freq(cpu, reg); - freqs.new = new_freq; - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - reg &= ~SAFARI_CFG_DIV_MASK; - reg |= new_bits; - write_safari_cfg(reg); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us3_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us3_freq_table[policy->cpu].table[0], - target_freq, - relation, - &new_index)) - return -EINVAL; - - us3_set_cpu_divider_index(policy, new_index); - - return 0; -} - -static int us3_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us3_freq_table[policy->cpu].table[0]); -} - -static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us3_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 32; - table[3].index = 0; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static int us3_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us3_driver) - us3_set_cpu_divider_index(policy, 0); - - return 0; -} - -static int __init us3_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != cheetah && tlb_type != cheetah_plus) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == CHEETAH_MANUF && - (impl == CHEETAH_IMPL || - impl == CHEETAH_PLUS_IMPL || - impl == JAGUAR_IMPL || - impl == PANTHER_IMPL)) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us3_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us3_freq_percpu_info)), - GFP_KERNEL); - if (!us3_freq_table) - goto err_out; - - driver->init = us3_freq_cpu_init; - driver->verify = us3_freq_verify; - driver->target = us3_freq_target; - driver->get = us3_freq_get; - driver->exit = us3_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-III"); - - cpufreq_us3_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us3_driver = NULL; - } - kfree(us3_freq_table); - us3_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us3_freq_exit(void) -{ - if (cpufreq_us3_driver) { - cpufreq_unregister_driver(cpufreq_us3_driver); - kfree(cpufreq_us3_driver); - cpufreq_us3_driver = NULL; - kfree(us3_freq_table); - us3_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. 
Miller "); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); -MODULE_LICENSE("GPL"); - -module_init(us3_freq_init); -module_exit(us3_freq_exit); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 602d5db..a1488f5 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -258,6 +258,29 @@ depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" endmenu +menu "SPARC CPU frequency scaling drivers" +depends on SPARC64 +config SPARC_US3_CPUFREQ + tristate "UltraSPARC-III CPU Frequency driver" + select CPU_FREQ_TABLE + help + This adds the CPUFreq driver for UltraSPARC-III processors. + + For details, take a look at . + + If in doubt, say N. + +config SPARC_US2E_CPUFREQ + tristate "UltraSPARC-IIe CPU Frequency driver" + select CPU_FREQ_TABLE + help + This adds the CPUFreq driver for UltraSPARC-IIe processors. + + For details, take a look at . + + If in doubt, say N. +endmenu + menu "SH CPU Frequency scaling" depends on SUPERH config SH_CPU_FREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 18479b2..3a9ff23 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -84,4 +84,6 @@ obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o +obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o +obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c new file mode 100644 index 0000000..306ae46 --- /dev/null +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -0,0 +1,408 @@ +/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support + * + * Copyright (C) 2003 David S. Miller (davem@redhat.com) + * + * Many thanks to Dominik Brodowski for fixing up the cpufreq + * infrastructure in order to make this driver easier to implement. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static struct cpufreq_driver *cpufreq_us2e_driver; + +struct us2e_freq_percpu_info { + struct cpufreq_frequency_table table[6]; +}; + +/* Indexed by cpu number. */ +static struct us2e_freq_percpu_info *us2e_freq_table; + +#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL +#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL + +/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled + * in the ESTAR mode control register. 
+ */ +#define ESTAR_MODE_DIV_1 0x0000000000000000UL +#define ESTAR_MODE_DIV_2 0x0000000000000001UL +#define ESTAR_MODE_DIV_4 0x0000000000000003UL +#define ESTAR_MODE_DIV_6 0x0000000000000002UL +#define ESTAR_MODE_DIV_8 0x0000000000000004UL +#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL + +#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL +#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL +#define MCTRL0_REFR_COUNT_SHIFT 8 +#define MCTRL0_REFR_INTERVAL 7800 +#define MCTRL0_REFR_CLKS_P_CNT 64 + +static unsigned long read_hbreg(unsigned long addr) +{ + unsigned long ret; + + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=&r" (ret) + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); + return ret; +} + +static void write_hbreg(unsigned long addr, unsigned long val) +{ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) + : "memory"); + if (addr == HBIRD_ESTAR_MODE_ADDR) { + /* Need to wait 16 clock cycles for the PLL to lock. */ + udelay(1); + } +} + +static void self_refresh_ctl(int enable) +{ + unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); + + if (enable) + mctrl |= MCTRL0_SREFRESH_ENAB; + else + mctrl &= ~MCTRL0_SREFRESH_ENAB; + write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); + (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); +} + +static void frob_mem_refresh(int cpu_slowing_down, + unsigned long clock_tick, + unsigned long old_divisor, unsigned long divisor) +{ + unsigned long old_refr_count, refr_count, mctrl; + + refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); + refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); + + mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); + old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) + >> MCTRL0_REFR_COUNT_SHIFT; + + mctrl &= ~MCTRL0_REFR_COUNT_MASK; + mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; + write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); + mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); + + if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { + unsigned long usecs; + + /* We have to wait for both refresh counts (old + * and new) to go to zero. + */ + usecs = (MCTRL0_REFR_CLKS_P_CNT * + (refr_count + old_refr_count) * + 1000000UL * + old_divisor) / clock_tick; + udelay(usecs + 1UL); + } +} + +static void us2e_transition(unsigned long estar, unsigned long new_bits, + unsigned long clock_tick, + unsigned long old_divisor, unsigned long divisor) +{ + unsigned long flags; + + local_irq_save(flags); + + estar &= ~ESTAR_MODE_DIV_MASK; + + /* This is based upon the state transition diagram in the IIe manual. 
*/ + if (old_divisor == 2 && divisor == 1) { + self_refresh_ctl(0); + write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); + frob_mem_refresh(0, clock_tick, old_divisor, divisor); + } else if (old_divisor == 1 && divisor == 2) { + frob_mem_refresh(1, clock_tick, old_divisor, divisor); + write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); + self_refresh_ctl(1); + } else if (old_divisor == 1 && divisor > 2) { + us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, + 1, 2); + us2e_transition(estar, new_bits, clock_tick, + 2, divisor); + } else if (old_divisor > 2 && divisor == 1) { + us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, + old_divisor, 2); + us2e_transition(estar, new_bits, clock_tick, + 2, divisor); + } else if (old_divisor < divisor) { + frob_mem_refresh(0, clock_tick, old_divisor, divisor); + write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); + } else if (old_divisor > divisor) { + write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); + frob_mem_refresh(1, clock_tick, old_divisor, divisor); + } else { + BUG(); + } + + local_irq_restore(flags); +} + +static unsigned long index_to_estar_mode(unsigned int index) +{ + switch (index) { + case 0: + return ESTAR_MODE_DIV_1; + + case 1: + return ESTAR_MODE_DIV_2; + + case 2: + return ESTAR_MODE_DIV_4; + + case 3: + return ESTAR_MODE_DIV_6; + + case 4: + return ESTAR_MODE_DIV_8; + + default: + BUG(); + } +} + +static unsigned long index_to_divisor(unsigned int index) +{ + switch (index) { + case 0: + return 1; + + case 1: + return 2; + + case 2: + return 4; + + case 3: + return 6; + + case 4: + return 8; + + default: + BUG(); + } +} + +static unsigned long estar_to_divisor(unsigned long estar) +{ + unsigned long ret; + + switch (estar & ESTAR_MODE_DIV_MASK) { + case ESTAR_MODE_DIV_1: + ret = 1; + break; + case ESTAR_MODE_DIV_2: + ret = 2; + break; + case ESTAR_MODE_DIV_4: + ret = 4; + break; + case ESTAR_MODE_DIV_6: + ret = 6; + break; + case ESTAR_MODE_DIV_8: + ret = 8; + break; + default: + BUG(); + } + + return ret; +} + +static unsigned int us2e_freq_get(unsigned int cpu) +{ + cpumask_t cpus_allowed; + unsigned long clock_tick, estar; + + cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + clock_tick = sparc64_get_clock_tick(cpu) / 1000; + estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); + + set_cpus_allowed_ptr(current, &cpus_allowed); + + return clock_tick / estar_to_divisor(estar); +} + +static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + unsigned long new_bits, new_freq; + unsigned long clock_tick, divisor, old_divisor, estar; + cpumask_t cpus_allowed; + struct cpufreq_freqs freqs; + + cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; + new_bits = index_to_estar_mode(index); + divisor = index_to_divisor(index); + new_freq /= divisor; + + estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); + + old_divisor = estar_to_divisor(estar); + + freqs.old = clock_tick / old_divisor; + freqs.new = new_freq; + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + if (old_divisor != divisor) + us2e_transition(estar, new_bits, clock_tick * 1000, + old_divisor, divisor); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + set_cpus_allowed_ptr(current, &cpus_allowed); +} + +static int us2e_freq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned 
int relation) +{ + unsigned int new_index = 0; + + if (cpufreq_frequency_table_target(policy, + &us2e_freq_table[policy->cpu].table[0], + target_freq, relation, &new_index)) + return -EINVAL; + + us2e_set_cpu_divider_index(policy, new_index); + + return 0; +} + +static int us2e_freq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + &us2e_freq_table[policy->cpu].table[0]); +} + +static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; + struct cpufreq_frequency_table *table = + &us2e_freq_table[cpu].table[0]; + + table[0].index = 0; + table[0].frequency = clock_tick / 1; + table[1].index = 1; + table[1].frequency = clock_tick / 2; + table[2].index = 2; + table[2].frequency = clock_tick / 4; + table[2].index = 3; + table[2].frequency = clock_tick / 6; + table[2].index = 4; + table[2].frequency = clock_tick / 8; + table[2].index = 5; + table[3].frequency = CPUFREQ_TABLE_END; + + policy->cpuinfo.transition_latency = 0; + policy->cur = clock_tick; + + return cpufreq_frequency_table_cpuinfo(policy, table); +} + +static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) +{ + if (cpufreq_us2e_driver) + us2e_set_cpu_divider_index(policy, 0); + + return 0; +} + +static int __init us2e_freq_init(void) +{ + unsigned long manuf, impl, ver; + int ret; + + if (tlb_type != spitfire) + return -ENODEV; + + __asm__("rdpr %%ver, %0" : "=r" (ver)); + manuf = ((ver >> 48) & 0xffff); + impl = ((ver >> 32) & 0xffff); + + if (manuf == 0x17 && impl == 0x13) { + struct cpufreq_driver *driver; + + ret = -ENOMEM; + driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); + if (!driver) + goto err_out; + + us2e_freq_table = kzalloc( + (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), + GFP_KERNEL); + if (!us2e_freq_table) + goto err_out; + + driver->init = us2e_freq_cpu_init; + driver->verify = us2e_freq_verify; + driver->target = us2e_freq_target; + driver->get = us2e_freq_get; + driver->exit = us2e_freq_cpu_exit; + driver->owner = THIS_MODULE, + strcpy(driver->name, "UltraSPARC-IIe"); + + cpufreq_us2e_driver = driver; + ret = cpufreq_register_driver(driver); + if (ret) + goto err_out; + + return 0; + +err_out: + if (driver) { + kfree(driver); + cpufreq_us2e_driver = NULL; + } + kfree(us2e_freq_table); + us2e_freq_table = NULL; + return ret; + } + + return -ENODEV; +} + +static void __exit us2e_freq_exit(void) +{ + if (cpufreq_us2e_driver) { + cpufreq_unregister_driver(cpufreq_us2e_driver); + kfree(cpufreq_us2e_driver); + cpufreq_us2e_driver = NULL; + kfree(us2e_freq_table); + us2e_freq_table = NULL; + } +} + +MODULE_AUTHOR("David S. Miller "); +MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); +MODULE_LICENSE("GPL"); + +module_init(us2e_freq_init); +module_exit(us2e_freq_exit); diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c new file mode 100644 index 0000000..c71ee14 --- /dev/null +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -0,0 +1,269 @@ +/* us3_cpufreq.c: UltraSPARC-III cpu frequency support + * + * Copyright (C) 2003 David S. Miller (davem@redhat.com) + * + * Many thanks to Dominik Brodowski for fixing up the cpufreq + * infrastructure in order to make this driver easier to implement. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static struct cpufreq_driver *cpufreq_us3_driver; + +struct us3_freq_percpu_info { + struct cpufreq_frequency_table table[4]; +}; + +/* Indexed by cpu number. */ +static struct us3_freq_percpu_info *us3_freq_table; + +/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled + * in the Safari config register. + */ +#define SAFARI_CFG_DIV_1 0x0000000000000000UL +#define SAFARI_CFG_DIV_2 0x0000000040000000UL +#define SAFARI_CFG_DIV_32 0x0000000080000000UL +#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL + +static unsigned long read_safari_cfg(void) +{ + unsigned long ret; + + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=&r" (ret) + : "i" (ASI_SAFARI_CONFIG)); + return ret; +} + +static void write_safari_cfg(unsigned long val) +{ + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (val), "i" (ASI_SAFARI_CONFIG) + : "memory"); +} + +static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) +{ + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; + unsigned long ret; + + switch (safari_cfg & SAFARI_CFG_DIV_MASK) { + case SAFARI_CFG_DIV_1: + ret = clock_tick / 1; + break; + case SAFARI_CFG_DIV_2: + ret = clock_tick / 2; + break; + case SAFARI_CFG_DIV_32: + ret = clock_tick / 32; + break; + default: + BUG(); + } + + return ret; +} + +static unsigned int us3_freq_get(unsigned int cpu) +{ + cpumask_t cpus_allowed; + unsigned long reg; + unsigned int ret; + + cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + reg = read_safari_cfg(); + ret = get_current_freq(cpu, reg); + + set_cpus_allowed_ptr(current, &cpus_allowed); + + return ret; +} + +static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + unsigned long new_bits, new_freq, reg; + cpumask_t cpus_allowed; + struct cpufreq_freqs freqs; + + cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); + + new_freq = sparc64_get_clock_tick(cpu) / 1000; + switch (index) { + case 0: + new_bits = SAFARI_CFG_DIV_1; + new_freq /= 1; + break; + case 1: + new_bits = SAFARI_CFG_DIV_2; + new_freq /= 2; + break; + case 2: + new_bits = SAFARI_CFG_DIV_32; + new_freq /= 32; + break; + + default: + BUG(); + } + + reg = read_safari_cfg(); + + freqs.old = get_current_freq(cpu, reg); + freqs.new = new_freq; + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + reg &= ~SAFARI_CFG_DIV_MASK; + reg |= new_bits; + write_safari_cfg(reg); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + set_cpus_allowed_ptr(current, &cpus_allowed); +} + +static int us3_freq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int new_index = 0; + + if (cpufreq_frequency_table_target(policy, + &us3_freq_table[policy->cpu].table[0], + target_freq, + relation, + &new_index)) + return -EINVAL; + + us3_set_cpu_divider_index(policy, new_index); + + return 0; +} + +static int us3_freq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + &us3_freq_table[policy->cpu].table[0]); +} + +static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; + struct cpufreq_frequency_table *table = + 
&us3_freq_table[cpu].table[0]; + + table[0].index = 0; + table[0].frequency = clock_tick / 1; + table[1].index = 1; + table[1].frequency = clock_tick / 2; + table[2].index = 2; + table[2].frequency = clock_tick / 32; + table[3].index = 0; + table[3].frequency = CPUFREQ_TABLE_END; + + policy->cpuinfo.transition_latency = 0; + policy->cur = clock_tick; + + return cpufreq_frequency_table_cpuinfo(policy, table); +} + +static int us3_freq_cpu_exit(struct cpufreq_policy *policy) +{ + if (cpufreq_us3_driver) + us3_set_cpu_divider_index(policy, 0); + + return 0; +} + +static int __init us3_freq_init(void) +{ + unsigned long manuf, impl, ver; + int ret; + + if (tlb_type != cheetah && tlb_type != cheetah_plus) + return -ENODEV; + + __asm__("rdpr %%ver, %0" : "=r" (ver)); + manuf = ((ver >> 48) & 0xffff); + impl = ((ver >> 32) & 0xffff); + + if (manuf == CHEETAH_MANUF && + (impl == CHEETAH_IMPL || + impl == CHEETAH_PLUS_IMPL || + impl == JAGUAR_IMPL || + impl == PANTHER_IMPL)) { + struct cpufreq_driver *driver; + + ret = -ENOMEM; + driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); + if (!driver) + goto err_out; + + us3_freq_table = kzalloc( + (NR_CPUS * sizeof(struct us3_freq_percpu_info)), + GFP_KERNEL); + if (!us3_freq_table) + goto err_out; + + driver->init = us3_freq_cpu_init; + driver->verify = us3_freq_verify; + driver->target = us3_freq_target; + driver->get = us3_freq_get; + driver->exit = us3_freq_cpu_exit; + driver->owner = THIS_MODULE, + strcpy(driver->name, "UltraSPARC-III"); + + cpufreq_us3_driver = driver; + ret = cpufreq_register_driver(driver); + if (ret) + goto err_out; + + return 0; + +err_out: + if (driver) { + kfree(driver); + cpufreq_us3_driver = NULL; + } + kfree(us3_freq_table); + us3_freq_table = NULL; + return ret; + } + + return -ENODEV; +} + +static void __exit us3_freq_exit(void) +{ + if (cpufreq_us3_driver) { + cpufreq_unregister_driver(cpufreq_us3_driver); + kfree(cpufreq_us3_driver); + cpufreq_us3_driver = NULL; + kfree(us3_freq_table); + us3_freq_table = NULL; + } +} + +MODULE_AUTHOR("David S. Miller "); +MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); +MODULE_LICENSE("GPL"); + +module_init(us3_freq_init); +module_exit(us3_freq_exit); -- cgit v0.10.2 From 6eb1c377423572374518f5be93214d139d113090 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 25 Mar 2013 11:20:23 +0530 Subject: cpufreq: powerpc/platforms/cell: move cpufreq driver to drivers/cpufreq This patch moves cpufreq driver of powerpc platforms/cell to drivers/cpufreq. Signed-off-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Rafael J. Wysocki diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 53aaefe..9978f59 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -113,34 +113,10 @@ config CBE_THERM default m depends on CBE_RAS && SPU_BASE -config CBE_CPUFREQ - tristate "CBE frequency scaling" - depends on CBE_RAS && CPU_FREQ - default m - help - This adds the cpufreq driver for Cell BE processors. - For details, take a look at . - If you don't have such processor, say N - -config CBE_CPUFREQ_PMI_ENABLE - bool "CBE frequency scaling using PMI interface" - depends on CBE_CPUFREQ - default n - help - Select this, if you want to use the PMI interface - to switch frequencies. Using PMI, the - processor will not only be able to run at lower speed, - but also at lower core voltage. 
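
Both SPARC drivers above gate themselves on the processor version register: they read %ver, take the manufacturer field from bits 63:48 and the implementation field from bits 47:32, and register only when those match a known chip. A tiny userspace sketch of that decode; the sample value is invented, not a real %ver reading.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ver = 0x0017001300000000ULL;	/* assumed sample value */
	unsigned long manuf = (ver >> 48) & 0xffff;
	unsigned long impl  = (ver >> 32) & 0xffff;

	/* us2e_freq_init() above accepts manuf 0x17, impl 0x13 (UltraSPARC-IIe) */
	printf("manuf = 0x%lx, impl = 0x%lx\n", manuf, impl);
	return 0;
}
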
- -config CBE_CPUFREQ_PMI - tristate - depends on CBE_CPUFREQ_PMI_ENABLE - default CBE_CPUFREQ - config PPC_PMI tristate default y - depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON + depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON help PMI (Platform Management Interrupt) is a way to communicate with the BMC (Baseboard Management Controller). diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index a4a8935..fe053e7 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ obj-$(CONFIG_CBE_RAS) += ras.o obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o -obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o -cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c deleted file mode 100644 index 718c6a3..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ /dev/null @@ -1,208 +0,0 @@ -/* - * cpufreq driver for the cell processor - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include -#include -#include - -#include -#include -#include -#include "cbe_cpufreq.h" - -static DEFINE_MUTEX(cbe_switch_mutex); - - -/* the CBE supports an 8 step frequency scaling */ -static struct cpufreq_frequency_table cbe_freqs[] = { - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {5, 0}, - {6, 0}, - {8, 0}, - {10, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -/* - * hardware specific functions - */ - -static int set_pmode(unsigned int cpu, unsigned int slow_mode) -{ - int rc; - - if (cbe_cpufreq_has_pmi) - rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); - else - rc = cbe_cpufreq_set_pmode(cpu, slow_mode); - - pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); - - return rc; -} - -/* - * cpufreq functions - */ - -static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - const u32 *max_freqp; - u32 max_freq; - int i, cur_pmode; - struct device_node *cpu; - - cpu = of_get_cpu_node(policy->cpu, NULL); - - if (!cpu) - return -ENODEV; - - pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - /* - * Let's check we can actually get to the CELL regs - */ - if (!cbe_get_cpu_pmd_regs(policy->cpu) || - !cbe_get_cpu_mic_tm_regs(policy->cpu)) { - pr_info("invalid CBE regs pointers for cpufreq\n"); - return -EINVAL; - } - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - - of_node_put(cpu); - - if (!max_freqp) - return -EINVAL; - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - - pr_debug("max clock-frequency is at %u kHz\n", max_freq); - pr_debug("initializing frequency table\n"); - - /* initialize frequency table */ - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); - } - - /* if DEBUG is enabled set_pmode() measures the latency - * of a transition */ - policy->cpuinfo.transition_latency = 25000; - - cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); - pr_debug("current pmode is at %d\n",cur_pmode); - - policy->cur = cbe_freqs[cur_pmode].frequency; - -#ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); -#endif - - cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); - - /* this ensures that policy->cpuinfo_min - * and policy->cpuinfo_max are set correctly */ - return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int cbe_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, cbe_freqs); -} - -static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - int rc; - struct cpufreq_freqs freqs; - unsigned int cbe_pmode_new; - - cpufreq_frequency_table_target(policy, - cbe_freqs, - target_freq, - relation, - &cbe_pmode_new); - - freqs.old = policy->cur; - freqs.new = cbe_freqs[cbe_pmode_new].frequency; - - mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - - pr_debug("setting frequency for cpu %d to %d kHz, " \ - "1/%d of max frequency\n", - policy->cpu, - cbe_freqs[cbe_pmode_new].frequency, - cbe_freqs[cbe_pmode_new].index); - - rc = set_pmode(policy->cpu, cbe_pmode_new); - - cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cbe_switch_mutex); - - return rc; -} - -static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cbe_cpufreq_verify, - .target = cbe_cpufreq_target, 
- .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, - .name = "cbe-cpufreq", - .owner = THIS_MODULE, - .flags = CPUFREQ_CONST_LOOPS, -}; - -/* - * module init and destoy - */ - -static int __init cbe_cpufreq_init(void) -{ - if (!machine_is(cell)) - return -ENODEV; - - return cpufreq_register_driver(&cbe_cpufreq_driver); -} - -static void __exit cbe_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&cbe_cpufreq_driver); -} - -module_init(cbe_cpufreq_init); -module_exit(cbe_cpufreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft "); diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h deleted file mode 100644 index c1d86bf..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * cbe_cpufreq.h - * - * This file contains the definitions used by the cbe_cpufreq driver. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft - * - */ - -#include -#include - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); -int cbe_cpufreq_get_pmode(int cpu); - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); - -#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE) -extern bool cbe_cpufreq_has_pmi; -#else -#define cbe_cpufreq_has_pmi (0) -#endif diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c deleted file mode 100644 index 20472e4..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * pervasive backend for the cbe_cpufreq driver - * - * This driver makes use of the pervasive unit to - * engage the desired frequency. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include -#include -#include -#include -#include -#include - -#include "cbe_cpufreq.h" - -/* to write to MIC register */ -static u64 MIC_Slow_Fast_Timer_table[] = { - [0 ... 
7] = 0x007fc00000000000ull, -}; - -/* more values for the MIC */ -static u64 MIC_Slow_Next_Timer_table[] = { - 0x0000240000000000ull, - 0x0000268000000000ull, - 0x000029C000000000ull, - 0x00002D0000000000ull, - 0x0000300000000000ull, - 0x0000334000000000ull, - 0x000039C000000000ull, - 0x00003FC000000000ull, -}; - - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - unsigned long flags; - u64 value; -#ifdef DEBUG - long time; -#endif - - local_irq_save(flags); - - mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu); - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - -#ifdef DEBUG - time = jiffies; -#endif - - out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]); - - out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]); - - value = in_be64(&pmd_regs->pmcr); - /* set bits to zero */ - value &= 0xFFFFFFFFFFFFFFF8ull; - /* set bits to next pmode */ - value |= pmode; - - out_be64(&pmd_regs->pmcr, value); - -#ifdef DEBUG - /* wait until new pmode appears in status register */ - value = in_be64(&pmd_regs->pmsr) & 0x07; - while (value != pmode) { - cpu_relax(); - value = in_be64(&pmd_regs->pmsr) & 0x07; - } - - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "pervasive unit\n", time); -#endif - local_irq_restore(flags); - - return 0; -} - - -int cbe_cpufreq_get_pmode(int cpu) -{ - int ret; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - ret = in_be64(&pmd_regs->pmsr) & 0x07; - - return ret; -} - diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c deleted file mode 100644 index 60a07a4..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * pmi backend for the cbe_cpufreq driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef DEBUG -#include -#endif - -#include "cbe_cpufreq.h" - -static u8 pmi_slow_mode_limit[MAX_CBE]; - -bool cbe_cpufreq_has_pmi = false; -EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); - -/* - * hardware specific functions - */ - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) -{ - int ret; - pmi_message_t pmi_msg; -#ifdef DEBUG - long time; -#endif - pmi_msg.type = PMI_TYPE_FREQ_CHANGE; - pmi_msg.data1 = cbe_cpu_to_node(cpu); - pmi_msg.data2 = pmode; - -#ifdef DEBUG - time = jiffies; -#endif - pmi_send_message(pmi_msg); - -#ifdef DEBUG - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "PMI\n", time); -#endif - ret = pmi_msg.data2; - pr_debug("PMI returned slow mode %d\n", ret); - - return ret; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); - - -static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) -{ - u8 node, slow_mode; - - BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); - - node = pmi_msg.data1; - slow_mode = pmi_msg.data2; - - pmi_slow_mode_limit[node] = slow_mode; - - pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); -} - -static int pmi_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *cbe_freqs; - u8 node; - - /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE - * and CPUFREQ_NOTIFY policy events?) - */ - if (event == CPUFREQ_START) - return 0; - - cbe_freqs = cpufreq_frequency_get_table(policy->cpu); - node = cbe_cpu_to_node(policy->cpu); - - pr_debug("got notified, event=%lu, node=%u\n", event, node); - - if (pmi_slow_mode_limit[node] != 0) { - pr_debug("limiting node %d to slow mode %d\n", - node, pmi_slow_mode_limit[node]); - - cpufreq_verify_within_limits(policy, 0, - - cbe_freqs[pmi_slow_mode_limit[node]].frequency); - } - - return 0; -} - -static struct notifier_block pmi_notifier_block = { - .notifier_call = pmi_notifier, -}; - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_FREQ_CHANGE, - .handle_pmi_message = cbe_cpufreq_handle_pmi, -}; - - - -static int __init cbe_cpufreq_pmi_init(void) -{ - cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; - - if (!cbe_cpufreq_has_pmi) - return -ENODEV; - - cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - - return 0; -} - -static void __exit cbe_cpufreq_pmi_exit(void) -{ - cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - pmi_unregister_handler(&cbe_pmi_handler); -} - -module_init(cbe_cpufreq_pmi_init); -module_exit(cbe_cpufreq_pmi_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft "); diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index e76992f..9c926ca 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -1,3 +1,21 @@ +config CPU_FREQ_CBE + tristate "CBE frequency scaling" + depends on CBE_RAS && PPC_CELL + default m + help + This adds the cpufreq driver for Cell BE processors. + For details, take a look at . + If you don't have such processor, say N + +config CPU_FREQ_CBE_PMI + bool "CBE frequency scaling using PMI interface" + depends on CPU_FREQ_CBE + default n + help + Select this, if you want to use the PMI interface to switch + frequencies. Using PMI, the processor will not only be able to run at + lower speed, but also at lower core voltage. 
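[Editor's note, not part of the patch series: the "8 step frequency scaling" named in the help text above comes straight from the cbe_freqs[] table in the driver, whose init routine divides the CPU node's clock-frequency property (converted to kHz) by each table index. The user-space sketch below only reproduces that arithmetic for a hypothetical 3.2 GHz Cell part; the real driver reads the nominal clock from the device tree rather than hard-coding it.]

        #include <stdio.h>

        /* Divider steps carried in the .index field of the driver's
         * cbe_freqs[] table: {1, 2, 3, 4, 5, 6, 8, 10}. */
        static const unsigned int cbe_dividers[] = { 1, 2, 3, 4, 5, 6, 8, 10 };

        int main(void)
        {
                unsigned int max_freq = 3200000; /* kHz, hypothetical 3.2 GHz part */
                unsigned int i;

                for (i = 0; i < 8; i++)
                        printf("pmode %u: %u kHz\n", i, max_freq / cbe_dividers[i]);
                return 0;
        }

[With those dividers the slowest pmode runs at one tenth of the nominal clock, which is the full range the PMD/MIC hardware exposes.]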
+ config CPU_FREQ_MAPLE bool "Support for Maple 970FX Evaluation Board" depends on PPC_MAPLE diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 3a9ff23..17417c7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -73,6 +73,9 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o ################################################################################## # PowerPC platform drivers +obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o +ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o +obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o ################################################################################## diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c new file mode 100644 index 0000000..e577a1d --- /dev/null +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -0,0 +1,209 @@ +/* + * cpufreq driver for the cell processor + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 + * + * Author: Christian Krafft + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include + +#include +#include +#include + +#include "ppc_cbe_cpufreq.h" + +static DEFINE_MUTEX(cbe_switch_mutex); + + +/* the CBE supports an 8 step frequency scaling */ +static struct cpufreq_frequency_table cbe_freqs[] = { + {1, 0}, + {2, 0}, + {3, 0}, + {4, 0}, + {5, 0}, + {6, 0}, + {8, 0}, + {10, 0}, + {0, CPUFREQ_TABLE_END}, +}; + +/* + * hardware specific functions + */ + +static int set_pmode(unsigned int cpu, unsigned int slow_mode) +{ + int rc; + + if (cbe_cpufreq_has_pmi) + rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); + else + rc = cbe_cpufreq_set_pmode(cpu, slow_mode); + + pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); + + return rc; +} + +/* + * cpufreq functions + */ + +static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + const u32 *max_freqp; + u32 max_freq; + int i, cur_pmode; + struct device_node *cpu; + + cpu = of_get_cpu_node(policy->cpu, NULL); + + if (!cpu) + return -ENODEV; + + pr_debug("init cpufreq on CPU %d\n", policy->cpu); + + /* + * Let's check we can actually get to the CELL regs + */ + if (!cbe_get_cpu_pmd_regs(policy->cpu) || + !cbe_get_cpu_mic_tm_regs(policy->cpu)) { + pr_info("invalid CBE regs pointers for cpufreq\n"); + return -EINVAL; + } + + max_freqp = of_get_property(cpu, "clock-frequency", NULL); + + of_node_put(cpu); + + if (!max_freqp) + return -EINVAL; + + /* we need the freq in kHz */ + max_freq = *max_freqp / 1000; + + pr_debug("max clock-frequency is at %u kHz\n", max_freq); + pr_debug("initializing frequency table\n"); + + /* initialize frequency table */ + for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { + cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; + pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); + } + + /* if 
DEBUG is enabled set_pmode() measures the latency + * of a transition */ + policy->cpuinfo.transition_latency = 25000; + + cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); + pr_debug("current pmode is at %d\n",cur_pmode); + + policy->cur = cbe_freqs[cur_pmode].frequency; + +#ifdef CONFIG_SMP + cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); +#endif + + cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); + + /* this ensures that policy->cpuinfo_min + * and policy->cpuinfo_max are set correctly */ + return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); +} + +static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static int cbe_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, cbe_freqs); +} + +static int cbe_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + int rc; + struct cpufreq_freqs freqs; + unsigned int cbe_pmode_new; + + cpufreq_frequency_table_target(policy, + cbe_freqs, + target_freq, + relation, + &cbe_pmode_new); + + freqs.old = policy->cur; + freqs.new = cbe_freqs[cbe_pmode_new].frequency; + + mutex_lock(&cbe_switch_mutex); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + pr_debug("setting frequency for cpu %d to %d kHz, " \ + "1/%d of max frequency\n", + policy->cpu, + cbe_freqs[cbe_pmode_new].frequency, + cbe_freqs[cbe_pmode_new].index); + + rc = set_pmode(policy->cpu, cbe_pmode_new); + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + mutex_unlock(&cbe_switch_mutex); + + return rc; +} + +static struct cpufreq_driver cbe_cpufreq_driver = { + .verify = cbe_cpufreq_verify, + .target = cbe_cpufreq_target, + .init = cbe_cpufreq_cpu_init, + .exit = cbe_cpufreq_cpu_exit, + .name = "cbe-cpufreq", + .owner = THIS_MODULE, + .flags = CPUFREQ_CONST_LOOPS, +}; + +/* + * module init and destoy + */ + +static int __init cbe_cpufreq_init(void) +{ + if (!machine_is(cell)) + return -ENODEV; + + return cpufreq_register_driver(&cbe_cpufreq_driver); +} + +static void __exit cbe_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&cbe_cpufreq_driver); +} + +module_init(cbe_cpufreq_init); +module_exit(cbe_cpufreq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Krafft "); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h new file mode 100644 index 0000000..b4c00a5 --- /dev/null +++ b/drivers/cpufreq/ppc_cbe_cpufreq.h @@ -0,0 +1,24 @@ +/* + * ppc_cbe_cpufreq.h + * + * This file contains the definitions used by the cbe_cpufreq driver. + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 + * + * Author: Christian Krafft + * + */ + +#include +#include + +int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); +int cbe_cpufreq_get_pmode(int cpu); + +int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); + +#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE) +extern bool cbe_cpufreq_has_pmi; +#else +#define cbe_cpufreq_has_pmi (0) +#endif diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c new file mode 100644 index 0000000..84d2f2c --- /dev/null +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c @@ -0,0 +1,115 @@ +/* + * pervasive backend for the cbe_cpufreq driver + * + * This driver makes use of the pervasive unit to + * engage the desired frequency. 
+ * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 + * + * Author: Christian Krafft + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#include "ppc_cbe_cpufreq.h" + +/* to write to MIC register */ +static u64 MIC_Slow_Fast_Timer_table[] = { + [0 ... 7] = 0x007fc00000000000ull, +}; + +/* more values for the MIC */ +static u64 MIC_Slow_Next_Timer_table[] = { + 0x0000240000000000ull, + 0x0000268000000000ull, + 0x000029C000000000ull, + 0x00002D0000000000ull, + 0x0000300000000000ull, + 0x0000334000000000ull, + 0x000039C000000000ull, + 0x00003FC000000000ull, +}; + + +int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode) +{ + struct cbe_pmd_regs __iomem *pmd_regs; + struct cbe_mic_tm_regs __iomem *mic_tm_regs; + unsigned long flags; + u64 value; +#ifdef DEBUG + long time; +#endif + + local_irq_save(flags); + + mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu); + pmd_regs = cbe_get_cpu_pmd_regs(cpu); + +#ifdef DEBUG + time = jiffies; +#endif + + out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]); + out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]); + + out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]); + out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]); + + value = in_be64(&pmd_regs->pmcr); + /* set bits to zero */ + value &= 0xFFFFFFFFFFFFFFF8ull; + /* set bits to next pmode */ + value |= pmode; + + out_be64(&pmd_regs->pmcr, value); + +#ifdef DEBUG + /* wait until new pmode appears in status register */ + value = in_be64(&pmd_regs->pmsr) & 0x07; + while (value != pmode) { + cpu_relax(); + value = in_be64(&pmd_regs->pmsr) & 0x07; + } + + time = jiffies - time; + time = jiffies_to_msecs(time); + pr_debug("had to wait %lu ms for a transition using " \ + "pervasive unit\n", time); +#endif + local_irq_restore(flags); + + return 0; +} + + +int cbe_cpufreq_get_pmode(int cpu) +{ + int ret; + struct cbe_pmd_regs __iomem *pmd_regs; + + pmd_regs = cbe_get_cpu_pmd_regs(cpu); + ret = in_be64(&pmd_regs->pmsr) & 0x07; + + return ret; +} + diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c new file mode 100644 index 0000000..d29e8da --- /dev/null +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -0,0 +1,156 @@ +/* + * pmi backend for the cbe_cpufreq driver + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 + * + * Author: Christian Krafft + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef DEBUG +#include +#endif + +#include "ppc_cbe_cpufreq.h" + +static u8 pmi_slow_mode_limit[MAX_CBE]; + +bool cbe_cpufreq_has_pmi = false; +EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); + +/* + * hardware specific functions + */ + +int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) +{ + int ret; + pmi_message_t pmi_msg; +#ifdef DEBUG + long time; +#endif + pmi_msg.type = PMI_TYPE_FREQ_CHANGE; + pmi_msg.data1 = cbe_cpu_to_node(cpu); + pmi_msg.data2 = pmode; + +#ifdef DEBUG + time = jiffies; +#endif + pmi_send_message(pmi_msg); + +#ifdef DEBUG + time = jiffies - time; + time = jiffies_to_msecs(time); + pr_debug("had to wait %lu ms for a transition using " \ + "PMI\n", time); +#endif + ret = pmi_msg.data2; + pr_debug("PMI returned slow mode %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); + + +static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) +{ + u8 node, slow_mode; + + BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); + + node = pmi_msg.data1; + slow_mode = pmi_msg.data2; + + pmi_slow_mode_limit[node] = slow_mode; + + pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); +} + +static int pmi_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + struct cpufreq_frequency_table *cbe_freqs; + u8 node; + + /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE + * and CPUFREQ_NOTIFY policy events?) 
+ */ + if (event == CPUFREQ_START) + return 0; + + cbe_freqs = cpufreq_frequency_get_table(policy->cpu); + node = cbe_cpu_to_node(policy->cpu); + + pr_debug("got notified, event=%lu, node=%u\n", event, node); + + if (pmi_slow_mode_limit[node] != 0) { + pr_debug("limiting node %d to slow mode %d\n", + node, pmi_slow_mode_limit[node]); + + cpufreq_verify_within_limits(policy, 0, + + cbe_freqs[pmi_slow_mode_limit[node]].frequency); + } + + return 0; +} + +static struct notifier_block pmi_notifier_block = { + .notifier_call = pmi_notifier, +}; + +static struct pmi_handler cbe_pmi_handler = { + .type = PMI_TYPE_FREQ_CHANGE, + .handle_pmi_message = cbe_cpufreq_handle_pmi, +}; + + + +static int __init cbe_cpufreq_pmi_init(void) +{ + cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; + + if (!cbe_cpufreq_has_pmi) + return -ENODEV; + + cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); + + return 0; +} + +static void __exit cbe_cpufreq_pmi_exit(void) +{ + cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); + pmi_unregister_handler(&cbe_pmi_handler); +} + +module_init(cbe_cpufreq_pmi_init); +module_exit(cbe_cpufreq_pmi_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Krafft "); -- cgit v0.10.2 From 5800043b2488a1c4c6e859af860644d37419d58b Mon Sep 17 00:00:00 2001 From: Nathan Zimmer Date: Thu, 4 Apr 2013 14:53:25 +0000 Subject: cpufreq: convert cpufreq_driver to using RCU We eventually would like to remove the rwlock cpufreq_driver_lock or convert it back to a spinlock and protect the read sections with RCU. The first step in that direction is to make cpufreq_driver use RCU. I don't see an easy wasy to protect the cpufreq_cpu_data structure with RCU, so I am leaving it with the rwlock for now since under certain configs __cpufreq_cpu_get is a hot spot with 256+ cores. [rjw: Subject, changelog, white space] Signed-off-by: Nathan Zimmer Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0198cd0..fd97a62 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -39,7 +39,7 @@ * level driver of CPUFreq support, and its spinlock. This lock * also protects the cpufreq_cpu_data array. 
*/ -static struct cpufreq_driver *cpufreq_driver; +static struct cpufreq_driver __rcu *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); #ifdef CONFIG_HOTPLUG_CPU /* This one keeps track of the previously set governor of a removed CPU */ @@ -130,26 +130,34 @@ static DEFINE_MUTEX(cpufreq_governor_mutex); bool have_governor_per_policy(void) { - return cpufreq_driver->have_governor_per_policy; + bool have_governor_per_policy; + rcu_read_lock(); + have_governor_per_policy = + rcu_dereference(cpufreq_driver)->have_governor_per_policy; + rcu_read_unlock(); + return have_governor_per_policy; } static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) { struct cpufreq_policy *data; + struct cpufreq_driver *driver; unsigned long flags; if (cpu >= nr_cpu_ids) goto err_out; /* get the cpufreq driver */ - read_lock_irqsave(&cpufreq_driver_lock, flags); + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); - if (!cpufreq_driver) + if (!driver) goto err_out_unlock; - if (!try_module_get(cpufreq_driver->owner)) + if (!try_module_get(driver->owner)) goto err_out_unlock; + read_lock_irqsave(&cpufreq_driver_lock, flags); /* get the CPU */ data = per_cpu(cpufreq_cpu_data, cpu); @@ -161,12 +169,14 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) goto err_out_put_module; read_unlock_irqrestore(&cpufreq_driver_lock, flags); + rcu_read_unlock(); return data; err_out_put_module: - module_put(cpufreq_driver->owner); -err_out_unlock: + module_put(driver->owner); read_unlock_irqrestore(&cpufreq_driver_lock, flags); +err_out_unlock: + rcu_read_unlock(); err_out: return NULL; } @@ -189,7 +199,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) { if (!sysfs) kobject_put(&data->kobj); - module_put(cpufreq_driver->owner); + rcu_read_lock(); + module_put(rcu_dereference(cpufreq_driver)->owner); + rcu_read_unlock(); } void cpufreq_cpu_put(struct cpufreq_policy *data) @@ -257,7 +269,9 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, if (cpufreq_disabled()) return; - freqs->flags = cpufreq_driver->flags; + rcu_read_lock(); + freqs->flags = rcu_dereference(cpufreq_driver)->flags; + rcu_read_unlock(); pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); @@ -268,7 +282,7 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy, * which is not equal to what the cpufreq core thinks is * "old frequency". */ - if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { + if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { pr_debug("Warning: CPU frequency is" @@ -334,11 +348,21 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, struct cpufreq_governor **governor) { int err = -EINVAL; - - if (!cpufreq_driver) + struct cpufreq_driver *driver; + bool has_setpolicy; + bool has_target; + + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + if (!driver) { + rcu_read_unlock(); goto out; + } + has_setpolicy = driver->setpolicy ? true : false; + has_target = driver->target ? 
true : false; + rcu_read_unlock(); - if (cpufreq_driver->setpolicy) { + if (has_setpolicy) { if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_PERFORMANCE; err = 0; @@ -347,7 +371,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, *policy = CPUFREQ_POLICY_POWERSAVE; err = 0; } - } else if (cpufreq_driver->target) { + } else if (has_target) { struct cpufreq_governor *t; mutex_lock(&cpufreq_governor_mutex); @@ -498,7 +522,12 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, */ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) { - return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); + ssize_t size; + rcu_read_lock(); + size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", + rcu_dereference(cpufreq_driver)->name); + rcu_read_unlock(); + return size; } /** @@ -510,10 +539,13 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, ssize_t i = 0; struct cpufreq_governor *t; - if (!cpufreq_driver->target) { + rcu_read_lock(); + if (!rcu_dereference(cpufreq_driver)->target) { + rcu_read_unlock(); i += sprintf(buf, "performance powersave"); goto out; } + rcu_read_unlock(); list_for_each_entry(t, &cpufreq_governor_list, governor_list) { if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) @@ -591,9 +623,15 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) { unsigned int limit; + int (*bios_limit)(int cpu, unsigned int *limit); int ret; - if (cpufreq_driver->bios_limit) { - ret = cpufreq_driver->bios_limit(policy->cpu, &limit); + + rcu_read_lock(); + bios_limit = rcu_dereference(cpufreq_driver)->bios_limit; + rcu_read_unlock(); + + if (bios_limit) { + ret = bios_limit(policy->cpu, &limit); if (!ret) return sprintf(buf, "%u\n", limit); } @@ -736,6 +774,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, { struct cpufreq_policy new_policy; struct freq_attr **drv_attr; + struct cpufreq_driver *driver; unsigned long flags; int ret = 0; unsigned int j; @@ -747,28 +786,31 @@ static int cpufreq_add_dev_interface(unsigned int cpu, return ret; /* set up files for this cpu device */ - drv_attr = cpufreq_driver->attr; + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + drv_attr = driver->attr; while ((drv_attr) && (*drv_attr)) { ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); if (ret) - goto err_out_kobj_put; + goto err_out_unlock; drv_attr++; } - if (cpufreq_driver->get) { + if (driver->get) { ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); if (ret) - goto err_out_kobj_put; + goto err_out_unlock; } - if (cpufreq_driver->target) { + if (driver->target) { ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) - goto err_out_kobj_put; + goto err_out_unlock; } - if (cpufreq_driver->bios_limit) { + if (driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) - goto err_out_kobj_put; + goto err_out_unlock; } + rcu_read_unlock(); write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) { @@ -791,12 +833,20 @@ static int cpufreq_add_dev_interface(unsigned int cpu, policy->user_policy.governor = policy->governor; if (ret) { + int (*exit)(struct cpufreq_policy *policy); + pr_debug("setting policy failed\n"); - if (cpufreq_driver->exit) - cpufreq_driver->exit(policy); + rcu_read_lock(); + exit = rcu_dereference(cpufreq_driver)->exit; + 
rcu_read_unlock(); + if (exit) + exit(policy); + } return ret; +err_out_unlock: + rcu_read_unlock(); err_out_kobj_put: kobject_put(&policy->kobj); wait_for_completion(&policy->kobj_unregister); @@ -854,6 +904,8 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) unsigned int j, cpu = dev->id; int ret = -ENOMEM; struct cpufreq_policy *policy; + struct cpufreq_driver *driver; + int (*init)(struct cpufreq_policy *policy); unsigned long flags; #ifdef CONFIG_HOTPLUG_CPU struct cpufreq_governor *gov; @@ -888,10 +940,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) #endif #endif - if (!try_module_get(cpufreq_driver->owner)) { + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + if (!try_module_get(driver->owner)) { + rcu_read_unlock(); ret = -EINVAL; goto module_out; } + init = driver->init; + rcu_read_unlock(); policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); if (!policy) @@ -916,7 +973,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* call driver. From then on the cpufreq must be able * to accept all calls to ->verify and ->setpolicy for this CPU */ - ret = cpufreq_driver->init(policy); + ret = init(policy); if (ret) { pr_debug("initialization failed\n"); goto err_set_policy_cpu; @@ -951,7 +1008,9 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) goto err_out_unregister; kobject_uevent(&policy->kobj, KOBJ_ADD); - module_put(cpufreq_driver->owner); + rcu_read_lock(); + module_put(rcu_dereference(cpufreq_driver)->owner); + rcu_read_unlock(); pr_debug("initialization complete\n"); return 0; @@ -973,7 +1032,9 @@ err_free_cpumask: err_free_policy: kfree(policy); nomem_out: - module_put(cpufreq_driver->owner); + rcu_read_lock(); + module_put(rcu_dereference(cpufreq_driver)->owner); + rcu_read_unlock(); module_out: return ret; } @@ -1007,9 +1068,12 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif unsigned int cpu = dev->id, ret, cpus; unsigned long flags; struct cpufreq_policy *data; + struct cpufreq_driver *driver; struct kobject *kobj; struct completion *cmp; struct device *cpu_dev; + bool has_target; + int (*exit)(struct cpufreq_policy *policy); pr_debug("%s: unregistering CPU %u\n", __func__, cpu); @@ -1025,14 +1089,19 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif return -EINVAL; } - if (cpufreq_driver->target) + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + has_target = driver->target ? 
true : false; + exit = driver->exit; + if (has_target) __cpufreq_governor(data, CPUFREQ_GOV_STOP); #ifdef CONFIG_HOTPLUG_CPU - if (!cpufreq_driver->setpolicy) + if (!driver->setpolicy) strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name, CPUFREQ_NAME_LEN); #endif + rcu_read_unlock(); WARN_ON(lock_policy_rwsem_write(cpu)); cpus = cpumask_weight(data->cpus); @@ -1091,13 +1160,13 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif wait_for_completion(cmp); pr_debug("wait complete\n"); - if (cpufreq_driver->exit) - cpufreq_driver->exit(data); + if (exit) + exit(data); free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); - } else if (cpufreq_driver->target) { + } else if (has_target) { __cpufreq_governor(data, CPUFREQ_GOV_START); __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } @@ -1171,10 +1240,18 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int cpufreq_quick_get(unsigned int cpu) { struct cpufreq_policy *policy; + struct cpufreq_driver *driver; + unsigned int (*get)(unsigned int cpu); unsigned int ret_freq = 0; - if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) - return cpufreq_driver->get(cpu); + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + if (driver && driver->setpolicy && driver->get) { + get = driver->get; + rcu_read_unlock(); + return get(cpu); + } + rcu_read_unlock(); policy = cpufreq_cpu_get(cpu); if (policy) { @@ -1210,15 +1287,26 @@ EXPORT_SYMBOL(cpufreq_quick_get_max); static unsigned int __cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + struct cpufreq_driver *driver; + unsigned int (*get)(unsigned int cpu); unsigned int ret_freq = 0; + u8 flags; + - if (!cpufreq_driver->get) + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + if (!driver->get) { + rcu_read_unlock(); return ret_freq; + } + flags = driver->flags; + get = driver->get; + rcu_read_unlock(); - ret_freq = cpufreq_driver->get(cpu); + ret_freq = get(cpu); if (ret_freq && policy->cur && - !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { + !(flags & CPUFREQ_CONST_LOOPS)) { /* verify no discrepancy between actual and saved value exists */ if (unlikely(ret_freq != policy->cur)) { @@ -1274,6 +1362,7 @@ static struct subsys_interface cpufreq_interface = { */ static int cpufreq_bp_suspend(void) { + int (*suspend)(struct cpufreq_policy *policy); int ret = 0; int cpu = smp_processor_id(); @@ -1286,8 +1375,11 @@ static int cpufreq_bp_suspend(void) if (!cpu_policy) return 0; - if (cpufreq_driver->suspend) { - ret = cpufreq_driver->suspend(cpu_policy); + rcu_read_lock(); + suspend = rcu_dereference(cpufreq_driver)->suspend; + rcu_read_unlock(); + if (suspend) { + ret = suspend(cpu_policy); if (ret) printk(KERN_ERR "cpufreq: suspend failed in ->suspend " "step on CPU %u\n", cpu_policy->cpu); @@ -1313,6 +1405,7 @@ static int cpufreq_bp_suspend(void) static void cpufreq_bp_resume(void) { int ret = 0; + int (*resume)(struct cpufreq_policy *policy); int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; @@ -1324,8 +1417,12 @@ static void cpufreq_bp_resume(void) if (!cpu_policy) return; - if (cpufreq_driver->resume) { - ret = cpufreq_driver->resume(cpu_policy); + rcu_read_lock(); + resume = rcu_dereference(cpufreq_driver)->resume; + rcu_read_unlock(); + + if (resume) { + ret = resume(cpu_policy); if (ret) { printk(KERN_ERR "cpufreq: resume failed in ->resume " "step on CPU %u\n", cpu_policy->cpu); @@ -1352,10 +1449,14 
@@ static struct syscore_ops cpufreq_syscore_ops = { */ const char *cpufreq_get_current_driver(void) { - if (cpufreq_driver) - return cpufreq_driver->name; - - return NULL; + struct cpufreq_driver *driver; + const char *name = NULL; + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + if (driver) + name = driver->name; + rcu_read_unlock(); + return name; } EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); @@ -1449,6 +1550,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; unsigned int old_target_freq = target_freq; + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); if (cpufreq_disabled()) return -ENODEV; @@ -1465,8 +1569,11 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (target_freq == policy->cur) return 0; - if (cpufreq_driver->target) - retval = cpufreq_driver->target(policy, target_freq, relation); + rcu_read_lock(); + target = rcu_dereference(cpufreq_driver)->target; + rcu_read_unlock(); + if (target) + retval = target(policy, target_freq, relation); return retval; } @@ -1499,18 +1606,24 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_target); int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) { int ret = 0; + unsigned int (*getavg)(struct cpufreq_policy *policy, + unsigned int cpu); if (cpufreq_disabled()) return ret; - if (!cpufreq_driver->getavg) + rcu_read_lock(); + getavg = rcu_dereference(cpufreq_driver)->getavg; + rcu_read_unlock(); + + if (!getavg) return 0; policy = cpufreq_cpu_get(policy->cpu); if (!policy) return -EINVAL; - ret = cpufreq_driver->getavg(policy, cpu); + ret = getavg(policy, cpu); cpufreq_cpu_put(policy); return ret; @@ -1668,6 +1781,9 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) { int ret = 0, failed = 1; + struct cpufreq_driver *driver; + int (*verify)(struct cpufreq_policy *policy); + int (*setpolicy)(struct cpufreq_policy *policy); pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); @@ -1681,7 +1797,13 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, } /* verify the cpu speed can be set within this limit */ - ret = cpufreq_driver->verify(policy); + rcu_read_lock(); + driver = rcu_dereference(cpufreq_driver); + verify = driver->verify; + setpolicy = driver->setpolicy; + rcu_read_unlock(); + + ret = verify(policy); if (ret) goto error_out; @@ -1695,7 +1817,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, /* verify the cpu speed can be set within this limit, which might be different to the first one */ - ret = cpufreq_driver->verify(policy); + ret = verify(policy); if (ret) goto error_out; @@ -1709,10 +1831,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, pr_debug("new min and max freqs are %u - %u kHz\n", data->min, data->max); - if (cpufreq_driver->setpolicy) { + if (setpolicy) { data->policy = policy->policy; pr_debug("setting range\n"); - ret = cpufreq_driver->setpolicy(policy); + ret = setpolicy(policy); } else { if (policy->governor != data->governor) { /* save old, working values */ @@ -1772,6 +1894,11 @@ int cpufreq_update_policy(unsigned int cpu) { struct cpufreq_policy *data = cpufreq_cpu_get(cpu); struct cpufreq_policy policy; + struct cpufreq_driver *driver; + unsigned int (*get)(unsigned int cpu); + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); int ret; if (!data) { @@ -1793,13 +1920,18 @@ int cpufreq_update_policy(unsigned 
int cpu) /* BIOS might change freq behind our back -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get) { - policy.cur = cpufreq_driver->get(cpu); + rcu_read_lock(); + driver = rcu_access_pointer(cpufreq_driver); + get = driver->get; + target = driver->target; + rcu_read_unlock(); + if (get) { + policy.cur = get(cpu); if (!data->cur) { pr_debug("Driver did not initialize current freq"); data->cur = policy.cur; } else { - if (data->cur != policy.cur && cpufreq_driver->target) + if (data->cur != policy.cur && target) cpufreq_out_of_sync(cpu, data->cur, policy.cur); } @@ -1878,18 +2010,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) driver_data->flags |= CPUFREQ_CONST_LOOPS; write_lock_irqsave(&cpufreq_driver_lock, flags); - if (cpufreq_driver) { + if (rcu_access_pointer(cpufreq_driver)) { write_unlock_irqrestore(&cpufreq_driver_lock, flags); return -EBUSY; } - cpufreq_driver = driver_data; + rcu_assign_pointer(cpufreq_driver, driver_data); write_unlock_irqrestore(&cpufreq_driver_lock, flags); + synchronize_rcu(); ret = subsys_interface_register(&cpufreq_interface); if (ret) goto err_null_driver; - if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { + if (!(driver_data->flags & CPUFREQ_STICKY)) { int i; ret = -ENODEV; @@ -1916,8 +2049,9 @@ err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver = NULL; + rcu_assign_pointer(cpufreq_driver, NULL); write_unlock_irqrestore(&cpufreq_driver_lock, flags); + synchronize_rcu(); return ret; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); @@ -1934,9 +2068,15 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver); int cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; + struct cpufreq_driver *old_driver; - if (!cpufreq_driver || (driver != cpufreq_driver)) + rcu_read_lock(); + old_driver = rcu_access_pointer(cpufreq_driver); + if (!old_driver || (driver != old_driver)) { + rcu_read_unlock(); return -EINVAL; + } + rcu_read_unlock(); pr_debug("unregistering driver %s\n", driver->name); @@ -1944,8 +2084,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) unregister_hotcpu_notifier(&cpufreq_cpu_notifier); write_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver = NULL; + rcu_assign_pointer(cpufreq_driver, NULL); write_unlock_irqrestore(&cpufreq_driver_lock, flags); + synchronize_rcu(); return 0; } -- cgit v0.10.2 From fb30809efa3edeb692a6b29125a07c9eceb322dc Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Tue, 2 Apr 2013 09:56:56 -0500 Subject: cpufreq: ondemand: allow custom powersave_bias_target handler to be registered This allows for another [arch specific] driver to hook into existing powersave bias function of the ondemand governor. i.e. This allows AMD specific powersave bias function (in a separate AMD specific driver) to aid ondemand governor's frequency transition decisions. Signed-off-by: Jacob Shin Acked-by: Thomas Renninger Acked-by: Borislav Petkov Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 513cc82..9f668e9 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -263,4 +263,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event); void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, unsigned int delay, bool all_cpus); +void od_register_powersave_bias_handler(unsigned int (*f) + (struct cpufreq_policy *, unsigned int, unsigned int), + unsigned int powersave_bias); +void od_unregister_powersave_bias_handler(void); #endif /* _CPUFREQ_GOVERNER_H */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 1471478..b0ffef9 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "cpufreq_governor.h" @@ -40,6 +41,8 @@ static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); +static struct od_ops od_ops; + #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND static struct cpufreq_governor cpufreq_gov_ondemand; #endif @@ -80,7 +83,7 @@ static int should_io_be_busy(void) * Returns the freq_hi to be used right now and will set freq_hi_jiffies, * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. */ -static unsigned int powersave_bias_target(struct cpufreq_policy *policy, +static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation) { unsigned int freq_req, freq_reduc, freq_avg; @@ -145,7 +148,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) struct od_dbs_tuners *od_tuners = dbs_data->tuners; if (od_tuners->powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + freq = od_ops.powersave_bias_target(p, freq, + CPUFREQ_RELATION_H); else if (p->cur == p->max) return; @@ -205,12 +209,12 @@ static void od_check_cpu(int cpu, unsigned int load_freq) if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); + return; } + + freq_next = od_ops.powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } } @@ -557,7 +561,7 @@ define_get_cpu_dbs_routines(od_cpu_dbs_info); static struct od_ops od_ops = { .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, - .powersave_bias_target = powersave_bias_target, + .powersave_bias_target = generic_powersave_bias_target, .freq_increase = dbs_freq_increase, }; @@ -574,6 +578,47 @@ static struct common_dbs_data od_dbs_cdata = { .exit = od_exit, }; +static void od_set_powersave_bias(unsigned int powersave_bias) +{ + struct cpufreq_policy *policy; + struct dbs_data *dbs_data; + struct od_dbs_tuners *od_tuners; + unsigned int cpu; + cpumask_t done; + + cpumask_clear(&done); + + get_online_cpus(); + for_each_online_cpu(cpu) { + if (cpumask_test_cpu(cpu, &done)) + continue; + + policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; + dbs_data = policy->governor_data; + od_tuners = dbs_data->tuners; + od_tuners->powersave_bias = powersave_bias; + + cpumask_or(&done, &done, policy->cpus); + } + put_online_cpus(); +} + +void od_register_powersave_bias_handler(unsigned int (*f) + (struct cpufreq_policy *, unsigned int, unsigned int), 
+ unsigned int powersave_bias) +{ + od_ops.powersave_bias_target = f; + od_set_powersave_bias(powersave_bias); +} +EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler); + +void od_unregister_powersave_bias_handler(void) +{ + od_ops.powersave_bias_target = generic_powersave_bias_target; + od_set_powersave_bias(0); +} +EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler); + static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { -- cgit v0.10.2 From 9c5320c8ea8b8423edca2c40cd559f1ce9496dab Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Thu, 4 Apr 2013 16:19:04 +0000 Subject: cpufreq: AMD "frequency sensitivity feedback" powersave bias for ondemand governor Future AMD processors, starting with Family 16h, can provide software with feedback on how the workload may respond to frequency change -- memory-bound workloads will not benefit from higher frequency, where as compute-bound workloads will. This patch enables this "frequency sensitivity feedback" to aid the ondemand governor to make better frequency change decisions by hooking into the powersave bias. Signed-off-by: Jacob Shin Acked-by: Thomas Renninger Acked-by: Borislav Petkov Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index 4dfed30..66f9cc3 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt @@ -167,6 +167,27 @@ of load evaluation and helping the CPU stay at its top speed when truly busy, rather than shifting back and forth in speed. This tunable has no effect on behavior at lower speeds/lower CPU loads. +powersave_bias: this parameter takes a value between 0 to 1000. It +defines the percentage (times 10) value of the target frequency that +will be shaved off of the target. For example, when set to 100 -- 10%, +when ondemand governor would have targeted 1000 MHz, it will target +1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0 +(disabled) by default. +When AMD frequency sensitivity powersave bias driver -- +drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter +defines the workload frequency sensitivity threshold in which a lower +frequency is chosen instead of ondemand governor's original target. +The frequency sensitivity is a hardware reported (on AMD Family 16h +Processors and above) value between 0 to 100% that tells software how +the performance of the workload running on a CPU will change when +frequency changes. A workload with sensitivity of 0% (memory/IO-bound) +will not perform any better on higher core frequency, whereas a +workload with sensitivity of 100% (CPU-bound) will perform better +higher the frequency. When the driver is loaded, this is set to 400 +by default -- for CPUs running workloads with sensitivity value below +40%, a lower frequency is chosen. Unloading the driver or writing 0 +will disable this feature. 
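[Editor's note, not part of the patch series: to make the documented threshold concrete, the sketch below reproduces the sensitivity calculation the AMD driver performs from the deltas of the two feedback MSR counters. The counter deltas are hypothetical, and POWERSAVE_BIAS_DEF mirrors the default of 400 (40%) described above; the real driver reads the counters with rdmsr_on_cpu() and clamps the result.]

        #include <stdio.h>

        #define POWERSAVE_BIAS_MAX      1000
        #define POWERSAVE_BIAS_DEF      400     /* default threshold: 40% */

        int main(void)
        {
                /* hypothetical deltas of the ACTUAL and REFERENCE counters */
                long d_actual = 300, d_reference = 1000;
                int sensitivity;

                sensitivity = POWERSAVE_BIAS_MAX -
                        (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);

                printf("sensitivity = %d (%d.%d%%)\n", sensitivity,
                       sensitivity / 10, sensitivity % 10);
                printf("%s\n", sensitivity < POWERSAVE_BIAS_DEF ?
                       "below threshold: choose a lower frequency" :
                       "CPU-bound enough: keep ondemand's target");
                return 0;
        }

[For these numbers the workload is 30% sensitive, below the 40% default, so the governor's target is overridden with a lower frequency, exactly the behaviour the paragraph above describes.]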
+ 2.5 Conservative ---------------- diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 93fe929..9e22520 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -182,6 +182,7 @@ #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ #define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index ee8e9ab..d92b5da 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -39,8 +39,9 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, - { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, + { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index d7dc0ed..2b8a8c3 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -129,6 +129,23 @@ config X86_POWERNOW_K8 For details, take a look at . +config X86_AMD_FREQ_SENSITIVITY + tristate "AMD frequency sensitivity feedback powersave bias" + depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD + help + This adds AMD-specific powersave bias function to the ondemand + governor, which allows it to make more power-conscious frequency + change decisions based on feedback from hardware (availble on AMD + Family 16h and above). + + Hardware feedback tells software how "sensitive" to frequency changes + the CPUs' workloads are. CPU-bound workloads will be more sensitive + -- they will perform better as frequency increases. Memory/IO-bound + workloads will be less sensitive -- they will not necessarily perform + better as frequency increases. + + If in doubt, say N. + config X86_GX_SUSPMOD tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" depends on X86_32 && PCI diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 17417c7..a264dd3 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o +obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o ################################################################################## # ARM SoC drivers diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c new file mode 100644 index 0000000..f6b79ab --- /dev/null +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -0,0 +1,148 @@ +/* + * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias + * for the ondemand governor. + * + * Copyright (C) 2013 Advanced Micro Devices, Inc. 
+ * + * Author: Jacob Shin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "cpufreq_governor.h" + +#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080 +#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081 +#define CLASS_CODE_SHIFT 56 +#define POWERSAVE_BIAS_MAX 1000 +#define POWERSAVE_BIAS_DEF 400 + +struct cpu_data_t { + u64 actual; + u64 reference; + unsigned int freq_prev; +}; + +static DEFINE_PER_CPU(struct cpu_data_t, cpu_data); + +static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + int sensitivity; + long d_actual, d_reference; + struct msr actual, reference; + struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu); + struct dbs_data *od_data = policy->governor_data; + struct od_dbs_tuners *od_tuners = od_data->tuners; + struct od_cpu_dbs_info_s *od_info = + od_data->cdata->get_cpu_dbs_info_s(policy->cpu); + + if (!od_info->freq_table) + return freq_next; + + rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, + &actual.l, &actual.h); + rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE, + &reference.l, &reference.h); + actual.h &= 0x00ffffff; + reference.h &= 0x00ffffff; + + /* counter wrapped around, so stay on current frequency */ + if (actual.q < data->actual || reference.q < data->reference) { + freq_next = policy->cur; + goto out; + } + + d_actual = actual.q - data->actual; + d_reference = reference.q - data->reference; + + /* divide by 0, so stay on current frequency as well */ + if (d_reference == 0) { + freq_next = policy->cur; + goto out; + } + + sensitivity = POWERSAVE_BIAS_MAX - + (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference); + + clamp(sensitivity, 0, POWERSAVE_BIAS_MAX); + + /* this workload is not CPU bound, so choose a lower freq */ + if (sensitivity < od_tuners->powersave_bias) { + if (data->freq_prev == policy->cur) + freq_next = policy->cur; + + if (freq_next > policy->cur) + freq_next = policy->cur; + else if (freq_next < policy->cur) + freq_next = policy->min; + else { + unsigned int index; + + cpufreq_frequency_table_target(policy, + od_info->freq_table, policy->cur - 1, + CPUFREQ_RELATION_H, &index); + freq_next = od_info->freq_table[index].frequency; + } + + data->freq_prev = freq_next; + } else + data->freq_prev = 0; + +out: + data->actual = actual.q; + data->reference = reference.q; + return freq_next; +} + +static int __init amd_freq_sensitivity_init(void) +{ + u64 val; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return -ENODEV; + + if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK)) + return -ENODEV; + + if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) + return -ENODEV; + + if (!(val >> CLASS_CODE_SHIFT)) + return -ENODEV; + + od_register_powersave_bias_handler(amd_powersave_bias_target, + POWERSAVE_BIAS_DEF); + return 0; +} +late_initcall(amd_freq_sensitivity_init); + +static void __exit amd_freq_sensitivity_exit(void) +{ + od_unregister_powersave_bias_handler(); +} +module_exit(amd_freq_sensitivity_exit); + +static const struct x86_cpu_id amd_freq_sensitivity_ids[] = { + X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids); + +MODULE_AUTHOR("Jacob Shin "); +MODULE_DESCRIPTION("AMD frequency sensitivity feedback 
powersave bias for " + "the ondemand governor."); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 49d7b5bfb79cd5f8141a2998e2f4003d0ed65e5c Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Mon, 8 Apr 2013 09:57:34 +0000 Subject: cpufreq: exynos: Add cpufreq driver for exynos5440 This patch adds dvfs support for exynos5440 SOC. This soc has 4 cores and they scale at same frequency. The nature of exynos5440 clock controller is different from previous exynos controllers so not using the common exynos cpufreq framework. The major difference being interrupt notification for frequency change. Also, OPP library is used for device tree parsing to get different parameters like frequency, voltage etc. Since the opp library sorts the frequency table in ascending order so they are again re-arranged in descending order. This will have one-to-one mapping with the clock controller state management logic. Signed-off-by: Amit Daniel Kachhap Acked-by: Kukjin Kim Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt new file mode 100644 index 0000000..caff1a5 --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt @@ -0,0 +1,28 @@ + +Exynos5440 cpufreq driver +------------------- + +Exynos5440 SoC cpufreq driver for CPU frequency scaling. + +Required properties: +- interrupts: Interrupt to know the completion of cpu frequency change. +- operating-points: Table of frequencies and voltage CPU could be transitioned into, + in the decreasing order. Frequency should be in KHz units and voltage + should be in microvolts. + +Optional properties: +- clock-latency: Clock monitor latency in microsecond. + +All the required listed above must be defined under node cpufreq. + +Example: +-------- + cpufreq@160000 { + compatible = "samsung,exynos5440-cpufreq"; + reg = <0x160000 0x1000>; + interrupts = <0 57 0>; + operating-points = < + 1000000 975000 + 800000 925000>; + clock-latency = <100000>; + }; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 09da6a3..83bd5c1 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -42,6 +42,15 @@ config ARM_EXYNOS5250_CPUFREQ This adds the CPUFreq driver for Samsung EXYNOS5250 SoC. +config ARM_EXYNOS5440_CPUFREQ + def_bool SOC_EXYNOS5440 + depends on HAVE_CLK && PM_OPP && OF + help + This adds the CPUFreq driver for Samsung EXYNOS5440 + SoC. The nature of exynos5440 clock controller is + different than previous exynos controllers so not using + the common exynos framework. 
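[Editor's note, not part of the patch series: one detail from the exynos5440 changelog worth illustrating is the table ordering. The OPP library returns operating points in ascending frequency order, while the exynos5440 P-state logic expects the table highest-first, so the driver re-arranges the list after parsing the device tree. The sketch below shows only that reordering step, with hypothetical frequencies in kHz.]

        #include <stdio.h>

        /* Reverse an ascending OPP-derived frequency list into the
         * descending order expected by the P-state indexing. */
        static void reverse(unsigned int *freq, int n)
        {
                int i, j;
                unsigned int tmp;

                for (i = 0, j = n - 1; i < j; i++, j--) {
                        tmp = freq[i];
                        freq[i] = freq[j];
                        freq[j] = tmp;
                }
        }

        int main(void)
        {
                /* hypothetical table as the OPP library would return it */
                unsigned int freq[] = { 800000, 1000000, 1200000 };
                int i, n = 3;

                reverse(freq, n);
                for (i = 0; i < n; i++)
                        printf("P%d: %u kHz\n", i, freq[i]);
                return 0;
        }

[After the reorder, table index 0 maps to the fastest P-state, giving the one-to-one mapping with the clock controller's state management that the changelog mentions.]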
+ config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" depends on ARCH_HIGHBANK diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index a264dd3..315b923 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c new file mode 100644 index 0000000..ead7ed4 --- /dev/null +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -0,0 +1,478 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Amit Daniel Kachhap + * + * EXYNOS5440 - CPU frequency scaling support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register definitions */ +#define XMU_DVFS_CTRL 0x0060 +#define XMU_PMU_P0_7 0x0064 +#define XMU_C0_3_PSTATE 0x0090 +#define XMU_P_LIMIT 0x00a0 +#define XMU_P_STATUS 0x00a4 +#define XMU_PMUEVTEN 0x00d0 +#define XMU_PMUIRQEN 0x00d4 +#define XMU_PMUIRQ 0x00d8 + +/* PMU mask and shift definations */ +#define P_VALUE_MASK 0x7 + +#define XMU_DVFS_CTRL_EN_SHIFT 0 + +#define P0_7_CPUCLKDEV_SHIFT 21 +#define P0_7_CPUCLKDEV_MASK 0x7 +#define P0_7_ATBCLKDEV_SHIFT 18 +#define P0_7_ATBCLKDEV_MASK 0x7 +#define P0_7_CSCLKDEV_SHIFT 15 +#define P0_7_CSCLKDEV_MASK 0x7 +#define P0_7_CPUEMA_SHIFT 28 +#define P0_7_CPUEMA_MASK 0xf +#define P0_7_L2EMA_SHIFT 24 +#define P0_7_L2EMA_MASK 0xf +#define P0_7_VDD_SHIFT 8 +#define P0_7_VDD_MASK 0x7f +#define P0_7_FREQ_SHIFT 0 +#define P0_7_FREQ_MASK 0xff + +#define C0_3_PSTATE_VALID_SHIFT 8 +#define C0_3_PSTATE_CURR_SHIFT 4 +#define C0_3_PSTATE_NEW_SHIFT 0 + +#define PSTATE_CHANGED_EVTEN_SHIFT 0 + +#define PSTATE_CHANGED_IRQEN_SHIFT 0 + +#define PSTATE_CHANGED_SHIFT 0 + +/* some constant values for clock divider calculation */ +#define CPU_DIV_FREQ_MAX 500 +#define CPU_DBG_FREQ_MAX 375 +#define CPU_ATB_FREQ_MAX 500 + +#define PMIC_LOW_VOLT 0x30 +#define PMIC_HIGH_VOLT 0x28 + +#define CPUEMA_HIGH 0x2 +#define CPUEMA_MID 0x4 +#define CPUEMA_LOW 0x7 + +#define L2EMA_HIGH 0x1 +#define L2EMA_MID 0x3 +#define L2EMA_LOW 0x4 + +#define DIV_TAB_MAX 2 +/* frequency unit is 20MHZ */ +#define FREQ_UNIT 20 +#define MAX_VOLTAGE 1550000 /* In microvolt */ +#define VOLTAGE_STEP 12500 /* In microvolt */ + +#define CPUFREQ_NAME "exynos5440_dvfs" +#define DEF_TRANS_LATENCY 100000 + +enum cpufreq_level_index { + L0, L1, L2, L3, L4, + L5, L6, L7, L8, L9, +}; +#define CPUFREQ_LEVEL_END (L7 + 1) + +struct exynos_dvfs_data { + void __iomem *base; + struct resource *mem; + int irq; + struct clk *cpu_clk; + unsigned int cur_frequency; + unsigned int latency; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_count; + struct device *dev; + bool dvfs_enabled; + struct work_struct irq_work; +}; + +static struct exynos_dvfs_data *dvfs_info; +static DEFINE_MUTEX(cpufreq_lock); +static struct 
cpufreq_freqs freqs; + +static int init_div_table(void) +{ + struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; + unsigned int tmp, clk_div, ema_div, freq, volt_id; + int i = 0; + struct opp *opp; + + for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { + + opp = opp_find_freq_exact(dvfs_info->dev, + freq_tbl[i].frequency * 1000, true); + if (IS_ERR(opp)) { + dev_err(dvfs_info->dev, + "failed to find valid OPP for %u KHZ\n", + freq_tbl[i].frequency); + return PTR_ERR(opp); + } + + freq = freq_tbl[i].frequency / 1000; /* In MHZ */ + clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK) + << P0_7_CPUCLKDEV_SHIFT; + clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK) + << P0_7_ATBCLKDEV_SHIFT; + clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK) + << P0_7_CSCLKDEV_SHIFT; + + /* Calculate EMA */ + volt_id = opp_get_voltage(opp); + volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; + if (volt_id < PMIC_HIGH_VOLT) { + ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | + (L2EMA_HIGH << P0_7_L2EMA_SHIFT); + } else if (volt_id > PMIC_LOW_VOLT) { + ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) | + (L2EMA_LOW << P0_7_L2EMA_SHIFT); + } else { + ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) | + (L2EMA_MID << P0_7_L2EMA_SHIFT); + } + + tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT) + | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT)); + + __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i); + } + + return 0; +} + +static void exynos_enable_dvfs(void) +{ + unsigned int tmp, i, cpu; + struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; + /* Disable DVFS */ + __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL); + + /* Enable PSTATE Change Event */ + tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN); + tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT); + __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN); + + /* Enable PSTATE Change IRQ */ + tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN); + tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT); + __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN); + + /* Set initial performance index */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + if (freq_table[i].frequency == dvfs_info->cur_frequency) + break; + + if (freq_table[i].frequency == CPUFREQ_TABLE_END) { + dev_crit(dvfs_info->dev, "Boot up frequency not supported\n"); + /* Assign the highest frequency */ + i = 0; + dvfs_info->cur_frequency = freq_table[i].frequency; + } + + dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ", + dvfs_info->cur_frequency); + + for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) { + tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); + tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT); + tmp |= (i << C0_3_PSTATE_NEW_SHIFT); + __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); + } + + /* Enable DVFS */ + __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT, + dvfs_info->base + XMU_DVFS_CTRL); +} + +static int exynos_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + dvfs_info->freq_table); +} + +static unsigned int exynos_getspeed(unsigned int cpu) +{ + return dvfs_info->cur_frequency; +} + +static int exynos_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int index, tmp; + int ret = 0, i; + struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; + + mutex_lock(&cpufreq_lock); + + ret = cpufreq_frequency_table_target(policy, freq_table, + target_freq, relation, &index); + if (ret) + goto out; + 
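+ /* PRECHANGE is notified here; POSTCHANGE is sent from the PSTATE-change
+  * interrupt work once the hardware has completed the switch. */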
+ freqs.old = dvfs_info->cur_frequency; + freqs.new = freq_table[index].frequency; + + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); + + /* Set the target frequency in all C0_3_PSTATE register */ + for_each_cpu(i, policy->cpus) { + tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4); + tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT); + tmp |= (index << C0_3_PSTATE_NEW_SHIFT); + + __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); + } +out: + mutex_unlock(&cpufreq_lock); + return ret; +} + +static void exynos_cpufreq_work(struct work_struct *work) +{ + unsigned int cur_pstate, index; + struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ + struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; + + /* Ensure we can access cpufreq structures */ + if (unlikely(dvfs_info->dvfs_enabled == false)) + goto skip_work; + + mutex_lock(&cpufreq_lock); + freqs.old = dvfs_info->cur_frequency; + + cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS); + if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1) + index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK; + else + index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK; + + if (likely(index < dvfs_info->freq_count)) { + freqs.new = freq_table[index].frequency; + dvfs_info->cur_frequency = freqs.new; + } else { + dev_crit(dvfs_info->dev, "New frequency out of range\n"); + freqs.new = dvfs_info->cur_frequency; + } + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); + + cpufreq_cpu_put(policy); + mutex_unlock(&cpufreq_lock); +skip_work: + enable_irq(dvfs_info->irq); +} + +static irqreturn_t exynos_cpufreq_irq(int irq, void *id) +{ + unsigned int tmp; + + tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ); + if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) { + __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ); + disable_irq_nosync(irq); + schedule_work(&dvfs_info->irq_work); + } + return IRQ_HANDLED; +} + +static void exynos_sort_descend_freq_table(void) +{ + struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; + int i = 0, index; + unsigned int tmp_freq; + /* + * Exynos5440 clock controller state logic expects the cpufreq table to + * be in descending order. But the OPP library constructs the table in + * ascending order. So to make the table descending we just need to + * swap the i element with the N - i element. 
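+ * For example, a two-entry table read back as {800000, 1000000} kHz from
+ * the OPP library ends up as {1000000, 800000} kHz after freq_count / 2 swaps.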
+ */ + for (i = 0; i < dvfs_info->freq_count / 2; i++) { + index = dvfs_info->freq_count - i - 1; + tmp_freq = freq_tbl[i].frequency; + freq_tbl[i].frequency = freq_tbl[index].frequency; + freq_tbl[index].frequency = tmp_freq; + } +} + +static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int ret; + + ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table); + if (ret) { + dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret); + return ret; + } + + policy->cur = dvfs_info->cur_frequency; + policy->cpuinfo.transition_latency = dvfs_info->latency; + cpumask_setall(policy->cpus); + + cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu); + + return 0; +} + +static struct cpufreq_driver exynos_driver = { + .flags = CPUFREQ_STICKY, + .verify = exynos_verify_speed, + .target = exynos_target, + .get = exynos_getspeed, + .init = exynos_cpufreq_cpu_init, + .name = CPUFREQ_NAME, +}; + +static const struct of_device_id exynos_cpufreq_match[] = { + { + .compatible = "samsung,exynos5440-cpufreq", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos_cpufreq_match); + +static int exynos_cpufreq_probe(struct platform_device *pdev) +{ + int ret = -EINVAL; + struct device_node *np; + struct resource res; + + np = pdev->dev.of_node; + if (!np) + return -ENODEV; + + dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL); + if (!dvfs_info) { + ret = -ENOMEM; + goto err_put_node; + } + + dvfs_info->dev = &pdev->dev; + + ret = of_address_to_resource(np, 0, &res); + if (ret) + goto err_put_node; + + dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res); + if (IS_ERR(dvfs_info->base)) { + ret = PTR_ERR(dvfs_info->base); + goto err_put_node; + } + + dvfs_info->irq = irq_of_parse_and_map(np, 0); + if (!dvfs_info->irq) { + dev_err(dvfs_info->dev, "No cpufreq irq found\n"); + ret = -ENODEV; + goto err_put_node; + } + + ret = of_init_opp_table(dvfs_info->dev); + if (ret) { + dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret); + goto err_put_node; + } + + ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + if (ret) { + dev_err(dvfs_info->dev, + "failed to init cpufreq table: %d\n", ret); + goto err_put_node; + } + dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev); + exynos_sort_descend_freq_table(); + + if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) + dvfs_info->latency = DEF_TRANS_LATENCY; + + dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk"); + if (IS_ERR(dvfs_info->cpu_clk)) { + dev_err(dvfs_info->dev, "Failed to get cpu clock\n"); + ret = PTR_ERR(dvfs_info->cpu_clk); + goto err_free_table; + } + + dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk); + if (!dvfs_info->cur_frequency) { + dev_err(dvfs_info->dev, "Failed to get clock rate\n"); + ret = -EINVAL; + goto err_free_table; + } + dvfs_info->cur_frequency /= 1000; + + INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work); + ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq, + exynos_cpufreq_irq, IRQF_TRIGGER_NONE, + CPUFREQ_NAME, dvfs_info); + if (ret) { + dev_err(dvfs_info->dev, "Failed to register IRQ\n"); + goto err_free_table; + } + + ret = init_div_table(); + if (ret) { + dev_err(dvfs_info->dev, "Failed to initialise div table\n"); + goto err_free_table; + } + + exynos_enable_dvfs(); + ret = cpufreq_register_driver(&exynos_driver); + if (ret) { + dev_err(dvfs_info->dev, + "%s: failed to register cpufreq driver\n", __func__); + goto err_free_table; + } + + of_node_put(np); + dvfs_info->dvfs_enabled = true; + 
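+ /* From here on the PSTATE-change interrupt work may update cpufreq state. */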
return 0; + +err_free_table: + opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); +err_put_node: + of_node_put(np); + dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); + return ret; +} + +static int exynos_cpufreq_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&exynos_driver); + opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); + return 0; +} + +static struct platform_driver exynos_cpufreq_platdrv = { + .driver = { + .name = "exynos5440-cpufreq", + .owner = THIS_MODULE, + .of_match_table = exynos_cpufreq_match, + }, + .probe = exynos_cpufreq_probe, + .remove = exynos_cpufreq_remove, +}; +module_platform_driver(exynos_cpufreq_platdrv); + +MODULE_AUTHOR("Amit Daniel Kachhap "); +MODULE_DESCRIPTION("Exynos5440 cpufreq driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From c1585207a54659c51e0cebb7a208c6671cc3f03c Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Mon, 8 Apr 2013 08:17:36 +0000 Subject: cpufreq: exynos: Remove error return even if no soc is found This patch helps to have single binary for exynos5440 and previous exynos soc's. This change is needed for adding exynos5440 cpufreq driver which does not uses exynos-cpufreq common file and adds it own driver. Signed-off-by: Amit Daniel Kachhap Acked-by: Kukjin Kim Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index c0c4ce5..475b4f6 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -294,7 +294,7 @@ static int __init exynos_cpufreq_init(void) else if (soc_is_exynos5250()) ret = exynos5250_cpufreq_init(exynos_info); else - pr_err("%s: CPU type not found\n", __func__); + return 0; if (ret) goto err_vdd_arm; -- cgit v0.10.2 From 64649dcdf6c42d990cb44cceb0112ebd606ef435 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Mon, 8 Apr 2013 08:17:37 +0000 Subject: arm: exynos: Enable OPP library support for exynos5440 The OPP library support is needed for exynos5440 cpu frequency dynamic scaling driver. Signed-off-by: Amit Daniel Kachhap Acked-by: Kukjin Kim Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 70f94c8..d5dde07 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -72,10 +72,12 @@ config SOC_EXYNOS5440 bool "SAMSUNG EXYNOS5440" default y depends on ARCH_EXYNOS5 + select ARCH_HAS_OPP select ARM_ARCH_TIMER select AUTO_ZRELADDR select PINCTRL select PINCTRL_EXYNOS5440 + select PM_OPP help Enable EXYNOS5440 SoC support -- cgit v0.10.2 From 49ded525d4486dc97fc965858bf3ddf245463670 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Tue, 9 Apr 2013 23:22:01 +0000 Subject: cpufreq: OMAP: instantiate omap-cpufreq as a platform_driver As multi-platform build is being adopted by more and more ARM platforms, initcall function should be used very carefully. For example, when CONFIG_ARM_OMAP2PLUS_CPUFREQ is built in the kernel, omap_cpufreq_init() will be called on all the platforms to initialize omap-cpufreq driver. Further, on OMAP, we now use Soc generic cpufreq-cpu0 driver using device tree entries. To allow cpufreq-cpu0 and omap-cpufreq drivers to co-exist for OMAP in a single image, we need to ensure the following: 1. With device tree boot, we use cpufreq-cpu0 2. 
With non device tree boot, we use omap-cpufreq In the case of (1), we will have cpu OPPs and regulator registered as part of the device tree nodes, to ensure that omap-cpufreq and cpufreq-cpu0 don't conflict in managing the frequency of the same CPU, we should not permit omap-cpufreq to be probed. In the case of (2), we will not have the cpufreq-cpu0 device, hence only omap-cpufreq will be active. To eliminate this undesired these effects, we change omap-cpufreq driver to have it instantiated as a platform_driver and register "omap-cpufreq" device only when booted without device tree nodes on OMAP platforms. This allows the following: a) Will only run on platforms that create the platform_device "omap-cpufreq". b) Since the platform_device is registered only when device tree nodes are *not* populated, omap-cpufreq driver does not conflict with the usage of cpufreq-cpu0 driver which is used on OMAP platforms when device tree nodes are present. Inspired by commit 5553f9e26f6f49a93ba732fd222eac6973a4cf35 (cpufreq: instantiate cpufreq-cpu0 as a platform_driver) [robherring2@gmail.com: reported conflict of omap-cpufreq vs other driver in an non-device tree supported boot] Reported-by: Rob Herring Signed-off-by: Nishanth Menon Acked-by: Viresh Kumar Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 673a4c1..8d15f9a 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -265,6 +265,12 @@ static void __init omap4_init_voltages(void) omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva"); } +static inline void omap_init_cpufreq(void) +{ + struct platform_device_info devinfo = { .name = "omap-cpufreq", }; + platform_device_register_full(&devinfo); +} + static int __init omap2_common_pm_init(void) { if (!of_have_populated_dt()) @@ -294,6 +300,9 @@ int __init omap2_common_pm_late_init(void) /* Smartreflex device init */ omap_devinit_smartreflex(); + + /* cpufreq dummy device instantiation */ + omap_init_cpufreq(); } #ifdef CONFIG_SUSPEND diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index ad7549c..0279d18 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -243,7 +244,7 @@ static struct cpufreq_driver omap_driver = { .attr = omap_cpufreq_attr, }; -static int __init omap_cpufreq_init(void) +static int omap_cpufreq_probe(struct platform_device *pdev) { mpu_dev = get_cpu_device(0); if (!mpu_dev) { @@ -271,12 +272,20 @@ static int __init omap_cpufreq_init(void) return cpufreq_register_driver(&omap_driver); } -static void __exit omap_cpufreq_exit(void) +static int omap_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&omap_driver); + return cpufreq_unregister_driver(&omap_driver); } +static struct platform_driver omap_cpufreq_platdrv = { + .driver = { + .name = "omap-cpufreq", + .owner = THIS_MODULE, + }, + .probe = omap_cpufreq_probe, + .remove = omap_cpufreq_remove, +}; +module_platform_driver(omap_cpufreq_platdrv); + MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs"); MODULE_LICENSE("GPL"); -module_init(omap_cpufreq_init); -module_exit(omap_cpufreq_exit); -- cgit v0.10.2 From d1b6848590af407c5d2354929ac683052d631830 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 9 Apr 2013 22:38:18 +0000 Subject: cpufreq / intel_pstate: Optimize intel_pstate_set_policy This function is called quite often from other subsystems. 
Removed unused call to intel_pstate_get_min_max(). Also when "policy->policy == CPUFREQ_POLICY_PERFORMANCE", then no need to do calculations as the limits will be forced anyway. Also corrected filename in the header. Signed-off-by: Srinivas Pandruvada Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ad72922..fb38242 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1,5 +1,5 @@ /* - * cpufreq_snb.c: Native P state management for Intel processors + * intel_pstate.c: Native P state management for Intel processors * * (C) Copyright 2012 Intel Corporation * Author: Dirk Brandewie @@ -658,30 +658,27 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) static int intel_pstate_set_policy(struct cpufreq_policy *policy) { struct cpudata *cpu; - int min, max; cpu = all_cpu_data[policy->cpu]; if (!policy->cpuinfo.max_freq) return -ENODEV; - intel_pstate_get_min_max(cpu, &min, &max); - - limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; - limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); - limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); - - limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; - limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); - limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { limits.min_perf_pct = 100; limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); limits.no_turbo = 0; + return 0; } + limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; + limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); + limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); + + limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; + limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); + limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return 0; } -- cgit v0.10.2 From e4969ebac83fdea78d89c779331396728a4e6199 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 11 Apr 2013 08:04:53 +0000 Subject: cpufreq: Call __cpufreq_governor() with correct policy->cpus mask __cpufreq_governor() must be called with a correct policy->cpus mask. In __cpufreq_remove_dev() we initially clear policy->cpus with cpumask_clear_cpu() and then call __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT). If the governor is doing some per-cpu stuff in EXIT callback, this can create uncertain behavior. Generic governors in drivers/cpufreq/ doesn't do any per-cpu stuff in EXIT callback and so we don't face any issues currently. But its better to keep the code clean, so we don't face any issues in future. Now, we call cpumask_clear_cpu() only when multiple cpus are managed by policy. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fd97a62..3564947 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1105,7 +1105,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif WARN_ON(lock_policy_rwsem_write(cpu)); cpus = cpumask_weight(data->cpus); - cpumask_clear_cpu(cpu, data->cpus); + + if (cpus > 1) + cpumask_clear_cpu(cpu, data->cpus); unlock_policy_rwsem_write(cpu); if (cpu != data->cpu) { -- cgit v0.10.2 From c77896693da9ad75bd999fd86dd81a3da747e267 Mon Sep 17 00:00:00 2001 From: Mats Fagerstrom Date: Thu, 11 Apr 2013 23:23:57 +0200 Subject: cpufreq: dbx500: Round to closest available freq When reading the cpu speed, round it to the closest available frequency from the table. Signed-off-by: Mats Fagerstrom Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 15ed367..6ec6539 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -71,15 +71,15 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) int i = 0; unsigned long freq = clk_get_rate(armss_clk) / 1000; - while (freq_table[i].frequency != CPUFREQ_TABLE_END) { - if (freq <= freq_table[i].frequency) + /* The value is rounded to closest frequency in the defined table. */ + while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) { + if (freq < freq_table[i].frequency + + (freq_table[i + 1].frequency - freq_table[i].frequency) / 2) return freq_table[i].frequency; i++; } - /* We could not find a corresponding frequency. */ - pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n"); - return 0; + return freq_table[i].frequency; } static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) -- cgit v0.10.2 From 70eb0855b2f8fcf61f9f47626b9fa70e7b45ab06 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Mon, 22 Apr 2013 00:24:37 +0200 Subject: cpufreq: exynos5440: Protect OPP search calls with RCU lock As per the OPP library documentation(Documentation/power/opp.txt) all OPP find/get calls should be protected by RCU locks. Signed-off-by: Amit Daniel Kachhap Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index ead7ed4..0c74018 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -120,11 +120,13 @@ static int init_div_table(void) int i = 0; struct opp *opp; + rcu_read_lock(); for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { opp = opp_find_freq_exact(dvfs_info->dev, freq_tbl[i].frequency * 1000, true); if (IS_ERR(opp)) { + rcu_read_unlock(); dev_err(dvfs_info->dev, "failed to find valid OPP for %u KHZ\n", freq_tbl[i].frequency); @@ -159,6 +161,7 @@ static int init_div_table(void) __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i); } + rcu_read_unlock(); return 0; } -- cgit v0.10.2 From 820c6ca293e99ae225dc9dd7e0e689c444b08f92 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 22 Apr 2013 00:48:03 +0200 Subject: cpufreq: Don't call __cpufreq_governor() for drivers without target() Some cpufreq drivers implement their own governor and so don't need us to call generic governors interface via __cpufreq_governor(). Few recent commits haven't obeyed this law well and we saw some regressions. This patch is an attempt to fix the above issue. 
Signed-off-by: Viresh Kumar Reported-and-tested-by: Sedat Dilek Tested-by: Dirk Brandewie Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3564947..a6f6595 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -858,13 +858,18 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, struct device *dev) { struct cpufreq_policy *policy; - int ret = 0; + int ret = 0, has_target = 0; unsigned long flags; policy = cpufreq_cpu_get(sibling); WARN_ON(!policy); - __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + rcu_read_lock(); + has_target = !!rcu_dereference(cpufreq_driver)->target; + rcu_read_unlock(); + + if (has_target) + __cpufreq_governor(policy, CPUFREQ_GOV_STOP); lock_policy_rwsem_write(sibling); @@ -877,8 +882,10 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, unlock_policy_rwsem_write(sibling); - __cpufreq_governor(policy, CPUFREQ_GOV_START); - __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + if (has_target) { + __cpufreq_governor(policy, CPUFREQ_GOV_START); + __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + } ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); if (ret) { @@ -1146,7 +1153,8 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif /* If cpu is last user of policy, free policy */ if (cpus == 1) { - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + if (has_target) + __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); lock_policy_rwsem_read(cpu); kobj = &data->kobj; -- cgit v0.10.2 From 763f8c3fe493693d8c651bd6352a67acc798e361 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 15 Apr 2013 07:05:24 +0000 Subject: cpufreq: ARM big LITTLE: put DT nodes after using them DT nodes should be put using of_node_put() to balance their usage counts. This is not done properly in ARM's big LITTLE driver. Fix it. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 452ff46..44be311 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -31,22 +31,28 @@ static int dt_init_opp_table(struct device *cpu_dev) { - struct device_node *np = NULL; + struct device_node *np, *parent; int count = 0, ret; - for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_err("failed to find OF /cpus\n"); + return -ENOENT; + } + + for_each_child_of_node(parent, np) { if (count++ != cpu_dev->id) continue; - if (!of_get_property(np, "operating-points", NULL)) - return -ENODATA; - - cpu_dev->of_node = np; - - ret = of_init_opp_table(cpu_dev); - if (ret) - return ret; - - return 0; + if (!of_get_property(np, "operating-points", NULL)) { + ret = -ENODATA; + } else { + cpu_dev->of_node = np; + ret = of_init_opp_table(cpu_dev); + } + of_node_put(np); + of_node_put(parent); + + return ret; } return -ENODEV; @@ -54,15 +60,24 @@ static int dt_init_opp_table(struct device *cpu_dev) static int dt_get_transition_latency(struct device *cpu_dev) { - struct device_node *np = NULL; + struct device_node *np, *parent; u32 transition_latency = CPUFREQ_ETERNAL; int count = 0; - for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_err("failed to find OF /cpus\n"); + return -ENOENT; + } + + for_each_child_of_node(parent, np) { if (count++ != cpu_dev->id) continue; of_property_read_u32(np, "clock-latency", &transition_latency); + of_node_put(np); + of_node_put(parent); + return 0; } -- cgit v0.10.2 From ad61f44234c8e2814e47724ddebdc1eb58625f3f Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 15 Apr 2013 07:05:25 +0000 Subject: cpufreq: ARM big LITTLE: Adapt to latest cpufreq updates This driver isn't updated to work with latest cpufreq core updates that happened recently. Fix them. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 1d29d1a..dbdf677 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -77,8 +77,6 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, target_freq, relation, &freq_tab_idx); freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; - freqs.cpu = policy->cpu; - pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", __func__, cpu, cur_cluster, freqs.old, target_freq, freqs.new); @@ -86,8 +84,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); if (ret) { @@ -97,8 +94,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, policy->cur = freqs.new; - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } @@ -231,7 +227,7 @@ static struct cpufreq_driver bL_cpufreq_driver = { .get = bL_cpufreq_get, .init = bL_cpufreq_init, .exit = bL_cpufreq_exit, - .have_multiple_policies = true, + .have_governor_per_policy = true, .attr = bL_cpufreq_attr, }; -- cgit v0.10.2 From 141b467810b78f2d7a85f4b752b42b92f014629b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 15 Apr 2013 07:09:37 +0000 Subject: cpufreq: cpu0: Put cpu parent node after using it Parent node must be put after using it to balance its usage count. This was missing in cpufreq-cpu0 driver. Fix it. Signed-off-by: Viresh Kumar Acked-by: Shawn Guo Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 6561853..7684dfc 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -251,6 +251,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) } of_node_put(np); + of_node_put(parent); return 0; out_free_table: -- cgit v0.10.2 From 5eed1987e23c6800050ec61706aeb393b0f0bc96 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Wed, 24 Apr 2013 11:04:48 +0000 Subject: ARM: S5pv210: compiling issue, ARM_S5PV210_CPUFREQ needs CONFIG_CPU_FREQ_TABLE=y For arm S5pv210 with allmodconfig, ARM_S5PV210_CPUFREQ need CONFIG_CPU_FREQ_TABLE=y, or will cause compiling issue. 
The related operation: + arm-linux-gnu-ld -EL -p --no-undefined -X --build-id -X -o .tmp_vmlinux1 -T /root/linux-next/arch/arm/kernel/vmlinux.lds arch/arm/kernel/head.o init/built-in.o --start-group usr/built-in.o arch/arm/nwfpe/built-in.o arch/arm/vfp/built-in.o arch/arm/kernel/built-in.o arch/arm/mm/built-in.o arch/arm/common/built-in.o arch/arm/net/built-in.o arch/arm/crypto/built-in.o arch/arm/mach-s5pv210/built-in.o arch/arm/plat-samsung/built-in.o kernel/built-in.o mm/built-in.o fs/built-in.o ipc/built-in.o security/built-in.o crypto/built-in.o block/built-in.o arch/arm/lib/lib.a lib/lib.a arch/arm/lib/built-in.o lib/built-in.o drivers/built-in.o sound/built-in.o firmware/built-in.o net/built-in.o --end-group The related errors: drivers/built-in.o: In function `s5pv210_target': drivers/cpufreq/s5pv210-cpufreq.c:225: undefined reference to `cpufreq_frequency_table_target' drivers/cpufreq/s5pv210-cpufreq.c:237: undefined reference to `cpufreq_frequency_table_target' drivers/built-in.o: In function `s5pv210_verify_speed': drivers/cpufreq/s5pv210-cpufreq.c:182: undefined reference to `cpufreq_frequency_table_verify' drivers/built-in.o: In function `s5pv210_cpu_init': drivers/cpufreq/s5pv210-cpufreq.c:556: undefined reference to `cpufreq_frequency_table_get_attr' drivers/cpufreq/s5pv210-cpufreq.c:560: undefined reference to `cpufreq_frequency_table_cpuinfo' make: *** [vmlinux] Error 1 Signed-off-by: Chen Gang Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 83bd5c1..f3af18b 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -129,6 +129,7 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" depends on CPU_S5PV210 + select CPU_FREQ_TABLE default y help This adds the CPUFreq driver for Samsung S5PV210 and -- cgit v0.10.2 From 7264a2bbb0a1d9935c653cb0e0cb48cfcaa87538 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 25 Apr 2013 19:28:57 +0200 Subject: cpufreq: pxa2xx: initialize variables gcc-3.8 correctly found that the variables set by find_freq_tables() are not initialized if this function is called on something other than a pxa2xx or pxa3xx: pxa2xx-cpufreq.c: In function 'pxa_verify_policy': pxa2xx-cpufreq.c:272:6: warning: 'pxa_freqs_table' may be used uninitialized in this function [-Wmaybe-uninitialized] pxa2xx-cpufreq.c: In function 'pxa_set_target': pxa2xx-cpufreq.c:345:23: warning: 'pxa_freq_settings' may be used uninitialized in this function [-Wmaybe-uninitialized] Rather than adding a bogus initialization that would let us get a little further before crashing, add an explicit BUG(). We know that this code is designed to run on only these cpus, so this will fix the build warning and give a more helpful diagnostic if the code ever changes to run on other machines. Signed-off-by: Arnd Bergmann Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index fe4c55b..9e5bc8e 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -221,10 +221,11 @@ static void find_freq_tables(struct cpufreq_frequency_table **freq_table, *pxa_freqs = pxa255_turbo_freqs; *freq_table = pxa255_turbo_freq_table; } - } - if (cpu_is_pxa27x()) { + } else if (cpu_is_pxa27x()) { *pxa_freqs = pxa27x_freqs; *freq_table = pxa27x_freq_table; + } else { + BUG(); } } -- cgit v0.10.2 From 45c009a9a447655aecbdb06c86126f05d0272171 Mon Sep 17 00:00:00 2001 From: viresh kumar Date: Fri, 26 Apr 2013 12:53:16 +0000 Subject: cpufreq: MAINTAINERS: Add co-maintainer Add Viresh as a co-maintainer of cpufreq framework. This would mostly be for cpufreq core and ARM drivers but not restricted to them. Also add a pointer to the git tree cpufreq patches are pulled into. [rjw: Changelog] Signed-off-by: Viresh Kumar Tentatively-acked-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/MAINTAINERS b/MAINTAINERS index be73983..a1746f5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2200,9 +2200,11 @@ F: drivers/net/ethernet/ti/cpmac.c CPU FREQUENCY DRIVERS M: Rafael J. Wysocki +M: Viresh Kumar L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org S: Maintained +T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git F: drivers/cpufreq/ F: include/linux/cpufreq.h -- cgit v0.10.2
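
As a quick way to see the effect of the dbx500 rounding change above, the stand-alone sketch below replays the same midpoint walk in user space. It is an approximation, not driver code: the TABLE_END sentinel, the struct layout and the sample frequencies are made-up stand-ins for the kernel's cpufreq_frequency_table, and the measured rate is passed in directly instead of being read from the ARMSS clock.

#include <stdio.h>

/* Stand-in for the kernel's table terminator; illustrative only. */
#define TABLE_END (~0u)

struct freq_entry {
	unsigned int frequency;	/* kHz, ascending order as in the driver */
};

/*
 * Mirror of the rounding loop added to dbx500_cpufreq_getspeed() above:
 * walk the ascending table and return the entry closest to 'freq'.
 */
static unsigned int round_to_table(const struct freq_entry *tbl,
				   unsigned long freq)
{
	int i = 0;

	while (tbl[i + 1].frequency != TABLE_END) {
		if (freq < tbl[i].frequency +
		    (tbl[i + 1].frequency - tbl[i].frequency) / 2)
			return tbl[i].frequency;
		i++;
	}
	return tbl[i].frequency;	/* at or above the last midpoint */
}

int main(void)
{
	/* Made-up three-step table; real dbx500 tables come from the platform. */
	const struct freq_entry tbl[] = {
		{ 200000 }, { 400000 }, { 800000 }, { TABLE_END },
	};

	printf("%u\n", round_to_table(tbl, 290000));	/* 200000: below the 300000 midpoint */
	printf("%u\n", round_to_table(tbl, 310000));	/* 400000: above it */
	printf("%u\n", round_to_table(tbl, 900000));	/* 800000: clamps to the top entry */
	return 0;
}

Anything below the midpoint between two neighbouring entries reports the lower entry, and anything at or beyond the last midpoint reports the highest table frequency, so the lookup can no longer return 0 for a reading that does not match the table exactly.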