author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-23 21:18:07 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-23 21:18:07 (GMT)
commit     43c9fad942b5afb9e03801c0721d83160fa5b0dd (patch)
tree       c2002d29d5579196b0f54fd78e47ab02efced394 /drivers/cpufreq
parent     cb8a4deaf9b2778653c4391d8ccb24c5ab159f9d (diff)
parent     d461003574ebc862c2dae70c9e1ab32b1bf2cda1 (diff)
Merge tag 'pm+acpi-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI updates from Rafael Wysocki:
 "The rework of backlight interface selection API from Hans de Goede
  stands out from the number of commits and the number of affected places
  perspective. The cpufreq core fixes from Viresh Kumar are quite
  significant too as far as the number of commits goes and because they
  should reduce CPU online/offline overhead quite a bit in the majority
  of cases.

  From the new features point of view, the ACPICA update (to upstream
  revision 20150515) adding support for new ACPI 6 material to ACPICA is
  the one that matters the most as some new significant features will be
  based on it going forward. Also included is an update of the ACPI
  device power management core to follow ACPI 6 (which in turn reflects
  the Windows' device PM implementation), a PM core extension to support
  wakeup interrupts in a more generic way and support for the ACPI _CCA
  device configuration object.

  The rest is mostly fixes and cleanups all over and some documentation
  updates, including new DT bindings for Operating Performance Points.

  There is one fix for a regression introduced in the 4.1 cycle, but it
  adds quite a number of lines of code, it wasn't really ready before
  Thursday and you were on vacation, so I refrained from pushing it on
  the last minute for 4.1.

  Specifics:

   - ACPICA update to upstream revision 20150515 including basic support
     for ACPI 6 features: new ACPI tables introduced by ACPI 6 (STAO,
     XENV, WPBT, NFIT, IORT), changes related to the other tables (DTRM,
     FADT, LPIT, MADT), new predefined names (_BTH, _CR3, _DSD, _LPI,
     _MTL, _PRR, _RDI, _RST, _TFP, _TSN), fixes and cleanups (Bob Moore,
     Lv Zheng).

   - ACPI device power management core code update to follow ACPI 6
     which reflects the ACPI device power management implementation in
     Windows (Rafael J Wysocki).

   - rework of the backlight interface selection logic to reduce the
     number of kernel command line options and improve the handling of
     DMI quirks that may be involved in that and to make the code
     generally more straightforward (Hans de Goede).

   - fixes for the ACPI Embedded Controller (EC) driver related to the
     handling of EC transactions (Lv Zheng).

   - fix for a regression related to the ACPI resources management and
     resulting from a recent change of ACPI initialization code ordering
     (Rafael J Wysocki).

   - fix for a system initialization regression related to ACPI
     introduced during the 3.14 cycle and caused by running the code
     that switches the platform over to the ACPI mode too early in the
     initialization sequence (Rafael J Wysocki).

   - support for the ACPI _CCA device configuration object related to
     DMA cache coherence (Suravee Suthikulpanit).

   - ACPI/APEI fixes and cleanups (Jiri Kosina, Borislav Petkov).

   - ACPI battery driver cleanups (Luis Henriques, Mathias Krause).

   - ACPI processor driver cleanups (Hanjun Guo).

   - cleanups and documentation update related to the ACPI device
     properties interface based on _DSD (Rafael J Wysocki).

   - ACPI device power management fixes (Rafael J Wysocki).

   - assorted cleanups related to ACPI (Dominik Brodowski, Fabian
     Frederick, Lorenzo Pieralisi, Mathias Krause, Rafael J Wysocki).

   - fix for a long-standing issue causing General Protection Faults to
     be generated occasionally on return to user space after resume from
     ACPI-based suspend-to-RAM on 32-bit x86 (Ingo Molnar).

   - fix to make the suspend core code return -EBUSY consistently in all
     cases when system suspend is aborted due to wakeup detection (Ruchi
     Kandoi).

   - support for automated device wakeup IRQ handling allowing drivers
     to make their PM support more straightforward (Tony Lindgren).

   - new tracepoints for suspend-to-idle tracing and rework of the
     prepare/complete callbacks tracing in the PM core (Todd E Brandt,
     Rafael J Wysocki).

   - wakeup sources framework enhancements (Jin Qian).

   - new macro for noirq system PM callbacks (Grygorii Strashko).

   - assorted cleanups related to system suspend (Rafael J Wysocki).

   - cpuidle core cleanups to make the code more efficient (Rafael J
     Wysocki).

   - powernv/pseries cpuidle driver update (Shilpasri G Bhat).

   - cpufreq core fixes related to CPU online/offline that should reduce
     the overhead of these operations quite a bit, unless the CPU in
     question is physically going away (Viresh Kumar, Saravana Kannan).

   - serialization of cpufreq governor callbacks to avoid race
     conditions in some cases (Viresh Kumar).

   - intel_pstate driver fixes and cleanups (Doug Smythies, Prarit
     Bhargava, Joe Konno).

   - cpufreq driver (arm_big_little, cpufreq-dt, qoriq) updates (Sudeep
     Holla, Felipe Balbi, Tang Yuantian).

   - assorted cleanups in cpufreq drivers and core (Shailendra Verma,
     Fabian Frederick, Wang Long).

   - new Device Tree bindings for representing Operating Performance
     Points (Viresh Kumar).

   - updates for the common clock operations support code in the PM core
     (Rajendra Nayak, Geert Uytterhoeven).

   - PM domains core code update (Geert Uytterhoeven).

   - Intel Knights Landing support for the RAPL (Running Average Power
     Limit) power capping driver (Dasaratharaman Chandramouli).

   - fixes related to the floor frequency setting on Atom SoCs in the
     RAPL power capping driver (Ajay Thomas).

   - runtime PM framework documentation update (Ben Dooks).

   - cpupower tool fix (Herton R Krzesinski)"

* tag 'pm+acpi-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (194 commits)
  cpuidle: powernv/pseries: Auto-promotion of snooze to deeper idle state
  x86: Load __USER_DS into DS/ES after resume
  PM / OPP: Add binding for 'opp-suspend'
  PM / OPP: Allow multiple OPP tables to be passed via DT
  PM / OPP: Add new bindings to address shortcomings of existing bindings
  ACPI: Constify ACPI device IDs in documentation
  ACPI / enumeration: Document the rules regarding the PRP0001 device ID
  ACPI / video: Make acpi_video_unregister_backlight() private
  acpi-video-detect: Remove old API
  toshiba-acpi: Port to new backlight interface selection API
  thinkpad-acpi: Port to new backlight interface selection API
  sony-laptop: Port to new backlight interface selection API
  samsung-laptop: Port to new backlight interface selection API
  msi-wmi: Port to new backlight interface selection API
  msi-laptop: Port to new backlight interface selection API
  intel-oaktrail: Port to new backlight interface selection API
  ideapad-laptop: Port to new backlight interface selection API
  fujitsu-laptop: Port to new backlight interface selection API
  eeepc-laptop: Port to new backlight interface selection API
  dell-wmi: Port to new backlight interface selection API
  ...
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig.arm               2
-rw-r--r--  drivers/cpufreq/arm_big_little.c         40
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c              1
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c         2
-rw-r--r--  drivers/cpufreq/cpufreq.c               573
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c   28
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c      345
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h       16
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c        6
-rw-r--r--  drivers/cpufreq/gx-suspmod.c              4
-rw-r--r--  drivers/cpufreq/intel_pstate.c           72
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c         20
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c          32
13 files changed, 649 insertions, 492 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f3dbc8..611cb09 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,7 @@
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index e1a6ba6..f1e42f8 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
-#include <asm/bL_switcher.h>
#include "arm_big_little.h"
@@ -41,12 +40,16 @@
#define MAX_CLUSTERS 2
#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled() bL_switching_enabled
#define set_switching_enabled(x) (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled() false
#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
#endif
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
@@ -186,6 +189,15 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
mutex_unlock(&cluster_lock[old_cluster]);
}
+ /*
+ * FIXME: clk_set_rate has to handle the case where clk_change_rate
+ * can fail due to hardware or firmware issues. Until the clk core
+ * layer is fixed, we can check here. In most of the cases we will
+ * be reading only the cached value anyway. This needs to be removed
+ * once clk core is fixed.
+ */
+ if (bL_cpufreq_get_rate(cpu) != new_rate)
+ return -EIO;
return 0;
}
@@ -322,7 +334,6 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
- char name[14] = "cpu-cluster.";
int ret;
if (freq_table[cluster])
@@ -342,8 +353,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
goto free_opp_table;
}
- name[12] = cluster + '0';
- clk[cluster] = clk_get(cpu_dev, name);
+ clk[cluster] = clk_get(cpu_dev, NULL);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
@@ -506,6 +516,7 @@ static struct cpufreq_driver bL_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
+#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
unsigned long action, void *_arg)
{
@@ -538,6 +549,20 @@ static struct notifier_block bL_switcher_notifier = {
.notifier_call = bL_cpufreq_switcher_notifier,
};
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
int ret, i;
@@ -555,8 +580,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
- ret = bL_switcher_get_enabled();
- set_switching_enabled(ret);
+ set_switching_enabled(bL_switcher_get_enabled());
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
@@ -567,7 +591,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ ret = __bLs_register_notifier();
if (ret) {
cpufreq_unregister_driver(&bL_cpufreq_driver);
arm_bL_ops = NULL;
@@ -591,7 +615,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
}
bL_switcher_get_enabled();
- bL_switcher_unregister_notifier(&bL_switcher_notifier);
+ __bLs_unregister_notifier();
cpufreq_unregister_driver(&bL_cpufreq_driver);
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index bab67db..528a82bf 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -416,6 +416,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
};
module_platform_driver(dt_cpufreq_platdrv);
+MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a225809..db69eeb 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -414,7 +414,7 @@ static int nforce2_detect_chipset(void)
* nforce2_init - initializes the nForce2 CPUFreq driver
*
* Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
- * devices, -EINVAL on problems during initiatization, and zero on
+ * devices, -EINVAL on problems during initialization, and zero on
* success.
*/
static int __init nforce2_init(void)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8ae655c..b612411 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -31,10 +31,62 @@
#include <linux/tick.h>
#include <trace/events/power.h>
-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy) \
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+ return active == !policy_is_inactive(policy);
+}
+
+/* Finds Next Active/Inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+ bool active)
+{
+ do {
+ policy = list_next_entry(policy, policy_list);
+
+ /* No more policies in the list */
+ if (&policy->policy_list == &cpufreq_policy_list)
+ return NULL;
+ } while (!suitable_policy(policy, active));
+
+ return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+ struct cpufreq_policy *policy;
+
+ /* No policies in the list */
+ if (list_empty(&cpufreq_policy_list))
+ return NULL;
+
+ policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+ policy_list);
+
+ if (!suitable_policy(policy, active))
+ policy = next_policy(policy, active);
+
+ return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active) \
+ for (__policy = first_policy(__active); \
+ __policy; \
+ __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy) \
+ for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy) \
+ for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
/* Iterate over governors */
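For illustration, a minimal sketch (not part of the patch) of how the new iterators are meant to be used, built on the helpers introduced above; an inactive policy keeps an empty policy->cpus mask but stays on cpufreq_policy_list until its CPUs are physically removed:

    static void log_active_policies(void)
    {
        struct cpufreq_policy *policy;
        unsigned long flags;

        /* cpufreq_driver_lock protects cpufreq_policy_list */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_active_policy(policy)
            pr_info("CPU%u policy at %u kHz\n", policy->cpu, policy->cur);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
    }

for_each_inactive_policy() is the counterpart, used further down in this patch to clear last_governor when a governor is unregistered.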
@@ -49,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list);
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
-/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
@@ -178,7 +226,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
policy->cpuinfo.transition_latency = transition_latency;
/*
- * The driver only supports the SMP configuartion where all processors
+ * The driver only supports the SMP configuration where all processors
* share the clock and voltage.
*/
cpumask_setall(policy->cpus);
@@ -187,10 +235,18 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
-unsigned int cpufreq_generic_get(unsigned int cpu)
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+
if (!policy || IS_ERR(policy->clk)) {
pr_err("%s: No %s associated to cpu: %d\n",
__func__, policy ? "clk" : "policy", cpu);
@@ -201,18 +257,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
-/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
-{
- return per_cpu(cpufreq_cpu_data, cpu);
-}
-
+/**
+ * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ *
+ * @cpu: cpu to find policy for.
+ *
+ * This returns policy for 'cpu', returns NULL if it doesn't exist.
+ * It also increments the kobject reference count to mark it busy and so would
+ * require a corresponding call to cpufreq_cpu_put() to decrement it back.
+ * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
+ * freed as that depends on the kobj count.
+ *
+ * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
+ * valid policy is found. This is done to make sure the driver doesn't get
+ * unregistered while the policy is being used.
+ *
+ * Return: A valid policy on success, otherwise NULL on failure.
+ */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
unsigned long flags;
- if (cpu >= nr_cpu_ids)
+ if (WARN_ON(cpu >= nr_cpu_ids))
return NULL;
if (!down_read_trylock(&cpufreq_rwsem))
@@ -223,7 +290,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (cpufreq_driver) {
/* get the CPU */
- policy = per_cpu(cpufreq_cpu_data, cpu);
+ policy = cpufreq_cpu_get_raw(cpu);
if (policy)
kobject_get(&policy->kobj);
}
@@ -237,6 +304,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+/**
+ * cpufreq_cpu_put: Decrements the usage count of a policy
+ *
+ * @policy: policy earlier returned by cpufreq_cpu_get().
+ *
+ * This decrements the kobject reference count incremented earlier by calling
+ * cpufreq_cpu_get().
+ *
+ * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
+ */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
kobject_put(&policy->kobj);
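A minimal sketch of the get/put discipline the new kerneldoc describes (illustrative caller only; read_cur_freq() is a made-up name):

    static unsigned int read_cur_freq(unsigned int cpu)
    {
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int freq = 0;

        if (policy) {
            freq = policy->cur;
            /* drops the kobject reference and the cpufreq_rwsem read lock */
            cpufreq_cpu_put(policy);
        }
        return freq;
    }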
@@ -798,11 +875,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
down_write(&policy->rwsem);
+ /* Updating inactive policies is invalid, so avoid doing that. */
+ if (unlikely(policy_is_inactive(policy))) {
+ ret = -EBUSY;
+ goto unlock_policy_rwsem;
+ }
+
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
+unlock_policy_rwsem:
up_write(&policy->rwsem);
up_read(&cpufreq_rwsem);
@@ -873,28 +957,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
-/* symlink affected CPUs */
+static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+ struct device *cpu_dev;
+
+ pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
+
+ if (!policy)
+ return 0;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (WARN_ON(!cpu_dev))
+ return 0;
+
+ return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+ struct device *cpu_dev;
+
+ pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
+
+ cpu_dev = get_cpu_device(cpu);
+ if (WARN_ON(!cpu_dev))
+ return;
+
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+}
+
+/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
- for_each_cpu(j, policy->cpus) {
- struct device *cpu_dev;
-
- if (j == policy->cpu)
+ /* Some related CPUs might not be present (physically hotplugged) */
+ for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ if (j == policy->kobj_cpu)
continue;
- pr_debug("Adding link for CPU: %u\n", j);
- cpu_dev = get_cpu_device(j);
- ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq");
+ ret = add_cpu_dev_symlink(policy, j);
if (ret)
break;
}
+
return ret;
}
+static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
+{
+ unsigned int j;
+
+ /* Some related CPUs might not be present (physically hotplugged) */
+ for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ if (j == policy->kobj_cpu)
+ continue;
+
+ remove_cpu_dev_symlink(policy, j);
+ }
+}
+
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
struct device *dev)
{
@@ -937,7 +1060,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
memcpy(&new_policy, policy, sizeof(*policy));
/* Update governor of new_policy to the governor used before hotplug */
- gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ gov = find_governor(policy->last_governor);
if (gov)
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
@@ -963,7 +1086,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev)
{
int ret = 0;
- unsigned long flags;
+
+ /* Has this CPU been taken care of already? */
+ if (cpumask_test_cpu(cpu, policy->cpus))
+ return 0;
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
@@ -974,13 +1100,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
down_write(&policy->rwsem);
-
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
cpumask_set_cpu(cpu, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
up_write(&policy->rwsem);
if (has_target()) {
@@ -994,7 +1114,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ return 0;
}
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
@@ -1003,20 +1123,25 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
-
- policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
-
+ policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (policy)
- policy->governor = NULL;
+ if (likely(policy)) {
+ /* Policy should be inactive here */
+ WARN_ON(!policy_is_inactive(policy));
+
+ down_write(&policy->rwsem);
+ policy->cpu = cpu;
+ up_write(&policy->rwsem);
+ }
return policy;
}
-static struct cpufreq_policy *cpufreq_policy_alloc(void)
+static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
struct cpufreq_policy *policy;
+ int ret;
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy)
@@ -1028,6 +1153,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
+ "cpufreq");
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ goto err_free_rcpumask;
+ }
+
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
@@ -1035,8 +1167,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
+ policy->cpu = dev->id;
+
+ /* Set this once on allocation */
+ policy->kobj_cpu = dev->id;
+
return policy;
+err_free_rcpumask:
+ free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
err_free_policy:
@@ -1045,18 +1184,20 @@ err_free_policy:
return NULL;
}
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
struct kobject *kobj;
struct completion *cmp;
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_REMOVE_POLICY, policy);
+ if (notify)
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
- down_read(&policy->rwsem);
+ down_write(&policy->rwsem);
+ cpufreq_remove_dev_symlink(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
+ up_write(&policy->rwsem);
kobject_put(kobj);
/*
@@ -1069,68 +1210,64 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
pr_debug("wait complete\n");
}
-static void cpufreq_policy_free(struct cpufreq_policy *policy)
-{
- free_cpumask_var(policy->related_cpus);
- free_cpumask_var(policy->cpus);
- kfree(policy);
-}
-
-static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
- struct device *cpu_dev)
+static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
- int ret;
-
- if (WARN_ON(cpu == policy->cpu))
- return 0;
+ unsigned long flags;
+ int cpu;
- /* Move kobject to the new policy->cpu */
- ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
- if (ret) {
- pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
- return ret;
- }
+ /* Remove policy from list */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- up_write(&policy->rwsem);
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return 0;
+ cpufreq_policy_put_kobj(policy, notify);
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
}
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- bool recover_policy = cpufreq_suspended;
-
- if (cpu_is_offline(cpu))
- return 0;
+ bool recover_policy = !sif;
pr_debug("adding CPU %u\n", cpu);
- /* check whether a different CPU already registered this
- * CPU because it is in the same boat. */
- policy = cpufreq_cpu_get_raw(cpu);
- if (unlikely(policy))
- return 0;
+ /*
+ * Only possible if 'cpu' wasn't physically present earlier and we are
+ * here from subsys_interface add callback. A hotplug notifier will
+ * follow and we will handle it like logical CPU hotplug then. For now,
+ * just create the sysfs link.
+ */
+ if (cpu_is_offline(cpu))
+ return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
- /* Check if this cpu was hot-unplugged earlier and has siblings */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_policy(policy) {
- if (cpumask_test_cpu(cpu, policy->related_cpus)) {
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(policy, cpu, dev);
- up_read(&cpufreq_rwsem);
- return ret;
- }
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy && !policy_is_inactive(policy)) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+ up_read(&cpufreq_rwsem);
+ return ret;
}
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/*
* Restore the saved policy when doing light-weight init and fall back
@@ -1139,22 +1276,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
recover_policy = false;
- policy = cpufreq_policy_alloc();
+ policy = cpufreq_policy_alloc(dev);
if (!policy)
goto nomem_out;
}
- /*
- * In the resume path, since we restore a saved policy, the assignment
- * to policy->cpu is like an update of the existing policy, rather than
- * the creation of a brand new one. So we need to perform this update
- * by invoking update_policy_cpu().
- */
- if (recover_policy && cpu != policy->cpu)
- WARN_ON(update_policy_cpu(policy, cpu, dev));
- else
- policy->cpu = cpu;
-
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* call driver. From then on the cpufreq must be able
@@ -1181,21 +1307,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
- /* prepare interface data */
- ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &dev->kobj, "cpufreq");
- if (ret) {
- pr_err("%s: failed to init policy->kobj: %d\n",
- __func__, ret);
- goto err_init_policy_kobj;
- }
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_cpu_data, j) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
@@ -1253,11 +1370,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_out_unregister;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
- }
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- list_add(&policy->policy_list, &cpufreq_policy_list);
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ }
cpufreq_init_policy(policy);
@@ -1281,68 +1398,28 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
err_out_unregister:
err_get_freq:
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_cpu_data, j) = NULL;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- if (!recover_policy) {
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
- }
-err_init_policy_kobj:
up_write(&policy->rwsem);
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
- if (recover_policy) {
- /* Do not leave stale fallback data behind. */
- per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
- cpufreq_policy_put_kobj(policy);
- }
- cpufreq_policy_free(policy);
-
+ cpufreq_policy_free(policy, recover_policy);
nomem_out:
up_read(&cpufreq_rwsem);
return ret;
}
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
-{
- return __cpufreq_add_dev(dev, sif);
-}
-
static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif)
{
- unsigned int cpu = dev->id, cpus;
- int ret;
- unsigned long flags;
+ unsigned int cpu = dev->id;
+ int ret = 0;
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
- policy = per_cpu(cpufreq_cpu_data, cpu);
-
- /* Save the policy somewhere when doing a light-weight tear-down */
- if (cpufreq_suspended)
- per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
-
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+ policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
@@ -1354,108 +1431,75 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
pr_err("%s: Failed to stop governor\n", __func__);
return ret;
}
-
- strncpy(per_cpu(cpufreq_cpu_governor, cpu),
- policy->governor->name, CPUFREQ_NAME_LEN);
}
- down_read(&policy->rwsem);
- cpus = cpumask_weight(policy->cpus);
- up_read(&policy->rwsem);
+ down_write(&policy->rwsem);
+ cpumask_clear_cpu(cpu, policy->cpus);
- if (cpu != policy->cpu) {
- sysfs_remove_link(&dev->kobj, "cpufreq");
- } else if (cpus > 1) {
+ if (policy_is_inactive(policy)) {
+ if (has_target())
+ strncpy(policy->last_governor, policy->governor->name,
+ CPUFREQ_NAME_LEN);
+ } else if (cpu == policy->cpu) {
/* Nominate new CPU */
- int new_cpu = cpumask_any_but(policy->cpus, cpu);
- struct device *cpu_dev = get_cpu_device(new_cpu);
+ policy->cpu = cpumask_any(policy->cpus);
+ }
+ up_write(&policy->rwsem);
- sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
- ret = update_policy_cpu(policy, new_cpu, cpu_dev);
- if (ret) {
- if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq"))
- pr_err("%s: Failed to restore kobj link to cpu:%d\n",
- __func__, cpu_dev->id);
- return ret;
- }
+ /* Start governor again for active policy */
+ if (!policy_is_inactive(policy)) {
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
- if (!cpufreq_suspended)
- pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
+ if (ret)
+ pr_err("%s: Failed to start governor\n", __func__);
+ }
} else if (cpufreq_driver->stop_cpu) {
cpufreq_driver->stop_cpu(policy);
}
- return 0;
+ return ret;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
struct subsys_interface *sif)
{
- unsigned int cpu = dev->id, cpus;
+ unsigned int cpu = dev->id;
int ret;
- unsigned long flags;
- struct cpufreq_policy *policy;
-
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, cpu);
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- down_write(&policy->rwsem);
- cpus = cpumask_weight(policy->cpus);
-
- if (cpus > 1)
- cpumask_clear_cpu(cpu, policy->cpus);
- up_write(&policy->rwsem);
+ /* Only proceed for inactive policies */
+ if (!policy_is_inactive(policy))
+ return 0;
/* If cpu is last user of policy, free policy */
- if (cpus == 1) {
- if (has_target()) {
- ret = __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_EXIT);
- if (ret) {
- pr_err("%s: Failed to exit governor\n",
- __func__);
- return ret;
- }
- }
-
- if (!cpufreq_suspended)
- cpufreq_policy_put_kobj(policy);
-
- /*
- * Perform the ->exit() even during light-weight tear-down,
- * since this is a core component, and is essential for the
- * subsequent light-weight ->init() to succeed.
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- /* Remove policy from list of active policies */
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- list_del(&policy->policy_list);
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- if (!cpufreq_suspended)
- cpufreq_policy_free(policy);
- } else if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
- pr_err("%s: Failed to start governor\n", __func__);
+ pr_err("%s: Failed to exit governor\n", __func__);
return ret;
}
}
+ /*
+ * Perform the ->exit() even during light-weight tear-down,
+ * since this is a core component, and is essential for the
+ * subsequent light-weight ->init() to succeed.
+ */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ /* Free the policy only if the driver is getting removed. */
+ if (sif)
+ cpufreq_policy_free(policy, true);
+
return 0;
}
@@ -1469,8 +1513,33 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
unsigned int cpu = dev->id;
int ret;
- if (cpu_is_offline(cpu))
+ /*
+ * Only possible if 'cpu' is getting physically removed now. A hotplug
+ * notifier should have already been called and we just need to remove
+ * link or free policy here.
+ */
+ if (cpu_is_offline(cpu)) {
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ struct cpumask mask;
+
+ if (!policy)
+ return 0;
+
+ cpumask_copy(&mask, policy->related_cpus);
+ cpumask_clear_cpu(cpu, &mask);
+
+ /*
+ * Free policy only if all policy->related_cpus are removed
+ * physically.
+ */
+ if (cpumask_intersects(&mask, cpu_present_mask)) {
+ remove_cpu_dev_symlink(policy, cpu);
+ return 0;
+ }
+
+ cpufreq_policy_free(policy, true);
return 0;
+ }
ret = __cpufreq_remove_dev_prepare(dev, sif);
@@ -1567,6 +1636,10 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
ret_freq = cpufreq_driver->get(policy->cpu);
+ /* Updating inactive policies is invalid, so avoid doing that. */
+ if (unlikely(policy_is_inactive(policy)))
+ return ret_freq;
+
if (ret_freq && policy->cur &&
!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
/* verify no discrepancy between actual and
@@ -1656,7 +1729,7 @@ void cpufreq_suspend(void)
pr_debug("%s: Suspending Governors\n", __func__);
- for_each_policy(policy) {
+ for_each_active_policy(policy) {
if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
pr_err("%s: Failed to stop governor for policy: %p\n",
__func__, policy);
@@ -1690,7 +1763,7 @@ void cpufreq_resume(void)
pr_debug("%s: Resuming Governors\n", __func__);
- for_each_policy(policy) {
+ for_each_active_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
pr_err("%s: Failed to resume driver: %p\n", __func__,
policy);
@@ -1891,7 +1964,7 @@ static int __target_index(struct cpufreq_policy *policy,
* Failed after setting to intermediate freq? Driver should have
* reverted back to initial frequency and so should we. Check
* here for intermediate_freq instead of get_intermediate, in
- * case we have't switched to intermediate freq at all.
+ * case we haven't switched to intermediate freq at all.
*/
if (unlikely(retval && intermediate_freq)) {
freqs.old = intermediate_freq;
@@ -2092,7 +2165,8 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
- int cpu;
+ struct cpufreq_policy *policy;
+ unsigned long flags;
if (!governor)
return;
@@ -2100,12 +2174,15 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
if (cpufreq_disabled())
return;
- for_each_present_cpu(cpu) {
- if (cpu_online(cpu))
- continue;
- if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
- strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+ /* clear last_governor for all inactive policies */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_inactive_policy(policy) {
+ if (!strcmp(policy->last_governor, governor->name)) {
+ policy->governor = NULL;
+ strcpy(policy->last_governor, "\0");
+ }
}
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
@@ -2304,7 +2381,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
if (dev) {
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
- __cpufreq_add_dev(dev, NULL);
+ cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
@@ -2316,7 +2393,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_FAILED:
- __cpufreq_add_dev(dev, NULL);
+ cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -2336,7 +2413,7 @@ static int cpufreq_boost_set_sw(int state)
struct cpufreq_policy *policy;
int ret = -EINVAL;
- for_each_policy(policy) {
+ for_each_active_policy(policy) {
freq_table = cpufreq_frequency_get_table(policy->cpu);
if (freq_table) {
ret = cpufreq_frequency_table_cpuinfo(policy,
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 25a70d0..c86a10c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -148,6 +148,10 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
return 0;
}
+static struct notifier_block cs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier,
+};
+
/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;
@@ -317,7 +321,7 @@ static struct attribute_group cs_attr_group_gov_pol = {
/************************** sysfs end ************************/
-static int cs_init(struct dbs_data *dbs_data)
+static int cs_init(struct dbs_data *dbs_data, bool notify)
{
struct cs_dbs_tuners *tuners;
@@ -336,25 +340,25 @@ static int cs_init(struct dbs_data *dbs_data)
dbs_data->tuners = tuners;
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
- mutex_init(&dbs_data->mutex);
+
+ if (notify)
+ cpufreq_register_notifier(&cs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
return 0;
}
-static void cs_exit(struct dbs_data *dbs_data)
+static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
+ if (notify)
+ cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
kfree(dbs_data->tuners);
}
define_get_cpu_dbs_routines(cs_cpu_dbs_info);
-static struct notifier_block cs_cpufreq_notifier_block = {
- .notifier_call = dbs_cpufreq_notifier,
-};
-
-static struct cs_ops cs_ops = {
- .notifier_block = &cs_cpufreq_notifier_block,
-};
-
static struct common_dbs_data cs_dbs_cdata = {
.governor = GOV_CONSERVATIVE,
.attr_group_gov_sys = &cs_attr_group_gov_sys,
@@ -363,9 +367,9 @@ static struct common_dbs_data cs_dbs_cdata = {
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
.gov_dbs_timer = cs_dbs_timer,
.gov_check_cpu = cs_check_cpu,
- .gov_ops = &cs_ops,
.init = cs_init,
.exit = cs_exit,
+ .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
};
static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1b44496..57a39f8 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -239,211 +239,242 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
}
}
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata, unsigned int event)
+static int cpufreq_governor_init(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data,
+ struct common_dbs_data *cdata)
{
- struct dbs_data *dbs_data;
- struct od_cpu_dbs_info_s *od_dbs_info = NULL;
- struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
- struct od_ops *od_ops = NULL;
- struct od_dbs_tuners *od_tuners = NULL;
- struct cs_dbs_tuners *cs_tuners = NULL;
- struct cpu_dbs_common_info *cpu_cdbs;
- unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
- int io_busy = 0;
- int rc;
-
- if (have_governor_per_policy())
- dbs_data = policy->governor_data;
- else
- dbs_data = cdata->gdbs_data;
-
- WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
-
- switch (event) {
- case CPUFREQ_GOV_POLICY_INIT:
- if (have_governor_per_policy()) {
- WARN_ON(dbs_data);
- } else if (dbs_data) {
- dbs_data->usage_count++;
- policy->governor_data = dbs_data;
- return 0;
- }
-
- dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
- if (!dbs_data) {
- pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
- return -ENOMEM;
- }
+ unsigned int latency;
+ int ret;
- dbs_data->cdata = cdata;
- dbs_data->usage_count = 1;
- rc = cdata->init(dbs_data);
- if (rc) {
- pr_err("%s: POLICY_INIT: init() failed\n", __func__);
- kfree(dbs_data);
- return rc;
- }
+ if (dbs_data) {
+ if (WARN_ON(have_governor_per_policy()))
+ return -EINVAL;
+ dbs_data->usage_count++;
+ policy->governor_data = dbs_data;
+ return 0;
+ }
- if (!have_governor_per_policy())
- WARN_ON(cpufreq_get_global_kobject());
+ dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+ if (!dbs_data)
+ return -ENOMEM;
- rc = sysfs_create_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
- if (rc) {
- cdata->exit(dbs_data);
- kfree(dbs_data);
- return rc;
- }
+ dbs_data->cdata = cdata;
+ dbs_data->usage_count = 1;
- policy->governor_data = dbs_data;
+ ret = cdata->init(dbs_data, !policy->governor->initialized);
+ if (ret)
+ goto free_dbs_data;
- /* policy latency is in ns. Convert it to us first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
+ /* policy latency is in ns. Convert it to us first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
- /* Bring kernel and HW constraints together */
- dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+ /* Bring kernel and HW constraints together */
+ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
latency * LATENCY_MULTIPLIER));
- if ((cdata->governor == GOV_CONSERVATIVE) &&
- (!policy->governor->initialized)) {
- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
- cpufreq_register_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (!have_governor_per_policy()) {
+ if (WARN_ON(cpufreq_get_global_kobject())) {
+ ret = -EINVAL;
+ goto cdata_exit;
}
+ cdata->gdbs_data = dbs_data;
+ }
- if (!have_governor_per_policy())
- cdata->gdbs_data = dbs_data;
+ ret = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+ if (ret)
+ goto put_kobj;
- return 0;
- case CPUFREQ_GOV_POLICY_EXIT:
- if (!--dbs_data->usage_count) {
- sysfs_remove_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
+ policy->governor_data = dbs_data;
- if (!have_governor_per_policy())
- cpufreq_put_global_kobject();
+ return 0;
- if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
- (policy->governor->initialized == 1)) {
- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+put_kobj:
+ if (!have_governor_per_policy()) {
+ cdata->gdbs_data = NULL;
+ cpufreq_put_global_kobject();
+ }
+cdata_exit:
+ cdata->exit(dbs_data, !policy->governor->initialized);
+free_dbs_data:
+ kfree(dbs_data);
+ return ret;
+}
+
+static void cpufreq_governor_exit(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
+{
+ struct common_dbs_data *cdata = dbs_data->cdata;
- cpufreq_unregister_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
+ policy->governor_data = NULL;
+ if (!--dbs_data->usage_count) {
+ sysfs_remove_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
- cdata->exit(dbs_data);
- kfree(dbs_data);
+ if (!have_governor_per_policy()) {
cdata->gdbs_data = NULL;
+ cpufreq_put_global_kobject();
}
- policy->governor_data = NULL;
- return 0;
+ cdata->exit(dbs_data, policy->governor->initialized == 1);
+ kfree(dbs_data);
}
+}
- cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+static int cpufreq_governor_start(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
+{
+ struct common_dbs_data *cdata = dbs_data->cdata;
+ unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
+ struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+ int io_busy = 0;
+
+ if (!policy->cur)
+ return -EINVAL;
+
+ if (cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- cs_tuners = dbs_data->tuners;
- cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice_load;
} else {
- od_tuners = dbs_data->tuners;
- od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
sampling_rate = od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice_load;
- od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!policy->cur)
- return -EINVAL;
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+ unsigned int prev_load;
- mutex_lock(&dbs_data->mutex);
+ j_cdbs->cpu = j;
+ j_cdbs->cur_policy = policy;
+ j_cdbs->prev_cpu_idle =
+ get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs =
- dbs_data->cdata->get_cpu_cdbs(j);
- unsigned int prev_load;
+ prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
+ j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_load = 100 * prev_load /
+ (unsigned int)j_cdbs->prev_cpu_wall;
- j_cdbs->cpu = j;
- j_cdbs->cur_policy = policy;
- j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
- &j_cdbs->prev_cpu_wall, io_busy);
+ if (ignore_nice)
+ j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- prev_load = (unsigned int)
- (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
- j_cdbs->prev_load = 100 * prev_load /
- (unsigned int) j_cdbs->prev_cpu_wall;
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+ }
- if (ignore_nice)
- j_cdbs->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ if (cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_cpu_dbs_info_s *cs_dbs_info =
+ cdata->get_cpu_dbs_info_s(cpu);
- mutex_init(&j_cdbs->timer_mutex);
- INIT_DEFERRABLE_WORK(&j_cdbs->work,
- dbs_data->cdata->gov_dbs_timer);
- }
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
+ } else {
+ struct od_ops *od_ops = cdata->gov_ops;
+ struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->enable = 1;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- od_ops->powersave_bias_init_cpu(cpu);
- }
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ od_ops->powersave_bias_init_cpu(cpu);
+ }
- mutex_unlock(&dbs_data->mutex);
+ /* Initiate timer time stamp */
+ cpu_cdbs->time_stamp = ktime_get();
- /* Initiate timer time stamp */
- cpu_cdbs->time_stamp = ktime_get();
+ gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
+ true);
+ return 0;
+}
- gov_queue_work(dbs_data, policy,
- delay_for_sampling_rate(sampling_rate), true);
- break;
+static void cpufreq_governor_stop(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
+{
+ struct common_dbs_data *cdata = dbs_data->cdata;
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
- case CPUFREQ_GOV_STOP:
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
- cs_dbs_info->enable = 0;
+ if (cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_cpu_dbs_info_s *cs_dbs_info =
+ cdata->get_cpu_dbs_info_s(cpu);
- gov_cancel_work(dbs_data, policy);
+ cs_dbs_info->enable = 0;
+ }
- mutex_lock(&dbs_data->mutex);
- mutex_destroy(&cpu_cdbs->timer_mutex);
- cpu_cdbs->cur_policy = NULL;
+ gov_cancel_work(dbs_data, policy);
- mutex_unlock(&dbs_data->mutex);
+ mutex_destroy(&cpu_cdbs->timer_mutex);
+ cpu_cdbs->cur_policy = NULL;
+}
- break;
+static void cpufreq_governor_limits(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
+{
+ struct common_dbs_data *cdata = dbs_data->cdata;
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+
+ if (!cpu_cdbs->cur_policy)
+ return;
+
+ mutex_lock(&cpu_cdbs->timer_mutex);
+ if (policy->max < cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+ CPUFREQ_RELATION_H);
+ else if (policy->min > cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+ CPUFREQ_RELATION_L);
+ dbs_check_cpu(dbs_data, cpu);
+ mutex_unlock(&cpu_cdbs->timer_mutex);
+}
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata, unsigned int event)
+{
+ struct dbs_data *dbs_data;
+ int ret = 0;
+ /* Lock governor to block concurrent initialization of governor */
+ mutex_lock(&cdata->mutex);
+
+ if (have_governor_per_policy())
+ dbs_data = policy->governor_data;
+ else
+ dbs_data = cdata->gdbs_data;
+
+ if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ ret = cpufreq_governor_init(policy, dbs_data, cdata);
+ break;
+ case CPUFREQ_GOV_POLICY_EXIT:
+ cpufreq_governor_exit(policy, dbs_data);
+ break;
+ case CPUFREQ_GOV_START:
+ ret = cpufreq_governor_start(policy, dbs_data);
+ break;
+ case CPUFREQ_GOV_STOP:
+ cpufreq_governor_stop(policy, dbs_data);
+ break;
case CPUFREQ_GOV_LIMITS:
- mutex_lock(&dbs_data->mutex);
- if (!cpu_cdbs->cur_policy) {
- mutex_unlock(&dbs_data->mutex);
- break;
- }
- mutex_lock(&cpu_cdbs->timer_mutex);
- if (policy->max < cpu_cdbs->cur_policy->cur)
- __cpufreq_driver_target(cpu_cdbs->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > cpu_cdbs->cur_policy->cur)
- __cpufreq_driver_target(cpu_cdbs->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(dbs_data, cpu);
- mutex_unlock(&cpu_cdbs->timer_mutex);
- mutex_unlock(&dbs_data->mutex);
+ cpufreq_governor_limits(policy, dbs_data);
break;
}
- return 0;
+
+unlock:
+ mutex_unlock(&cdata->mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
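To make the new serialization concrete, here is a sketch of the glue a governor now provides (the my_* names are hypothetical; the pattern mirrors cs_dbs_cdata above and od_dbs_cdata below):

    static int my_init(struct dbs_data *dbs_data, bool notify)
    {
        /* allocate dbs_data->tuners, register notifiers when 'notify' */
        return 0;
    }

    static void my_exit(struct dbs_data *dbs_data, bool notify)
    {
        kfree(dbs_data->tuners);
    }

    static struct common_dbs_data my_dbs_cdata = {
        /* ... get_cpu_cdbs, gov_dbs_timer and friends go here ... */
        .init = my_init,
        .exit = my_exit,
        /* statically initialized, so valid before any dbs_data exists */
        .mutex = __MUTEX_INITIALIZER(my_dbs_cdata.mutex),
    };

Every CPUFREQ_GOV_* event is funneled through cpufreq_governor_dbs(policy, &my_dbs_cdata, event), which holds that mutex for the whole event, so POLICY_INIT/EXIT can no longer race with START/STOP/LIMITS. The mutex has to live in struct common_dbs_data rather than struct dbs_data because dbs_data is not yet allocated when POLICY_INIT arrives.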
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc401d1..34736f5 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -208,11 +208,16 @@ struct common_dbs_data {
void *(*get_cpu_dbs_info_s)(int cpu);
void (*gov_dbs_timer)(struct work_struct *work);
void (*gov_check_cpu)(int cpu, unsigned int load);
- int (*init)(struct dbs_data *dbs_data);
- void (*exit)(struct dbs_data *dbs_data);
+ int (*init)(struct dbs_data *dbs_data, bool notify);
+ void (*exit)(struct dbs_data *dbs_data, bool notify);
/* Governor specific ops, see below */
void *gov_ops;
+
+ /*
+ * Protects governor's data (struct dbs_data and struct common_dbs_data)
+ */
+ struct mutex mutex;
};
/* Governor Per policy data */
@@ -221,9 +226,6 @@ struct dbs_data {
unsigned int min_sampling_rate;
int usage_count;
void *tuners;
-
- /* dbs_mutex protects dbs_enable in governor start/stop */
- struct mutex mutex;
};
/* Governor specific ops, will be passed to dbs_data->gov_ops */
@@ -234,10 +236,6 @@ struct od_ops {
void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};
-struct cs_ops {
- struct notifier_block *notifier_block;
-};
-
static inline int delay_for_sampling_rate(unsigned int sampling_rate)
{
int delay = usecs_to_jiffies(sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index ad3f38f..3c1e10f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -475,7 +475,7 @@ static struct attribute_group od_attr_group_gov_pol = {
/************************** sysfs end ************************/
-static int od_init(struct dbs_data *dbs_data)
+static int od_init(struct dbs_data *dbs_data, bool notify)
{
struct od_dbs_tuners *tuners;
u64 idle_time;
@@ -513,11 +513,10 @@ static int od_init(struct dbs_data *dbs_data)
tuners->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
- mutex_init(&dbs_data->mutex);
return 0;
}
-static void od_exit(struct dbs_data *dbs_data)
+static void od_exit(struct dbs_data *dbs_data, bool notify)
{
kfree(dbs_data->tuners);
}
@@ -541,6 +540,7 @@ static struct common_dbs_data od_dbs_cdata = {
.gov_ops = &od_ops,
.init = od_init,
.exit = od_exit,
+ .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
};
static void od_set_powersave_bias(unsigned int powersave_bias)
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 1d723dc..3488c9c 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -144,7 +144,7 @@ module_param(max_duration, int, 0444);
/**
- * we can detect a core multipiler from dir0_lsb
+ * we can detect a core multiplier from dir0_lsb
* from GX1 datasheet p.56,
* MULT[3:0]:
* 0000 = SYSCLK multiplied by 4 (test only)
@@ -346,7 +346,7 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
/* it needs to be assured that at least one supported frequency is
* within policy->min and policy->max. If it is not, policy->max
- * needs to be increased until one freuqency is supported.
+ * needs to be increased until one frequency is supported.
* policy->min may not be decreased, though. This way we guarantee a
* specific processing capacity.
*/
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2ba53f4..15ada47 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -49,9 +49,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
-static inline int32_t div_fp(int32_t x, int32_t y)
+static inline int32_t div_fp(s64 x, s64 y)
{
- return div_s64((int64_t)x << FRAC_BITS, y);
+ return div64_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
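div_fp() is widened because its inputs can now be 64-bit microsecond counts shifted left by FRAC_BITS (8 in this driver); the old int32_t version overflows once a value exceeds INT32_MAX >> 8, i.e. a duration of roughly 8.4 seconds. The kernel uses div64_s64() because a plain / on s64 is not available on 32-bit architectures. A standalone userspace illustration of the fixed-point helpers (assuming the macro values match the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8

    static int64_t int_tofp(int64_t x) { return x << FRAC_BITS; }

    /* 64-bit inputs: the shift below no longer overflows for large
     * microsecond counts, unlike the previous int32_t version. */
    static int32_t div_fp(int64_t x, int64_t y)
    {
        return (int32_t)((x << FRAC_BITS) / y);
    }

    int main(void)
    {
        int64_t sample_time = 10000;   /* 10 ms nominal period, in us */
        int64_t duration_us = 40000;   /* timer actually fired 40 ms later */
        int32_t ratio = div_fp(int_tofp(sample_time), int_tofp(duration_us));

        printf("sample_ratio = %d/256 = %.2f\n", ratio, ratio / 256.0); /* 0.25 */
        return 0;
    }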
@@ -69,6 +69,7 @@ struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ u64 tsc;
int freq;
ktime_t time;
};
@@ -110,6 +111,7 @@ struct cpudata {
ktime_t last_sample_time;
u64 prev_aperf;
u64 prev_mperf;
+ u64 prev_tsc;
struct sample sample;
};
@@ -397,7 +399,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
update_turbo_state();
if (limits.turbo_disabled) {
- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
return -EPERM;
}
@@ -485,7 +487,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(void)
{
hwp_active++;
- pr_info("intel_pstate HWP enabled\n");
+ pr_info("intel_pstate: HWP enabled\n");
wrmsrl( MSR_PM_ENABLE, 0x1);
}
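Several messages in this file gain a hand-written "intel_pstate: " prefix. Worth noting as an aside (this diff does not do it): the usual kernel idiom is to define pr_fmt once at the top of the file, before any #include, so every pr_*() call in the file is prefixed automatically:

    /* sketch: would sit above the #includes in intel_pstate.c */
    #define pr_fmt(fmt) "intel_pstate: " fmt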
@@ -536,7 +538,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
val |= vid;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
#define BYT_BCLK_FREQS 5
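The wrmsrl() to wrmsrl_on_cpu() switch matters because MSR_IA32_PERF_CTL is a per-core register and nothing guarantees byt_set_pstate() runs on the CPU whose P-state is being changed. Restating the changed line with comments:

    /* wrmsrl() writes the MSR of whatever CPU this code happens to be
     * running on; wrmsrl_on_cpu() directs the write to the intended core. */
    wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);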
@@ -705,19 +707,20 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
int max_perf, min_perf;
- update_turbo_state();
-
- intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+ if (force) {
+ update_turbo_state();
- pstate = clamp_t(int, pstate, min_perf, max_perf);
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
- if (pstate == cpu->pstate.current_pstate)
- return;
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+ }
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
@@ -734,7 +737,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
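The force flag separates two kinds of callers. With force == true (the PID path below), the request is re-clamped against the current turbo and min/max limits and a write to an unchanged P-state is skipped. With force == false (driver init and CPU offline), those checks are bypassed and the register is written unconditionally, which is what you want when current_pstate cannot be trusted. The two call shapes, restated with comments:

    /* periodic PID adjustment: validate, clamp, skip no-op writes */
    intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

    /* init / stop: program min_pstate regardless of what was there before */
    intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);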
@@ -757,23 +760,28 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
unsigned long flags;
+ u64 tsc;
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = native_read_tsc();
local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
cpu->sample.time = ktime_get();
cpu->sample.aperf = aperf;
cpu->sample.mperf = mperf;
+ cpu->sample.tsc = tsc;
cpu->sample.aperf -= cpu->prev_aperf;
cpu->sample.mperf -= cpu->prev_mperf;
+ cpu->sample.tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
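APERF, MPERF and now the TSC are read back to back with interrupts off so all three describe the same instant, and the driver then works with per-interval deltas against the previous snapshot. The bookkeeping, reduced to its essentials (plain C, hypothetical names):

    #include <stdint.h>

    struct snap { uint64_t aperf, mperf, tsc; };

    static void sample_delta(struct snap *prev, const struct snap *now,
                             struct snap *delta)
    {
        /* the counters only ever increase, so unsigned subtraction
         * yields the amount accumulated since the last sample */
        delta->aperf = now->aperf - prev->aperf;
        delta->mperf = now->mperf - prev->mperf;
        delta->tsc   = now->tsc   - prev->tsc;
        *prev = *now;              /* roll the snapshot forward */
    }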
@@ -795,7 +803,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
- u32 duration_us;
+ s64 duration_us;
u32 sample_time;
/*
@@ -822,8 +830,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
* to adjust our busyness.
*/
sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
- duration_us = (u32) ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
+ duration_us = ktime_us_delta(cpu->sample.time,
+ cpu->last_sample_time);
if (duration_us > sample_time * 3) {
sample_ratio = div_fp(int_tofp(sample_time),
int_tofp(duration_us));
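Making duration_us an s64 keeps ktime_us_delta()'s full result; the old u32 cast truncated long idle periods. The surrounding logic, restated with comments (same names as the hunk):

    /* The timer fired much later than the nominal period: the core was
     * idle in between, so scale the measured busyness down in proportion. */
    if (duration_us > sample_time * 3) {
        sample_ratio = div_fp(int_tofp(sample_time), int_tofp(duration_us));
        core_busy = mul_fp(core_busy, sample_ratio);
    }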
@@ -838,6 +846,10 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
int32_t busy_scaled;
struct _pid *pid;
signed int ctl;
+ int from;
+ struct sample *sample;
+
+ from = cpu->pstate.current_pstate;
pid = &cpu->pid;
busy_scaled = intel_pstate_get_scaled_busy(cpu);
@@ -845,7 +857,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
ctl = pid_calc(pid, busy_scaled);
/* Negative values of ctl increase the pstate and vice versa */
- intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
+ intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);
+
+ sample = &cpu->sample;
+ trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ fp_toint(busy_scaled),
+ from,
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->tsc,
+ sample->freq);
}
static void intel_hwp_timer_func(unsigned long __data)
@@ -859,21 +881,11 @@ static void intel_hwp_timer_func(unsigned long __data)
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
- struct sample *sample;
intel_pstate_sample(cpu);
- sample = &cpu->sample;
-
intel_pstate_adjust_busy_pstate(cpu);
- trace_pstate_sample(fp_toint(sample->core_pct_busy),
- fp_toint(intel_pstate_get_scaled_busy(cpu)),
- cpu->pstate.current_pstate,
- sample->mperf,
- sample->aperf,
- sample->freq);
-
intel_pstate_set_sample_time(cpu);
}
@@ -936,7 +948,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
add_timer_on(&cpu->timer, cpunum);
- pr_debug("Intel pstate controlling: cpu %d\n", cpunum);
+ pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
return 0;
}
@@ -1002,13 +1014,13 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
int cpu_num = policy->cpu;
struct cpudata *cpu = all_cpu_data[cpu_num];
- pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+ pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
del_timer_sync(&all_cpu_data[cpu_num]->timer);
if (hwp_active)
return;
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index e24269a..1d99c97 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -56,7 +56,7 @@ module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
-typedef struct {
+struct pxa_freqs {
unsigned int khz;
unsigned int membus;
unsigned int cccr;
@@ -64,7 +64,7 @@ typedef struct {
unsigned int cclkcfg;
int vmin;
int vmax;
-} pxa_freqs_t;
+};
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
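Replacing pxa_freqs_t with a plain struct follows the kernel's coding style (Documentation/CodingStyle discourages typedefs that merely hide "struct"), and it is what lets the read-only tables below gain const. The general shape:

    /* discouraged: the typedef hides that this is a struct */
    typedef struct { unsigned int khz; } pxa_freqs_t;

    /* preferred: 'struct' stays visible at every use site, and
     * read-only tables can be declared const struct pxa_freqs [] */
    struct pxa_freqs { unsigned int khz; };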
@@ -86,7 +86,7 @@ static unsigned int sdram_rows;
/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
-static pxa_freqs_t pxa255_run_freqs[] =
+static const struct pxa_freqs pxa255_run_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
@@ -98,7 +98,7 @@ static pxa_freqs_t pxa255_run_freqs[] =
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
-static pxa_freqs_t pxa255_turbo_freqs[] =
+static const struct pxa_freqs pxa255_turbo_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
((HT) ? CCLKCFG_HALFTURBO : 0) | \
((T) ? CCLKCFG_TURBO : 0))
-static pxa_freqs_t pxa27x_freqs[] = {
+static struct pxa_freqs pxa27x_freqs[] = {
{104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
{156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
@@ -171,7 +171,7 @@ extern unsigned get_clk_frequency_khz(int info);
#ifdef CONFIG_REGULATOR
-static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
int ret = 0;
int vmin, vmax;
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
}
}
#else
-static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
return 0;
}
@@ -211,7 +211,7 @@ static void __init pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
- pxa_freqs_t **pxa_freqs)
+ const struct pxa_freqs **pxa_freqs)
{
if (cpu_is_pxa25x()) {
if (!pxa255_turbo_table) {
@@ -270,7 +270,7 @@ static unsigned int pxa_cpufreq_get(unsigned int cpu)
static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
- pxa_freqs_t *pxa_freq_settings;
+ const struct pxa_freqs *pxa_freq_settings;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -361,7 +361,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
int i;
unsigned int freq;
struct cpufreq_frequency_table *pxa255_freq_table;
- pxa_freqs_t *pxa255_freqs;
+ const struct pxa_freqs *pxa255_freqs;
/* try to guess pxa27x cpu */
if (cpu_is_pxa27x())
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 88b21ae..358f075 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -27,11 +27,11 @@
/**
* struct cpu_data
- * @parent: the parent node of cpu clock
+ * @pclk: the parent clocks of the cpu
* @table: frequency table
*/
struct cpu_data {
- struct device_node *parent;
+ struct clk **pclk;
struct cpufreq_frequency_table *table;
};
@@ -196,7 +196,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- struct device_node *np;
+ struct device_node *np, *pnode;
int i, count, ret;
u32 freq, mask;
struct clk *clk;
@@ -219,17 +219,23 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_nomem2;
}
- data->parent = of_parse_phandle(np, "clocks", 0);
- if (!data->parent) {
+ pnode = of_parse_phandle(np, "clocks", 0);
+ if (!pnode) {
pr_err("%s: could not get clock information\n", __func__);
goto err_nomem2;
}
- count = of_property_count_strings(data->parent, "clock-names");
+ count = of_property_count_strings(pnode, "clock-names");
+ data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->pclk) {
+ pr_err("%s: no memory\n", __func__);
+ goto err_node;
+ }
+
table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
if (!table) {
pr_err("%s: no memory\n", __func__);
- goto err_node;
+ goto err_pclk;
}
if (fmask)
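The new allocation slots into the existing unwind ladder, where each failure jumps to the label that releases everything acquired so far, in reverse order. One caveat worth flagging: of_property_count_strings() can return a negative errno, so a defensive version of this sequence would check count first (sketch, not part of the commit):

    count = of_property_count_strings(pnode, "clock-names");
    if (count <= 0)
        goto err_node;          /* no clocks listed, or malformed DT */

    data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
    if (!data->pclk)
        goto err_node;          /* nothing extra to free yet */

    table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
    if (!table)
        goto err_pclk;          /* frees data->pclk */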
@@ -238,7 +244,8 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
mask = 0x0;
for (i = 0; i < count; i++) {
- clk = of_clk_get(data->parent, i);
+ clk = of_clk_get(pnode, i);
+ data->pclk[i] = clk;
freq = clk_get_rate(clk);
/*
* the clock is valid if its frequency is not masked
@@ -273,13 +280,16 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = u64temp + 1;
of_node_put(np);
+ of_node_put(pnode);
return 0;
err_nomem1:
kfree(table);
+err_pclk:
+ kfree(data->pclk);
err_node:
- of_node_put(data->parent);
+ of_node_put(pnode);
err_nomem2:
policy->driver_data = NULL;
kfree(data);
@@ -293,7 +303,7 @@ static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
- of_node_put(data->parent);
+ kfree(data->pclk);
kfree(data->table);
kfree(data);
policy->driver_data = NULL;
@@ -307,7 +317,7 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
struct clk *parent;
struct cpu_data *data = policy->driver_data;
- parent = of_clk_get(data->parent, data->table[index].driver_data);
+ parent = data->pclk[data->table[index].driver_data];
return clk_set_parent(policy->clk, parent);
}
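The payoff of caching the clocks is in the hot path: qoriq_cpufreq_target() previously did an of_clk_get() lookup on every frequency transition, and now it is a single array index into pointers resolved once at init. Reduced to the pattern (names as in the hunks above):

    /* init: resolve each candidate parent once and keep the reference */
    for (i = 0; i < count; i++)
        data->pclk[i] = of_clk_get(pnode, i);

    /* target: O(1) per transition, no device-tree walk */
    parent = data->pclk[data->table[index].driver_data];
    return clk_set_parent(policy->clk, parent);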