From a938da0682c2487f6aafc9a7c3caa8d675acdb38 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Sun, 12 Aug 2012 00:17:02 +0200 Subject: PM / Sleep: Print name of wakeup source that aborts suspend A driver or app may repeatedly request a wakeup source while the system is attempting to enter suspend, which may indicate a bug or at least point out a highly active system component that is responsible for decreased battery life on a mobile device. Even when the incidence of suspend abort is not severe, identifying wakeup sources that frequently abort suspend can be a useful clue for power management analysis. In some cases the existing stats can point out the offender where there is an unexpectedly high activation count that stands out from the others, but in other cases the wakeup source frequently taken just after the rest of the system thinks it's time to suspend might not stand out in the overall stats. It is also often useful to have information about what's been happening recently, rather than totals of all activity for the system boot. Therefore, print a line identifying the wakeup source that aborted suspend, to aid analysis of these situations. Signed-off-by: Todd Poynor Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index cbb463b..8a0a9ca 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -649,6 +649,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) } EXPORT_SYMBOL_GPL(pm_wakeup_event); +static void print_active_wakeup_sources(void) +{ + struct wakeup_source *ws; + int active = 0; + struct wakeup_source *last_activity_ws = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->active) { + pr_info("active wakeup source: %s\n", ws->name); + active = 1; + } else if (!active && + (!last_activity_ws || + ktime_to_ns(ws->last_time) > + ktime_to_ns(last_activity_ws->last_time))) { + last_activity_ws = ws; + } + } + + if (!active && last_activity_ws) + pr_info("last active wakeup source: %s\n", + last_activity_ws->name); + rcu_read_unlock(); +} + /** * pm_wakeup_pending - Check if power transition in progress should be aborted. * @@ -671,6 +696,10 @@ bool pm_wakeup_pending(void) events_check_enabled = !ret; } spin_unlock_irqrestore(&events_lock, flags); + + if (ret) + print_active_wakeup_sources(); + return ret; } -- cgit v0.10.2 From 5834ec3aea8a84b70efeb52ee91a8f8b1042cd2a Mon Sep 17 00:00:00 2001 From: Sedat Dilek Date: Thu, 23 Aug 2012 02:47:13 +0200 Subject: PM / Freezer: Fix small typo "regrigerator" Noticed when digging into a suspend issue in linux-next (next-20120821). For more details see . Signed-off-by: Sedat Dilek Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki diff --git a/kernel/power/process.c b/kernel/power/process.c index 19db29f..87da817 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -79,7 +79,7 @@ static int try_to_freeze_tasks(bool user_only) /* * We need to retry, but first give the freezing tasks some - * time to enter the regrigerator. + * time to enter the refrigerator. */ msleep(10); } -- cgit v0.10.2 From 62d6ae880e3e76098d5e345decd2dce443975889 Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Thu, 19 Jul 2012 20:34:10 +0000 Subject: Honor state disabling in the cpuidle ladder governor There are two cpuidle governors, ladder and menu. While the ladder governor is always available, if CONFIG_CPU_IDLE is selected, the menu governor additionally requires CONFIG_NO_HZ. 
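To make the first patch above concrete: the new "active wakeup source" line fires when a driver still holds a wakeup source at the moment pm_wakeup_pending() aborts the transition. A minimal sketch of the driver side, using the standard wakeup-source API from linux/pm_wakeup.h - the source name "my_modem" and the my_driver_* functions are hypothetical:

#include <linux/pm_wakeup.h>

static struct wakeup_source *my_ws;

static int my_driver_init(void)
{
	/* Register a named wakeup source; this name is what the new
	 * pr_info() line prints if the source aborts a suspend. */
	my_ws = wakeup_source_register("my_modem");
	return my_ws ? 0 : -ENOMEM;
}

static void my_driver_handle_event(void)
{
	__pm_stay_awake(my_ws);		/* suspend attempts abort here */
	/* ... process the wakeup event ... */
	__pm_relax(my_ws);		/* suspend may proceed again */
}

If my_driver_handle_event() runs while the system is trying to suspend, the log now shows "active wakeup source: my_modem", pointing straight at the offender.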
A particular C state can be disabled by writing to the sysfs file /sys/devices/system/cpu/cpuN/cpuidle/stateN/disable, but this mechanism is only implemented in the menu governor. Thus, in a system where CONFIG_NO_HZ is not selected, the ladder governor becomes the default and will always walk through all sleep states - irrespective of whether the C state was disabled via sysfs or not. The only way to select a specific C state was to write the related latency to /dev/cpu_dma_latency and keep the file open as long as this setting was required - not very practical, and not suitable for affecting only a single core in an SMP system. With this patch, the ladder governor will only promote to the next C state if it has not been disabled, and it will demote if the current C state has been disabled. Note that the patch does not make the setting of the sysfs variable "disable" coherent, i.e. if one disables a light state, then all deeper states are disabled as well, but the "disable" variable does not reflect it. Likewise, if one enables a deep state but a lighter state is still disabled, then this has no effect. A related section has been added to the documentation. Signed-off-by: Carsten Emde Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt index 9d28a34..b6f44f4 100644 --- a/Documentation/cpuidle/sysfs.txt +++ b/Documentation/cpuidle/sysfs.txt @@ -76,9 +76,17 @@ total 0 * desc : Small description about the idle state (string) -* disable : Option to disable this idle state (bool) +* disable : Option to disable this idle state (bool) -> see note below * latency : Latency to exit out of this idle state (in microseconds) * name : Name of the idle state (string) * power : Power consumed while in this idle state (in milliwatts) * time : Total time spent in this idle state (in microseconds) * usage : Number of times this state was entered (count) + +Note: +The behavior and the effect of the disable variable depends on the +implementation of a particular governor. In the ladder governor, for +example, it is not coherent, i.e. if one is disabling a light state, +then all deeper states are disabled as well, but the disable variable +does not reflect it. Likewise, if one enables a deep state but a lighter +state still is disabled, then this has no effect. diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index b6a09ea..2aef26c 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -88,6 +88,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, /* consider promotion */ if (last_idx < drv->state_count - 1 && + !dev->states_usage[last_idx + 1].disable && last_residency > last_state->threshold.promotion_time && drv->states[last_idx + 1].exit_latency <= latency_req) { last_state->stats.promotion_count++; @@ -100,7 +101,8 @@ static int ladder_select_state(struct cpuidle_driver *drv, /* consider demotion */ if (last_idx > CPUIDLE_DRIVER_STATE_START && - drv->states[last_idx].exit_latency > latency_req) { + (dev->states_usage[last_idx].disable || + drv->states[last_idx].exit_latency > latency_req)) { int i; for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { -- cgit v0.10.2 From 66804c13f7b79fb26cf4848ebac1e865b9aff65c Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 15 Aug 2012 20:28:52 +0200 Subject: PM / cpuidle: Make ladder governor use the "disabled" state flag For the mechanism introduced by commit cbc9ef0 (PM / Domains: Add preliminary support for cpuidle, v2) to work with the ladder governor, that governor should respect the "disabled" state flag added by that commit. Change the ladder governor accordingly. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 2aef26c..9b78405 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -88,6 +88,7 @@ static int ladder_select_state(struct cpuidle_driver *drv, /* consider promotion */ if (last_idx < drv->state_count - 1 && + !drv->states[last_idx + 1].disabled && !dev->states_usage[last_idx + 1].disable && last_residency > last_state->threshold.promotion_time && drv->states[last_idx + 1].exit_latency <= latency_req) { @@ -101,7 +102,8 @@ static int ladder_select_state(struct cpuidle_driver *drv, /* consider demotion */ if (last_idx > CPUIDLE_DRIVER_STATE_START && - (dev->states_usage[last_idx].disable || + (drv->states[last_idx].disabled || + dev->states_usage[last_idx].disable || drv->states[last_idx].exit_latency > latency_req)) { int i; -- cgit v0.10.2 From 802d8b49a7705298b62ac35a59b867f1288caaf3 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:39:16 +0200 Subject: PM / Domains: Introduce simplified power on routine for system resume Introduce function pm_genpd_sync_poweron() for restoring domain power during resume from system suspend and hibernation. It can be much simpler than pm_genpd_poweron(), because it doesn't have to care about locking and it can skip many checks done by the latter. Modify pm_genpd_resume_noirq() and pm_genpd_restore_noirq() to use the new function. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ba3487c..55c39f5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -777,6 +777,32 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) } /** + * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. + * @genpd: PM domain to power on. + * + * This function is only called in "noirq" stage of system power transitions, so + * it need not acquire locks (all of the "noirq" callbacks are executed + * sequentially, so it is guaranteed that it will never run twice in parallel). + */ +static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) +{ + struct gpd_link *link; + + if (genpd->status != GPD_STATE_POWER_OFF) + return; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + pm_genpd_sync_poweron(link->master); + genpd_sd_counter_inc(link->master); + } + + if (genpd->power_on) + genpd->power_on(genpd); + + genpd->status = GPD_STATE_ACTIVE; +} + +/** * resume_needed - Check whether to resume a device before system suspend. * @dev: Device to check. * @genpd: PM domain the device belongs to. @@ -979,7 +1005,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. 
*/ - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); genpd->suspended_count--; return genpd_start_dev(genpd, dev); @@ -1186,8 +1212,8 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspended_count++ == 0) { /* * The boot kernel might put the domain into arbitrary state, - * so make it appear as powered off to pm_genpd_poweron(), so - * that it tries to power it on in case it was really off. + * so make it appear as powered off to pm_genpd_sync_poweron(), + * so that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; if (genpd->suspend_power_off) { @@ -1205,7 +1231,7 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); } -- cgit v0.10.2 From 77f827de07432a74821cf0f831d699544b2d474f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:39:57 +0200 Subject: PM / Domains: Add power off/on function for system core suspend stage Introduce function pm_genpd_syscore_switch() and two wrappers around it, pm_genpd_syscore_poweroff() and pm_genpd_syscore_poweron(), allowing the callers to let the generic PM domains framework know that the given device is not necessary any more and its PM domain can be turned off (the former) or that the given device will be required immediately, so its PM domain has to be turned on (the latter) during the system core (syscore) stage of system suspend (or hibernation) and resume. These functions will be used for handling devices registered as clock sources and clock event devices that belong to PM domains. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 55c39f5..515c8ec 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -697,6 +697,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP +/** + * pm_genpd_present - Check if the given PM domain has been initialized. + * @genpd: PM domain to check. + */ +static bool pm_genpd_present(struct generic_pm_domain *genpd) +{ + struct generic_pm_domain *gpd; + + if (IS_ERR_OR_NULL(genpd)) + return false; + + list_for_each_entry(gpd, &gpd_list, gpd_list_node) + if (gpd == genpd) + return true; + + return false; +} + static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, struct device *dev) { @@ -750,9 +768,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. * - * This function is only called in "noirq" stages of system power transitions, - * so it need not acquire locks (all of the "noirq" callbacks are executed - * sequentially, so it is guaranteed that it will never run twice in parallel). + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). */ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) { @@ -780,9 +799,10 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. * @genpd: PM domain to power on. 
* - * This function is only called in "noirq" stage of system power transitions, so - * it need not acquire locks (all of the "noirq" callbacks are executed - * sequentially, so it is guaranteed that it will never run twice in parallel). + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). */ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) { @@ -1272,6 +1292,31 @@ static void pm_genpd_complete(struct device *dev) } } +/** + * pm_genpd_syscore_switch - Switch power during system core suspend or resume. + * @dev: Device that normally is marked as "always on" to switch power for. + * + * This routine may only be called during the system core (syscore) suspend or + * resume phase for devices whose "always on" flags are set. + */ +void pm_genpd_syscore_switch(struct device *dev, bool suspend) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd(dev); + if (!pm_genpd_present(genpd)) + return; + + if (suspend) { + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + } else { + pm_genpd_sync_poweron(genpd); + genpd->suspended_count--; + } +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); + #else #define pm_genpd_prepare NULL diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a7d6172..ab83cf3 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -258,4 +258,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} static inline void pm_genpd_poweroff_unused(void) {} #endif +#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP +extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); +#else +static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} +#endif + +static inline void pm_genpd_syscore_poweroff(struct device *dev) +{ + pm_genpd_syscore_switch(dev, true); +} + +static inline void pm_genpd_syscore_poweron(struct device *dev) +{ + pm_genpd_syscore_switch(dev, false); +} + #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a70518c..5dfdc9e 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS bool depends on PM +config PM_GENERIC_DOMAINS_SLEEP + def_bool y + depends on PM_SLEEP && PM_GENERIC_DOMAINS + config PM_GENERIC_DOMAINS_RUNTIME def_bool y depends on PM_RUNTIME && PM_GENERIC_DOMAINS -- cgit v0.10.2 From adc78e6b9946a4b22e22403d961f3b03c469e5d3 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:40:41 +0200 Subject: timekeeping: Add suspend and resume of clock event devices Some clock event devices, for example those that belong to PM domains, need to be handled in a special way during the timekeeping suspend and resume (which takes place in the system core, or "syscore", stages of system power transitions) in analogy with clock sources. Introduce .suspend() and .resume() callbacks for clock event devices that will be executed by timekeeping_suspend/_resume(), respectively, next to the clock sources' .suspend() and .resume() callbacks. Signed-off-by: Rafael J. 
Wysocki diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index acba8943..8a7096f 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -97,6 +97,8 @@ struct clock_event_device { void (*broadcast)(const struct cpumask *mask); void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); unsigned long min_delta_ticks; unsigned long max_delta_ticks; @@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) freq, minsec); } +extern void clockevents_suspend(void); +extern void clockevents_resume(void); + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void clockevents_notify(unsigned long reason, void *arg); #else @@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg); #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ +static inline void clockevents_suspend(void) {} +static inline void clockevents_resume(void) {} + #define clockevents_notify(reason, arg) do { } while (0) #endif diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 7e1ce01..30b6de0 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old, local_irq_restore(flags); } +/** + * clockevents_suspend - suspend clock devices + */ +void clockevents_suspend(void) +{ + struct clock_event_device *dev; + + list_for_each_entry_reverse(dev, &clockevent_devices, list) + if (dev->suspend) + dev->suspend(dev); +} + +/** + * clockevents_resume - resume clock devices + */ +void clockevents_resume(void) +{ + struct clock_event_device *dev; + + list_for_each_entry(dev, &clockevent_devices, list) + if (dev->resume) + dev->resume(dev); +} + #ifdef CONFIG_GENERIC_CLOCKEVENTS /** * clockevents_notify - notification about relevant events diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 34e5eac..312a675 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -773,6 +773,7 @@ static void timekeeping_resume(void) read_persistent_clock(&ts); + clockevents_resume(); clocksource_resume(); write_seqlock_irqsave(&tk->lock, flags); @@ -832,6 +833,7 @@ static int timekeeping_suspend(void) clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clocksource_suspend(); + clockevents_suspend(); return 0; } -- cgit v0.10.2 From eaa49a8cd1f98a6486413d902e7304df026a1fa9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:41:20 +0200 Subject: sh: TMU: Introduce clocksource/clock events suspend/resume routines Introduce suspend/resume routines for SH TMU clock source and clock event device such that if those devices belong to a PM domain, the generic PM domains framework will be notified that the given domain may be turned off (during system suspend) or that it has to be turned on (during system resume). This change allows the A4R domain on SH7372 to be turned off during system suspend (tested on the Mackerel board) if the TMU clock source and/or clock event device is in use. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index c1b51d4..7d70082 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -33,6 +33,7 @@ #include #include #include +#include struct sh_tmu_priv { void __iomem *mapbase; @@ -43,6 +44,7 @@ struct sh_tmu_priv { unsigned long periodic; struct clock_event_device ced; struct clocksource cs; + bool cs_enabled; }; static DEFINE_RAW_SPINLOCK(sh_tmu_lock); @@ -204,14 +206,40 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs) int ret; ret = sh_tmu_enable(p); - if (!ret) + if (!ret) { __clocksource_updatefreq_hz(cs, p->rate); + p->cs_enabled = true; + } return ret; } static void sh_tmu_clocksource_disable(struct clocksource *cs) { - sh_tmu_disable(cs_to_sh_tmu(cs)); + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + WARN_ON(!p->cs_enabled); + + sh_tmu_disable(p); + p->cs_enabled = false; +} + +static void sh_tmu_clocksource_suspend(struct clocksource *cs) +{ + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + if (p->cs_enabled) + sh_tmu_disable(p); + + pm_genpd_syscore_poweroff(&p->pdev->dev); +} + +static void sh_tmu_clocksource_resume(struct clocksource *cs) +{ + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + pm_genpd_syscore_poweron(&p->pdev->dev); + if (p->cs_enabled) + sh_tmu_enable(p); } static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, @@ -224,6 +252,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, cs->read = sh_tmu_clocksource_read; cs->enable = sh_tmu_clocksource_enable; cs->disable = sh_tmu_clocksource_disable; + cs->suspend = sh_tmu_clocksource_suspend; + cs->resume = sh_tmu_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(32); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; @@ -301,6 +331,16 @@ static int sh_tmu_clock_event_next(unsigned long delta, return 0; } +static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev); +} + +static void sh_tmu_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev); +} + static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, char *name, unsigned long rating) { @@ -316,6 +356,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, ced->cpumask = cpumask_of(0); ced->set_next_event = sh_tmu_clock_event_next; ced->set_mode = sh_tmu_clock_event_mode; + ced->suspend = sh_tmu_clock_event_suspend; + ced->resume = sh_tmu_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); @@ -407,8 +449,12 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) struct sh_tmu_priv *p = platform_get_drvdata(pdev); int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + struct sh_timer_config *cfg = pdev->dev.platform_data; + + if (cfg->clocksource_rating || cfg->clockevent_rating) + pm_genpd_dev_always_on(&pdev->dev, true); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); -- cgit v0.10.2 From 9bb5ec8872312751f992cdac9968805fe03e81aa Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 6 Aug 2012 01:43:03 +0200 Subject: sh: CMT: Introduce clocksource/clock events suspend/resume routines Introduce suspend/resume routines for SH CMT clock event devices and modify the suspend/resume routines for SH CMT clock sources such that if those devices belong to a PM domain, the generic PM domains framework will be notified that the given domain may be turned off (during system suspend) or that it has to be turned on (during system resume). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 98b06ba..c06d27f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -464,9 +464,20 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs) sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); } +static void sh_cmt_clocksource_suspend(struct clocksource *cs) +{ + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + sh_cmt_stop(p, FLAG_CLOCKSOURCE); + pm_genpd_syscore_poweroff(&p->pdev->dev); +} + static void sh_cmt_clocksource_resume(struct clocksource *cs) { - sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + pm_genpd_syscore_poweron(&p->pdev->dev); + sh_cmt_start(p, FLAG_CLOCKSOURCE); } static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, @@ -479,7 +490,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, cs->read = sh_cmt_clocksource_read; cs->enable = sh_cmt_clocksource_enable; cs->disable = sh_cmt_clocksource_disable; - cs->suspend = sh_cmt_clocksource_disable; + cs->suspend = sh_cmt_clocksource_suspend; cs->resume = sh_cmt_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; @@ -562,6 +573,16 @@ static int sh_cmt_clock_event_next(unsigned long delta, return 0; } +static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev); +} + +static void sh_cmt_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev); +} + static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, char *name, unsigned long rating) { @@ -576,6 +597,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, ced->cpumask = cpumask_of(0); ced->set_next_event = sh_cmt_clock_event_next; ced->set_mode = sh_cmt_clock_event_mode; + ced->suspend = sh_cmt_clock_event_suspend; + ced->resume = sh_cmt_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); @@ -690,8 +713,12 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) struct sh_cmt_priv *p = platform_get_drvdata(pdev); int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + struct sh_timer_config *cfg = pdev->dev.platform_data; + + if (cfg->clocksource_rating || cfg->clockevent_rating) + pm_genpd_dev_always_on(&pdev->dev, true); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); -- cgit v0.10.2 From cc7ad4564b1097c916834dc919678867631e7676 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 6 Aug 2012 01:43:41 +0200 Subject: sh: MTU2: Introduce clock events suspend/resume routines Introduce suspend/resume routines for SH MTU2 clock event devices such that if those devices belong to a PM domain, the generic PM domains framework will be notified that the given domain may be turned off (during system suspend) or that it has to be turned on (during system resume). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index d9b76ca..a55bb90 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -208,6 +208,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, } } +static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); +} + +static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); +} + static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, char *name, unsigned long rating) { @@ -221,6 +231,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, ced->rating = rating; ced->cpumask = cpumask_of(0); ced->set_mode = sh_mtu2_clock_event_mode; + ced->suspend = sh_mtu2_clock_event_suspend; + ced->resume = sh_mtu2_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); @@ -307,8 +319,12 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) struct sh_mtu2_priv *p = platform_get_drvdata(pdev); int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + struct sh_timer_config *cfg = pdev->dev.platform_data; + + if (cfg->clockevent_rating) + pm_genpd_dev_always_on(&pdev->dev, true); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); -- cgit v0.10.2 From e91c11b1a7f876c6f056d872eb210734150a1795 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:44:28 +0200 Subject: PM: Reorganize device PM initialization Make the device power management initialization more straightforward by moving the initialization of common (i.e. used by both runtime PM and system suspend) fields to a separate routine. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0113adc..7bd1fe4 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -57,20 +57,17 @@ static pm_message_t pm_transition; static int async_error; /** - * device_pm_init - Initialize the PM-related part of a device object. + * device_pm_sleep_init - Initialize system suspend-related device fields. * @dev: Device object being initialized. 
*/ -void device_pm_init(struct device *dev) +void device_pm_sleep_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; - spin_lock_init(&dev->power.lock); - pm_runtime_init(dev); INIT_LIST_HEAD(&dev->power.entry); - dev->power.power_state = PMSG_INVALID; } /** diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index eeb4bff..8a0dcc7 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -1,5 +1,11 @@ #include +static inline void device_pm_init_common(struct device *dev) +{ + spin_lock_init(&dev->power.lock); + dev->power.power_state = PMSG_INVALID; +} + #ifdef CONFIG_PM_RUNTIME extern void pm_runtime_init(struct device *dev); @@ -25,7 +31,7 @@ static inline struct device *to_device(struct list_head *entry) return container_of(entry, struct device, power.entry); } -extern void device_pm_init(struct device *dev); +extern void device_pm_sleep_init(struct device *dev); extern void device_pm_add(struct device *); extern void device_pm_remove(struct device *); extern void device_pm_move_before(struct device *, struct device *); @@ -34,12 +40,7 @@ extern void device_pm_move_last(struct device *); #else /* !CONFIG_PM_SLEEP */ -static inline void device_pm_init(struct device *dev) -{ - spin_lock_init(&dev->power.lock); - dev->power.power_state = PMSG_INVALID; - pm_runtime_init(dev); -} +static inline void device_pm_sleep_init(struct device *dev) {} static inline void device_pm_add(struct device *dev) { @@ -60,6 +61,13 @@ static inline void device_pm_move_last(struct device *dev) {} #endif /* !CONFIG_PM_SLEEP */ +static inline void device_pm_init(struct device *dev) +{ + device_pm_init_common(dev); + device_pm_sleep_init(dev); + pm_runtime_init(dev); +} + #ifdef CONFIG_PM /* -- cgit v0.10.2 From bed2b42d9f0b411f384c5619870ab0fea5dd116b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:45:11 +0200 Subject: PM / Runtime: Allow helpers to be called by early platform drivers Runtime PM helper functions, like pm_runtime_get_sync(), cannot be called by early platform device drivers, because the devices' power management locks are not initialized at that time. This is quite inconvenient, so modify early_platform_add_devices() to initialize the devices' power management locks as appropriate and make sure that they won't be initialized more than once if an early platform device is going to be used as a regular one later. Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/base/platform.c b/drivers/base/platform.c index a1a7225..d51514b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -22,6 +22,7 @@ #include #include "base.h" +#include "power/power.h" #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ driver)) @@ -948,6 +949,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num) dev = &devs[i]->dev; if (!dev->devres_head.next) { + pm_runtime_early_init(dev); INIT_LIST_HEAD(&dev->devres_head); list_add_tail(&dev->devres_head, &early_platform_device_list); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 8a0dcc7..0dbfdf4 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -2,17 +2,31 @@ static inline void device_pm_init_common(struct device *dev) { - spin_lock_init(&dev->power.lock); - dev->power.power_state = PMSG_INVALID; + if (!dev->power.early_init) { + spin_lock_init(&dev->power.lock); + dev->power.power_state = PMSG_INVALID; + dev->power.early_init = true; + } } #ifdef CONFIG_PM_RUNTIME +static inline void pm_runtime_early_init(struct device *dev) +{ + dev->power.disable_depth = 1; + device_pm_init_common(dev); +} + extern void pm_runtime_init(struct device *dev); extern void pm_runtime_remove(struct device *dev); #else /* !CONFIG_PM_RUNTIME */ +static inline void pm_runtime_early_init(struct device *dev) +{ + device_pm_init_common(dev); +} + static inline void pm_runtime_init(struct device *dev) {} static inline void pm_runtime_remove(struct device *dev) {} diff --git a/include/linux/pm.h b/include/linux/pm.h index f067e60..716517a 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -510,6 +510,7 @@ struct dev_pm_info { bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool ignore_children:1; + bool early_init:1; /* Owned by the PM core */ spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; -- cgit v0.10.2 From 6fb28badf207a6d8a78906353772e1c3f560a977 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:45:54 +0200 Subject: PM / Domains: Rename the always_on device flag to syscore The always_on device flag is used to mark the devices (belonging to a PM domain) that should never be turned off, except for the system core (syscore) suspend/hibernation and resume stages. Change the name of that flag to "syscore" to better reflect its purpose. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 515c8ec..15234ec 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -436,7 +436,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) + || pdd->dev->power.irq_safe || to_gpd_data(pdd)->syscore)) not_suspended++; if (not_suspended > genpd->in_progress) @@ -578,7 +578,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (dev_gpd_data(dev)->always_on) + if (dev_gpd_data(dev)->syscore) return -EBUSY; stop_ok = genpd->gov ? 
genpd->gov->stop_ok : NULL; @@ -983,7 +983,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || dev_gpd_data(dev)->syscore || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -1016,7 +1016,7 @@ static int pm_genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || dev_gpd_data(dev)->syscore || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -1136,7 +1136,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? + return genpd->suspend_power_off || dev_gpd_data(dev)->syscore ? 0 : genpd_stop_dev(genpd, dev); } @@ -1157,7 +1157,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? + return genpd->suspend_power_off || dev_gpd_data(dev)->syscore ? 0 : genpd_start_dev(genpd, dev); } @@ -1253,7 +1253,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd); - return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); + return dev_gpd_data(dev)->syscore ? 0 : genpd_start_dev(genpd, dev); } /** @@ -1526,11 +1526,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, } /** - * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. + * pm_genpd_dev_syscore - Set/unset the "syscore" flag for a given device. * @dev: Device to set/unset the flag for. - * @val: The new value of the device's "always on" flag. + * @val: The new value of the device's "syscore" flag. */ -void pm_genpd_dev_always_on(struct device *dev, bool val) +void pm_genpd_dev_syscore(struct device *dev, bool val) { struct pm_subsys_data *psd; unsigned long flags; @@ -1539,11 +1539,11 @@ void pm_genpd_dev_always_on(struct device *dev, bool val) psd = dev_to_psd(dev); if (psd && psd->domain_data) - to_gpd_data(psd->domain_data)->always_on = val; + to_gpd_data(psd->domain_data)->syscore = val; spin_unlock_irqrestore(&dev->power.lock, flags); } -EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); +EXPORT_SYMBOL_GPL(pm_genpd_dev_syscore); /** * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. 
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index c06d27f..c6fbb9f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -717,7 +717,7 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clocksource_rating || cfg->clockevent_rating) - pm_genpd_dev_always_on(&pdev->dev, true); + pm_genpd_dev_syscore(&pdev->dev, true); } if (p) { diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index a55bb90..278c18a 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -323,7 +323,7 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clockevent_rating) - pm_genpd_dev_always_on(&pdev->dev, true); + pm_genpd_dev_syscore(&pdev->dev, true); } if (p) { diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 7d70082..5319689 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -453,7 +453,7 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clocksource_rating || cfg->clockevent_rating) - pm_genpd_dev_always_on(&pdev->dev, true); + pm_genpd_dev_syscore(&pdev->dev, true); } if (p) { diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index ab83cf3..dab0938 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -114,7 +114,7 @@ struct generic_pm_domain_data { struct mutex lock; unsigned int refcount; bool need_restore; - bool always_on; + bool syscore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -153,7 +153,7 @@ static inline int pm_genpd_of_add_device(struct device_node *genpd_node, extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); -extern void pm_genpd_dev_always_on(struct device *dev, bool val); +extern void pm_genpd_dev_syscore(struct device *dev, bool val); extern void pm_genpd_dev_need_restore(struct device *dev, bool val); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); @@ -199,7 +199,7 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {} +static inline void pm_genpd_dev_syscore(struct device *dev, bool val) {} static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {} static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_sd) -- cgit v0.10.2 From dbf374142dd7a3c394ec124ebe7339a6c412d9b6 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:46:39 +0200 Subject: PM / Domains: Move syscore flag from subsys data to struct device The syscore device PM flag is used to mark the devices (belonging to a PM domain) that should never be turned off, except for the system core (syscore) suspend/hibernation and resume stages. That flag is stored in the device's struct pm_subsys_data object whose address is available from struct device. However, in some situations it may be convenient to set that flag before the device is added to a PM domain, so it is better to move it directly to the "power" member of struct device. Then, it can be checked by the routines in drivers/base/power/runtime.c and drivers/base/power/main.c, which is more straightforward. 
This also reduces the number of dev_gpd_data() invocations in the generic PM domains framework, so the overhead related to the syscore flag is slightly smaller. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 39c3252..cf7a851 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -83,3 +83,18 @@ int dev_pm_put_subsys_data(struct device *dev) return ret; } EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); + +/** + * dev_pm_syscore_device - Set/unset the given device's power.syscore flag. + * @dev: Device whose flag is to be modified. + * @val: New value of the flag. + */ +void dev_pm_syscore_device(struct device *dev, bool val) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + dev->power.syscore = val; + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(dev_pm_syscore_device); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 15234ec..5217275 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -436,7 +436,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe || to_gpd_data(pdd)->syscore)) + || pdd->dev->power.irq_safe || pdd->dev->power.syscore)) not_suspended++; if (not_suspended > genpd->in_progress) @@ -578,9 +578,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (dev_gpd_data(dev)->syscore) - return -EBUSY; - stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; if (stop_ok && !stop_ok(dev)) return -EBUSY; @@ -983,7 +980,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->syscore + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -1016,7 +1013,7 @@ static int pm_genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->syscore + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -1136,8 +1133,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->syscore ? - 0 : genpd_stop_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); } /** @@ -1157,8 +1153,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->syscore ? - 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); } /** @@ -1253,7 +1248,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd); - return dev_gpd_data(dev)->syscore ? 0 : genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev); } /** @@ -1526,26 +1521,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, } /** - * pm_genpd_dev_syscore - Set/unset the "syscore" flag for a given device. - * @dev: Device to set/unset the flag for. - * @val: The new value of the device's "syscore" flag. 
- */ -void pm_genpd_dev_syscore(struct device *dev, bool val) -{ - struct pm_subsys_data *psd; - unsigned long flags; - - spin_lock_irqsave(&dev->power.lock, flags); - - psd = dev_to_psd(dev); - if (psd && psd->domain_data) - to_gpd_data(psd->domain_data)->syscore = val; - - spin_unlock_irqrestore(&dev->power.lock, flags); -} -EXPORT_SYMBOL_GPL(pm_genpd_dev_syscore); - -/** * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. * @dev: Device to set/unset the flag for. * @val: The new value of the device's "need restore" flag. diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 7bd1fe4..57f5814 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -405,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Out; + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -426,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) error = dpm_run_callback(callback, dev, state, info); + Out: TRACE_RESUME(error); return error; } @@ -483,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Out; + if (dev->pm_domain) { info = "early power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -504,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state) error = dpm_run_callback(callback, dev, state, info); + Out: TRACE_RESUME(error); return error; } @@ -567,6 +575,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Complete; + dpm_wait(dev->parent, async); device_lock(dev); @@ -629,6 +640,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) Unlock: device_unlock(dev); + + Complete: complete_all(&dev->power.completion); TRACE_RESUME(error); @@ -719,6 +732,9 @@ static void device_complete(struct device *dev, pm_message_t state) void (*callback)(struct device *) = NULL; char *info = NULL; + if (dev->power.syscore) + return; + device_lock(dev); if (dev->pm_domain) { @@ -831,6 +847,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) pm_callback_t callback = NULL; char *info = NULL; + if (dev->power.syscore) + return 0; + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -914,6 +933,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state) pm_callback_t callback = NULL; char *info = NULL; + if (dev->power.syscore) + return 0; + if (dev->pm_domain) { info = "late power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -1050,6 +1072,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Complete; } + if (dev->power.syscore) + goto Complete; + device_lock(dev); if (dev->pm_domain) { @@ -1206,6 +1231,9 @@ static int device_prepare(struct device *dev, pm_message_t state) char *info = NULL; int error = 0; + if (dev->power.syscore) + return 0; + device_lock(dev); dev->power.wakeup_path = device_may_wakeup(dev); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7d9c1cb..bd1de39 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -134,7 +134,7 @@ static int rpm_check_suspend_allowed(struct device *dev) if 
(dev->power.runtime_error) retval = -EINVAL; - else if (dev->power.disable_depth > 0) + else if (dev->power.disable_depth > 0 || dev->power.syscore) retval = -EACCES; else if (atomic_read(&dev->power.usage_count) > 0) retval = -EAGAIN; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index c6fbb9f..a515605 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -717,7 +717,7 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clocksource_rating || cfg->clockevent_rating) - pm_genpd_dev_syscore(&pdev->dev, true); + dev_pm_syscore_device(&pdev->dev, true); } if (p) { diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 278c18a..1a95cad 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -323,7 +323,7 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clockevent_rating) - pm_genpd_dev_syscore(&pdev->dev, true); + dev_pm_syscore_device(&pdev->dev, true); } if (p) { diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 5319689..81b0239 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -453,7 +453,7 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) struct sh_timer_config *cfg = pdev->dev.platform_data; if (cfg->clocksource_rating || cfg->clockevent_rating) - pm_genpd_dev_syscore(&pdev->dev, true); + dev_pm_syscore_device(&pdev->dev, true); } if (p) { diff --git a/include/linux/pm.h b/include/linux/pm.h index 716517a..b79a0dd 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -43,8 +43,12 @@ struct device; #ifdef CONFIG_PM extern const char power_group_name[]; /* = "power" */ + +extern void dev_pm_syscore_device(struct device *dev, bool val); #else #define power_group_name NULL + +static inline void dev_pm_syscore_device(struct device *dev, bool val) {} #endif typedef struct pm_message { @@ -511,6 +515,7 @@ struct dev_pm_info { bool is_suspended:1; /* Ditto */ bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ + bool syscore:1; spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index dab0938..08adf8e 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -114,7 +114,6 @@ struct generic_pm_domain_data { struct mutex lock; unsigned int refcount; bool need_restore; - bool syscore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -153,7 +152,6 @@ static inline int pm_genpd_of_add_device(struct device_node *genpd_node, extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); -extern void pm_genpd_dev_syscore(struct device *dev, bool val); extern void pm_genpd_dev_need_restore(struct device *dev, bool val); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); @@ -199,7 +197,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_dev_syscore(struct device *dev, bool val) {} static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {} static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_sd) -- cgit v0.10.2 From e2e3e4e51ebdcd757079bd7ec5dcc9dfb2ebce24 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 6 Aug 2012 01:47:29 +0200 Subject: PM / Domains: Do not measure start time for "irq safe" devices The genpd_start_dev() routine used by pm_genpd_runtime_resume() to put "irq safe" devices into the full power state measures the time necessary to "start" the device and updates its PM QoS timing data if necessary. This may lead to a deadlock if the given device is a clock source and genpd_start_dev() is invoked from within the clock source's .enable() routine, which will happen if that routine uses pm_runtime_get_sync(), for example, to ensure that the device is operational. For this reason, introduce a special routine analogous to genpd_start_dev(), called genpd_start_dev_no_timing(), that doesn't carry out the time measurement, and make pm_genpd_runtime_resume() use it instead of genpd_start_dev() to power up "irq safe" devices. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 5217275..d7e71b5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -75,6 +75,12 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) start_latency_ns, "start"); } +static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, start, dev); +} + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -626,7 +632,7 @@ static int pm_genpd_runtime_resume(struct device *dev) /* If power.irq_safe, the PM domain is never powered off. */ if (dev->power.irq_safe) - return genpd_start_dev(genpd, dev); + return genpd_start_dev_no_timing(genpd, dev); mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); -- cgit v0.10.2 From 61a53bfaa11644b8e9850ac79024b06465a43518 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:48:17 +0200 Subject: sh: TMU: Basic runtime PM support Modify the SH TMU clock source/clock event device driver to support runtime PM at a basic level (i.e. device clocks can be disabled and enabled, but domain power must be on, because the devices have to be marked as "irq safe"). Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 81b0239..0cc4add 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -45,6 +45,7 @@ struct sh_tmu_priv { struct clock_event_device ced; struct clocksource cs; bool cs_enabled; + unsigned int enable_count; }; static DEFINE_RAW_SPINLOCK(sh_tmu_lock); @@ -109,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); } -static int sh_tmu_enable(struct sh_tmu_priv *p) +static int __sh_tmu_enable(struct sh_tmu_priv *p) { int ret; @@ -137,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p) return 0; } -static void sh_tmu_disable(struct sh_tmu_priv *p) +static int sh_tmu_enable(struct sh_tmu_priv *p) +{ + if (p->enable_count++ > 0) + return 0; + + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + + return __sh_tmu_enable(p); +} + +static void __sh_tmu_disable(struct sh_tmu_priv *p) { /* disable channel */ sh_tmu_start_stop_ch(p, 0); @@ -149,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p) clk_disable(p->clk); } +static void sh_tmu_disable(struct sh_tmu_priv *p) +{ + if (WARN_ON(p->enable_count == 0)) + return; + + if (--p->enable_count > 0) + return; + + __sh_tmu_disable(p); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); +} + static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, int periodic) { @@ -205,11 +231,15 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs) struct sh_tmu_priv *p = cs_to_sh_tmu(cs); int ret; + if (WARN_ON(p->cs_enabled)) + return 0; + ret = sh_tmu_enable(p); if (!ret) { __clocksource_updatefreq_hz(cs, p->rate); p->cs_enabled = true; } + return ret; } @@ -217,7 +247,8 @@ static void sh_tmu_clocksource_disable(struct clocksource *cs) { struct sh_tmu_priv *p = cs_to_sh_tmu(cs); - WARN_ON(!p->cs_enabled); + if (WARN_ON(!p->cs_enabled)) + return; sh_tmu_disable(p); p->cs_enabled = false; @@ -227,19 +258,26 @@ static void sh_tmu_clocksource_suspend(struct clocksource *cs) { struct sh_tmu_priv *p = cs_to_sh_tmu(cs); - if (p->cs_enabled) - sh_tmu_disable(p); + if (!p->cs_enabled) + return; - pm_genpd_syscore_poweroff(&p->pdev->dev); + if (--p->enable_count == 0) { + __sh_tmu_disable(p); + pm_genpd_syscore_poweroff(&p->pdev->dev); + } } static void sh_tmu_clocksource_resume(struct clocksource *cs) { struct sh_tmu_priv *p = cs_to_sh_tmu(cs); - pm_genpd_syscore_poweron(&p->pdev->dev); - if (p->cs_enabled) - sh_tmu_enable(p); + if (!p->cs_enabled) + return; + + if (p->enable_count++ == 0) { + pm_genpd_syscore_poweron(&p->pdev->dev); + __sh_tmu_enable(p); + } } static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, @@ -434,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) ret = PTR_ERR(p->clk); goto err1; } + p->cs_enabled = false; + p->enable_count = 0; return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), cfg->clockevent_rating, @@ -447,18 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) static int __devinit sh_tmu_probe(struct platform_device *pdev) { struct sh_tmu_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; if (!is_early_platform_device(pdev)) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - - if (cfg->clocksource_rating || cfg->clockevent_rating) - dev_pm_syscore_device(&pdev->dev, true); + 
pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -471,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating || cfg->clocksource_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_tmu_remove(struct platform_device *pdev) -- cgit v0.10.2 From bad813831e291cf34a007e6f03c37cf95037c868 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 6 Aug 2012 01:48:57 +0200 Subject: sh: CMT: Basic runtime PM support Modify the SH CMT clock source/clock event device driver to support runtime PM at a basic level (i.e. device clocks can be disabled and enabled, but domain power must be on, because the devices have to be marked as "irq safe"). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index a515605..a5f7829 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -33,6 +33,7 @@ #include #include #include +#include struct sh_cmt_priv { void __iomem *mapbase; @@ -52,6 +53,7 @@ struct sh_cmt_priv { struct clock_event_device ced; struct clocksource cs; unsigned long total_cycles; + bool cs_enabled; }; static DEFINE_RAW_SPINLOCK(sh_cmt_lock); @@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) { int k, ret; + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + /* enable clock */ ret = clk_enable(p->clk); if (ret) { @@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p) /* stop clock */ clk_disable(p->clk); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); } /* private flags */ @@ -451,17 +459,26 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs) int ret; struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + WARN_ON(p->cs_enabled); + p->total_cycles = 0; ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); - if (!ret) + if (!ret) { __clocksource_updatefreq_hz(cs, p->rate); + p->cs_enabled = true; + } return ret; } static void sh_cmt_clocksource_disable(struct clocksource *cs) { - sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + WARN_ON(!p->cs_enabled); + + sh_cmt_stop(p, FLAG_CLOCKSOURCE); + p->cs_enabled = false; } static void sh_cmt_clocksource_suspend(struct clocksource *cs) @@ -693,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) dev_err(&p->pdev->dev, "registration failed\n"); goto err1; } + p->cs_enabled = false; ret = setup_irq(irq, &p->irqaction); if (ret) { @@ -711,18 +729,17 @@ err0: static int __devinit sh_cmt_probe(struct platform_device *pdev) { struct sh_cmt_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; if (!is_early_platform_device(pdev)) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - - if (cfg->clocksource_rating || cfg->clockevent_rating) - dev_pm_syscore_device(&pdev->dev, true); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -735,8 +752,19 @@ 
static int __devinit sh_cmt_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating || cfg->clocksource_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_cmt_remove(struct platform_device *pdev) -- cgit v0.10.2 From 3cb6f10a4d925ec21f414bc30a8aded2830963e5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 13 Aug 2012 14:00:16 +0200 Subject: sh: MTU2: Basic runtime PM support Modify the SH MTU2 clock event device driver to support runtime PM at a basic level (i.e. device clocks can be disabled and enabled, but domain power must be on, because the device has to be marked as "irq safe"). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 1a95cad..c5eea85 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -32,6 +32,7 @@ #include #include #include +#include struct sh_mtu2_priv { void __iomem *mapbase; @@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p) { int ret; + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + /* enable clock */ ret = clk_enable(p->clk); if (ret) { @@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p) /* stop clock */ clk_disable(p->clk); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); } static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) @@ -317,18 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) static int __devinit sh_mtu2_probe(struct platform_device *pdev) { struct sh_mtu2_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; if (!is_early_platform_device(pdev)) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - - if (cfg->clockevent_rating) - dev_pm_syscore_device(&pdev->dev, true); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -341,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_mtu2_remove(struct platform_device *pdev) -- cgit v0.10.2 From feb70af0e3ac6817327be70b47731039ea135dbc Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 13 Aug 2012 14:00:25 +0200 Subject: PM: Do not use the syscore flag for runtime PM The syscore device PM flag used to mark the devices (belonging to PM domains) that should never be turned off, except for the system core (syscore) suspend/hibernation and resume stages, need not be accessed by the runtime PM core functions, because all of the devices it is set for need to be marked as "irq safe" anyway and are protected from being turned off by runtime PM by ensuring that their usage counters are always set. 
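The TMU, CMT and MTU2 conversions above all share the same shape, condensed in the sketch below. The sketch_timer type and the hw_enable()/hw_disable() helpers are invented stand-ins for the clk_enable()-plus-channel-start and channel-stop-plus-clk_disable() steps; the runtime PM and syscore calls are the ones the patches actually add. A reference count lets the clocksource and clockevent users share one piece of hardware, and the device is marked "syscore" only while it is really in use:

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct sketch_timer {
	struct platform_device *pdev;
	unsigned int enable_count;
};

static int hw_enable(struct sketch_timer *p);	/* invented helpers */
static void hw_disable(struct sketch_timer *p);

static int sketch_timer_enable(struct sketch_timer *p)
{
	if (p->enable_count++ > 0)
		return 0;			/* hardware already running */

	pm_runtime_get_sync(&p->pdev->dev);	/* clocks on via runtime PM */
	dev_pm_syscore_device(&p->pdev->dev, true); /* hands off in suspend */
	return hw_enable(p);
}

static void sketch_timer_disable(struct sketch_timer *p)
{
	if (WARN_ON(p->enable_count == 0))
		return;				/* unbalanced disable */

	if (--p->enable_count > 0)
		return;				/* another user remains */

	hw_disable(p);
	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}

The WARN_ON() mirrors the checks the patches add to the clocksource enable/disable paths, which must stay balanced for the count to mean anything.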
For this reason, make the syscore flag system-wide PM-specific and simplify the code used for manipulating it, because it need not acquire the device's power.lock any more. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index cf7a851..39c3252 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -83,18 +83,3 @@ int dev_pm_put_subsys_data(struct device *dev) return ret; } EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); - -/** - * dev_pm_syscore_device - Set/unset the given device's power.syscore flag. - * @dev: Device whose flag is to be modified. - * @val: New value of the flag. - */ -void dev_pm_syscore_device(struct device *dev, bool val) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->power.lock, flags); - dev->power.syscore = val; - spin_unlock_irqrestore(&dev->power.lock, flags); -} -EXPORT_SYMBOL_GPL(dev_pm_syscore_device); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index d7e71b5..5f4606f 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -442,7 +442,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe || pdd->dev->power.syscore)) + || pdd->dev->power.irq_safe)) not_suspended++; if (not_suspended > genpd->in_progress) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index bd1de39..7d9c1cb 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -134,7 +134,7 @@ static int rpm_check_suspend_allowed(struct device *dev) if (dev->power.runtime_error) retval = -EINVAL; - else if (dev->power.disable_depth > 0 || dev->power.syscore) + else if (dev->power.disable_depth > 0) retval = -EACCES; else if (atomic_read(&dev->power.usage_count) > 0) retval = -EAGAIN; diff --git a/include/linux/device.h b/include/linux/device.h index 52a5f15..86529e6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -772,6 +772,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) dev->power.ignore_children = enable; } +static inline void dev_pm_syscore_device(struct device *dev, bool val) +{ +#ifdef CONFIG_PM_SLEEP + dev->power.syscore = val; +#endif +} + static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); diff --git a/include/linux/pm.h b/include/linux/pm.h index b79a0dd..44d1f23 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -43,12 +43,8 @@ struct device; #ifdef CONFIG_PM extern const char power_group_name[]; /* = "power" */ - -extern void dev_pm_syscore_device(struct device *dev, bool val); #else #define power_group_name NULL - -static inline void dev_pm_syscore_device(struct device *dev, bool val) {} #endif typedef struct pm_message { @@ -515,13 +511,13 @@ struct dev_pm_info { bool is_suspended:1; /* Ditto */ bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ - bool syscore:1; spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path:1; + bool syscore:1; #else unsigned int should_wakeup:1; #endif -- cgit v0.10.2 From b5abb085f5540a612b0b7a6326ae2a07de2330dd Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 7 Aug 2012 01:06:11 +0200 Subject: PM / Domains: Make it possible to use domain names when adding devices Add a new helper function __pm_genpd_name_add_device() allowing a device to be added to a (registered) generic PM domain identified by name. Add a wrapper around it, pm_genpd_name_add_device(), passing NULL as the last argument and reorganize pm_domains.h for the new functions to be defined consistently with the existing ones. These functions are useful for adding devices to PM domains whose representations are stored in tables, when the caller doesn't know the index of the domain to add the device to, but it knows the domain's name. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 5f4606f..ac06d02 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1465,6 +1465,33 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, return __pm_genpd_add_device(genpd, dev, td); } + +/** + * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. + * @domain_name: Name of the PM domain to add the device to. + * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. + */ +int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, + struct gpd_timing_data *td) +{ + struct generic_pm_domain *genpd = NULL, *gpd; + + if (IS_ERR_OR_NULL(domain_name) || IS_ERR_OR_NULL(dev)) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (!strcmp(gpd->name, domain_name)) { + genpd = gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + + return __pm_genpd_add_device(genpd, dev, td); +} + /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. * @genpd: PM domain to remove the device from. 
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 08adf8e..1991a92 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -138,17 +138,9 @@ extern int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, struct gpd_timing_data *td); -static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev) -{ - return __pm_genpd_add_device(genpd, dev, NULL); -} - -static inline int pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev) -{ - return __pm_genpd_of_add_device(genpd_node, dev, NULL); -} +extern int __pm_genpd_name_add_device(const char *domain_name, + struct device *dev, + struct gpd_timing_data *td); extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); @@ -187,8 +179,15 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev) +static inline int __pm_genpd_of_add_device(struct device_node *genpd_node, + struct device *dev, + struct gpd_timing_data *td) +{ + return -ENOSYS; +} +static inline int __pm_genpd_name_add_device(const char *domain_name, + struct device *dev, + struct gpd_timing_data *td) { return -ENOSYS; } @@ -242,6 +241,24 @@ static inline bool default_stop_ok(struct device *dev) #define pm_domain_always_on_gov NULL #endif +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return __pm_genpd_add_device(genpd, dev, NULL); +} + +static inline int pm_genpd_of_add_device(struct device_node *genpd_node, + struct device *dev) +{ + return __pm_genpd_of_add_device(genpd_node, dev, NULL); +} + +static inline int pm_genpd_name_add_device(const char *domain_name, + struct device *dev) +{ + return __pm_genpd_name_add_device(domain_name, dev, NULL); +} + static inline int pm_genpd_remove_callbacks(struct device *dev) { return __pm_genpd_remove_callbacks(dev, true); -- cgit v0.10.2 From fb7268be9f72bed6ae48554f00f2dcb2ef333bfc Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:08:37 +0200 Subject: PM / Domains: Make it possible to use names when adding subdomains Add a new helper function, pm_genpd_add_subdomain_names(), allowing the caller to add a subdomain to a generic PM domain using names for domain identification (both domains have to be initialized before). This function is useful for adding subdomains to PM domains whose representations are stored in tables, when the caller doesn't know the indices of the domain to add the subdomain to and of the subdomain itself, but it knows the domains' names. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ac06d02..cddf818 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1584,7 +1584,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct gpd_link *link; int ret = 0; - if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) + || genpd == subdomain) return -EINVAL; start: @@ -1631,6 +1632,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, } /** + * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. + * @master_name: Name of the master PM domain to add the subdomain to. + * @subdomain_name: Name of the subdomain to be added. 
+ */ +int pm_genpd_add_subdomain_names(const char *master_name, + const char *subdomain_name) +{ + struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; + + if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (!master && !strcmp(gpd->name, master_name)) + master = gpd; + + if (!subdomain && !strcmp(gpd->name, subdomain_name)) + subdomain = gpd; + + if (master && subdomain) + break; + } + mutex_unlock(&gpd_list_lock); + + return pm_genpd_add_subdomain(master, subdomain); +} + +/** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. * @genpd: Master PM domain to remove the subdomain from. * @subdomain: Subdomain to be removed. diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 1991a92..8dbf48b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -147,6 +147,8 @@ extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, extern void pm_genpd_dev_need_restore(struct device *dev, bool val); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); +extern int pm_genpd_add_subdomain_names(const char *master_name, + const char *subdomain_name); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); extern int pm_genpd_add_callbacks(struct device *dev, @@ -202,6 +204,11 @@ static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } +static inline int pm_genpd_add_subdomain_names(const char *master_name, + const char *subdomain_name) +{ + return -ENOSYS; +} static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target) { -- cgit v0.10.2 From 8bc0251de2932e603f8ed73b76ba2d64b2dc1d18 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:11:14 +0200 Subject: PM / Domains: Add power-on function using names to identify domains It sometimes is necessary to turn on a given PM domain when only the name of it is known and the domain pointer is not readily available. For this reason, add a new helper function, pm_genpd_name_poweron(), allowing the caller to turn on a PM domain using its name for identification. To avoid code duplication, move the domain lookup code to a separate function. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index cddf818..4d63340 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -53,6 +53,24 @@ static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); +static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) +{ + struct generic_pm_domain *genpd = NULL, *gpd; + + if (IS_ERR_OR_NULL(domain_name)) + return NULL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (!strcmp(gpd->name, domain_name)) { + genpd = gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + return genpd; +} + #ifdef CONFIG_PM struct generic_pm_domain *dev_to_genpd(struct device *dev) @@ -262,6 +280,18 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) return ret; } +/** + * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. + * @domain_name: Name of the PM domain to power up. 
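A short sketch of the intended use; the domain names are assumptions, and both domains must already have been registered with pm_genpd_init() for the lookup to succeed:

#include <linux/pm_domain.h>

static void __init my_soc_link_domains(void)
{
	/* Make "A3SP" a subdomain of "A4S"; -EINVAL if a name is unknown. */
	if (pm_genpd_add_subdomain_names("A4S", "A3SP"))
		pr_warn("failed to link A3SP under A4S\n");
}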
+ */ +int pm_genpd_name_poweron(const char *domain_name) +{ + struct generic_pm_domain *genpd; + + genpd = pm_genpd_lookup_name(domain_name); + return genpd ? pm_genpd_poweron(genpd) : -EINVAL; +} + #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME @@ -1475,21 +1505,7 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, struct gpd_timing_data *td) { - struct generic_pm_domain *genpd = NULL, *gpd; - - if (IS_ERR_OR_NULL(domain_name) || IS_ERR_OR_NULL(dev)) - return -EINVAL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!strcmp(gpd->name, domain_name)) { - genpd = gpd; - break; - } - } - mutex_unlock(&gpd_list_lock); - - return __pm_genpd_add_device(genpd, dev, td); + return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); } /** diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 8dbf48b..d9d6083 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -161,6 +161,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); extern int pm_genpd_poweron(struct generic_pm_domain *genpd); +extern int pm_genpd_name_poweron(const char *domain_name); extern bool default_stop_ok(struct device *dev); @@ -240,6 +241,10 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) { return -ENOSYS; } +static inline int pm_genpd_name_poweron(const char *domain_name) +{ + return -ENOSYS; +} static inline bool default_stop_ok(struct device *dev) { return false; -- cgit v0.10.2 From 40114447a7f89860b46a64e5504f313656cb5f27 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:32:43 +0200 Subject: PM / Domains: Document cpuidle-related functions and change their names The names of the cpuidle-related functions in drivers/base/power/domain.c are inconsistent with the names of the other exported functions in that file (the "pm_" prefix is missing from them) and they are missing kerneldoc comments. Fix that by adding the missing "pm_" prefix to the names of those functions and add kerneldoc comments documenting them. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 4d63340..e44e1a8 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1829,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) } EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); -int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) +/** + * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. + * @genpd: PM domain to be connected with cpuidle. + * @state: cpuidle state this domain can disable/enable. + * + * Make a PM domain behave as though it contained a CPU core, that is, instead + * of calling its power down routine it will enable the given cpuidle state so + * that the cpuidle subsystem can power it down (if possible and desirable). + */ +int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) { struct cpuidle_driver *cpuidle_drv; struct gpd_cpu_data *cpu_data; @@ -1878,7 +1887,14 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) goto out; } -int genpd_detach_cpuidle(struct generic_pm_domain *genpd) +/** + * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. + * @genpd: PM domain to remove the cpuidle connection from. 
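One plausible caller, modeled on the sh7372 PM notifier that is converted later in this series; my_pm_notifier() and the "A4R" name are assumptions for illustration:

#include <linux/pm_domain.h>
#include <linux/suspend.h>

static int my_pm_notifier(struct notifier_block *nb,
			  unsigned long action, void *data)
{
	if (action == PM_SUSPEND_PREPARE) {
		/* -EINVAL here means no registered domain has this name. */
		if (pm_genpd_name_poweron("A4R"))
			pr_err("could not power on the A4R domain\n");
	}
	return NOTIFY_DONE;
}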
+ * + * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the + * given PM domain. + */ +int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) { struct gpd_cpu_data *cpu_data; struct cpuidle_state *idle_state; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index d9d6083..f2a633a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -155,8 +155,8 @@ extern int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, struct gpd_timing_data *td); extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); -extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); -extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd); +extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); +extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); @@ -225,11 +225,11 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { return -ENOSYS; } -static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) +static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) { return -ENOSYS; } -static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd) +static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) { return -ENOSYS; } -- cgit v0.10.2 From 74a2799ab51acec9410f467fef8678ebb1125d7d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:32:59 +0200 Subject: PM / Domains: Operations related to cpuidle using domain names Make it possible to use domain names in operations connecting cpuidle to and disconnecting it from a PM domain. This is useful on platforms where PM domain objects are organized in such a way that the names of the domains are easier to use than the addresses of those objects. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e44e1a8..12ad070 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1888,6 +1888,16 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) } /** + * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. + * @name: Name of the domain to connect to cpuidle. + * @state: cpuidle state this domain can manipulate. + */ +int pm_genpd_name_attach_cpuidle(const char *name, int state) +{ + return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); +} + +/** * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. * @genpd: PM domain to remove the cpuidle connection from. * @@ -1925,6 +1935,15 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) return ret; } +/** + * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. + * @name: Name of the domain to disconnect cpuidle from. + */ +int pm_genpd_name_detach_cpuidle(const char *name) +{ + return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); +} + /* Default device callbacks for generic PM domains. 
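Together with the renames above, a SoC could then connect a domain to cpuidle without exporting the domain object at all; the "A4S" name and the state index below are assumptions:

#include <linux/pm_domain.h>

static void __init my_soc_cpuidle_setup(void)
{
	/* Assumed: domain "A4S" gates the CPU and maps to cpuidle state 1. */
	if (pm_genpd_name_attach_cpuidle("A4S", 1))
		pr_warn("could not attach A4S to cpuidle state 1\n");
}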
*/ /** diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f2a633a..7c1d252 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -156,7 +156,9 @@ extern int pm_genpd_add_callbacks(struct device *dev, struct gpd_timing_data *td); extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); +extern int pm_genpd_name_attach_cpuidle(const char *name, int state); extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); +extern int pm_genpd_name_detach_cpuidle(const char *name); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); @@ -229,10 +231,18 @@ static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int s { return -ENOSYS; } +static inline int pm_genpd_name_attach_cpuidle(const char *name, int state) +{ + return -ENOSYS; +} static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) { return -ENOSYS; } +static inline int pm_genpd_name_detach_cpuidle(const char *name) +{ + return -ENOSYS; +} static inline void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { -- cgit v0.10.2 From 2d175069f2b5477692d4bd7586bc530ffe8107bb Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Mon, 13 Aug 2012 00:14:53 +0200 Subject: PM / cpufreq: Initialise the cpu field during conservative governor start This change initialises the cpu id field of cs_cpu_dbs_info structure in conservative governor and keep this consistent with other governors. Similar initialisation is present in ondemand governor. Signed-off-by: Amit Daniel Kachhap Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 235a340..a1563d7 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } + this_dbs_info->cpu = cpu; this_dbs_info->down_skip = 0; this_dbs_info->requested_freq = policy->cur; -- cgit v0.10.2 From 8bdd94689ef528738a0f14c44e069596a465d622 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:07:01 +0200 Subject: ARM: shmobile: Use names of power domains for adding devices to them Make the power management code under arch/arm/mach-shmobile/ use names of power domains instead of pointers to domain objects for adding devices to the domains. This will allow us to put the domain objects into tables and register them all in one shot going forward. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index f172ca8..b85957a 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1461,14 +1461,14 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); - rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device); - rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device); - rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device); - - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device); + rmobile_add_device_to_domain("A4LC", &lcdc1_device); + rmobile_add_device_to_domain("A4LC", &lcdc_device); + rmobile_add_device_to_domain("A4MP", &fsi_device); + + rmobile_add_device_to_domain("A3SP", &sh_mmcif_device); + rmobile_add_device_to_domain("A3SP", &sdhi0_device); + rmobile_add_device_to_domain("A3SP", &sdhi1_device); + rmobile_add_device_to_domain("A4R", &ceu_device); hdmi_init_pm_clock(); fsi_init_pm_clock(); diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index cf10f92..28e6e1d 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -1181,10 +1181,10 @@ static void __init eva_init(void) eva_clock_init(); - rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device); - rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device); + rmobile_add_device_to_domain("A4LC", &lcdc0_device); + rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device); if (usb) - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb); + rmobile_add_device_to_domain("A3SP", usb); } static void __init eva_earlytimer_init(void) diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 7ea2b31..954b02e 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1623,20 +1623,20 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); - rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device); - rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device); - rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device); - rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device); + rmobile_add_device_to_domain("A4LC", &lcdc_device); + rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device); + rmobile_add_device_to_domain("A4LC", &meram_device); + rmobile_add_device_to_domain("A4MP", &fsi_device); + rmobile_add_device_to_domain("A3SP", &usbhs0_device); + rmobile_add_device_to_domain("A3SP", &usbhs1_device); + rmobile_add_device_to_domain("A3SP", &nand_flash_device); + rmobile_add_device_to_domain("A3SP", &sh_mmcif_device); + rmobile_add_device_to_domain("A3SP", &sdhi0_device); #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device); + 
rmobile_add_device_to_domain("A3SP", &sdhi1_device); #endif - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device); + rmobile_add_device_to_domain("A3SP", &sdhi2_device); + rmobile_add_device_to_domain("A4R", &ceu_device); hdmi_init_pm_clock(); sh7372_pm_init(); diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 5a40284..0c25fc9 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -31,13 +31,13 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd); -extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd, +extern void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev); extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd, struct rmobile_pm_domain *rmobile_sd); #else #define rmobile_init_pm_domain(pd) do { } while (0) -#define rmobile_add_device_to_domain(pd, pdev) do { } while (0) +#define rmobile_add_device_to_domain(name, pdev) do { } while (0) #define rmobile_pm_add_subdomain(pd, sd) do { } while (0) #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index a856254..c94056d 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -149,12 +149,12 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) __rmobile_pd_power_up(rmobile_pd, false); } -void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd, +void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev) { struct device *dev = &pdev->dev; - pm_genpd_add_device(&rmobile_pd->genpd, dev); + pm_genpd_name_add_device(domain_name, dev); if (pm_clk_no_clocks(dev)) pm_clk_add(dev, NULL); } diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index 78948a9..db7af56 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -688,16 +688,16 @@ void __init r8a7740_add_standard_devices(void) /* add devices to PM domain */ - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif0_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif1_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif2_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif3_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif4_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif5_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif6_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif7_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scifb_device); - rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &i2c1_device); + rmobile_add_device_to_domain("A3SP", &scif0_device); + rmobile_add_device_to_domain("A3SP", &scif1_device); + rmobile_add_device_to_domain("A3SP", &scif2_device); + rmobile_add_device_to_domain("A3SP", &scif3_device); + rmobile_add_device_to_domain("A3SP", &scif4_device); + rmobile_add_device_to_domain("A3SP", &scif5_device); + rmobile_add_device_to_domain("A3SP", &scif6_device); + rmobile_add_device_to_domain("A3SP", &scif7_device); + rmobile_add_device_to_domain("A3SP", &scifb_device); + rmobile_add_device_to_domain("A3SP", &i2c1_device); } static void __init 
r8a7740_earlytimer_init(void) diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 838a87b..95f06c8 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -1023,30 +1023,30 @@ void __init sh7372_add_standard_devices(void) platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); - rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device); - rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device); - rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device); - rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device); - rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device); + rmobile_add_device_to_domain("A3RV", &vpu_device); + rmobile_add_device_to_domain("A4MP", &spu0_device); + rmobile_add_device_to_domain("A4MP", &spu1_device); + rmobile_add_device_to_domain("A3SP", &scif0_device); + rmobile_add_device_to_domain("A3SP", &scif1_device); + rmobile_add_device_to_domain("A3SP", &scif2_device); + rmobile_add_device_to_domain("A3SP", &scif3_device); + rmobile_add_device_to_domain("A3SP", &scif4_device); + rmobile_add_device_to_domain("A3SP", &scif5_device); + rmobile_add_device_to_domain("A3SP", &scif6_device); + rmobile_add_device_to_domain("A3SP", &iic1_device); + rmobile_add_device_to_domain("A3SP", &dma0_device); + rmobile_add_device_to_domain("A3SP", &dma1_device); + rmobile_add_device_to_domain("A3SP", &dma2_device); + rmobile_add_device_to_domain("A3SP", &usb_dma0_device); + rmobile_add_device_to_domain("A3SP", &usb_dma1_device); + rmobile_add_device_to_domain("A4R", &iic0_device); + rmobile_add_device_to_domain("A4R", &veu0_device); + rmobile_add_device_to_domain("A4R", &veu1_device); + rmobile_add_device_to_domain("A4R", &veu2_device); + rmobile_add_device_to_domain("A4R", &veu3_device); + rmobile_add_device_to_domain("A4R", &jpu_device); + rmobile_add_device_to_domain("A4R", &tmu00_device); + rmobile_add_device_to_domain("A4R", &tmu01_device); } static void __init sh7372_earlytimer_init(void) -- cgit v0.10.2 From 8e0e7aaef3c98c52e85f5640b73ffa82058abcfd Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 7 Aug 2012 01:07:46 +0200 Subject: ARM: shmobile: Drop r8a7779_add_device_to_domain() If the r8a7779's PM domains are given names, this SoC and its boards will be able to use rmobile_add_device_to_domain() for adding devices to those domains and r8a7779_add_device_to_domain(), which is not used anywhere at the moment anyway, may be dopped. Accordingly, give names to the r8a7779's PM domains and drop r8a7779_add_device_to_domain(). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h index b07ad31..e0dcef0 100644 --- a/arch/arm/mach-shmobile/include/mach/r8a7779.h +++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h @@ -353,11 +353,8 @@ extern struct r8a7779_pm_domain r8a7779_vdp1; extern struct r8a7779_pm_domain r8a7779_impx3; extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd); -extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd, - struct platform_device *pdev); #else #define r8a7779_init_pm_domain(pd) do { } while (0) -#define r8a7779_add_device_to_domain(pd, pdev) do { } while (0) #endif /* CONFIG_PM */ #endif /* __ASM_R8A7779_H__ */ diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index a18a4ae..b2a98cc 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c @@ -199,17 +199,8 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) pd_power_up(&r8a7779_pd->genpd); } -void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - - pm_genpd_add_device(&r8a7779_pd->genpd, dev); - if (pm_clk_no_clocks(dev)) - pm_clk_add(dev, NULL); -} - struct r8a7779_pm_domain r8a7779_sh4a = { + .genpd.name = "SH4A", .ch = { .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */ .isr_bit = 16, /* SH4A */ @@ -217,6 +208,7 @@ struct r8a7779_pm_domain r8a7779_sh4a = { }; struct r8a7779_pm_domain r8a7779_sgx = { + .genpd.name = "SGX", .ch = { .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */ .isr_bit = 20, /* SGX */ @@ -224,6 +216,7 @@ struct r8a7779_pm_domain r8a7779_sgx = { }; struct r8a7779_pm_domain r8a7779_vdp1 = { + .genpd.name = "VDP1", .ch = { .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */ .isr_bit = 21, /* VDP */ @@ -231,6 +224,7 @@ struct r8a7779_pm_domain r8a7779_vdp1 = { }; struct r8a7779_pm_domain r8a7779_impx3 = { + .genpd.name = "IMPX3", .ch = { .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */ .isr_bit = 24, /* IMP */ -- cgit v0.10.2 From 7fcb304792649e763e460c47abc4c53085bffaee Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:09:31 +0200 Subject: ARM: shmobile: Use domain names when adding subdomains to power domains Make the power management code under arch/arm/mach-shmobile/ use pm_genpd_add_subdomain_names() for adding subdomains to power domains, which makes it possible to drop rmobile_pm_add_subdomain() and will allow us to carry out those operations for domain objects stored in tables in a straightforward way. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 0c25fc9..7e24877 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -33,12 +33,9 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd); extern void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev); -extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd, - struct rmobile_pm_domain *rmobile_sd); #else #define rmobile_init_pm_domain(pd) do { } while (0) #define rmobile_add_device_to_domain(name, pdev) do { } while (0) -#define rmobile_pm_add_subdomain(pd, sd) do { } while (0) #endif /* CONFIG_PM */ #endif /* PM_RMOBILE_H */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index c94056d..6c47843 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -158,10 +158,4 @@ void rmobile_add_device_to_domain(const char *domain_name, if (pm_clk_no_clocks(dev)) pm_clk_add(dev, NULL); } - -void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd, - struct rmobile_pm_domain *rmobile_sd) -{ - pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd); -} #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index db7af56..a18892d 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -678,7 +678,7 @@ void __init r8a7740_add_standard_devices(void) rmobile_init_pm_domain(&r8a7740_pd_a3sp); rmobile_init_pm_domain(&r8a7740_pd_a4lc); - rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp); + pm_genpd_add_subdomain_names("A4S", "A3SP"); /* add devices */ platform_add_devices(r8a7740_early_devices, diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 95f06c8..a2e1eea 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -1011,11 +1011,11 @@ void __init sh7372_add_standard_devices(void) rmobile_init_pm_domain(&sh7372_pd_a3sp); rmobile_init_pm_domain(&sh7372_pd_a3sg); - rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv); - rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc); + pm_genpd_add_subdomain_names("A4LC", "A3RV"); + pm_genpd_add_subdomain_names("A4R", "A4LC"); - rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg); - rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp); + pm_genpd_add_subdomain_names("A4S", "A3SG"); + pm_genpd_add_subdomain_names("A4S", "A3SP"); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); -- cgit v0.10.2 From 0d09f450b6ee2065436df9fd4e0f83f9b6d71eaa Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:10:22 +0200 Subject: ARM: shmobile: Add routine for automatic PM domains initialization Add a new routine, rmobile_init_domains(), allowing the caller to initialize all generic PM objects stored in a table in one operation. Signed-off-by: Rafael J. 
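In use, the routine lets a SoC keep its domain descriptions in one static table, as the sh7372 and r8a7740 conversions below demonstrate; the two-entry table here is an abbreviated, assumed example:

static struct rmobile_pm_domain my_soc_pm_domains[] = {
	{
		.genpd.name = "A4LC",	/* assumed domain name */
		.bit_shift = 1,		/* assumed power control bit */
	},
	{
		.genpd.name = "A3SP",
		.bit_shift = 11,
	},
};

static void __init my_soc_init_pm_domains(void)
{
	rmobile_init_domains(my_soc_pm_domains,
			     ARRAY_SIZE(my_soc_pm_domains));
}

Keeping the objects in a table removes the need to export one symbol per domain, which is exactly what the following patches exploit.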
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 7e24877..4c264c7 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -31,10 +31,12 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd); +extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num); extern void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev); #else #define rmobile_init_pm_domain(pd) do { } while (0) +#define rmobile_init_domains(domains, num) do { } while (0) #define rmobile_add_device_to_domain(name, pdev) do { } while (0) #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 6c47843..4b6f2ea 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -149,6 +149,14 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) __rmobile_pd_power_up(rmobile_pd, false); } +void rmobile_init_domains(struct rmobile_pm_domain domains[], int num) +{ + int j; + + for (j = 0; j < num; j++) + rmobile_init_pm_domain(&domains[j]); +} + void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev) { -- cgit v0.10.2 From 70fe7b24672a988f8aab77a04329d6331a1f10a8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:54:15 +0200 Subject: ARM: shmobile: Do not access sh7372 A4S domain internals directly The sh7372_enter_suspend() routine checks the status field of the generic PM domain object corresponding to the A4S domain in order to decide whether it can turn that domain off when entering system sleep. However, it shouldn't rely on the specific values of the generic data structures this way, so make it use its own mechanism to recognize when it is safe to turn that domain off. For this purpose, introduce a boolean variable a4s_suspend_ready that will be set by the A4S' suspend routine and unset by its resume routine, executed by rmobile_pd_power_down() and __rmobile_pd_power_up(), respectively. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 7920370..0a48046 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -110,21 +110,33 @@ struct rmobile_pm_domain sh7372_pd_a3ri = { .bit_shift = 8, }; +static bool a4s_suspend_ready; + static int sh7372_pd_a4s_suspend(void) { /* * The A4S domain contains the CPU core and therefore it should - * only be turned off if the CPU is in use. + * only be turned off if the CPU is not in use. This may happen + * during system suspend, when SYSC is going to be used for generating + * resume signals and a4s_suspend_ready is set to let + * sh7372_enter_suspend() know that it can turn A4S off.
*/ + a4s_suspend_ready = true; return -EBUSY; } +static void sh7372_pd_a4s_resume(void) +{ + a4s_suspend_ready = false; +} + struct rmobile_pm_domain sh7372_pd_a4s = { .genpd.name = "A4S", .bit_shift = 10, .gov = &pm_domain_always_on_gov, .no_debug = true, .suspend = sh7372_pd_a4s_suspend, + .resume = sh7372_pd_a4s_resume, }; static int sh7372_a3sp_pd_suspend(void) @@ -390,8 +402,7 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state) /* check active clocks to determine potential wakeup sources */ if (sh7372_sysc_valid(&msk, &msk2)) { - if (!console_suspend_enabled && - sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) { + if (!console_suspend_enabled && a4s_suspend_ready) { /* convert INTC mask/sense to SYSC mask/sense */ sh7372_setup_sysc(msk, msk2); -- cgit v0.10.2 From e7e59a4b55706b0bbaba1cd8af46495553b6e876 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:12:56 +0200 Subject: ARM: shmobile: Move sh7372's PM domain objects to a table Instead of giving a name to every sh7372's PM domain object, put them all into a table and use rmobile_init_domains(), introduced by a previous patch, for initializing them all altogether. Also, use pm_genpd_add_subdomain_names() for adding subdomains to the PM domains and pm_genpd_name_poweron() for turning on the A4S domain when preparing for system suspend. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index b59048e..40beb79 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -478,21 +478,15 @@ extern struct clk sh7372_fsibck_clk; extern struct clk sh7372_fsidiva_clk; extern struct clk sh7372_fsidivb_clk; -#ifdef CONFIG_PM -extern struct rmobile_pm_domain sh7372_pd_a4lc; -extern struct rmobile_pm_domain sh7372_pd_a4mp; -extern struct rmobile_pm_domain sh7372_pd_d4; -extern struct rmobile_pm_domain sh7372_pd_a4r; -extern struct rmobile_pm_domain sh7372_pd_a3rv; -extern struct rmobile_pm_domain sh7372_pd_a3ri; -extern struct rmobile_pm_domain sh7372_pd_a4s; -extern struct rmobile_pm_domain sh7372_pd_a3sp; -extern struct rmobile_pm_domain sh7372_pd_a3sg; -#endif /* CONFIG_PM */ - extern void sh7372_intcs_suspend(void); extern void sh7372_intcs_resume(void); extern void sh7372_intca_suspend(void); extern void sh7372_intca_resume(void); +#ifdef CONFIG_PM +extern void __init sh7372_init_pm_domains(void); +#else +static inline void sh7372_init_pm_domains(void) {} +#endif + #endif /* __ASM_SH7372_H__ */ diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 0a48046..0d722b5 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -71,21 +71,6 @@ #ifdef CONFIG_PM -struct rmobile_pm_domain sh7372_pd_a4lc = { - .genpd.name = "A4LC", - .bit_shift = 1, -}; - -struct rmobile_pm_domain sh7372_pd_a4mp = { - .genpd.name = "A4MP", - .bit_shift = 2, -}; - -struct rmobile_pm_domain sh7372_pd_d4 = { - .genpd.name = "D4", - .bit_shift = 3, -}; - static int sh7372_a4r_pd_suspend(void) { sh7372_intcs_suspend(); @@ -93,26 +78,9 @@ static int sh7372_a4r_pd_suspend(void) return 0; } -struct rmobile_pm_domain sh7372_pd_a4r = { - .genpd.name = "A4R", - .bit_shift = 5, - .suspend = sh7372_a4r_pd_suspend, - .resume = sh7372_intcs_resume, -}; - -struct rmobile_pm_domain sh7372_pd_a3rv = { - .genpd.name = "A3RV", - .bit_shift = 6, -}; - -struct rmobile_pm_domain sh7372_pd_a3ri = { - .genpd.name = "A3RI",
- .bit_shift = 8, -}; - static bool a4s_suspend_ready; -static int sh7372_pd_a4s_suspend(void) +static int sh7372_a4s_pd_suspend(void) { /* * The A4S domain contains the CPU core and therefore it should @@ -125,20 +93,11 @@ static int sh7372_pd_a4s_suspend(void) return -EBUSY; } -static void sh7372_pd_a4s_resume(void) +static void sh7372_a4s_pd_resume(void) { a4s_suspend_ready = false; } -struct rmobile_pm_domain sh7372_pd_a4s = { - .genpd.name = "A4S", - .bit_shift = 10, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = sh7372_pd_a4s_suspend, - .resume = sh7372_pd_a4s_resume, -}; - static int sh7372_a3sp_pd_suspend(void) { /* @@ -148,18 +107,62 @@ static int sh7372_a3sp_pd_suspend(void) return console_suspend_enabled ? 0 : -EBUSY; } -struct rmobile_pm_domain sh7372_pd_a3sp = { - .genpd.name = "A3SP", - .bit_shift = 11, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = sh7372_a3sp_pd_suspend, +static struct rmobile_pm_domain sh7372_pm_domains[] = { + { + .genpd.name = "A4LC", + .bit_shift = 1, + }, + { + .genpd.name = "A4MP", + .bit_shift = 2, + }, + { + .genpd.name = "D4", + .bit_shift = 3, + }, + { + .genpd.name = "A4R", + .bit_shift = 5, + .suspend = sh7372_a4r_pd_suspend, + .resume = sh7372_intcs_resume, + }, + { + .genpd.name = "A3RV", + .bit_shift = 6, + }, + { + .genpd.name = "A3RI", + .bit_shift = 8, + }, + { + .genpd.name = "A4S", + .bit_shift = 10, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = sh7372_a4s_pd_suspend, + .resume = sh7372_a4s_pd_resume, + }, + { + .genpd.name = "A3SP", + .bit_shift = 11, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = sh7372_a3sp_pd_suspend, + }, + { + .genpd.name = "A3SG", + .bit_shift = 13, + }, }; -struct rmobile_pm_domain sh7372_pd_a3sg = { - .genpd.name = "A3SG", - .bit_shift = 13, -}; +void __init sh7372_init_pm_domains(void) +{ + rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains)); + pm_genpd_add_subdomain_names("A4LC", "A3RV"); + pm_genpd_add_subdomain_names("A4R", "A4LC"); + pm_genpd_add_subdomain_names("A4S", "A3SG"); + pm_genpd_add_subdomain_names("A4S", "A3SP"); +} #endif /* CONFIG_PM */ @@ -436,7 +439,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier, * executed during system suspend and resume, respectively, so * that those functions don't crash while accessing the INTCS. 
*/ - pm_genpd_poweron(&sh7372_pd_a4r.genpd); + pm_genpd_name_poweron("A4R"); break; case PM_POST_SUSPEND: pm_genpd_poweroff_unused(); diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index a2e1eea..c1f698f 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -1001,21 +1001,7 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { - rmobile_init_pm_domain(&sh7372_pd_a4lc); - rmobile_init_pm_domain(&sh7372_pd_a4mp); - rmobile_init_pm_domain(&sh7372_pd_d4); - rmobile_init_pm_domain(&sh7372_pd_a4r); - rmobile_init_pm_domain(&sh7372_pd_a3rv); - rmobile_init_pm_domain(&sh7372_pd_a3ri); - rmobile_init_pm_domain(&sh7372_pd_a4s); - rmobile_init_pm_domain(&sh7372_pd_a3sp); - rmobile_init_pm_domain(&sh7372_pd_a3sg); - - pm_genpd_add_subdomain_names("A4LC", "A3RV"); - pm_genpd_add_subdomain_names("A4R", "A4LC"); - - pm_genpd_add_subdomain_names("A4S", "A3SG"); - pm_genpd_add_subdomain_names("A4S", "A3SP"); + sh7372_init_pm_domains(); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); -- cgit v0.10.2 From 7b5674075b7c7ddb0c4da18b1f104e1db774ce82 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:13:37 +0200 Subject: ARM: shmobile: Move r8a7740's PM domain objects to a table Instead of giving a name to every r8a7740's PM domain object, put them all into a table and use rmobile_init_domains(), introduced by a previous patch, for initializing them all altogether. Also, use pm_genpd_add_subdomain_names() for adding A3SP as a subdomain of A4S. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/r8a7740.h b/arch/arm/mach-shmobile/include/mach/r8a7740.h index 7143147..59d252f 100644 --- a/arch/arm/mach-shmobile/include/mach/r8a7740.h +++ b/arch/arm/mach-shmobile/include/mach/r8a7740.h @@ -607,9 +607,9 @@ enum { }; #ifdef CONFIG_PM -extern struct rmobile_pm_domain r8a7740_pd_a4s; -extern struct rmobile_pm_domain r8a7740_pd_a3sp; -extern struct rmobile_pm_domain r8a7740_pd_a4lc; +extern void __init r8a7740_init_pm_domains(void); +#else +static inline void r8a7740_init_pm_domains(void) {} #endif /* CONFIG_PM */ #endif /* __ASM_R8A7740_H__ */ diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c index 893504d..21e5316d 100644 --- a/arch/arm/mach-shmobile/pm-r8a7740.c +++ b/arch/arm/mach-shmobile/pm-r8a7740.c @@ -21,14 +21,6 @@ static int r8a7740_pd_a4s_suspend(void) return -EBUSY; } -struct rmobile_pm_domain r8a7740_pd_a4s = { - .genpd.name = "A4S", - .bit_shift = 10, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = r8a7740_pd_a4s_suspend, -}; - static int r8a7740_pd_a3sp_suspend(void) { /* @@ -38,17 +30,31 @@ static int r8a7740_pd_a3sp_suspend(void) return console_suspend_enabled ? 
0 : -EBUSY; } -struct rmobile_pm_domain r8a7740_pd_a3sp = { - .genpd.name = "A3SP", - .bit_shift = 11, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = r8a7740_pd_a3sp_suspend, +static struct rmobile_pm_domain r8a7740_pm_domains[] = { + { + .genpd.name = "A4S", + .bit_shift = 10, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = r8a7740_pd_a4s_suspend, + }, + { + .genpd.name = "A3SP", + .bit_shift = 11, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = r8a7740_pd_a3sp_suspend, + }, + { + .genpd.name = "A4LC", + .bit_shift = 1, + }, }; -struct rmobile_pm_domain r8a7740_pd_a4lc = { - .genpd.name = "A4LC", - .bit_shift = 1, -}; +void __init r8a7740_init_pm_domains(void) +{ + rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains)); + pm_genpd_add_subdomain_names("A4S", "A3SP"); +} #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index a18892d..11bb1d9 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -673,12 +673,7 @@ void __init r8a7740_add_standard_devices(void) r8a7740_i2c_workaround(&i2c0_device); r8a7740_i2c_workaround(&i2c1_device); - /* PM domain */ - rmobile_init_pm_domain(&r8a7740_pd_a4s); - rmobile_init_pm_domain(&r8a7740_pd_a3sp); - rmobile_init_pm_domain(&r8a7740_pd_a4lc); - - pm_genpd_add_subdomain_names("A4S", "A3SP"); + r8a7740_init_pm_domains(); /* add devices */ platform_add_devices(r8a7740_early_devices, -- cgit v0.10.2 From 45e5ca575647ebbca932f34a1ed915ae7a581cbb Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:14:14 +0200 Subject: ARM: shmobile: Move r8a7779's PM domain objects to a table Instead of giving a name to every r8a7779's PM domain object, put them all into a table and initialize them all together in a loop. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h index e0dcef0..7ad4797 100644 --- a/arch/arm/mach-shmobile/include/mach/r8a7779.h +++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h @@ -347,14 +347,9 @@ extern int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch); extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch); #ifdef CONFIG_PM -extern struct r8a7779_pm_domain r8a7779_sh4a; -extern struct r8a7779_pm_domain r8a7779_sgx; -extern struct r8a7779_pm_domain r8a7779_vdp1; -extern struct r8a7779_pm_domain r8a7779_impx3; - -extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd); +extern void __init r8a7779_init_pm_domains(void); #else -#define r8a7779_init_pm_domain(pd) do { } while (0) +static inline void r8a7779_init_pm_domains(void) {} #endif /* CONFIG_PM */ #endif /* __ASM_R8A7779_H__ */ diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index b2a98cc..d50a8e9 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c @@ -183,7 +183,7 @@ static bool pd_active_wakeup(struct device *dev) return true; } -void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) +static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) { struct generic_pm_domain *genpd = &r8a7779_pd->genpd; @@ -199,37 +199,44 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) pd_power_up(&r8a7779_pd->genpd); } -struct r8a7779_pm_domain r8a7779_sh4a = { - .genpd.name = "SH4A", - .ch = { - .chan_offs = 0x80, /* PWRSR1 .. 
PWRER1 */ - .isr_bit = 16, /* SH4A */ - } +static struct r8a7779_pm_domain r8a7779_pm_domains[] = { + { + .genpd.name = "SH4A", + .ch = { + .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */ + .isr_bit = 16, /* SH4A */ + }, + }, + { + .genpd.name = "SGX", + .ch = { + .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */ + .isr_bit = 20, /* SGX */ + }, + }, + { + .genpd.name = "VDP1", + .ch = { + .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */ + .isr_bit = 21, /* VDP */ + }, + }, + { + .genpd.name = "IMPX3", + .ch = { + .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */ + .isr_bit = 24, /* IMP */ + }, + }, }; -struct r8a7779_pm_domain r8a7779_sgx = { - .genpd.name = "SGX", - .ch = { - .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */ - .isr_bit = 20, /* SGX */ - } -}; - -struct r8a7779_pm_domain r8a7779_vdp1 = { - .genpd.name = "VDP1", - .ch = { - .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */ - .isr_bit = 21, /* VDP */ - } -}; +void __init r8a7779_init_pm_domains(void) +{ + int j; -struct r8a7779_pm_domain r8a7779_impx3 = { - .genpd.name = "IMPX3", - .ch = { - .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */ - .isr_bit = 24, /* IMP */ - } -}; + for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++) + r8a7779_init_pm_domain(&r8a7779_pm_domains[j]); +} #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index e98e46f..2917668 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -251,10 +251,7 @@ void __init r8a7779_add_standard_devices(void) #endif r8a7779_pm_init(); - r8a7779_init_pm_domain(&r8a7779_sh4a); - r8a7779_init_pm_domain(&r8a7779_sgx); - r8a7779_init_pm_domain(&r8a7779_vdp1); - r8a7779_init_pm_domain(&r8a7779_impx3); + r8a7779_init_pm_domains(); platform_add_devices(r8a7779_early_devices, ARRAY_SIZE(r8a7779_early_devices)); -- cgit v0.10.2 From a62595d359ba8ca0f25b93c06eab84ba97ca516f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 7 Aug 2012 01:15:02 +0200 Subject: ARM: shmobile: Make rmobile_init_pm_domain() static Since rmobile_init_pm_domain() is not called anywhere outside of arch/arm/mach-shmobile/pm-rmobile.c any more, it can be made static and its header may be removed from pm-rmobile.h. Modify the code accordingly. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 4c264c7..7fd9ad1 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -30,12 +30,10 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) } #ifdef CONFIG_PM -extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd); extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num); extern void rmobile_add_device_to_domain(const char *domain_name, struct platform_device *pdev); #else -#define rmobile_init_pm_domain(pd) do { } while (0) #define rmobile_init_domains(domains, num) do { } while (0) #define rmobile_add_device_to_domain(name, pdev) do { } while (0) #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 4b6f2ea..63f3566 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -134,7 +134,7 @@ static int rmobile_pd_start_dev(struct device *dev) return ret; } -void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) +static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) { struct generic_pm_domain *genpd = &rmobile_pd->genpd; struct dev_power_governor *gov = rmobile_pd->gov; -- cgit v0.10.2 From 8ae28ecb8726db5904f0f703f100315377b0172b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 8 Aug 2012 00:27:10 +0200 Subject: ARM: shmobile: Set PM domain on/off latencies directly The results of adaptive latency computations in __pm_genpd_poweron() and pm_genpd_poweroff() show that the power on/power off latencies of all power domains in SH7372 are a little below 250 us. Therefore, if 250 us is used as the common initial value of the latency fields in struct generic_pm_domain for all domains, the latency values will never have to change at run time and there won't be any overhead related to re-computation of the corresponding PM QoS data. Signed-off-by: Rafael J. 
Wysocki
Acked-by: Magnus Damm

diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 0d722b5..9d055a9 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -71,6 +71,8 @@

 #ifdef CONFIG_PM

+#define PM_DOMAIN_ON_OFF_LATENCY_NS	250000
+
 static int sh7372_a4r_pd_suspend(void)
 {
 	sh7372_intcs_suspend();
@@ -110,32 +112,46 @@ static int sh7372_a3sp_pd_suspend(void)
 static struct rmobile_pm_domain sh7372_pm_domains[] = {
 	{
 		.genpd.name = "A4LC",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 1,
 	},
 	{
 		.genpd.name = "A4MP",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 2,
 	},
 	{
 		.genpd.name = "D4",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 3,
 	},
 	{
 		.genpd.name = "A4R",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 5,
 		.suspend = sh7372_a4r_pd_suspend,
 		.resume = sh7372_intcs_resume,
 	},
 	{
 		.genpd.name = "A3RV",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 6,
 	},
 	{
 		.genpd.name = "A3RI",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 8,
 	},
 	{
 		.genpd.name = "A4S",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 10,
 		.gov = &pm_domain_always_on_gov,
 		.no_debug = true,
@@ -144,6 +160,8 @@ static struct rmobile_pm_domain sh7372_pm_domains[] = {
 	},
 	{
 		.genpd.name = "A3SP",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 11,
 		.gov = &pm_domain_always_on_gov,
 		.no_debug = true,
@@ -151,6 +169,8 @@ static struct rmobile_pm_domain sh7372_pm_domains[] = {
 	},
 	{
 		.genpd.name = "A3SG",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
 		.bit_shift = 13,
 	},
 };
-- cgit v0.10.2

From 455ae3a5d4191e60f50cd9faf72f9dc9f233242d Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 8 Aug 2012 00:27:52 +0200
Subject: ARM: shmobile: Allow device latencies to be specified directly

Make it possible to specify device start/stop and save/restore state
latencies directly when adding devices to PM domains.  For this purpose,
introduce rmobile_add_device_to_domain_td() whose third argument is a
pointer to a struct gpd_timing_data object containing device latency
data.

Signed-off-by: Rafael J.
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 7fd9ad1..4d02f74 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -31,10 +31,20 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num); -extern void rmobile_add_device_to_domain(const char *domain_name, - struct platform_device *pdev); +extern void rmobile_add_device_to_domain_td(const char *domain_name, + struct platform_device *pdev, + struct gpd_timing_data *td); + +static inline void rmobile_add_device_to_domain(const char *domain_name, + struct platform_device *pdev) +{ + rmobile_add_device_to_domain_td(domain_name, pdev, NULL); +} + #else + #define rmobile_init_domains(domains, num) do { } while (0) +#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0) #define rmobile_add_device_to_domain(name, pdev) do { } while (0) #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 63f3566..682575a 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -157,12 +157,13 @@ void rmobile_init_domains(struct rmobile_pm_domain domains[], int num) rmobile_init_pm_domain(&domains[j]); } -void rmobile_add_device_to_domain(const char *domain_name, - struct platform_device *pdev) +void rmobile_add_device_to_domain_td(const char *domain_name, + struct platform_device *pdev, + struct gpd_timing_data *td) { struct device *dev = &pdev->dev; - pm_genpd_name_add_device(domain_name, dev); + __pm_genpd_name_add_device(domain_name, dev, td); if (pm_clk_no_clocks(dev)) pm_clk_add(dev, NULL); } -- cgit v0.10.2 From c37b7a7b3454ea6e6b457ab458c694fe4d81cf9e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 8 Aug 2012 00:28:36 +0200 Subject: ARM: shmobile: Specify device latencies for SH7372 devices directly The results of adaptive latency computations in GENPD_DEV_TIMED_CALLBACK() show that the start/stop and save/restore state latencies of all devices on SH7372 I have tried are a little below 250 us. Therefore, if the 250 us is used as the common initial value of the latency fields in struct gpd_timing_data for all devices on SH7372, the latency values will never have to change at run time and there won't be any overhead related to re-computation of the corresponding PM QoS data. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index c1f698f..5bcde8a 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -999,8 +999,17 @@ static struct platform_device *sh7372_late_devices[] __initdata = { &spu1_device, }; +#define DEV_LATENCY_NS 250000 + void __init sh7372_add_standard_devices(void) { + struct gpd_timing_data latencies = { + .stop_latency_ns = DEV_LATENCY_NS, + .start_latency_ns = DEV_LATENCY_NS, + .save_state_latency_ns = DEV_LATENCY_NS, + .restore_state_latency_ns = DEV_LATENCY_NS, + }; + sh7372_init_pm_domains(); platform_add_devices(sh7372_early_devices, @@ -1009,30 +1018,30 @@ void __init sh7372_add_standard_devices(void) platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); - rmobile_add_device_to_domain("A3RV", &vpu_device); - rmobile_add_device_to_domain("A4MP", &spu0_device); - rmobile_add_device_to_domain("A4MP", &spu1_device); - rmobile_add_device_to_domain("A3SP", &scif0_device); - rmobile_add_device_to_domain("A3SP", &scif1_device); - rmobile_add_device_to_domain("A3SP", &scif2_device); - rmobile_add_device_to_domain("A3SP", &scif3_device); - rmobile_add_device_to_domain("A3SP", &scif4_device); - rmobile_add_device_to_domain("A3SP", &scif5_device); - rmobile_add_device_to_domain("A3SP", &scif6_device); - rmobile_add_device_to_domain("A3SP", &iic1_device); - rmobile_add_device_to_domain("A3SP", &dma0_device); - rmobile_add_device_to_domain("A3SP", &dma1_device); - rmobile_add_device_to_domain("A3SP", &dma2_device); - rmobile_add_device_to_domain("A3SP", &usb_dma0_device); - rmobile_add_device_to_domain("A3SP", &usb_dma1_device); - rmobile_add_device_to_domain("A4R", &iic0_device); - rmobile_add_device_to_domain("A4R", &veu0_device); - rmobile_add_device_to_domain("A4R", &veu1_device); - rmobile_add_device_to_domain("A4R", &veu2_device); - rmobile_add_device_to_domain("A4R", &veu3_device); - rmobile_add_device_to_domain("A4R", &jpu_device); - rmobile_add_device_to_domain("A4R", &tmu00_device); - rmobile_add_device_to_domain("A4R", &tmu01_device); + rmobile_add_device_to_domain_td("A3RV", &vpu_device, &latencies); + rmobile_add_device_to_domain_td("A4MP", &spu0_device, &latencies); + rmobile_add_device_to_domain_td("A4MP", &spu1_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif0_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif1_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif2_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif3_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif4_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif5_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &scif6_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &iic1_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &dma0_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &dma1_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &dma2_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &usb_dma0_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &usb_dma1_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &iic0_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &veu0_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &veu1_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &veu2_device, &latencies); + 
rmobile_add_device_to_domain_td("A4R", &veu3_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &jpu_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &tmu00_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &tmu01_device, &latencies); } static void __init sh7372_earlytimer_init(void) -- cgit v0.10.2 From b42879e2e9b130e9ea0df3f076a0742df46e143a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 8 Aug 2012 00:29:16 +0200 Subject: ARM: shmobile: Specify device latencies for Mackerel devices directly The results of adaptive latency computations in GENPD_DEV_TIMED_CALLBACK() show that the start/stop and save/restore state latencies of all devices on the Mackerel board I have tried are a little below 250 us. Therefore, if the 250 us is used as the common initial value of the latency fields in struct gpd_timing_data for all devices on Mackerel, the latency values will never have to change at run time and there won't be any overhead related to re-computation of the corresponding PM QoS data. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 954b02e..c64fc40 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1407,8 +1407,15 @@ static struct i2c_board_info i2c1_devices[] = { #define GPIO_PORT168CR 0xE60520A8 #define SRCR4 0xe61580bc #define USCCR1 0xE6058144 +#define DEV_LATENCY_NS 250000 static void __init mackerel_init(void) { + struct gpd_timing_data latencies = { + .stop_latency_ns = DEV_LATENCY_NS, + .start_latency_ns = DEV_LATENCY_NS, + .save_state_latency_ns = DEV_LATENCY_NS, + .restore_state_latency_ns = DEV_LATENCY_NS, + }; u32 srcr4; struct clk *clk; @@ -1623,20 +1630,20 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); - rmobile_add_device_to_domain("A4LC", &lcdc_device); - rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device); - rmobile_add_device_to_domain("A4LC", &meram_device); - rmobile_add_device_to_domain("A4MP", &fsi_device); - rmobile_add_device_to_domain("A3SP", &usbhs0_device); - rmobile_add_device_to_domain("A3SP", &usbhs1_device); - rmobile_add_device_to_domain("A3SP", &nand_flash_device); - rmobile_add_device_to_domain("A3SP", &sh_mmcif_device); - rmobile_add_device_to_domain("A3SP", &sdhi0_device); + rmobile_add_device_to_domain_td("A4LC", &lcdc_device, &latencies); + rmobile_add_device_to_domain_td("A4LC", &hdmi_lcdc_device, &latencies); + rmobile_add_device_to_domain_td("A4LC", &meram_device, &latencies); + rmobile_add_device_to_domain_td("A4MP", &fsi_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &usbhs0_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &usbhs1_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &nand_flash_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &sh_mmcif_device, &latencies); + rmobile_add_device_to_domain_td("A3SP", &sdhi0_device, &latencies); #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) - rmobile_add_device_to_domain("A3SP", &sdhi1_device); + rmobile_add_device_to_domain_td("A3SP", &sdhi1_device, &latencies); #endif - rmobile_add_device_to_domain("A3SP", &sdhi2_device); - rmobile_add_device_to_domain("A4R", &ceu_device); + rmobile_add_device_to_domain_td("A3SP", &sdhi2_device, &latencies); + rmobile_add_device_to_domain_td("A4R", &ceu_device, &latencies); hdmi_init_pm_clock(); sh7372_pm_init(); 
-- cgit v0.10.2 From ac18e02dc022a5413219f41d000bc637c7c5d957 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:56:26 +0200 Subject: ARM: shmobile: Rework adding devices to PM domains on Mackerel On SH7372 and Mackerel devices are added to PM domains through a series of rmobile_add_device_to_domain_td() calls where the last argument is always the same. This is quite inefficient, so add a common function for adding devices to PM domains that reads the domain-device pairs information from a table and use it during SH7372 and Mackerel initialization. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index c64fc40..d1e8fe8 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1407,14 +1407,23 @@ static struct i2c_board_info i2c1_devices[] = { #define GPIO_PORT168CR 0xE60520A8 #define SRCR4 0xe61580bc #define USCCR1 0xE6058144 -#define DEV_LATENCY_NS 250000 static void __init mackerel_init(void) { - struct gpd_timing_data latencies = { - .stop_latency_ns = DEV_LATENCY_NS, - .start_latency_ns = DEV_LATENCY_NS, - .save_state_latency_ns = DEV_LATENCY_NS, - .restore_state_latency_ns = DEV_LATENCY_NS, + struct pm_domain_device domain_devices[] = { + { "A4LC", &lcdc_device, }, + { "A4LC", &hdmi_lcdc_device, }, + { "A4LC", &meram_device, }, + { "A4MP", &fsi_device, }, + { "A3SP", &usbhs0_device, }, + { "A3SP", &usbhs1_device, }, + { "A3SP", &nand_flash_device, }, + { "A3SP", &sh_mmcif_device, }, + { "A3SP", &sdhi0_device, }, +#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) + { "A3SP", &sdhi1_device, }, +#endif + { "A3SP", &sdhi2_device, }, + { "A4R", &ceu_device, }, }; u32 srcr4; struct clk *clk; @@ -1630,20 +1639,8 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); - rmobile_add_device_to_domain_td("A4LC", &lcdc_device, &latencies); - rmobile_add_device_to_domain_td("A4LC", &hdmi_lcdc_device, &latencies); - rmobile_add_device_to_domain_td("A4LC", &meram_device, &latencies); - rmobile_add_device_to_domain_td("A4MP", &fsi_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &usbhs0_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &usbhs1_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &nand_flash_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &sh_mmcif_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &sdhi0_device, &latencies); -#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) - rmobile_add_device_to_domain_td("A3SP", &sdhi1_device, &latencies); -#endif - rmobile_add_device_to_domain_td("A3SP", &sdhi2_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &ceu_device, &latencies); + rmobile_add_devices_to_domains(domain_devices, + ARRAY_SIZE(domain_devices)); hdmi_init_pm_clock(); sh7372_pm_init(); diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h index 4d02f74..690553a 100644 --- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -12,6 +12,8 @@ #include +#define DEFAULT_DEV_LATENCY_NS 250000 + struct platform_device; struct rmobile_pm_domain { @@ -29,6 +31,11 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) return container_of(d, struct rmobile_pm_domain, genpd); } +struct pm_domain_device { + const 
char *domain_name; + struct platform_device *pdev; +}; + #ifdef CONFIG_PM extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num); extern void rmobile_add_device_to_domain_td(const char *domain_name, @@ -41,11 +48,16 @@ static inline void rmobile_add_device_to_domain(const char *domain_name, rmobile_add_device_to_domain_td(domain_name, pdev, NULL); } +extern void rmobile_add_devices_to_domains(struct pm_domain_device data[], + int size); #else #define rmobile_init_domains(domains, num) do { } while (0) #define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0) #define rmobile_add_device_to_domain(name, pdev) do { } while (0) + +static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[], + int size) {} #endif /* CONFIG_PM */ #endif /* PM_RMOBILE_H */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 682575a..d37d368 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -167,4 +167,20 @@ void rmobile_add_device_to_domain_td(const char *domain_name, if (pm_clk_no_clocks(dev)) pm_clk_add(dev, NULL); } + +void rmobile_add_devices_to_domains(struct pm_domain_device data[], + int size) +{ + struct gpd_timing_data latencies = { + .stop_latency_ns = DEFAULT_DEV_LATENCY_NS, + .start_latency_ns = DEFAULT_DEV_LATENCY_NS, + .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS, + .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS, + }; + int j; + + for (j = 0; j < size; j++) + rmobile_add_device_to_domain_td(data[j].domain_name, + data[j].pdev, &latencies); +} #endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 5bcde8a..a07954f 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -999,15 +999,33 @@ static struct platform_device *sh7372_late_devices[] __initdata = { &spu1_device, }; -#define DEV_LATENCY_NS 250000 - void __init sh7372_add_standard_devices(void) { - struct gpd_timing_data latencies = { - .stop_latency_ns = DEV_LATENCY_NS, - .start_latency_ns = DEV_LATENCY_NS, - .save_state_latency_ns = DEV_LATENCY_NS, - .restore_state_latency_ns = DEV_LATENCY_NS, + struct pm_domain_device domain_devices[] = { + { "A3RV", &vpu_device, }, + { "A4MP", &spu0_device, }, + { "A4MP", &spu1_device, }, + { "A3SP", &scif0_device, }, + { "A3SP", &scif1_device, }, + { "A3SP", &scif2_device, }, + { "A3SP", &scif3_device, }, + { "A3SP", &scif4_device, }, + { "A3SP", &scif5_device, }, + { "A3SP", &scif6_device, }, + { "A3SP", &iic1_device, }, + { "A3SP", &dma0_device, }, + { "A3SP", &dma1_device, }, + { "A3SP", &dma2_device, }, + { "A3SP", &usb_dma0_device, }, + { "A3SP", &usb_dma1_device, }, + { "A4R", &iic0_device, }, + { "A4R", &veu0_device, }, + { "A4R", &veu1_device, }, + { "A4R", &veu2_device, }, + { "A4R", &veu3_device, }, + { "A4R", &jpu_device, }, + { "A4R", &tmu00_device, }, + { "A4R", &tmu01_device, }, }; sh7372_init_pm_domains(); @@ -1018,30 +1036,8 @@ void __init sh7372_add_standard_devices(void) platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); - rmobile_add_device_to_domain_td("A3RV", &vpu_device, &latencies); - rmobile_add_device_to_domain_td("A4MP", &spu0_device, &latencies); - rmobile_add_device_to_domain_td("A4MP", &spu1_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif0_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif1_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif2_device, 
&latencies); - rmobile_add_device_to_domain_td("A3SP", &scif3_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif4_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif5_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &scif6_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &iic1_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &dma0_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &dma1_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &dma2_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &usb_dma0_device, &latencies); - rmobile_add_device_to_domain_td("A3SP", &usb_dma1_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &iic0_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &veu0_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &veu1_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &veu2_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &veu3_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &jpu_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &tmu00_device, &latencies); - rmobile_add_device_to_domain_td("A4R", &tmu01_device, &latencies); + rmobile_add_devices_to_domains(domain_devices, + ARRAY_SIZE(domain_devices)); } static void __init sh7372_earlytimer_init(void) -- cgit v0.10.2 From 201dbd8110164d817e73c5602dd4d358d7fc535a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:56:41 +0200 Subject: ARM: shmobile: Rework adding devices to PM domains on AP4EVB Use the function rmobile_add_devices_to_domains() introduced previously for adding devices to PM domains during the AP4EVB initialization instead of a series of rmobile_add_device_to_domain*() calls. This also causes the default device PM QoS latencies to be used on that board in analogy with Mackerel. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index b85957a..5168a03 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1229,6 +1229,15 @@ static struct i2c_board_info i2c1_devices[] = { #define USCCR1 0xE6058144 static void __init ap4evb_init(void) { + struct pm_domain_device domain_devices[] = { + { "A4LC", &lcdc1_device, }, + { "A4LC", &lcdc_device, }, + { "A4MP", &fsi_device, }, + { "A3SP", &sh_mmcif_device, }, + { "A3SP", &sdhi0_device, }, + { "A3SP", &sdhi1_device, }, + { "A4R", &ceu_device, }, + }; u32 srcr4; struct clk *clk; @@ -1461,14 +1470,8 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); - rmobile_add_device_to_domain("A4LC", &lcdc1_device); - rmobile_add_device_to_domain("A4LC", &lcdc_device); - rmobile_add_device_to_domain("A4MP", &fsi_device); - - rmobile_add_device_to_domain("A3SP", &sh_mmcif_device); - rmobile_add_device_to_domain("A3SP", &sdhi0_device); - rmobile_add_device_to_domain("A3SP", &sdhi1_device); - rmobile_add_device_to_domain("A4R", &ceu_device); + rmobile_add_devices_to_domains(domain_devices, + ARRAY_SIZE(domain_devices)); hdmi_init_pm_clock(); fsi_init_pm_clock(); -- cgit v0.10.2 From 18c081e27666c5db33075bf7ae4694e091cf1d20 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 15 Aug 2012 20:57:06 +0200 Subject: ARM: shmobile: Remove the console check from sh7372_enter_suspend() The !console_suspend_enabled check in sh7372_enter_suspend() seems to be reversed and the condition it is supposed to catch (console clock enabled) should be detected by the sh7372_sysc_valid() check anyway, so remove it. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 9d055a9..d01122c 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -424,16 +424,14 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state) unsigned long msk, msk2; /* check active clocks to determine potential wakeup sources */ - if (sh7372_sysc_valid(&msk, &msk2)) { - if (!console_suspend_enabled && a4s_suspend_ready) { - /* convert INTC mask/sense to SYSC mask/sense */ - sh7372_setup_sysc(msk, msk2); - - /* enter A4S sleep with PLLC0 off */ - pr_debug("entering A4S\n"); - sh7372_enter_a4s_common(0); - return 0; - } + if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) { + /* convert INTC mask/sense to SYSC mask/sense */ + sh7372_setup_sysc(msk, msk2); + + /* enter A4S sleep with PLLC0 off */ + pr_debug("entering A4S\n"); + sh7372_enter_a4s_common(0); + return 0; } /* default to enter A3SM sleep with PLLC0 off */ -- cgit v0.10.2 From 35f2b0bd5911dc0eef3f5352b6acb79c69420111 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:57:27 +0200 Subject: ARM: shmobile: Move definition of shmobile_init_late() to header The role of the only function in the common.c file in arch/arm/mach-shmobile, shmobile_init_late(), is to call two initializers whose definitions depend on kernel configuration options. Those initializers may very well be called from a static inline function in arm/mach-shmobile/include/mach/common.h, though, in which makes the code a bit easier to read. Moreover, the common.c may be dropped entirely then. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 0df5ae6..fe2c97c 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile @@ -3,7 +3,7 @@ # # Common objects -obj-y := timer.o console.o clock.o common.o +obj-y := timer.o console.o clock.o # CPU objects obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c deleted file mode 100644 index 608aba9..0000000 --- a/arch/arm/mach-shmobile/common.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - */ -#include -#include -#include - -void __init shmobile_init_late(void) -{ - shmobile_suspend_init(); - shmobile_cpuidle_init(); -} diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 45e61da..9175c18 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -86,8 +86,6 @@ extern int r8a7779_boot_secondary(unsigned int cpu); extern void r8a7779_smp_prepare_cpus(void); extern void r8a7779_register_twd(void); -extern void shmobile_init_late(void); - #ifdef CONFIG_SUSPEND int shmobile_suspend_init(void); #else @@ -100,4 +98,10 @@ int shmobile_cpuidle_init(void); static inline int shmobile_cpuidle_init(void) { return 0; } #endif +static inline void shmobile_init_late(void) +{ + shmobile_suspend_init(); + shmobile_cpuidle_init(); +} + #endif /* __ARCH_MACH_COMMON_H */ -- cgit v0.10.2 From 5b41147ceae44350f43f9b8124687d22bed2bbb9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Aug 2012 20:58:19 +0200 Subject: ARM: shmobile: Make sh7372 cpuidle handling more straightforward The sh7372 cpuidle code uses the same artificially designed routine shmobile_cpuidle_enter() as the .enter() callback for all of its cpuidle states. However, shmobile_cpuidle_enter() calls a different "enter" function for each state using an array of function pointers populated by the sh7372 PM initialization code. Moreover, the states[] array of the shmobile cpuidle driver is populated by that code as well, although in principle it just might have been filled with static data. All of that complexity goes away if the sh7372 cpuidle code is allowed to define its own cpuidle driver structure that can be passed for registration to the common shmobile cpuidle initialization routine, so modify the code accordingly. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c index 7b541e91..9e05026 100644 --- a/arch/arm/mach-shmobile/cpuidle.c +++ b/arch/arm/mach-shmobile/cpuidle.c @@ -16,51 +16,38 @@ #include #include -static void shmobile_enter_wfi(void) +int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv, + int index) { cpu_do_idle(); -} - -void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = { - shmobile_enter_wfi, /* regular sleep mode */ -}; - -static int shmobile_cpuidle_enter(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - shmobile_cpuidle_modes[index](); - - return index; + return 0; } static struct cpuidle_device shmobile_cpuidle_dev; -static struct cpuidle_driver shmobile_cpuidle_driver = { +static struct cpuidle_driver shmobile_cpuidle_default_driver = { .name = "shmobile_cpuidle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, + .states[0].enter = shmobile_enter_wfi, .safe_state_index = 0, /* C1 */ .state_count = 1, }; -void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv); +static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver; + +void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv) +{ + cpuidle_drv = drv; +} int shmobile_cpuidle_init(void) { struct cpuidle_device *dev = &shmobile_cpuidle_dev; - struct cpuidle_driver *drv = &shmobile_cpuidle_driver; - int i; - - for (i = 0; i < CPUIDLE_STATE_MAX; i++) - drv->states[i].enter = shmobile_cpuidle_enter; - - if (shmobile_cpuidle_setup) - shmobile_cpuidle_setup(drv); - cpuidle_register_driver(drv); + cpuidle_register_driver(cpuidle_drv); - dev->state_count = drv->state_count; + dev->state_count = cpuidle_drv->state_count; cpuidle_register_device(dev); return 0; diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 9175c18..eb89293 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -14,8 +14,10 @@ extern int shmobile_clk_init(void); extern void shmobile_handle_irq_intc(struct pt_regs *); extern struct platform_suspend_ops shmobile_suspend_ops; struct cpuidle_driver; -extern void (*shmobile_cpuidle_modes[])(void); -extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv); +struct cpuidle_device; +extern int shmobile_enter_wfi(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index); +extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv); extern void sh7367_init_irq(void); extern void sh7367_map_io(void); diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index d01122c..5cafd35 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -347,7 +348,8 @@ static int sh7372_do_idle_core_standby(unsigned long unused) return 0; } -static void sh7372_enter_core_standby(void) +static int sh7372_enter_core_standby(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); @@ -358,52 +360,61 @@ static void sh7372_enter_core_standby(void) /* disable reset vector translation */ __raw_writel(0, SBAR); + + return 1; } -static void sh7372_enter_a3sm_pll_on(void) +static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { sh7372_enter_a3sm_common(1); + return 2; } 
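/*
 * [ed: illustrative sketch, not part of the original patch.  The
 * reworked .enter callbacks return the index of the C-state that was
 * actually entered, so the cpuidle core can attribute the idle
 * residency to the right state; the hard-coded return values (2, 3 and
 * so on) match the positions of these states in the driver's states[]
 * table.  A generic callback has this shape:]
 */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* enter the low-power mode */
	return index;		/* report the state actually used */
}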
-static void sh7372_enter_a3sm_pll_off(void)
+static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+				     struct cpuidle_driver *drv, int index)
 {
 	sh7372_enter_a3sm_common(0);
+	return 3;
 }

-static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
-{
-	struct cpuidle_state *state = &drv->states[drv->state_count];
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-	strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-	state->exit_latency = 10;
-	state->target_residency = 20 + 10;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-	drv->state_count++;
-
-	state = &drv->states[drv->state_count];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-	strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-	state->exit_latency = 20;
-	state->target_residency = 30 + 20;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-	drv->state_count++;
-
-	state = &drv->states[drv->state_count];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-	strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-	state->exit_latency = 120;
-	state->target_residency = 30 + 120;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-	drv->state_count++;
-}
+static struct cpuidle_driver sh7372_cpuidle_driver = {
+	.name = "sh7372_cpuidle",
+	.owner = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.state_count = 4,
+	.safe_state_index = 0, /* C1 */
+	.states[0] = ARM_CPUIDLE_WFI_STATE,
+	.states[0].enter = shmobile_enter_wfi,
+	.states[1] = {
+		.name = "C2",
+		.desc = "Core Standby Mode",
+		.exit_latency = 10,
+		.target_residency = 20 + 10,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_core_standby,
+	},
+	.states[2] = {
+		.name = "C3",
+		.desc = "A3SM PLL ON",
+		.exit_latency = 20,
+		.target_residency = 30 + 20,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_a3sm_pll_on,
+	},
+	.states[3] = {
+		.name = "C4",
+		.desc = "A3SM PLL OFF",
+		.exit_latency = 120,
+		.target_residency = 30 + 120,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_a3sm_pll_off,
+	},
+};

 static void sh7372_cpuidle_init(void)
 {
-	shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+	shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
 static void sh7372_cpuidle_init(void) {}
-- cgit v0.10.2

From caaca999d335872a53b3fff8fa5af2de794d16a1 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 22 Aug 2012 12:27:24 +0200
Subject: ARM: shmobile: Add A4S cpuidle state on sh7372

Add a "C5" cpuidle state to the SH7372 SoC connected to the A4S power
domain in such a way that A4S may be turned off by cpuidle if all I/O
devices in that domain have been suspended (or do not have attached
drivers).

This requires some reorganization of the initialization of SH7372 power
management, which affects the boards based on it, Mackerel and AP4EVB.

Signed-off-by: Rafael J.
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 5168a03..264340a 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1486,6 +1486,6 @@ MACHINE_START(AP4EVB, "ap4evb") .init_irq = sh7372_init_irq, .handle_irq = shmobile_handle_irq_intc, .init_machine = ap4evb_init, - .init_late = shmobile_init_late, + .init_late = sh7372_pm_init_late, .timer = &shmobile_timer, MACHINE_END diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index d1e8fe8..f96e415 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1654,6 +1654,6 @@ MACHINE_START(MACKEREL, "mackerel") .init_irq = sh7372_init_irq, .handle_irq = shmobile_handle_irq_intc, .init_machine = mackerel_init, - .init_late = shmobile_init_late, + .init_late = sh7372_pm_init_late, .timer = &shmobile_timer, MACHINE_END diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index 40beb79..eb98b45 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -489,4 +489,6 @@ extern void __init sh7372_init_pm_domains(void); static inline void sh7372_init_pm_domains(void) {} #endif +extern void __init sh7372_pm_init_late(void); + #endif /* __ASM_SH7372_H__ */ diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 5cafd35..a7a5e20 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -339,6 +339,21 @@ static void sh7372_enter_a3sm_common(int pllc0_on) sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); sh7372_enter_sysc(pllc0_on, 1 << 12); } + +static void sh7372_enter_a4s_common(int pllc0_on) +{ + sh7372_intca_suspend(); + sh7372_set_reset_vector(SMFRAM); + sh7372_enter_sysc(pllc0_on, 1 << 10); + sh7372_intca_resume(); +} + +static void sh7372_pm_setup_smfram(void) +{ + memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100); +} +#else +static inline void sh7372_pm_setup_smfram(void) {} #endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */ #ifdef CONFIG_CPU_IDLE @@ -378,11 +393,24 @@ static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev, return 3; } +static int sh7372_enter_a4s(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + unsigned long msk, msk2; + + if (!sh7372_sysc_valid(&msk, &msk2)) + return sh7372_enter_a3sm_pll_off(dev, drv, index); + + sh7372_setup_sysc(msk, msk2); + sh7372_enter_a4s_common(0); + return 4; +} + static struct cpuidle_driver sh7372_cpuidle_driver = { .name = "sh7372_cpuidle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, - .state_count = 4, + .state_count = 5, .safe_state_index = 0, /* C1 */ .states[0] = ARM_CPUIDLE_WFI_STATE, .states[0].enter = shmobile_enter_wfi, @@ -410,6 +438,15 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { .flags = CPUIDLE_FLAG_TIME_VALID, .enter = sh7372_enter_a3sm_pll_off, }, + .states[4] = { + .name = "C5", + .desc = "A4S PLL OFF", + .exit_latency = 240, + .target_residency = 30 + 240, + .flags = CPUIDLE_FLAG_TIME_VALID, + .enter = sh7372_enter_a4s, + .disabled = true, + }, }; static void sh7372_cpuidle_init(void) @@ -421,15 +458,6 @@ static void sh7372_cpuidle_init(void) {} #endif #ifdef CONFIG_SUSPEND -static void sh7372_enter_a4s_common(int pllc0_on) -{ - sh7372_intca_suspend(); - memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100); - 
sh7372_set_reset_vector(SMFRAM);
-	sh7372_enter_sysc(pllc0_on, 1 << 10);
-	sh7372_intca_resume();
-}
-
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
 	unsigned long msk, msk2;
@@ -497,6 +525,14 @@ void __init sh7372_pm_init(void)
 	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
 	__raw_writel(0, PDNSEL);

+	sh7372_pm_setup_smfram();
+
 	sh7372_suspend_init();
 	sh7372_cpuidle_init();
 }
+
+void __init sh7372_pm_init_late(void)
+{
+	shmobile_init_late();
+	pm_genpd_name_attach_cpuidle("A4S", 4);
+}
-- cgit v0.10.2

From 56835e6cc053c29bf1a15a07dbeb78f219a15214 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Wed, 5 Sep 2012 00:50:26 +0200
Subject: cpufreq / powernow-k8: Fixup missing _PSS objects message

_PSS objects can also be missing if Cool'N'Quiet is disabled in the
BIOS.  Add that to the FW_BUG message for the user to try before
updating her BIOS.  Fix formatting while at it.

Acked-by: Mark Langsdorf
Signed-off-by: Borislav Petkov
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index c0e8164..33f17c4 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1265,12 +1265,15 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
 	init_on_cpu->rc = 0;
 }

+static const char missing_pss_msg[] =
+	KERN_ERR
+	FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+	FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-	static const char ACPI_PSS_BIOS_BUG_MSG[] =
-		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
@@ -1298,7 +1301,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		printk_once(ACPI_PSS_BIOS_BUG_MSG);
+		printk_once(missing_pss_msg);
 		goto err_out;
 	}
 	if (pol->cpu != 0) {
-- cgit v0.10.2

From c59687f8466df36633d937cc298aad465d704990 Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Wed, 5 Sep 2012 15:13:48 +0200
Subject: cpuidle / ACPI : remove power from acpi_processor_cx structure

Remove the unused power field from struct acpi_processor_cx.

[rjw: Modified changelog.]

Signed-off-by: Daniel Lezcano
Acked-by: Konrad Rzeszutek Wilk
Signed-off-by: Rafael J.
Wysocki

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ad3730b..de89624 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -483,8 +483,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;

-		cx.power = obj->integer.value;
-
 		current_count++;
 		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index b590ee0..316df65 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -98,7 +98,6 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)

 		dst_cx->type = cx->type;
 		dst_cx->latency = cx->latency;
-		dst_cx->power = cx->power;

 		dst_cx->dpcnt = 0;
 		set_xen_guest_handle(dst_cx->dp, NULL);

diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 64ec644..db427fa 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -59,7 +59,6 @@ struct acpi_processor_cx {
 	u8 entry_method;
 	u8 index;
 	u32 latency;
-	u32 power;
 	u8 bm_sts_skip;
 	char desc[ACPI_CX_DESC_LEN];
 };
-- cgit v0.10.2

From b3d3b9fb6016e6eacd3ae49fb786806d00c43e7b Mon Sep 17 00:00:00 2001
From: Sachin Kamat
Date: Thu, 6 Sep 2012 08:18:57 +0000
Subject: PM / Domains: Fix compilation warning related to genpd_start_dev_no_timing()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The function genpd_start_dev_no_timing() was referenced inside the
CONFIG_PM_RUNTIME block but defined outside it.  When that macro was not
defined, the compiler gave the following warning:

drivers/base/power/domain.c:96:12: warning: ‘genpd_start_dev_no_timing’ defined but not used [-Wunused-function]

Signed-off-by: Sachin Kamat
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 12ad070..c22b869 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -93,12 +93,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
 				    start_latency_ns, "start");
 }

-static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
-				     struct device *dev)
-{
-	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-}
-
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -296,6 +290,12 @@ int pm_genpd_name_poweron(const char *domain_name)

 #ifdef CONFIG_PM_RUNTIME

+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+				     struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
-- cgit v0.10.2

From 4955070974ecfa0b1ae9d2506f529460fd3a4b0b Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Thu, 6 Sep 2012 23:19:06 +0200
Subject: PM / wakeup: Use irqsave/irqrestore for events_lock

Jon Medhurst (Tixy) recently noticed a problem with the events_lock
usage.  One of the Android patches that uses wakeup_sources calls
wakeup_source_add() with irqs disabled.  However, the events_lock usage
in wakeup_source_add() uses spin_lock_irq()/spin_unlock_irq(), which
re-enables interrupts.  This results in lockdep warnings.

The fix is to use spin_lock_irqsave()/spin_unlock_irqrestore() instead
for the events_lock.

References: https://bugs.launchpad.net/linaro-landing-team-arm/+bug/1037565
Reported-and-debugged-by: Jon Medhurst (Tixy)
Signed-off-by: John Stultz
Signed-off-by: Rafael J.
Wysocki

diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8a0a9ca..e6ee5e8 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 */
 void wakeup_source_add(struct wakeup_source *ws)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!ws))
 		return;

@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
 	ws->active = false;
 	ws->last_time = ktime_get();

-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	list_add_rcu(&ws->entry, &wakeup_sources);
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_add);

@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
 */
 void wakeup_source_remove(struct wakeup_source *ws)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!ws))
 		return;

-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	list_del_rcu(&ws->entry);
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);

@@ -752,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 bool pm_save_wakeup_count(unsigned int count)
 {
 	unsigned int cnt, inpr;
+	unsigned long flags;

 	events_check_enabled = false;
-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	split_counters(&cnt, &inpr);
 	if (cnt == count && inpr == 0) {
 		saved_count = count;
 		events_check_enabled = true;
 	}
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 	return events_check_enabled;
 }
-- cgit v0.10.2

From c6a57bfffea5b673e5b4f9aeff85a00607e59077 Mon Sep 17 00:00:00 2001
From: Luis Gonzalez Fernandez
Date: Fri, 7 Sep 2012 21:35:21 +0200
Subject: PM / QoS: Add return code to pm_qos_get_value function.

pm_qos_get_value() doesn't return a value in all cases.  To be sure,
nothing interesting can happen after BUG(), but the added return
statement prevents a compilation warning.

[rjw: Changed the new return value to PM_QOS_DEFAULT_VALUE.]

Signed-off-by: Luis Gonzalez Fernandez
Signed-off-by: Rafael J. Wysocki

diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 6a031e6..846bd42 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -139,6 +139,7 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 	default:
 		/* runtime check for not using enum */
 		BUG();
+		return PM_QOS_DEFAULT_VALUE;
 	}
 }
-- cgit v0.10.2

From 3dc9a633f8a65b39c5897874138027328bfb0a94 Mon Sep 17 00:00:00 2001
From: Matthew Garrett
Date: Tue, 4 Sep 2012 08:28:02 +0000
Subject: acpi-cpufreq: Add support for modern AMD CPUs

The programming model for P-states on modern AMD CPUs is very similar
to that of Intel and VIA.  It makes sense to consolidate this support
into one driver rather than duplicating functionality between two of
them.  This patch adds support for AMD CPUs with hardware P-state
control to acpi-cpufreq.

Signed-off-by: Matthew Garrett
Signed-off-by: Andre Przywara
Signed-off-by: Rafael J.
Wysocki diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 957ec87..1e1f3eb 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -248,6 +248,8 @@ #define MSR_IA32_PERF_STATUS 0x00000198 #define MSR_IA32_PERF_CTL 0x00000199 +#define MSR_AMD_PERF_STATUS 0xc0010063 +#define MSR_AMD_PERF_CTL 0xc0010062 #define MSR_IA32_MPERF 0x000000e7 #define MSR_IA32_APERF 0x000000e8 diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 78ff7ee..8d12e37 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ help This driver adds a CPUFreq driver which utilizes the ACPI Processor Performance States. - This driver also supports Intel Enhanced Speedstep. + This driver also supports Intel Enhanced Speedstep and newer + AMD CPUs. To compile this driver as a module, choose M here: the module will be called acpi-cpufreq. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 56c6c6b..067a61f 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -54,10 +54,12 @@ MODULE_LICENSE("GPL"); enum { UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE, + SYSTEM_AMD_MSR_CAPABLE, SYSTEM_IO_CAPABLE, }; #define INTEL_MSR_RANGE (0xffff) +#define AMD_MSR_RANGE (0x7) struct acpi_cpufreq_data { struct acpi_processor_performance *acpi_data; @@ -82,6 +84,13 @@ static int check_est_cpu(unsigned int cpuid) return cpu_has(cpu, X86_FEATURE_EST); } +static int check_amd_hwpstate_cpu(unsigned int cpuid) +{ + struct cpuinfo_x86 *cpu = &cpu_data(cpuid); + + return cpu_has(cpu, X86_FEATURE_HW_PSTATE); +} + static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) { struct acpi_processor_performance *perf; @@ -101,7 +110,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) int i; struct acpi_processor_performance *perf; - msr &= INTEL_MSR_RANGE; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + msr &= AMD_MSR_RANGE; + else + msr &= INTEL_MSR_RANGE; + perf = data->acpi_data; for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { @@ -115,6 +128,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) { switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: + case SYSTEM_AMD_MSR_CAPABLE: return extract_msr(val, data); case SYSTEM_IO_CAPABLE: return extract_io(val, data); @@ -150,6 +164,7 @@ static void do_drv_read(void *_cmd) switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: + case SYSTEM_AMD_MSR_CAPABLE: rdmsr(cmd->addr.msr.reg, cmd->val, h); break; case SYSTEM_IO_CAPABLE: @@ -174,6 +189,9 @@ static void do_drv_write(void *_cmd) lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); wrmsr(cmd->addr.msr.reg, lo, hi); break; + case SYSTEM_AMD_MSR_CAPABLE: + wrmsr(cmd->addr.msr.reg, cmd->val, 0); + break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, cmd->val, @@ -217,6 +235,10 @@ static u32 get_cur_val(const struct cpumask *mask) cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; + case SYSTEM_AMD_MSR_CAPABLE: + cmd.type = SYSTEM_AMD_MSR_CAPABLE; + cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; + break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; @@ -326,6 +348,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, cmd.addr.msr.reg = MSR_IA32_PERF_CTL; cmd.val = (u32) perf->states[next_perf_state].control; break; + case 
SYSTEM_AMD_MSR_CAPABLE: + cmd.type = SYSTEM_AMD_MSR_CAPABLE; + cmd.addr.msr.reg = MSR_AMD_PERF_CTL; + cmd.val = (u32) perf->states[next_perf_state].control; + break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; cmd.addr.io.port = perf->control_register.address; @@ -580,12 +607,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) break; case ACPI_ADR_SPACE_FIXED_HARDWARE: pr_debug("HARDWARE addr space\n"); - if (!check_est_cpu(cpu)) { - result = -ENODEV; - goto err_unreg; + if (check_est_cpu(cpu)) { + data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; + break; } - data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; - break; + if (check_amd_hwpstate_cpu(cpu)) { + data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; + break; + } + result = -ENODEV; + goto err_unreg; default: pr_debug("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); -- cgit v0.10.2 From acd316248205d553594296f1895ba5196b89ffcc Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 4 Sep 2012 08:28:03 +0000 Subject: acpi-cpufreq: Add quirk to disable _PSD usage on all AMD CPUs To work around some Windows-specific behavior, the ACPI _PSD table on AMD desktop boards advertises all cores as dependent, meaning that they all can only use the same P-state. acpi-cpufreq strictly obeys this description, instantiating one CPU only and symlinking the others. But the hardware can have distinct frequencies for each core, and powernow-k8 did it that way. So, in order to use the hardware to its full potential and keep the original powernow-k8 behavior, let's override the _PSD table setting on AMD hardware. We use the siblings table, as it matches the current hardware behavior. Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 067a61f..70e7173 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -51,6 +51,8 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); +#define PFX "acpi-cpufreq: " + enum { UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE, @@ -586,6 +588,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->cpus, cpu_core_mask(cpu)); } + + if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { + cpumask_clear(policy->cpus); + cpumask_set_cpu(cpu, policy->cpus); + cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu)); + policy->shared_type = CPUFREQ_SHARED_TYPE_HW; + pr_info_once(PFX "overriding BIOS provided _PSD data\n"); + } #endif /* capability check */ -- cgit v0.10.2 From 034be8fd645bb77ca623036dc24c790b6cc803e2 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 4 Sep 2012 08:28:04 +0000 Subject: cpufreq: Add warning message to powernow-k8 cpufreq modules are often loaded from init scripts that assume that all recent AMD systems will use powernow-k8. To inform the user of the change of support and ease the transition to acpi-cpufreq, emit a warning message. Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 8d12e37..b36ca1f 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -96,7 +96,8 @@ config X86_POWERNOW_K8 select CPU_FREQ_TABLE depends on ACPI && ACPI_PROCESSOR help - This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
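The per-vendor masking added in the acpi-cpufreq patch above is the crux of extract_msr(); the following stand-alone sketch shows the same lookup in isolation (the table contents and status values are invented for the example, only the two RANGE masks come from the patch):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define INTEL_MSR_RANGE (0xffff)	/* Intel: transition value in the low 16 bits */
#define AMD_MSR_RANGE	(0x7)		/* AMD: hardware P-state index in the low 3 bits */

struct entry { uint32_t status; unsigned int khz; };

/* Hypothetical frequency table; the driver builds its table from ACPI _PSS. */
static const struct entry table[] = {
	{ 0x0, 2200000 },
	{ 0x1, 1700000 },
	{ 0x2, 1100000 },
};

static unsigned int extract_khz(uint32_t msr, int is_amd)
{
	size_t i;

	msr &= is_amd ? AMD_MSR_RANGE : INTEL_MSR_RANGE;
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].status == msr)
			return table[i].khz;
	return 0;	/* no matching entry */
}

int main(void)
{
	/* An AMD status MSR carries other bits too; masking isolates P-state 2. */
	printf("%u kHz\n", extract_khz(0x1a, 1));	/* prints 1100000 kHz */
	return 0;
}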
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. + Support for K10 and newer processors is now in acpi-cpufreq. To compile this driver as a module, choose M here: the module will be called powernow-k8. diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 33f17c4..e2defb8 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1563,6 +1563,9 @@ static int __cpuinit powernowk8_init(void) if (!x86_match_cpu(powernow_k8_ids)) return -ENODEV; + if (static_cpu_has(X86_FEATURE_HW_PSTATE)) + pr_warn(PFX "support for this CPU is deprecated, use acpi-cpufreq instead.\n"); + for_each_online_cpu(i) { int rc; smp_call_function_single(i, check_supported_cpu, &rc, 1); -- cgit v0.10.2 From a2060958569a8e8e36202c373a55c458ff84856d Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 4 Sep 2012 08:28:05 +0000 Subject: powernow-k8: delay info messages until initialization has succeeded powernow-k8 is quite prematurely crying Hooray and outputs diagnostic messages, although the actual initialization can still fail. Since now we may have acpi-cpufreq already loaded, we move the messages at the end of the init routine to avoid confusing output if the loading of powernow-k8 should not succeed. Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e2defb8..f1035a9 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1576,9 +1576,6 @@ static int __cpuinit powernowk8_init(void) if (supported_cpus != num_online_cpus()) return -ENODEV; - printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", - num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); - if (boot_cpu_has(X86_FEATURE_CPB)) { cpb_capable = true; @@ -1597,16 +1594,23 @@ static int __cpuinit powernowk8_init(void) struct msr *reg = per_cpu_ptr(msrs, cpu); cpb_enabled |= !(!!(reg->l & BIT(25))); } - - printk(KERN_INFO PFX "Core Performance Boosting: %s.\n", - (cpb_enabled ? "on" : "off")); } rv = cpufreq_register_driver(&cpufreq_amd64_driver); - if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { - unregister_cpu_notifier(&cpb_nb); - msrs_free(msrs); - msrs = NULL; + + if (!rv) + pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", + num_online_nodes(), boot_cpu_data.x86_model_id, + supported_cpus); + + if (boot_cpu_has(X86_FEATURE_CPB)) { + if (rv < 0) { + unregister_cpu_notifier(&cpb_nb); + msrs_free(msrs); + msrs = NULL; + } else + pr_info(PFX "Core Performance Boosting: %s.\n", + (cpb_enabled ? "on" : "off")); } return rv; } -- cgit v0.10.2 From f594065faf4f9067c2283a34619fc0714e79a98d Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 4 Sep 2012 08:28:06 +0000 Subject: ACPI: Add fixups for AMD P-state figures Some AMD systems may round the frequencies in ACPI tables to 100MHz boundaries. We can obtain the real frequencies from MSRs, so add a quirk to fix these frequencies up on AMD systems. Signed-off-by: Matthew Garrett Signed-off-by: Andre Przywara Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 1e1f3eb..fbee971 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -248,6 +248,7 @@ #define MSR_IA32_PERF_STATUS 0x00000198 #define MSR_IA32_PERF_CTL 0x00000199 +#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 #define MSR_AMD_PERF_STATUS 0xc0010063 #define MSR_AMD_PERF_CTL 0xc0010062 diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index a093dc1..836bfe0 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr) return result; } +#ifdef CONFIG_X86 +/* + * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding + * in their ACPI data. Calculate the real values and fix up the _PSS data. + */ +static void amd_fixup_frequency(struct acpi_processor_px *px, int i) +{ + u32 hi, lo, fid, did; + int index = px->control & 0x00000007; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return; + + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) + || boot_cpu_data.x86 == 0x11) { + rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); + fid = lo & 0x3f; + did = (lo >> 6) & 7; + if (boot_cpu_data.x86 == 0x10) + px->core_frequency = (100 * (fid + 0x10)) >> did; + else + px->core_frequency = (100 * (fid + 8)) >> did; + } +} +#else +static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}; +#endif + static int acpi_processor_get_performance_states(struct acpi_processor *pr) { int result = 0; @@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) goto end; } + amd_fixup_frequency(px, i); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", i, -- cgit v0.10.2 From 615b7300717b9ad5c23d1f391843484fe30f6c12 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 4 Sep 2012 08:28:07 +0000 Subject: acpi-cpufreq: Add support for disabling dynamic overclocking One feature present in powernow-k8 that isn't present in acpi-cpufreq is support for enabling or disabling AMD's core performance boost technology. This patch adds support to acpi-cpufreq, but also includes support for Intel's dynamic acceleration. The original boost disabling sysfs file was per CPU, but acted globally. Also the naming (cpb) was at least not intuitive. So lets introduce a single file simply called "boost", which sits once in /sys/devices/system/cpu/cpufreq. This should be the only way of using this feature, so add documentation about the rationale and the usage. A following patch will re-introduce the cpb knob for compatibility reasons on AMD CPUs. Per-CPU boost switching is possible, but not trivial and is thus postponed to a later patch series. Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 5dab364..6943133 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -176,3 +176,14 @@ Description: Disable L3 cache indices All AMD processors with L3 caches provide this functionality. 
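As a worked example of the amd_fixup_frequency() arithmetic above (the MSR value is invented; the FID/DID field layout and both formulas are taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int lo = 0x47;			/* pretend MSR_AMD_PSTATE_DEF_BASE + index readout */
	unsigned int fid = lo & 0x3f;		/* frequency ID = 7 */
	unsigned int did = (lo >> 6) & 7;	/* divisor ID = 1 */

	/* family 10h: 100 MHz * (fid + 0x10), halved did times */
	printf("family 10h: %u MHz\n", (100 * (fid + 0x10)) >> did);	/* 1150 MHz */
	/* family 11h: 100 MHz * (fid + 8), halved did times */
	printf("family 11h: %u MHz\n", (100 * (fid + 8)) >> did);	/* 750 MHz */
	return 0;
}

A 100 MHz-rounded _PSS table would report the first case as 1100 or 1200 MHz; reading the FID/DID fields back recovers the real 1150 MHz.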
For details, see BKDGs at http://developer.amd.com/documentation/guides/Pages/default.aspx + + +What: /sys/devices/system/cpu/cpufreq/boost +Date: August 2012 +Contact: Linux kernel mailing list +Description: Processor frequency boosting control + + This switch controls the boost setting for the whole system. + Boosting allows the CPU and the firmware to run at a frequency + beyond its nominal limit. + More details can be found in Documentation/cpu-freq/boost.txt diff --git a/Documentation/cpu-freq/boost.txt b/Documentation/cpu-freq/boost.txt new file mode 100644 index 0000000..9b4edfc --- /dev/null +++ b/Documentation/cpu-freq/boost.txt @@ -0,0 +1,93 @@ +Processor boosting control + + - information for users - + +Quick guide for the impatient: +-------------------- +/sys/devices/system/cpu/cpufreq/boost +controls the boost setting for the whole system. You can read and write +that file with either "0" (boosting disabled) or "1" (boosting allowed). +Reading or writing 1 does not mean that the system is boosting at this +very moment, but only that the CPU _may_ raise the frequency at its +discretion. +-------------------- + +Introduction +------------- +Some CPUs support a functionality to raise the operating frequency of +some cores in a multi-core package if certain conditions apply, mostly +if the whole chip is not fully utilized and below its intended thermal +budget. This is done without operating system control by a combination +of hardware and firmware. +On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core", +in technical documentation "Core performance boost". In Linux we use +the term "boost" for convenience. + +Rationale for disable switch +---------------------------- + +Though the idea is to just give better performance without any user +intervention, sometimes the need arises to disable this functionality. +Most systems offer a switch in the (BIOS) firmware to disable the +functionality altogether, but a more fine-grained and dynamic control would +be desirable: +1. While running benchmarks, reproducible results are important. Since + the boosting functionality depends on the load of the whole package, + single thread performance can vary. By explicitly disabling the boost + functionality at least for the benchmark's run-time the system will run + at a fixed frequency and results are reproducible again. +2. To examine the impact of the boosting functionality it is helpful + to do tests with and without boosting. +3. Boosting means overclocking the processor, though under controlled + conditions. By raising the frequency and the voltage the processor + will consume more power than without the boosting, which may be + undesirable for instance for mobile users. Disabling boosting may + save power here, though this depends on the workload. + + +User controlled switch +---------------------- + +To allow the user to toggle the boosting functionality, the acpi-cpufreq +driver exports a sysfs knob to disable it. There is a file: +/sys/devices/system/cpu/cpufreq/boost +which can either read "0" (boosting disabled) or "1" (boosting enabled). +Reading the file is always supported, even if the processor does not +support boosting. In this case the file will be read-only and always +reads as "0". Explicitly changing the permissions and writing to that +file anyway will return EINVAL. + +On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the +whole system (0) or will allow the hardware to boost at will (1). + +Writing a "1" does not explicitly boost the system, but just allows the +CPU (and the firmware) to boost at their discretion. Some implementations +take external factors like the chip's temperature into account, so +boosting once does not necessarily mean that it will occur every time +even using the exact same software setup. + + +AMD legacy cpb switch +--------------------- +The AMD powernow-k8 driver used to support a very similar switch to +disable or enable the "Core Performance Boost" feature of some AMD CPUs. +This switch was instantiated in each CPU's cpufreq directory +(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb". +Though the per CPU existence hints at a more fine grained control, the +actual implementation only supported a system-global switch semantics, +which was simply reflected into each CPU's file. Writing a 0 or 1 into it +would pull the other CPUs to the same state. +For compatibility reasons this file and its behavior is still supported +on AMD CPUs, though it is now protected by a config switch +(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created, +even with the config option set. +This functionality is considered legacy and will be removed in some future +kernel version. + +More fine grained boosting control +---------------------------------- + +Technically it is possible to switch the boosting functionality at least +on a per package basis, for some CPUs even per core. Currently the driver +does not support it, but this may be implemented in the future. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 70e7173..dffa7af 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -63,6 +63,8 @@ enum { #define INTEL_MSR_RANGE (0xffff) #define AMD_MSR_RANGE (0x7) +#define MSR_K7_HWCR_CPB_DIS (1ULL << 25) + struct acpi_cpufreq_data { struct acpi_processor_performance *acpi_data; struct cpufreq_frequency_table *freq_table; @@ -78,6 +80,96 @@ static struct acpi_processor_performance __percpu *acpi_perf_data; static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; +static bool boost_enabled, boost_supported; +static struct msr __percpu *msrs; + +static bool boost_state(unsigned int cpu) +{ + u32 lo, hi; + u64 msr; + + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); + msr = lo | ((u64)hi << 32); + return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); + case X86_VENDOR_AMD: + rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); + msr = lo | ((u64)hi << 32); + return !(msr & MSR_K7_HWCR_CPB_DIS); + } + return false; +} + +static void boost_set_msrs(bool enable, const struct cpumask *cpumask) +{ + u32 cpu; + u32 msr_addr; + u64 msr_mask; + + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + msr_addr = MSR_IA32_MISC_ENABLE; + msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; + break; + case X86_VENDOR_AMD: + msr_addr = MSR_K7_HWCR; + msr_mask = MSR_K7_HWCR_CPB_DIS; + break; + default: + return; + } + + rdmsr_on_cpus(cpumask, msr_addr, msrs); + + for_each_cpu(cpu, cpumask) { + struct msr *reg = per_cpu_ptr(msrs, cpu); + if (enable) + reg->q &= ~msr_mask; + else + reg->q |= msr_mask; + } + + wrmsr_on_cpus(cpumask, msr_addr, msrs); +} + +static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; 
+ unsigned long val = 0; + + if (!boost_supported) + return -EINVAL; + + ret = kstrtoul(buf, 10, &val); + if (ret || (val > 1)) + return -EINVAL; + + if ((val && boost_enabled) || (!val && !boost_enabled)) + return count; + + get_online_cpus(); + + boost_set_msrs(val, cpu_online_mask); + + put_online_cpus(); + + boost_enabled = val; + pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); + + return count; +} + +static ssize_t show_global_boost(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", boost_enabled); +} + +static struct global_attr global_boost = __ATTR(boost, 0644, + show_global_boost, + store_global_boost); static int check_est_cpu(unsigned int cpuid) { @@ -448,6 +540,44 @@ static void free_acpi_perf_data(void) free_percpu(acpi_perf_data); } +static int boost_notify(struct notifier_block *nb, unsigned long action, + void *hcpu) +{ + unsigned cpu = (long)hcpu; + const struct cpumask *cpumask; + + cpumask = get_cpu_mask(cpu); + + /* + * Clear the boost-disable bit on the CPU_DOWN path so that + * this cpu cannot block the remaining ones from boosting. On + * the CPU_UP path we simply keep the boost-disable flag in + * sync with the current global state. + */ + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + boost_set_msrs(boost_enabled, cpumask); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + boost_set_msrs(1, cpumask); + break; + + default: + break; + } + + return NOTIFY_OK; +} + + +static struct notifier_block boost_nb = { + .notifier_call = boost_notify, +}; + /* * acpi_cpufreq_early_init - initialize ACPI P-States library * @@ -774,6 +904,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = { .attr = acpi_cpufreq_attr, }; +static void __init acpi_cpufreq_boost_init(void) +{ + if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { + msrs = msrs_alloc(); + + if (!msrs) + return; + + boost_supported = true; + boost_enabled = boost_state(0); + + get_online_cpus(); + + /* Force all MSRs to the same value */ + boost_set_msrs(boost_enabled, cpu_online_mask); + + register_cpu_notifier(&boost_nb); + + put_online_cpus(); + } else + global_boost.attr.mode = 0444; + + /* We create the boost file in any case, though for systems without + * hardware support it will be read-only and hardwired to return 0. 
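+	 * Writes are rejected in that case as well: store_global_boost() above + * returns -EINVAL whenever boost_supported is not set.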
+ */ + if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr))) + pr_warn(PFX "could not register global boost sysfs file\n"); + else + pr_debug("registered global boost sysfs file\n"); +} + +static void __exit acpi_cpufreq_boost_exit(void) +{ + sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr)); + + if (msrs) { + unregister_cpu_notifier(&boost_nb); + + msrs_free(msrs); + msrs = NULL; + } +} + static int __init acpi_cpufreq_init(void) { int ret; @@ -790,6 +963,8 @@ static int __init acpi_cpufreq_init(void) ret = cpufreq_register_driver(&acpi_cpufreq_driver); if (ret) free_acpi_perf_data(); + else + acpi_cpufreq_boost_init(); return ret; } @@ -798,6 +973,8 @@ static void __exit acpi_cpufreq_exit(void) { pr_debug("acpi_cpufreq_exit\n"); + acpi_cpufreq_boost_exit(); + cpufreq_unregister_driver(&acpi_cpufreq_driver); free_acpi_perf_data(); -- cgit v0.10.2 From 11269ff506888a06b19c8c7a3297114f30673973 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 4 Sep 2012 08:28:08 +0000 Subject: acpi-cpufreq: Add compatibility for legacy AMD cpb sysfs knob The powernow-k8 driver supported a sysfs knob called "cpb", which was instantiated per CPU, but actually acted globally for the whole system. To keep some compatibility with this feature, we re-introduce this behavior here, but: a) only enable it on AMD CPUs and b) protect it with a Kconfig switch. I'd like to consider this feature obsolete. Let's keep it around for some kernel versions and then phase it out. Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index b36ca1f..934854a 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -33,6 +33,18 @@ config X86_ACPI_CPUFREQ If in doubt, say N. +config X86_ACPI_CPUFREQ_CPB + default y + bool "Legacy cpb sysfs knob support for AMD CPUs" + depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD + help + The powernow-k8 driver used to provide a sysfs knob called "cpb" + to disable the Core Performance Boosting feature of AMD CPUs. This + file has now been superseded by the more generic "boost" entry. + + By enabling this option the acpi_cpufreq driver provides the old + entry in addition to the new boost one, for compatibility reasons.
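For illustration, a minimal user-space sketch of driving the boost switch described in these two patches; the sysfs path is the documented one, while the helper itself and its error handling are simplified assumptions:

#include <stdio.h>

/* Path documented in Documentation/cpu-freq/boost.txt above. */
#define BOOST_PATH "/sys/devices/system/cpu/cpufreq/boost"

/* Returns 0 on success; needs root and a kernel with acpi-cpufreq loaded. */
static int set_boost(int on)
{
	FILE *f = fopen(BOOST_PATH, "w");

	if (!f)
		return -1;	/* knob absent or no permission */
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);	/* a failed flush (EINVAL on unsupported CPUs) lands here */
}

int main(void)
{
	char buf[4] = "";
	FILE *f = fopen(BOOST_PATH, "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("boost currently: %s", buf);
		fclose(f);
	}
	return set_boost(0);	/* e.g. pin frequencies for a benchmark run */
}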
+ config ELAN_CPUFREQ tristate "AMD Elan SC400 and SC410" select CPU_FREQ_TABLE diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index dffa7af..0d048f6 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -133,8 +133,7 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask) wrmsr_on_cpus(cpumask, msr_addr, msrs); } -static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) +static ssize_t _store_boost(const char *buf, size_t count) { int ret; unsigned long val = 0; @@ -161,6 +160,12 @@ static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, return count; } +static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + return _store_boost(buf, count); +} + static ssize_t show_global_boost(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -171,6 +176,21 @@ static struct global_attr global_boost = __ATTR(boost, 0644, show_global_boost, store_global_boost); +#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB +static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, + size_t count) +{ + return _store_boost(buf, count); +} + +static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", boost_enabled); +} + +static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb); +#endif + static int check_est_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data(cpuid); @@ -889,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) static struct freq_attr *acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* this is a placeholder for cpb, do not remove */ NULL, }; @@ -960,6 +981,27 @@ static int __init acpi_cpufreq_init(void) if (ret) return ret; +#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB + /* this is a sysfs file with a strange name and an even stranger + * semantic - per CPU instantiation, but system global effect. + * Lets enable it only on AMD CPUs for compatibility reasons and + * only if configured. This is considered legacy code, which + * will probably be removed at some point in the future. + */ + if (check_amd_hwpstate_cpu(0)) { + struct freq_attr **iter; + + pr_debug("adding sysfs entry for cpb\n"); + + for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) + ; + + /* make sure there is a terminator behind it */ + if (iter[1] == NULL) + *iter = &cpb; + } +#endif + ret = cpufreq_register_driver(&acpi_cpufreq_driver); if (ret) free_acpi_perf_data(); -- cgit v0.10.2 From e1f0b8e9b04a262834ed111e605e5d215685dfab Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 4 Sep 2012 08:28:09 +0000 Subject: cpufreq: Remove support for hardware P-state chips from powernow-k8 These chips are now supported by acpi-cpufreq, so we can delete all the code handling them. Andre: Tighten the deprecation warning message. Trigger load of acpi-cpufreq and let the load of the module finally fail. This avoids the problem of users ending up without any cpufreq support after the transition. Signed-off-by: Matthew Garrett Signed-off-by: Andre Przywara Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 9531fc2..b99790f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -19,7 +19,7 @@ obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o # K8 systems. ACPI is preferred to all other hardware-specific drivers. 
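The placeholder scheme used in the cpb patch above, a reserved NULL slot ahead of the terminator in a NULL-terminated pointer array that is only filled in when the feature applies, looks like this in isolation (the attribute names here are made up):

#include <stdio.h>

static const char attr_freqs[] = "scaling_available_frequencies";
static const char attr_cpb[] = "cpb";

/* One reserved slot before the final terminator, as in acpi_cpufreq_attr[]. */
static const char *attrs[] = {
	attr_freqs,
	NULL,	/* placeholder for cpb, do not remove */
	NULL,	/* terminator */
};

int main(void)
{
	const char **iter;

	/* walk to the first NULL, which is the placeholder... */
	for (iter = attrs; *iter != NULL; iter++)
		;
	/* ...and fill it only if the terminator is still behind it */
	if (iter[1] == NULL)
		*iter = attr_cpb;

	for (iter = attrs; *iter != NULL; iter++)
		printf("%s\n", *iter);
	return 0;
}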
# speedstep-* is preferred over p4-clockmod. -obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index f1035a9..0b19faf 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -49,22 +49,12 @@ #define PFX "powernow-k8: " #define VERSION "version 2.20.00" #include "powernow-k8.h" -#include "mperf.h" /* serialize freq changes */ static DEFINE_MUTEX(fidvid_mutex); static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); -static int cpu_family = CPU_OPTERON; - -/* array to map SW pstate number to acpi state */ -static u32 ps_to_as[8]; - -/* core performance boost */ -static bool cpb_capable, cpb_enabled; -static struct msr __percpu *msrs; - static struct cpufreq_driver cpufreq_amd64_driver; #ifndef CONFIG_SMP @@ -86,12 +76,6 @@ static u32 find_khz_freq_from_fid(u32 fid) return 1000 * find_freq_from_fid(fid); } -static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, - u32 pstate) -{ - return data[ps_to_as[pstate]].frequency; -} - /* Return the vco fid for an input fid * * Each "low" fid has corresponding "high" fid, and you can get to "low" fids @@ -114,9 +98,6 @@ static int pending_bit_stuck(void) { u32 lo, hi; - if (cpu_family == CPU_HW_PSTATE) - return 0; - rdmsr(MSR_FIDVID_STATUS, lo, hi); return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; } @@ -130,20 +111,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) u32 lo, hi; u32 i = 0; - if (cpu_family == CPU_HW_PSTATE) { - rdmsr(MSR_PSTATE_STATUS, lo, hi); - i = lo & HW_PSTATE_MASK; - data->currpstate = i; - - /* - * a workaround for family 11h erratum 311 might cause - * an "out-of-range Pstate if the core is in Pstate-0 - */ - if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps)) - data->currpstate = HW_PSTATE_0; - - return 0; - } do { if (i++ > 10000) { pr_debug("detected change pending stuck\n"); @@ -300,14 +267,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data, return 0; } -/* Change hardware pstate by single MSR write */ -static int transition_pstate(struct powernow_k8_data *data, u32 pstate) -{ - wrmsr(MSR_PSTATE_CTRL, pstate, 0); - data->currpstate = pstate; - return 0; -} - /* Change Opteron/Athlon64 fid and vid, by the 3 phases. 
*/ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid) @@ -524,8 +483,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, static const struct x86_cpu_id powernow_k8_ids[] = { /* IO based frequency switching */ { X86_VENDOR_AMD, 0xf }, - /* MSR based frequency switching supported */ - X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), {} }; MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids); @@ -561,15 +518,8 @@ static void check_supported_cpu(void *_rc) "Power state transitions not supported\n"); return; } - } else { /* must be a HW Pstate capable processor */ - cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); - if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) - cpu_family = CPU_HW_PSTATE; - else - return; + *rc = 0; } - - *rc = 0; } static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, @@ -633,18 +583,11 @@ static void print_basics(struct powernow_k8_data *data) for (j = 0; j < data->numps; j++) { if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { - if (cpu_family == CPU_HW_PSTATE) { - printk(KERN_INFO PFX - " %d : pstate %d (%d MHz)\n", j, - data->powernow_table[j].index, - data->powernow_table[j].frequency/1000); - } else { printk(KERN_INFO PFX "fid 0x%x (%d MHz), vid 0x%x\n", data->powernow_table[j].index & 0xff, data->powernow_table[j].frequency/1000, data->powernow_table[j].index >> 8); - } } } if (data->batps) @@ -652,20 +595,6 @@ static void print_basics(struct powernow_k8_data *data) data->batps); } -static u32 freq_from_fid_did(u32 fid, u32 did) -{ - u32 mhz = 0; - - if (boot_cpu_data.x86 == 0x10) - mhz = (100 * (fid + 0x10)) >> did; - else if (boot_cpu_data.x86 == 0x11) - mhz = (100 * (fid + 8)) >> did; - else - BUG(); - - return mhz * 1000; -} - static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) { @@ -825,7 +754,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, { u64 control; - if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) + if (!data->acpi_data.state_count) return; control = data->acpi_data.states[index].control; @@ -876,10 +805,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) data->numps = data->acpi_data.state_count; powernow_k8_acpi_pst_values(data, 0); - if (cpu_family == CPU_HW_PSTATE) - ret_val = fill_powernow_table_pstate(data, powernow_table); - else - ret_val = fill_powernow_table_fidvid(data, powernow_table); + ret_val = fill_powernow_table_fidvid(data, powernow_table); if (ret_val) goto err_out_mem; @@ -916,51 +842,6 @@ err_out: return ret_val; } -static int fill_powernow_table_pstate(struct powernow_k8_data *data, - struct cpufreq_frequency_table *powernow_table) -{ - int i; - u32 hi = 0, lo = 0; - rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi); - data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; - - for (i = 0; i < data->acpi_data.state_count; i++) { - u32 index; - - index = data->acpi_data.states[i].control & HW_PSTATE_MASK; - if (index > data->max_hw_pstate) { - printk(KERN_ERR PFX "invalid pstate %d - " - "bad value %d.\n", i, index); - printk(KERN_ERR PFX "Please report to BIOS " - "manufacturer\n"); - invalidate_entry(powernow_table, i); - continue; - } - - ps_to_as[index] = i; - - /* Frequency may be rounded for these */ - if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) - || boot_cpu_data.x86 == 0x11) { - - rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); - if (!(hi & HW_PSTATE_VALID_MASK)) { - pr_debug("invalid pstate %d, ignoring\n", 
index); - invalidate_entry(powernow_table, i); - continue; - } - - powernow_table[i].frequency = - freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); - } else - powernow_table[i].frequency = - data->acpi_data.states[i].core_frequency * 1000; - - powernow_table[i].index = index; - } - return 0; -} - static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) { @@ -1037,15 +918,7 @@ static int get_transition_latency(struct powernow_k8_data *data) max_latency = cur_latency; } if (max_latency == 0) { - /* - * Fam 11h and later may return 0 as transition latency. This - * is intended and means "very fast". While cpufreq core and - * governors currently can handle that gracefully, better set it - * to 1 to avoid problems in the future. - */ - if (boot_cpu_data.x86 < 0x11) - printk(KERN_ERR FW_WARN PFX "Invalid zero transition " - "latency\n"); + pr_err(FW_WARN PFX "Invalid zero transition latency\n"); max_latency = 1; } /* value in usecs, needs to be in nanoseconds */ @@ -1105,40 +978,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, return res; } -/* Take a frequency, and issue the hardware pstate transition command */ -static int transition_frequency_pstate(struct powernow_k8_data *data, - unsigned int index) -{ - u32 pstate = 0; - int res, i; - struct cpufreq_freqs freqs; - - pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); - - /* get MSR index for hardware pstate transition */ - pstate = index & HW_PSTATE_MASK; - if (pstate > data->max_hw_pstate) - return -EINVAL; - - freqs.old = find_khz_freq_from_pstate(data->powernow_table, - data->currpstate); - freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - - for_each_cpu(i, data->available_cores) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - } - - res = transition_pstate(data, pstate); - freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - - for_each_cpu(i, data->available_cores) { - freqs.cpu = i; - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } - return res; -} - /* Driver entry point to switch to the target frequency */ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) @@ -1180,18 +1019,15 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (query_current_values_with_pending_wait(data)) goto err_out; - if (cpu_family != CPU_HW_PSTATE) { - pr_debug("targ: curr fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + pr_debug("targ: curr fid 0x%x, vid 0x%x\n", + data->currfid, data->currvid); - if ((checkvid != data->currvid) || - (checkfid != data->currfid)) { - printk(KERN_INFO PFX - "error - out of sync, fix 0x%x 0x%x, " - "vid 0x%x 0x%x\n", - checkfid, data->currfid, - checkvid, data->currvid); - } + if ((checkvid != data->currvid) || + (checkfid != data->currfid)) { + pr_info(PFX + "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", + checkfid, data->currfid, + checkvid, data->currvid); } if (cpufreq_frequency_table_target(pol, data->powernow_table, @@ -1202,11 +1038,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, powernow_k8_acpi_pst_values(data, newstate); - if (cpu_family == CPU_HW_PSTATE) - ret = transition_frequency_pstate(data, - data->powernow_table[newstate].index); - else - ret = transition_frequency_fidvid(data, newstate); + ret = transition_frequency_fidvid(data, newstate); + if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); ret = 1; @@ -1215,11 +1048,7 @@ static int 
powernowk8_target(struct cpufreq_policy *pol, } mutex_unlock(&fidvid_mutex); - if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, - data->powernow_table[newstate].index); - else - pol->cur = find_khz_freq_from_fid(data->currfid); + pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; err_out: @@ -1259,8 +1088,7 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) return; } - if (cpu_family == CPU_OPTERON) - fidvid_msr_init(); + fidvid_msr_init(); init_on_cpu->rc = 0; } @@ -1277,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) struct powernow_k8_data *data; struct init_on_cpu init_on_cpu; int rc; - struct cpuinfo_x86 *c = &cpu_data(pol->cpu); if (!cpu_online(pol->cpu)) return -ENODEV; @@ -1293,7 +1120,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) } data->cpu = pol->cpu; - data->currpstate = HW_PSTATE_INVALID; if (powernow_k8_cpu_init_acpi(data)) { /* @@ -1330,17 +1156,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) if (rc != 0) goto err_out_exit_acpi; - if (cpu_family == CPU_HW_PSTATE) - cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); - else - cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); + cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); data->available_cores = pol->cpus; - if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, - data->currpstate); - else - pol->cur = find_khz_freq_from_fid(data->currfid); + pol->cur = find_khz_freq_from_fid(data->currfid); pr_debug("policy current frequency %d kHz\n", pol->cur); /* min/max the cpu is capable of */ @@ -1352,18 +1171,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) return -EINVAL; } - /* Check for APERF/MPERF support in hardware */ - if (cpu_has(c, X86_FEATURE_APERFMPERF)) - cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf; - cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); - if (cpu_family == CPU_HW_PSTATE) - pr_debug("cpu_init done, current pstate 0x%x\n", - data->currpstate); - else - pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", - data->currfid, data->currvid); + pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", + data->currfid, data->currvid); per_cpu(powernow_data, pol->cpu) = data; @@ -1416,88 +1227,15 @@ static unsigned int powernowk8_get(unsigned int cpu) if (err) goto out; - if (cpu_family == CPU_HW_PSTATE) - khz = find_khz_freq_from_pstate(data->powernow_table, - data->currpstate); - else - khz = find_khz_freq_from_fid(data->currfid); + khz = find_khz_freq_from_fid(data->currfid); out: return khz; } -static void _cpb_toggle_msrs(bool t) -{ - int cpu; - - get_online_cpus(); - - rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); - - for_each_cpu(cpu, cpu_online_mask) { - struct msr *reg = per_cpu_ptr(msrs, cpu); - if (t) - reg->l &= ~BIT(25); - else - reg->l |= BIT(25); - } - wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); - - put_online_cpus(); -} - -/* - * Switch on/off core performance boosting. - * - * 0=disable - * 1=enable. 
- */ -static void cpb_toggle(bool t) -{ - if (!cpb_capable) - return; - - if (t && !cpb_enabled) { - cpb_enabled = true; - _cpb_toggle_msrs(t); - printk(KERN_INFO PFX "Core Boosting enabled.\n"); - } else if (!t && cpb_enabled) { - cpb_enabled = false; - _cpb_toggle_msrs(t); - printk(KERN_INFO PFX "Core Boosting disabled.\n"); - } -} - -static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, - size_t count) -{ - int ret = -EINVAL; - unsigned long val = 0; - - ret = strict_strtoul(buf, 10, &val); - if (!ret && (val == 0 || val == 1) && cpb_capable) - cpb_toggle(val); - else - return -EINVAL; - - return count; -} - -static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", cpb_enabled); -} - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(cpb); - static struct freq_attr *powernow_k8_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, - &cpb, NULL, }; @@ -1513,58 +1251,20 @@ static struct cpufreq_driver cpufreq_amd64_driver = { .attr = powernow_k8_attr, }; -/* - * Clear the boost-disable flag on the CPU_DOWN path so that this cpu - * cannot block the remaining ones from boosting. On the CPU_UP path we - * simply keep the boost-disable flag in sync with the current global - * state. - */ -static int cpb_notify(struct notifier_block *nb, unsigned long action, - void *hcpu) -{ - unsigned cpu = (long)hcpu; - u32 lo, hi; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - - if (!cpb_enabled) { - rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); - lo |= BIT(25); - wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); - } - break; - - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); - lo &= ~BIT(25); - wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); - break; - - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block cpb_nb = { - .notifier_call = cpb_notify, -}; - /* driver entry point for init */ static int __cpuinit powernowk8_init(void) { - unsigned int i, supported_cpus = 0, cpu; + unsigned int i, supported_cpus = 0; int rv; - if (!x86_match_cpu(powernow_k8_ids)) + if (static_cpu_has(X86_FEATURE_HW_PSTATE)) { + pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n"); + request_module("acpi-cpufreq"); return -ENODEV; + } - if (static_cpu_has(X86_FEATURE_HW_PSTATE)) - pr_warn(PFX "support for this CPU is deprecated, use acpi-cpufreq instead.\n"); + if (!x86_match_cpu(powernow_k8_ids)) + return -ENODEV; for_each_online_cpu(i) { int rc; @@ -1576,26 +1276,6 @@ static int __cpuinit powernowk8_init(void) if (supported_cpus != num_online_cpus()) return -ENODEV; - if (boot_cpu_has(X86_FEATURE_CPB)) { - - cpb_capable = true; - - msrs = msrs_alloc(); - if (!msrs) { - printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); - return -ENOMEM; - } - - register_cpu_notifier(&cpb_nb); - - rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); - - for_each_cpu(cpu, cpu_online_mask) { - struct msr *reg = per_cpu_ptr(msrs, cpu); - cpb_enabled |= !(!!(reg->l & BIT(25))); - } - } - rv = cpufreq_register_driver(&cpufreq_amd64_driver); if (!rv) @@ -1603,15 +1283,6 @@ static int __cpuinit powernowk8_init(void) num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); - if (boot_cpu_has(X86_FEATURE_CPB)) { - if (rv < 0) { - unregister_cpu_notifier(&cpb_nb); - msrs_free(msrs); - msrs = NULL; - } else - pr_info(PFX "Core Performance Boosting: %s.\n", - (cpb_enabled ? 
"on" : "off")); - } return rv; } @@ -1620,13 +1291,6 @@ static void __exit powernowk8_exit(void) { pr_debug("exit\n"); - if (boot_cpu_has(X86_FEATURE_CPB)) { - msrs_free(msrs); - msrs = NULL; - - unregister_cpu_notifier(&cpb_nb); - } - cpufreq_unregister_driver(&cpufreq_amd64_driver); } diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h index 3744d26..79329d4 100644 --- a/drivers/cpufreq/powernow-k8.h +++ b/drivers/cpufreq/powernow-k8.h @@ -5,24 +5,11 @@ * http://www.gnu.org/licenses/gpl.html */ -enum pstate { - HW_PSTATE_INVALID = 0xff, - HW_PSTATE_0 = 0, - HW_PSTATE_1 = 1, - HW_PSTATE_2 = 2, - HW_PSTATE_3 = 3, - HW_PSTATE_4 = 4, - HW_PSTATE_5 = 5, - HW_PSTATE_6 = 6, - HW_PSTATE_7 = 7, -}; - struct powernow_k8_data { unsigned int cpu; u32 numps; /* number of p-states */ u32 batps; /* number of p-states supported on battery */ - u32 max_hw_pstate; /* maximum legal hardware pstate */ /* these values are constant when the PSB is used to determine * vid/fid pairings, but are modified during the ->target() call @@ -37,7 +24,6 @@ struct powernow_k8_data { /* keep track of the current fid / vid or pstate */ u32 currvid; u32 currfid; - enum pstate currpstate; /* the powernow_table includes all frequency and vid/fid pairings: * fid are the lower 8 bits of the index, vid are the upper 8 bits. @@ -97,23 +83,6 @@ struct powernow_k8_data { #define MSR_S_HI_CURRENT_VID 0x0000003f #define MSR_C_HI_STP_GNT_BENIGN 0x00000001 - -/* Hardware Pstate _PSS and MSR definitions */ -#define USE_HW_PSTATE 0x00000080 -#define HW_PSTATE_MASK 0x00000007 -#define HW_PSTATE_VALID_MASK 0x80000000 -#define HW_PSTATE_MAX_MASK 0x000000f0 -#define HW_PSTATE_MAX_SHIFT 4 -#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ -#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ -#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ -#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ - -/* define the two driver architectures */ -#define CPU_OPTERON 0 -#define CPU_HW_PSTATE 1 - - /* * There are restrictions frequencies have to follow: * - only 1 entry in the low fid table ( <=1.4GHz ) @@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); -static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); -- cgit v0.10.2 From ec971ea5f2426a0bf9d5cca9a103743918c12978 Mon Sep 17 00:00:00 2001 From: Richard Zhao Date: Wed, 5 Sep 2012 01:08:59 +0200 Subject: ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp If CONFIG_SMP, cpufreq skips loops_per_jiffy update, because different arch has different per-cpu loops_per_jiffy definition. Signed-off-by: Richard Zhao Acked-by: Russell King Acked-by: Santosh Shilimkar Signed-off-by: Shawn Guo Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ebd8ad2..8e03567 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -25,6 +25,7 @@ #include #include #include +#include <linux/cpufreq.h> #include #include @@ -584,3 +585,56 @@ int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } + +#ifdef CONFIG_CPU_FREQ + +static DEFINE_PER_CPU(unsigned long, l_p_j_ref); +static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq); +static unsigned long global_l_p_j_ref; +static unsigned long global_l_p_j_ref_freq; + +static int cpufreq_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + int cpu = freq->cpu; + + if (freq->flags & CPUFREQ_CONST_LOOPS) + return NOTIFY_OK; + + if (!per_cpu(l_p_j_ref, cpu)) { + per_cpu(l_p_j_ref, cpu) = + per_cpu(cpu_data, cpu).loops_per_jiffy; + per_cpu(l_p_j_ref_freq, cpu) = freq->old; + if (!global_l_p_j_ref) { + global_l_p_j_ref = loops_per_jiffy; + global_l_p_j_ref_freq = freq->old; + } + } + + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || + (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { + loops_per_jiffy = cpufreq_scale(global_l_p_j_ref, + global_l_p_j_ref_freq, + freq->new); + per_cpu(cpu_data, cpu).loops_per_jiffy = + cpufreq_scale(per_cpu(l_p_j_ref, cpu), + per_cpu(l_p_j_ref_freq, cpu), + freq->new); + } + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_notifier = { + .notifier_call = cpufreq_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + return cpufreq_register_notifier(&cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +#endif -- cgit v0.10.2 From b496dfbc94ab86f970ef0167eaabe51f930aa5fb Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Wed, 5 Sep 2012 01:09:12 +0200 Subject: PM / OPP: Initialize OPP table from device tree With a lot of devices booting from device tree nowadays, the OPP table needs to be initialized from device tree as well. The patch adds a helper function of_init_opp_table together with a binding doc for that purpose. Signed-off-by: Shawn Guo Acked-by: Rob Herring Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt new file mode 100644 index 0000000..74499e5 --- /dev/null +++ b/Documentation/devicetree/bindings/power/opp.txt @@ -0,0 +1,25 @@ +* Generic OPP Interface + +SoCs have a standard set of tuples consisting of frequency and +voltage pairs that the device will support per voltage domain. These +are called Operating Performance Points or OPPs. + +Properties: +- operating-points: An array of 2-tuple items, and each item consists + of frequency and voltage like <freq-kHz vol-uV>.
+ freq: clock frequency in kHz + vol: voltage in microvolt + +Examples: + +cpu@0 { + compatible = "arm,cortex-a9"; + reg = <0>; + next-level-cache = <&L2>; + operating-points = < + /* kHz uV */ + 792000 1100000 + 396000 950000 + 198000 850000 + >; +}; diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index ac993ea..d946864 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -22,6 +22,7 @@ #include #include #include +#include <linux/of.h> /* * Internal data structure organization with the OPP layer library is as @@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev) return &dev_opp->head; } + +#ifdef CONFIG_OF +/** + * of_init_opp_table() - Initialize opp table from device tree + * @dev: device pointer used to look up device OPPs. + * + * Register the initial OPP table with the OPP library for given device. + */ +int of_init_opp_table(struct device *dev) +{ + const struct property *prop; + const __be32 *val; + int nr; + + prop = of_find_property(dev->of_node, "operating-points", NULL); + if (!prop) + return -ENODEV; + if (!prop->value) + return -ENODATA; + + /* + * Each OPP is a set of tuples consisting of frequency and + * voltage like <freq-kHz vol-uV>. + */ + nr = prop->length / sizeof(u32); + if (nr % 2) { + dev_err(dev, "%s: Invalid OPP list\n", __func__); + return -EINVAL; + } + + val = prop->value; + while (nr) { + unsigned long freq = be32_to_cpup(val++) * 1000; + unsigned long volt = be32_to_cpup(val++); + + if (opp_add(dev, freq, volt)) + dev_warn(dev, "%s: Failed to add OPP %ld\n", + __func__, freq); + nr -= 2; + } + + return 0; +} +#endif diff --git a/include/linux/opp.h b/include/linux/opp.h index 2a4e5fa..214e0ebc 100644 --- a/include/linux/opp.h +++ b/include/linux/opp.h @@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq); struct srcu_notifier_head *opp_get_notifier(struct device *dev); +#ifdef CONFIG_OF +int of_init_opp_table(struct device *dev); +#else +static inline int of_init_opp_table(struct device *dev) +{ + return -EINVAL; +} +#endif /* CONFIG_OF */ #else static inline unsigned long opp_get_voltage(struct opp *opp) { -- cgit v0.10.2 From 95ceafd46359dfd901f9d3b881b33d3036e4b0ce Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 6 Sep 2012 07:09:11 +0000 Subject: cpufreq: Add a generic cpufreq-cpu0 driver It adds a generic cpufreq driver for CPU0 frequency management based on clk, regulator, OPP and device tree support. It can support both uniprocessor (UP) and those symmetric multiprocessor (SMP) systems which share clock and voltage across all CPUs. Signed-off-by: Shawn Guo Acked-by: Santosh Shilimkar Tested-by: AnilKumar Ch Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt new file mode 100644 index 0000000..4416ccc --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt @@ -0,0 +1,55 @@ +Generic CPU0 cpufreq driver + +It is a generic cpufreq driver for CPU0 frequency management. It +supports both uniprocessor (UP) and symmetric multiprocessor (SMP) +systems which share clock and voltage across all CPUs. + +Both required and optional properties listed below must be defined +under node /cpus/cpu@0. + +Required properties: +- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt + for details + +Optional properties: +- clock-latency: Specify the possible maximum transition latency for clock, + in unit of nanoseconds.
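Stripped of the device-tree plumbing, the parsing loop of of_init_opp_table() above walks a flat array of kHz/uV pairs; a self-contained sketch with a stub standing in for opp_add() (the pair values are copied from the binding example):

#include <stdio.h>

/* Stand-in for opp_add(); the real one registers the pair with the OPP library. */
static int opp_add_stub(unsigned long freq, unsigned long volt)
{
	printf("OPP: %lu Hz at %lu uV\n", freq, volt);
	return 0;
}

int main(void)
{
	/* Flattened "operating-points" property: kHz/uV pairs. */
	static const unsigned int prop[] = {
		792000, 1100000,
		396000,  950000,
		198000,  850000,
	};
	int nr = sizeof(prop) / sizeof(prop[0]);
	const unsigned int *val = prop;

	if (nr % 2)
		return 1;	/* malformed list: values must come in pairs */

	while (nr) {
		unsigned long freq = (unsigned long)*val++ * 1000;	/* kHz -> Hz */
		unsigned long volt = *val++;

		opp_add_stub(freq, volt);
		nr -= 2;
	}
	return 0;
}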
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage. + +Examples: + +cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "arm,cortex-a9"; + reg = <0>; + next-level-cache = <&L2>; + operating-points = < + /* kHz uV */ + 792000 1100000 + 396000 950000 + 198000 850000 + >; + clock-latency = <61036>; /* two CLK32 periods */ + }; + + cpu@1 { + compatible = "arm,cortex-a9"; + reg = <1>; + next-level-cache = <&L2>; + }; + + cpu@2 { + compatible = "arm,cortex-a9"; + reg = <2>; + next-level-cache = <&L2>; + }; + + cpu@3 { + compatible = "arm,cortex-a9"; + reg = <3>; + next-level-cache = <&L2>; + }; +}; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index e24a2a1..ea512f4 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. +config GENERIC_CPUFREQ_CPU0 + bool "Generic CPU0 cpufreq driver" + depends on HAVE_CLK && REGULATOR && PM_OPP && OF + select CPU_FREQ_TABLE + help + This adds a generic cpufreq driver for CPU0 frequency management. + It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) + systems which share clock and voltage across all CPUs. + + If in doubt, say N. + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b99790f..1bc90e1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -13,6 +13,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o +obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o + ################################################################################## # x86 drivers. # Link order matters. K8 is preferred to ACPI because of firmware bugs in early diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c new file mode 100644 index 0000000..e915827 --- /dev/null +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * The OPP code in function cpu0_set_target() is reused from + * drivers/cpufreq/omap-cpufreq.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/opp.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +static unsigned int transition_latency; +static unsigned int voltage_tolerance; /* in percentage */ + +static struct device *cpu_dev; +static struct clk *cpu_clk; +static struct regulator *cpu_reg; +static struct cpufreq_frequency_table *freq_table; + +static int cpu0_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static unsigned int cpu0_get_speed(unsigned int cpu) +{ + return clk_get_rate(cpu_clk) / 1000; +} + +static int cpu0_set_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + struct cpufreq_freqs freqs; + struct opp *opp; + unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0; + unsigned int index, cpu; + int ret; + + ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, + relation, &index); + if (ret) { + pr_err("failed to match target frequency %d: %d\n", + target_freq, ret); + return ret; + } + + freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); + if ((long)freq_Hz < 0) + freq_Hz = freq_table[index].frequency * 1000; + freqs.new = freq_Hz / 1000; + freqs.old = clk_get_rate(cpu_clk) / 1000; + + if (freqs.old == freqs.new) + return 0; + + for_each_online_cpu(cpu) { + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + if (cpu_reg) { + opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); + if (IS_ERR(opp)) { + pr_err("failed to find OPP for %ld\n", freq_Hz); + return PTR_ERR(opp); + } + volt = opp_get_voltage(opp); + tol = volt * voltage_tolerance / 100; + volt_old = regulator_get_voltage(cpu_reg); + } + + pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", + freqs.old / 1000, volt_old ? volt_old / 1000 : -1, + freqs.new / 1000, volt ? volt / 1000 : -1); + + /* scaling up? scale voltage before frequency */ + if (cpu_reg && freqs.new > freqs.old) { + ret = regulator_set_voltage_tol(cpu_reg, volt, tol); + if (ret) { + pr_err("failed to scale voltage up: %d\n", ret); + freqs.new = freqs.old; + return ret; + } + } + + ret = clk_set_rate(cpu_clk, freqs.new * 1000); + if (ret) { + pr_err("failed to set clock rate: %d\n", ret); + if (cpu_reg) + regulator_set_voltage_tol(cpu_reg, volt_old, tol); + return ret; + } + + /* scaling down? scale voltage after frequency */ + if (cpu_reg && freqs.new < freqs.old) { + ret = regulator_set_voltage_tol(cpu_reg, volt, tol); + if (ret) { + pr_err("failed to scale voltage down: %d\n", ret); + clk_set_rate(cpu_clk, freqs.old * 1000); + freqs.new = freqs.old; + return ret; + } + } + + for_each_online_cpu(cpu) { + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + +static int cpu0_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret; + + if (policy->cpu != 0) + return -EINVAL; + + ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (ret) { + pr_err("invalid frequency table: %d\n", ret); + return ret; + } + + policy->cpuinfo.transition_latency = transition_latency; + policy->cur = clk_get_rate(cpu_clk) / 1000; + + /* + * The driver only supports the SMP configuration where all processors + * share clock and voltage. Use the cpufreq affected_cpus + * interface to have all CPUs scaled together.
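+	 * (CPUFREQ_SHARED_TYPE_ANY and cpumask_setall() below are what publish + * this single shared policy to the cpufreq core.)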
+ */ + policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; + cpumask_setall(policy->cpus); + + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + + return 0; +} + +static int cpu0_cpufreq_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + + return 0; +} + +static struct freq_attr *cpu0_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver cpu0_cpufreq_driver = { + .flags = CPUFREQ_STICKY, + .verify = cpu0_verify_speed, + .target = cpu0_set_target, + .get = cpu0_get_speed, + .init = cpu0_cpufreq_init, + .exit = cpu0_cpufreq_exit, + .name = "generic_cpu0", + .attr = cpu0_cpufreq_attr, +}; + +static int __devinit cpu0_cpufreq_driver_init(void) +{ + struct device_node *np; + int ret; + + np = of_find_node_by_path("/cpus/cpu@0"); + if (!np) { + pr_err("failed to find cpu0 node\n"); + return -ENOENT; + } + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + pr_err("failed to get cpu0 device\n"); + ret = -ENODEV; + goto out_put_node; + } + + cpu_dev->of_node = np; + + cpu_clk = clk_get(cpu_dev, NULL); + if (IS_ERR(cpu_clk)) { + ret = PTR_ERR(cpu_clk); + pr_err("failed to get cpu0 clock: %d\n", ret); + goto out_put_node; + } + + cpu_reg = regulator_get(cpu_dev, "cpu0"); + if (IS_ERR(cpu_reg)) { + pr_warn("failed to get cpu0 regulator\n"); + cpu_reg = NULL; + } + + ret = of_init_opp_table(cpu_dev); + if (ret) { + pr_err("failed to init OPP table: %d\n", ret); + goto out_put_node; + } + + ret = opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + pr_err("failed to init cpufreq table: %d\n", ret); + goto out_put_node; + } + + of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); + + if (of_property_read_u32(np, "clock-latency", &transition_latency)) + transition_latency = CPUFREQ_ETERNAL; + + if (cpu_reg) { + struct opp *opp; + unsigned long min_uV, max_uV; + int i; + + /* + * OPP is maintained in order of increasing frequency, and + * freq_table initialised from OPP is therefore sorted in the + * same order. + */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + ; + opp = opp_find_freq_exact(cpu_dev, + freq_table[0].frequency * 1000, true); + min_uV = opp_get_voltage(opp); + opp = opp_find_freq_exact(cpu_dev, + freq_table[i-1].frequency * 1000, true); + max_uV = opp_get_voltage(opp); + ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); + if (ret > 0) + transition_latency += ret * 1000; + } + + ret = cpufreq_register_driver(&cpu0_cpufreq_driver); + if (ret) { + pr_err("failed register driver: %d\n", ret); + goto out_free_table; + } + + of_node_put(np); + return 0; + +out_free_table: + opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_node: + of_node_put(np); + return ret; +} +late_initcall(cpu0_cpufreq_driver_init); + +MODULE_AUTHOR("Shawn Guo "); +MODULE_DESCRIPTION("Generic CPU0 cpufreq driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From bbdc18a3fb6740619f0d037241c85dc6cd4517aa Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 10 Sep 2012 12:05:18 +0000 Subject: properly __init-annotate pm_sysrq_init() This is used only as argument to subsys_initcall(). Signed-off-by: Jan Beulich Signed-off-by: Rafael J. 
Wysocki diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index d523593..68197a4 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { .enable_mask = SYSRQ_ENABLE_BOOT, }; -static int pm_sysrq_init(void) +static int __init pm_sysrq_init(void) { register_sysrq_key('o', &sysrq_poweroff_op); return 0; -- cgit v0.10.2 From f26365179d13c18591539d0518aa9de568c70ee0 Mon Sep 17 00:00:00 2001 From: Michal Pecio Date: Fri, 14 Sep 2012 21:07:39 +0200 Subject: cpufreq / ondemand: update frequency when limits are relaxed Reevaluate CPU load and update frequency immediately whenever limits are changed. Currently ondemand doesn't do that when limits are relaxed, wasting power on systems with relatively low sampling rate. Signed-off-by: Michal Pecio Reviewed-by: Thomas Renninger Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 836e9b0..9479fb3 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, else if (policy->min > this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); + dbs_check_cpu(this_dbs_info); mutex_unlock(&this_dbs_info->timer_mutex); break; } -- cgit v0.10.2 From 2d8fced75cfa8c513ba1cbe682cb30207d941f2b Mon Sep 17 00:00:00 2001 From: Michal Pecio Date: Fri, 14 Sep 2012 21:07:48 +0200 Subject: cpufreq: conservative: update frequency when limits are relaxed Reevaluate CPU load and update frequency immediately whenever limits are changed. Currently conservative doesn't do that when limits are relaxed, wasting power on systems with relatively low sampling rate. Signed-off-by: Michal Pecio Reviewed-by: Thomas Renninger Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index a1563d7..b75dc2c 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -584,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, __cpufreq_driver_target( this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); + dbs_check_cpu(this_dbs_info); mutex_unlock(&this_dbs_info->timer_mutex); break; -- cgit v0.10.2 From 21ce35dc79346eb206f3271f3cc07fb9a5452ae7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 14 Sep 2012 21:10:46 +0200 Subject: sections: fix section conflicts in drivers/cpufreq Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h index cbf48fb..e2dc436 100644 --- a/drivers/cpufreq/longhaul.h +++ b/drivers/cpufreq/longhaul.h @@ -56,7 +56,7 @@ union msr_longhaul { /* * VIA C3 Samuel 1 & Samuel 2 (stepping 0) */ -static const int __cpuinitdata samuel1_mults[16] = { +static const int __cpuinitconst samuel1_mults[16] = { -1, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = { -1, /* 1111 -> RESERVED */ }; -static const int __cpuinitdata samuel1_eblcr[16] = { +static const int __cpuinitconst samuel1_eblcr[16] = { 50, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = { /* * VIA C3 Samuel2 Stepping 1->15 */ -static const int __cpuinitdata samuel2_eblcr[16] = { +static const int __cpuinitconst samuel2_eblcr[16] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = { /* * VIA C3 Ezra */ -static const int __cpuinitdata ezra_mults[16] = { +static const int __cpuinitconst ezra_mults[16] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = { 120, /* 1111 -> 12.0x */ }; -static const int __cpuinitdata ezra_eblcr[16] = { +static const int __cpuinitconst ezra_eblcr[16] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = { /* * VIA C3 (Ezra-T) [C5M]. */ -static const int __cpuinitdata ezrat_mults[32] = { +static const int __cpuinitconst ezrat_mults[32] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = { -1, /* 1111 -> RESERVED (12.0x) */ }; -static const int __cpuinitdata ezrat_eblcr[32] = { +static const int __cpuinitconst ezrat_eblcr[32] = { 50, /* 0000 -> 5.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = { /* * VIA C3 Nehemiah */ -static const int __cpuinitdata nehemiah_mults[32] = { +static const int __cpuinitconst nehemiah_mults[32] = { 100, /* 0000 -> 10.0x */ -1, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ @@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = { -1, /* 1111 -> 12.0x */ }; -static const int __cpuinitdata nehemiah_eblcr[32] = { +static const int __cpuinitconst nehemiah_eblcr[32] = { 50, /* 0000 -> 5.0x */ 160, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ @@ -315,7 +315,7 @@ struct mV_pos { unsigned short pos; }; -static const struct mV_pos __cpuinitdata vrm85_mV[32] = { +static const struct mV_pos __cpuinitconst vrm85_mV[32] = { {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, @@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = { {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} }; -static const unsigned char __cpuinitdata mV_vrm85[32] = { +static const unsigned char __cpuinitconst mV_vrm85[32] = { 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 }; -static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = { +static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = 
{
	{1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
	{1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
	{1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
 	{675, 3}, {650, 2}, {625, 1}, {600, 0}
 };

-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
 	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
 	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
 	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
-- cgit v0.10.2
From cd664cc3a574b30988476143c1dcc9298b1fa531 Mon Sep 17 00:00:00 2001
From: Richard Zhao
Date: Fri, 14 Sep 2012 21:15:05 +0200
Subject: cpufreq: OMAP: remove loops_per_jiffy recalculate for smp

With the ARM SMP common code recalculating loops_per_jiffy in a cpufreq
transition notifier call, the loops_per_jiffy recalculation in the
omap-cpufreq driver becomes redundant. Remove it.

Signed-off-by: Richard Zhao
Acked-by: Santosh Shilimkar
Signed-off-by: Shawn Guo
Acked-by: Kevin Hilman
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index b47034e..6e22f44 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -40,16 +40,6 @@
 /* OPP tolerance in percentage */
 #define OPP_TOLERANCE 4

-#ifdef CONFIG_SMP
-struct lpj_info {
-	unsigned long ref;
-	unsigned int freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
 	}
 	freqs.new = omap_getspeed(policy->cpu);

-#ifdef CONFIG_SMP
-	/*
-	 * Note that loops_per_jiffy is not updated on SMP systems in
-	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-	 * on frequency transition. We need to update all dependent CPUs.
-	 */
-	for_each_cpu(i, policy->cpus) {
-		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-		if (!lpj->freq) {
-			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-			lpj->freq = freqs.old;
-		}
-
-		per_cpu(cpu_data, i).loops_per_jiffy =
-			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-	}
-
-	/* And don't forget to adjust the global one */
-	if (!global_lpj_ref.freq) {
-		global_lpj_ref.ref = loops_per_jiffy;
-		global_lpj_ref.freq = freqs.old;
-	}
-	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-					freqs.new);
-#endif

 done:
 	/* notifiers */
-- cgit v0.10.2
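For context, the ARM common code this commit relies on keeps the delay loop honest from a cpufreq transition notifier. The fragment below is an illustrative sketch of that pattern, not the actual arch/arm/kernel/smp.c code; the variable names are hypothetical. cpufreq_scale(old, div, mult) computes old * mult / div.

	#include <linux/cpufreq.h>
	#include <linux/delay.h>
	#include <linux/notifier.h>

	static unsigned long ref_lpj;	/* loops_per_jiffy at the reference freq */
	static unsigned int ref_freq;	/* reference frequency, in kHz */

	static int lpj_cpufreq_notifier(struct notifier_block *nb,
					unsigned long event, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		/* Remember the boot-time calibration the first time through. */
		if (!ref_freq) {
			ref_freq = freqs->old;
			ref_lpj = loops_per_jiffy;
		}

		/* Scale before a frequency increase and after a decrease, so
		 * udelay() never underestimates while the clock is high. */
		if ((event == CPUFREQ_PRECHANGE && freqs->new > freqs->old) ||
		    (event == CPUFREQ_POSTCHANGE && freqs->new < freqs->old))
			loops_per_jiffy = cpufreq_scale(ref_lpj, ref_freq,
							freqs->new);

		return NOTIFY_OK;
	}

	static struct notifier_block lpj_nb = {
		.notifier_call = lpj_cpufreq_notifier,
	};
	/* registered via cpufreq_register_notifier(&lpj_nb,
	 * CPUFREQ_TRANSITION_NOTIFIER) */

With one such notifier in the common code, every cpufreq driver on the architecture, including omap-cpufreq above, can drop its private copy.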
From 38a991b625ae3898f18149f8fa287338647a4c9f Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Sat, 15 Sep 2012 22:42:54 +0200
Subject: ACPI / processor: remove unused function parameter

The 'device' parameter is used in neither acpi_processor_power_init()
nor acpi_processor_power_exit(). This patch removes it.

Signed-off-by: Daniel Lezcano
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bfc31cb..9c9288b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -475,7 +475,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
 	acpi_processor_get_limit_info(pr);

 	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-		acpi_processor_power_init(pr, device);
+		acpi_processor_power_init(pr);

 	pr->cdev = thermal_cooling_device_register("Processor", device,
 						   &processor_cooling_ops);
@@ -509,7 +509,7 @@ err_remove_sysfs_thermal:
 err_thermal_unregister:
 	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
-	acpi_processor_power_exit(pr, device);
+	acpi_processor_power_exit(pr);

 	return result;
 }
@@ -620,7 +620,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 		return -EINVAL;
 	}

-	acpi_processor_power_exit(pr, device);
+	acpi_processor_power_exit(pr);

 	sysfs_remove_link(&device->dev.kobj, "sysdev");

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index de89624..c46a44a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1216,8 +1216,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)

 static int acpi_processor_registered;

-int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
-				struct acpi_device *device)
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
 {
 	acpi_status status = 0;
 	int retval;
@@ -1281,8 +1280,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	return 0;
 }

-int acpi_processor_power_exit(struct acpi_processor *pr,
-			      struct acpi_device *device)
+int acpi_processor_power_exit(struct acpi_processor *pr)
 {
 	if (disabled_by_idle_boot_param())
 		return 0;
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index db427fa..1d3c1a6 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -324,12 +324,10 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
-int acpi_processor_power_init(struct acpi_processor *pr,
-			      struct acpi_device *device);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr,
-			      struct acpi_device *device);
 int acpi_processor_suspend(struct device *dev);
 int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
-- cgit v0.10.2
From ed1511b80c92b1b1a8dde567adc090e470a4344a Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Sat, 15 Sep 2012 22:43:05 +0200
Subject: ACPI / processor: remove pointless variable initialization

The 'errata' variable is a global variable which is set to zero, so
there is no need to do that with a memset in the init function.

Signed-off-by: Daniel Lezcano
Signed-off-by: Rafael J.
Wysocki diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 9c9288b..e78c2a5 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -905,8 +905,6 @@ static int __init acpi_processor_init(void) if (acpi_disabled) return 0; - memset(&errata, 0, sizeof(errata)); - result = acpi_bus_register_driver(&acpi_processor_driver); if (result < 0) return result; -- cgit v0.10.2 From 3d339dcbb56d8d70c1b959aff87d74adc3a84eea Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 17 Sep 2012 23:01:56 +0200 Subject: cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure Currently we have the cpuidle_device field in the acpi_processor_power structure. This adds a dependency between processor.h and cpuidle.h Although it is not a real problem, removing this dependency has the benefit of separating a bit more the cpuidle code from the rest of the acpi code. Also, the compilation should be a bit improved because we do no longer include cpuidle.h in processor.h. The preprocessor was generating 30418 loc and with this patch it generates 30256 loc for processor_thermal.c, a file which is not concerned at all by cpuidle, like processor_perflib.c and processor_throttling.c. That may sound ridiculous, but "small streams make big rivers" :P This patch moves this field into a static global per cpu variable like what is done in the intel_idle driver. Signed-off-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c46a44a..3655ab9 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -79,6 +79,8 @@ module_param(bm_check_disable, uint, 0000); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); +static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); + static int disabled_by_idle_boot_param(void) { return boot_option_idle_override == IDLE_POLL || @@ -998,7 +1000,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; struct cpuidle_state_usage *state_usage; - struct cpuidle_device *dev = &pr->power.dev; + struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); if (!pr->flags.power_setup_done) return -EINVAL; @@ -1130,6 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) int acpi_processor_hotplug(struct acpi_processor *pr) { int ret = 0; + struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); if (disabled_by_idle_boot_param()) return 0; @@ -1145,11 +1148,11 @@ int acpi_processor_hotplug(struct acpi_processor *pr) return -ENODEV; cpuidle_pause_and_lock(); - cpuidle_disable_device(&pr->power.dev); + cpuidle_disable_device(dev); acpi_processor_get_power_info(pr); if (pr->flags.power) { acpi_processor_setup_cpuidle_cx(pr); - ret = cpuidle_enable_device(&pr->power.dev); + ret = cpuidle_enable_device(dev); } cpuidle_resume_and_unlock(); @@ -1160,6 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) { int cpu; struct acpi_processor *_pr; + struct cpuidle_device *dev; if (disabled_by_idle_boot_param()) return 0; @@ -1190,7 +1194,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) _pr = per_cpu(processors, cpu); if (!_pr || !_pr->flags.power_setup_done) continue; - cpuidle_disable_device(&_pr->power.dev); + dev = per_cpu(acpi_cpuidle_device, cpu); + cpuidle_disable_device(dev); } /* Populate 
Updated C-state information */
@@ -1204,7 +1209,8 @@
 		acpi_processor_get_power_info(_pr);
 		if (_pr->flags.power) {
 			acpi_processor_setup_cpuidle_cx(_pr);
-			cpuidle_enable_device(&_pr->power.dev);
+			dev = per_cpu(acpi_cpuidle_device, cpu);
+			cpuidle_enable_device(dev);
 		}
 	}
 	put_online_cpus();
@@ -1220,6 +1226,7 @@
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
 {
 	acpi_status status = 0;
 	int retval;
+	struct cpuidle_device *dev;
 	static int first_run;

 	if (disabled_by_idle_boot_param())
@@ -1265,11 +1272,18 @@
 		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
 		       acpi_idle_driver.name);
 	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	per_cpu(acpi_cpuidle_device, pr->id) = dev;
+
+	acpi_processor_setup_cpuidle_cx(pr);
+
 	/* Register per-cpu cpuidle_device. Cpuidle driver
 	 * must already be registered before registering device
 	 */
-	acpi_processor_setup_cpuidle_cx(pr);
-	retval = cpuidle_register_device(&pr->power.dev);
+	retval = cpuidle_register_device(dev);
 	if (retval) {
 		if (acpi_processor_registered == 0)
 			cpuidle_unregister_driver(&acpi_idle_driver);
@@ -1282,11 +1296,13 @@
 int acpi_processor_power_exit(struct acpi_processor *pr)
 {
+	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
+
 	if (disabled_by_idle_boot_param())
 		return 0;

 	if (pr->flags.power) {
-		cpuidle_unregister_device(&pr->power.dev);
+		cpuidle_unregister_device(dev);
 		acpi_processor_registered--;
 		if (acpi_processor_registered == 0)
 			cpuidle_unregister_driver(&acpi_idle_driver);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1d3c1a6..555d033 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -3,7 +3,6 @@
 #include <linux/kernel.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/thermal.h>
 #include <asm/acpi.h>
@@ -64,7 +63,6 @@ struct acpi_processor_cx {
 };

 struct acpi_processor_power {
-	struct cpuidle_device dev;
 	struct acpi_processor_cx *state;
 	unsigned long bm_check_timestamp;
 	u32 default_state;
-- cgit v0.10.2
From 997a031107ec962967ce36db9bc500f1fad491c1 Mon Sep 17 00:00:00 2001
From: Feng Hong
Date: Wed, 19 Sep 2012 14:16:00 +0200
Subject: PM / Sleep: use resume event when calling dpm_resume_early

When dpm_suspend_noirq() fails, the state is PMSG_SUSPEND; it should be
changed to PMSG_RESUME when dpm_resume_early() is called.

Signed-off-by: Feng Hong
Signed-off-by: Raul Xiong
Signed-off-by: Neil Zhang
Cc: stable@vger.kernel.org
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0113adc..2700f2e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -996,7 +996,7 @@ int dpm_suspend_end(pm_message_t state)

 	error = dpm_suspend_noirq(state);
 	if (error) {
-		dpm_resume_early(state);
+		dpm_resume_early(resume_event(state));
 		return error;
 	}
-- cgit v0.10.2
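For reference, resume_event() is a small helper in drivers/base/power/main.c that translates a suspend message into its matching resume message, which is exactly what the error path above was missing. Reconstructed for this kernel era, it reads roughly as follows (check the tree for the authoritative version):

	static pm_message_t resume_event(pm_message_t sleep_state)
	{
		switch (sleep_state.event) {
		case PM_EVENT_SUSPEND:
			return PMSG_RESUME;	/* suspend   -> resume  */
		case PM_EVENT_FREEZE:
		case PM_EVENT_QUIESCE:
			return PMSG_RECOVER;	/* freeze    -> recover */
		case PM_EVENT_HIBERNATE:
			return PMSG_RESTORE;	/* hibernate -> restore */
		}
		return PMSG_ON;
	}

Passing the untranslated PMSG_SUSPEND would make the early-resume callbacks believe they were still suspending.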
From fc2fb3a075c206927d3fbad251dae82ba82ccf2d Mon Sep 17 00:00:00 2001
From: Jean Pihet
Date: Wed, 19 Sep 2012 14:17:20 +0200
Subject: PM QoS: Use spinlock in the per-device PM QoS constraints code

The per-device PM QoS locking requires a spinlock to be used. The reasons
are:
 - an alignment with the PM QoS core code, which is used by the per-device
   PM QoS code for the constraints lists management. The PM QoS core code
   uses spinlocks to protect the constraints lists,
 - some drivers need to use the per-device PM QoS functionality from
   interrupt context or spinlock protected context. An example of such a
   driver is the OMAP HSI (high-speed synchronous serial interface) driver,
   which needs to control the IP block idle state depending on the FIFO
   empty state, from interrupt context.

Reported-by: Djamil Elaidi
Signed-off-by: Jean Pihet
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 74a67e0..968a771 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -24,26 +24,32 @@
  * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
  *   API. The notification chain data is stored in a static variable.
  *
- * Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * Notes about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored into the device
  *   dev_pm_info.
  * . To minimize the data usage by the per-device constraints, the data struct
- *   is only allocated at the first call to dev_pm_qos_add_request.
+ *   is only allocated at the first call to dev_pm_qos_add_request.
  * . The data is later free'd when the device is removed from the system.
- * . A global mutex protects the constraints users from the data being
- *   allocated and free'd.
+ *
+ * Notes about locking:
+ * . The dev->power.lock lock protects the constraints list
+ *   (dev->power.constraints) allocation and free, as triggered by the
+ *   driver core code at device insertion and removal,
+ * . A global lock dev_pm_qos_lock protects the constraints list entries
+ *   from any modification and the notifiers registration and unregistration.
+ * . For both locks a spinlock is needed since this code can be called from
+ *   interrupt context or spinlock protected context.
*/ #include #include #include #include -#include #include #include "power.h" -static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_SPINLOCK(dev_pm_qos_lock); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -110,18 +116,19 @@ static int apply_constraint(struct dev_pm_qos_request *req, * @dev: device to allocate data for * * Called at the first call to add_request, for constraint data allocation - * Must be called with the dev_pm_qos_mtx mutex held + * Must be called with the dev_pm_qos_lock lock held */ static int dev_pm_qos_constraints_allocate(struct device *dev) { struct pm_qos_constraints *c; struct blocking_notifier_head *n; + unsigned long flags; - c = kzalloc(sizeof(*c), GFP_KERNEL); + c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) return -ENOMEM; - n = kzalloc(sizeof(*n), GFP_KERNEL); + n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) { kfree(c); return -ENOMEM; @@ -134,9 +141,9 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) c->type = PM_QOS_MIN; c->notifiers = n; - spin_lock_irq(&dev->power.lock); + spin_lock_irqsave(&dev->power.lock, flags); dev->power.constraints = c; - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); return 0; } @@ -150,10 +157,12 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) */ void dev_pm_qos_constraints_init(struct device *dev) { - mutex_lock(&dev_pm_qos_mtx); + unsigned long flags; + + spin_lock_irqsave(&dev_pm_qos_lock, flags); dev->power.constraints = NULL; dev->power.power_state = PMSG_ON; - mutex_unlock(&dev_pm_qos_mtx); + spin_unlock_irqrestore(&dev_pm_qos_lock, flags); } /** @@ -166,6 +175,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) { struct dev_pm_qos_request *req, *tmp; struct pm_qos_constraints *c; + unsigned long flags; /* * If the device's PM QoS resume latency limit has been exposed to user @@ -173,7 +183,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) */ dev_pm_qos_hide_latency_limit(dev); - mutex_lock(&dev_pm_qos_mtx); + spin_lock_irqsave(&dev_pm_qos_lock, flags); dev->power.power_state = PMSG_INVALID; c = dev->power.constraints; @@ -198,7 +208,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) kfree(c); out: - mutex_unlock(&dev_pm_qos_mtx); + spin_unlock_irqrestore(&dev_pm_qos_lock, flags); } /** @@ -223,6 +233,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, s32 value) { int ret = 0; + unsigned long flags; if (!dev || !req) /*guard against callers passing in null */ return -EINVAL; @@ -233,7 +244,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, req->dev = dev; - mutex_lock(&dev_pm_qos_mtx); + spin_lock_irqsave(&dev_pm_qos_lock, flags); if (!dev->power.constraints) { if (dev->power.power_state.event == PM_EVENT_INVALID) { @@ -255,7 +266,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, ret = apply_constraint(req, PM_QOS_ADD_REQ, value); out: - mutex_unlock(&dev_pm_qos_mtx); + spin_unlock_irqrestore(&dev_pm_qos_lock, flags); return ret; } @@ -280,6 +291,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { int ret = 0; + unsigned long flags; if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -288,7 +300,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, "%s() called for unknown object\n", __func__)) return -EINVAL; - mutex_lock(&dev_pm_qos_mtx); + spin_lock_irqsave(&dev_pm_qos_lock, flags); if (req->dev->power.constraints) { if (new_value != req->node.prio) @@ 
-299,7 +311,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 		ret = -ENODEV;
 	}

-	mutex_unlock(&dev_pm_qos_mtx);
+	spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
@@ -319,6 +331,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
 	int ret = 0;
+	unsigned long flags;

 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
@@ -327,7 +340,7 @@
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;

-	mutex_lock(&dev_pm_qos_mtx);
+	spin_lock_irqsave(&dev_pm_qos_lock, flags);

 	if (req->dev->power.constraints) {
 		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
@@ -338,7 +351,7 @@
 		ret = -ENODEV;
 	}

-	mutex_unlock(&dev_pm_qos_mtx);
+	spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
@@ -359,8 +372,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 {
 	int ret = 0;
+	unsigned long flags;

-	mutex_lock(&dev_pm_qos_mtx);
+	spin_lock_irqsave(&dev_pm_qos_lock, flags);

 	if (!dev->power.constraints)
 		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
@@ -370,7 +384,7 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 		ret = blocking_notifier_chain_register(
 				dev->power.constraints->notifiers, notifier);

-	mutex_unlock(&dev_pm_qos_mtx);
+	spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
@@ -389,8 +403,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 			       struct notifier_block *notifier)
 {
 	int retval = 0;
+	unsigned long flags;

-	mutex_lock(&dev_pm_qos_mtx);
+	spin_lock_irqsave(&dev_pm_qos_lock, flags);

 	/* Silently return if the constraints object is not present. */
 	if (dev->power.constraints)
@@ -398,7 +413,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 				dev->power.constraints->notifiers,
 				notifier);

-	mutex_unlock(&dev_pm_qos_mtx);
+	spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
-- cgit v0.10.2
From 88d26136a256576e444db312179e17af6dd0ea87 Mon Sep 17 00:00:00 2001
From: Alan Stern
Date: Wed, 19 Sep 2012 21:59:02 +0200
Subject: PM: Prevent runtime suspend during system resume

This patch (as1591) moves the pm_runtime_get_noresume() and
pm_runtime_put_sync() calls from __device_suspend() and device_resume()
to device_prepare() and device_complete() in the PM core.

The reason for doing this is to make sure that parent devices remain at
full power (i.e., don't go into runtime suspend) while their children are
being resumed from a system sleep.

The PCI core already contained equivalent code to serve the same purpose.
The patch removes the duplicated code, since it is no longer needed. One
of the comments from the PCI core gets moved into the PM core, and a
second comment is added to explain why the _get_noresume and _put_sync
calls are present.

Signed-off-by: Alan Stern
Signed-off-by: Rafael J.
Wysocki diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 2700f2e..077b975 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -565,7 +565,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; char *info = NULL; int error = 0; - bool put = false; TRACE_DEVICE(dev); TRACE_RESUME(0); @@ -583,7 +582,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) goto Unlock; pm_runtime_enable(dev); - put = true; if (dev->pm_domain) { info = "power domain "; @@ -636,9 +634,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); - if (put) - pm_runtime_put_sync(dev); - return error; } @@ -749,6 +744,8 @@ static void device_complete(struct device *dev, pm_message_t state) } device_unlock(dev); + + pm_runtime_put_sync(dev); } /** @@ -1043,12 +1040,16 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (async_error) goto Complete; - pm_runtime_get_noresume(dev); + /* + * If a device configured to wake up the system from sleep states + * has been suspended at run time and there's a resume request pending + * for it, this is equivalent to the device signaling wakeup, so the + * system suspend operation should be aborted. + */ if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { - pm_runtime_put_sync(dev); async_error = -EBUSY; goto Complete; } @@ -1111,12 +1112,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) Complete: complete_all(&dev->power.completion); - if (error) { - pm_runtime_put_sync(dev); + if (error) async_error = error; - } else if (dev->power.is_suspended) { + else if (dev->power.is_suspended) __pm_runtime_disable(dev, false); - } return error; } @@ -1209,6 +1208,14 @@ static int device_prepare(struct device *dev, pm_message_t state) char *info = NULL; int error = 0; + /* + * If a device's parent goes into runtime suspend at the wrong time, + * it won't be possible to resume the device. To prevent this we + * block runtime suspend here, during the prepare phase, and allow + * it again during the complete phase. + */ + pm_runtime_get_noresume(dev); + device_lock(dev); dev->power.wakeup_path = device_may_wakeup(dev); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 185be37..51cd90b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -624,21 +624,6 @@ static int pci_pm_prepare(struct device *dev) int error = 0; /* - * If a PCI device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. - */ - pm_runtime_get_noresume(dev); - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); - - if (pm_wakeup_pending()) { - pm_runtime_put_sync(dev); - return -EBUSY; - } - - /* * PCI devices suspended at run time need to be resumed at this * point, because in general it is necessary to reconfigure them for * system suspend. 
Namely, if the device is supposed to wake up the
@@ -661,8 +646,6 @@ static void pci_pm_complete(struct device *dev)

 	if (drv && drv->pm && drv->pm->complete)
 		drv->pm->complete(dev);
-
-	pm_runtime_put_sync(dev);
 }

 #else /* !CONFIG_PM_SLEEP */
-- cgit v0.10.2
From a77de28662adea391d8ed952e2b9c49b60193e8c Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Wed, 19 Sep 2012 21:59:42 +0200
Subject: cpuidle: remove some empty lines

This mindless patch is just about removing some trailing carriage returns.

[rjw: Changed the subject.]

Signed-off-by: Daniel Lezcano
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 58bf3b1..424bc81 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -41,7 +41,6 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
 	}
 }

-
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -65,7 +64,6 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)

 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_register_driver);

 /**
@@ -96,7 +94,6 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)

 	spin_unlock(&cpuidle_driver_lock);
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);

 struct cpuidle_driver *cpuidle_driver_ref(void)
-- cgit v0.10.2
From 1bae9958f8b0162aadf79e2ac20bcc9a409bb661 Mon Sep 17 00:00:00 2001
From: Axel Lin
Date: Wed, 19 Sep 2012 22:00:10 +0200
Subject: cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name

omap_device_get_by_hwmod_name() returns ERR_PTR on error.

Signed-off-by: Axel Lin
Acked-by: Kevin Hilman
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 6e22f44..65f8e9a 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -266,9 +266,9 @@ static int __init omap_cpufreq_init(void)
 	}

 	mpu_dev = omap_device_get_by_hwmod_name("mpu");
-	if (!mpu_dev) {
+	if (IS_ERR(mpu_dev)) {
 		pr_warning("%s: unable to get the mpu device\n", __func__);
-		return -EINVAL;
+		return PTR_ERR(mpu_dev);
 	}

 	mpu_reg = regulator_get(mpu_dev, "vcc");
-- cgit v0.10.2
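The IS_ERR() fix above is an instance of a general kernel convention: functions that return a pointer can encode an errno in the pointer value itself, and such a value is never NULL, so a plain NULL check always passes. A minimal sketch of the correct handling, with a hypothetical helper name:

	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical lookup following the ERR_PTR convention: on failure
	 * it returns ERR_PTR(-ENODEV), not NULL. */
	static struct device *example_get_device(void)
	{
		return ERR_PTR(-ENODEV);
	}

	static int example_init(void)
	{
		struct device *dev = example_get_device();

		if (IS_ERR(dev))		/* a NULL check here would pass */
			return PTR_ERR(dev);	/* recover the encoded errno */

		/* safe to use dev from here on */
		return 0;
	}

Checking the wrong condition, as the old omap-cpufreq code did, means dereferencing an invalid pointer on the failure path.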
From ed953472d181e1d149f17d85d82de9634db296c3 Mon Sep 17 00:00:00 2001
From: Daniel Lezcano
Date: Sat, 22 Sep 2012 00:38:32 +0200
Subject: cpuidle: rename function name "__cpuidle_register_driver", v2

The name __cpuidle_register_driver is confusing because it suggests,
conforming to the coding style of the kernel, that it registers the driver
without taking a lock. Actually, it just fills in the power fields of the
states with a decreasing value if the power has not been specified.
Clarify the purpose of the function by changing its name and moving the
condition out of this function.

This patch fixes nothing and does not change the behavior of the function.
It is just for the sake of clarity. IMHO, reading in the code:

+	if (!drv->power_specified)
+		set_power_states(drv);

is much more explicit than:

-	__cpuidle_register_driver(drv);

Signed-off-by: Daniel Lezcano
Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 424bc81..87db387 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -18,9 +18,10 @@ static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 int cpuidle_driver_refcount;

-static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+static void set_power_states(struct cpuidle_driver *drv)
 {
 	int i;
+
 	/*
 	 * cpuidle driver should set the drv->power_specified bit
 	 * before registering if the driver provides
@@ -35,10 +36,8 @@
 	 * an power value of -1. So we use -2, -3, etc, for other
 	 * c-states.
 	 */
-	if (!drv->power_specified) {
-		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-			drv->states[i].power_usage = -1 - i;
-	}
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+		drv->states[i].power_usage = -1 - i;
 }

 /**
@@ -58,8 +57,12 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
 		spin_unlock(&cpuidle_driver_lock);
 		return -EBUSY;
 	}
-	__cpuidle_register_driver(drv);
+
+	if (!drv->power_specified)
+		set_power_states(drv);
+
 	cpuidle_curr_driver = drv;
+
 	spin_unlock(&cpuidle_driver_lock);

 	return 0;
-- cgit v0.10.2
From 6f3c77b040fc24708228607bba504878de5236d1 Mon Sep 17 00:00:00 2001
From: Kevin Hilman
Date: Fri, 21 Sep 2012 22:47:34 +0000
Subject: PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2

There are several drivers where the return value of pm_runtime_get_sync()
is used to decide whether or not it is safe to access hardware and that
don't provide .suspend() callbacks for system suspend (but may use
late/noirq callbacks). If such a driver happens to call
pm_runtime_get_sync() during system suspend, after the core has disabled
runtime PM, it will get the error code and will decide that the hardware
should not be accessed, although this may be a wrong conclusion, depending
on the state of the device when runtime PM was disabled.

Drivers might work around this problem by using a test like:

	ret = pm_runtime_get_sync(dev);
	if (!ret || (ret == -EACCES && driver_private_data(dev)->suspended)) {
		/* access hardware */
	}

where driver_private_data(dev)->suspended is a flag set by the driver's
.suspend() method (that would have to be added for this purpose). However,
that potentially would need to be done by multiple drivers, which means
quite a lot of duplicated code and bloat.

To avoid that, we can use the observation that the core sets
dev->power.is_suspended before disabling runtime PM and use that instead
of the driver's private flag. Still, potentially many drivers would need
to repeat that same check in quite a few places, so it's better to let the
core do it.

Then we can be a bit smarter and check whether or not runtime PM was
disabled by the core only (disable_depth == 1) or by someone else in
addition to the core (disable_depth > 1). In the former case rpm_resume()
can return 1 if the runtime PM status is RPM_ACTIVE, because it means the
device was active when the core disabled runtime PM. In the latter case it
should still return -EACCES, because it isn't clear why runtime PM has
been disabled.

Tested on AM3730/Beagle-xM, where a wakeup IRQ firing during the late
suspend phase triggers runtime PM activity in the I2C driver, since the
wakeup IRQ is on an I2C-connected PMIC.

[rjw: Modified whitespace to follow the file's convention.]

Signed-off-by: Kevin Hilman
Signed-off-by: Rafael J.
Wysocki diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7d9c1cb..3148b10 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags) repeat: if (dev->power.runtime_error) retval = -EINVAL; + else if (dev->power.disable_depth == 1 && dev->power.is_suspended + && dev->power.runtime_status == RPM_ACTIVE) + retval = 1; else if (dev->power.disable_depth > 0) retval = -EACCES; if (retval) -- cgit v0.10.2 From 8376869e51f5094e87229aa6200c43ada85c9aaf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 24 Sep 2012 21:39:36 +0200 Subject: Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code" This reverts commit fc2fb3a075c206927d3fbad251dae82ba82ccf2d. The problem with the above commit is that it makes the device PM QoS core code hold a spinlock around blocking_notifier_call_chain() invocations. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 968a771..74a67e0 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -24,32 +24,26 @@ * . a system-wide notification callback using the dev_pm_qos_*_global_notifier * API. The notification chain data is stored in a static variable. * - * Notes about the per-device constraint data struct allocation: - * . The per-device constraints data struct ptr is stored into the device + * Note about the per-device constraint data struct allocation: + * . The per-device constraints data struct ptr is tored into the device * dev_pm_info. * . To minimize the data usage by the per-device constraints, the data struct - * is only allocated at the first call to dev_pm_qos_add_request. + * is only allocated at the first call to dev_pm_qos_add_request. * . The data is later free'd when the device is removed from the system. - * - * Notes about locking: - * . The dev->power.lock lock protects the constraints list - * (dev->power.constraints) allocation and free, as triggered by the - * driver core code at device insertion and removal, - * . A global lock dev_pm_qos_lock protects the constraints list entries - * from any modification and the notifiers registration and unregistration. - * . For both locks a spinlock is needed since this code can be called from - * interrupt context or spinlock protected context. + * . A global mutex protects the constraints users from the data being + * allocated and free'd. 
*/ #include #include #include #include +#include #include #include "power.h" -static DEFINE_SPINLOCK(dev_pm_qos_lock); +static DEFINE_MUTEX(dev_pm_qos_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -116,19 +110,18 @@ static int apply_constraint(struct dev_pm_qos_request *req, * @dev: device to allocate data for * * Called at the first call to add_request, for constraint data allocation - * Must be called with the dev_pm_qos_lock lock held + * Must be called with the dev_pm_qos_mtx mutex held */ static int dev_pm_qos_constraints_allocate(struct device *dev) { struct pm_qos_constraints *c; struct blocking_notifier_head *n; - unsigned long flags; - c = kzalloc(sizeof(*c), GFP_ATOMIC); + c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; - n = kzalloc(sizeof(*n), GFP_ATOMIC); + n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) { kfree(c); return -ENOMEM; @@ -141,9 +134,9 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) c->type = PM_QOS_MIN; c->notifiers = n; - spin_lock_irqsave(&dev->power.lock, flags); + spin_lock_irq(&dev->power.lock); dev->power.constraints = c; - spin_unlock_irqrestore(&dev->power.lock, flags); + spin_unlock_irq(&dev->power.lock); return 0; } @@ -157,12 +150,10 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) */ void dev_pm_qos_constraints_init(struct device *dev) { - unsigned long flags; - - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); dev->power.constraints = NULL; dev->power.power_state = PMSG_ON; - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); } /** @@ -175,7 +166,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev) { struct dev_pm_qos_request *req, *tmp; struct pm_qos_constraints *c; - unsigned long flags; /* * If the device's PM QoS resume latency limit has been exposed to user @@ -183,7 +173,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) */ dev_pm_qos_hide_latency_limit(dev); - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); dev->power.power_state = PMSG_INVALID; c = dev->power.constraints; @@ -208,7 +198,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) kfree(c); out: - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); } /** @@ -233,7 +223,6 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, s32 value) { int ret = 0; - unsigned long flags; if (!dev || !req) /*guard against callers passing in null */ return -EINVAL; @@ -244,7 +233,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, req->dev = dev; - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); if (!dev->power.constraints) { if (dev->power.power_state.event == PM_EVENT_INVALID) { @@ -266,7 +255,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, ret = apply_constraint(req, PM_QOS_ADD_REQ, value); out: - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); return ret; } @@ -291,7 +280,6 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { int ret = 0; - unsigned long flags; if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -300,7 +288,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, "%s() called for unknown object\n", __func__)) return -EINVAL; - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); if (req->dev->power.constraints) { if (new_value != req->node.prio) @@ 
-311,7 +299,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, ret = -ENODEV; } - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); @@ -331,7 +319,6 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { int ret = 0; - unsigned long flags; if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -340,7 +327,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) "%s() called for unknown object\n", __func__)) return -EINVAL; - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); if (req->dev->power.constraints) { ret = apply_constraint(req, PM_QOS_REMOVE_REQ, @@ -351,7 +338,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) ret = -ENODEV; } - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); @@ -372,9 +359,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) { int ret = 0; - unsigned long flags; - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); if (!dev->power.constraints) ret = dev->power.power_state.event != PM_EVENT_INVALID ? @@ -384,7 +370,7 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ret = blocking_notifier_chain_register( dev->power.constraints->notifiers, notifier); - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); @@ -403,9 +389,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier) { int retval = 0; - unsigned long flags; - spin_lock_irqsave(&dev_pm_qos_lock, flags); + mutex_lock(&dev_pm_qos_mtx); /* Silently return if the constraints object is not present. */ if (dev->power.constraints) @@ -413,7 +398,7 @@ int dev_pm_qos_remove_notifier(struct device *dev, dev->power.constraints->notifiers, notifier); - spin_unlock_irqrestore(&dev_pm_qos_lock, flags); + mutex_unlock(&dev_pm_qos_mtx); return retval; } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); -- cgit v0.10.2
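The rationale for this revert can be shown in a few lines. The sketch below is illustrative only, not code from qos.c, and the names are hypothetical: a blocking notifier chain takes an rwsem and may sleep, so invoking it with a spinlock held (atomic context) can deadlock or trigger "scheduling while atomic", which is exactly the situation the spinlock conversion created around blocking_notifier_chain_register() and its callers.

	#include <linux/notifier.h>
	#include <linux/spinlock.h>

	static BLOCKING_NOTIFIER_HEAD(example_chain);
	static DEFINE_SPINLOCK(example_lock);

	static void broken_notify(unsigned long val, void *data)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* BUG: blocking_notifier_call_chain() may sleep (it takes an
		 * rwsem), but sleeping is forbidden while a spinlock is held. */
		blocking_notifier_call_chain(&example_chain, val, data);
		spin_unlock_irqrestore(&example_lock, flags);
	}

Making the per-device constraints safely usable from interrupt context, as the reverted patch intended, would also require converting the notifier chains themselves to the atomic variant, not just swapping the mutex for a spinlock.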