From 564b905ab10d17fb42f86aa8b7b9b796276d1336 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 Jun 2011 01:52:55 +0200 Subject: PM / Domains: Rename struct dev_power_domain to struct dev_pm_domain The naming convention used by commit 7538e3db6e015e890825fbd9f86599b (PM: Add support for device power domains), which introduced the struct dev_power_domain type for representing device power domains, evidently confuses some developers who tend to think that objects of this type must correspond to "power domains" as defined by hardware, which is not the case. Namely, at the kernel level, a struct dev_power_domain object can represent arbitrary set of devices that are mutually dependent power management-wise and need not belong to one hardware power domain. To avoid that confusion, rename struct dev_power_domain to struct dev_pm_domain and rename the related pointers in struct device and struct pm_clk_notifier_block from pwr_domain to pm_domain. Signed-off-by: Rafael J. Wysocki Acked-by: Kevin Hilman diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 64565aa..85c6f98 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt @@ -506,8 +506,8 @@ routines. Nevertheless, different callback pointers are used in case there is a situation where it actually matters. -Device Power Domains --------------------- +Device Power Management Domains +------------------------------- Sometimes devices share reference clocks or other power resources. In those cases it generally is not possible to put devices into low-power states individually. Instead, a set of devices sharing a power resource can be put @@ -516,8 +516,8 @@ power resource. Of course, they also need to be put into the full-power state together, by turning the shared power resource on. A set of devices with this property is often referred to as a power domain. -Support for power domains is provided through the pwr_domain field of struct -device. This field is a pointer to an object of type struct dev_power_domain, +Support for power domains is provided through the pm_domain field of struct +device. 
This field is a pointer to an object of type struct dev_pm_domain, defined in include/linux/pm.h, providing a set of power management callbacks analogous to the subsystem-level and device driver callbacks that are executed for the given device during all power transitions, instead of the respective diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 334fb88..212f331 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -49,20 +49,20 @@ static int omap1_pm_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = omap1_pm_runtime_suspend, .runtime_resume = omap1_pm_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }, }; -#define OMAP1_PWR_DOMAIN (&default_power_domain) +#define OMAP1_PM_DOMAIN (&default_pm_domain) #else -#define OMAP1_PWR_DOMAIN NULL +#define OMAP1_PM_DOMAIN NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = OMAP1_PWR_DOMAIN, + .pm_domain = OMAP1_PM_DOMAIN, .con_ids = { "ick", "fck", NULL, }, }; diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 2d1b67a..99802d2 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -28,7 +28,7 @@ static int default_platform_runtime_idle(struct device *dev) return pm_runtime_suspend(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = pm_runtime_clk_suspend, .runtime_resume = pm_runtime_clk_resume, @@ -37,16 +37,16 @@ static struct dev_power_domain default_power_domain = { }, }; -#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) +#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) #else -#define DEFAULT_PWR_DOMAIN_PTR NULL +#define DEFAULT_PM_DOMAIN_PTR NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, + .pm_domain = DEFAULT_PM_DOMAIN_PTR, .con_ids = { NULL, }, }; diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 49fc0df..d21579b 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -564,7 +564,7 @@ static int _od_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -static struct dev_power_domain omap_device_power_domain = { +static struct dev_pm_domain omap_device_pm_domain = { .ops = { .runtime_suspend = _od_runtime_suspend, .runtime_idle = _od_runtime_idle, @@ -586,7 +586,7 @@ int omap_device_register(struct omap_device *od) pr_debug("omap_device: %s: registering\n", od->pdev.name); od->pdev.dev.parent = &omap_device_parent; - od->pdev.dev.pwr_domain = &omap_device_power_domain; + od->pdev.dev.pm_domain = &omap_device_pm_domain; return platform_device_register(&od->pdev); } diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 64c807c..bf280c8 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -256,7 +256,7 @@ out: return ret; } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = default_platform_runtime_suspend, .runtime_resume = default_platform_runtime_resume, @@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, 
hwblk_disable(hwblk_info, hwblk); /* make sure driver re-inits itself once */ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); - dev->pwr_domain = &default_power_domain; + dev->pm_domain = &default_pm_domain; break; /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ case BUS_NOTIFY_BOUND_DRIVER: @@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); break; case BUS_NOTIFY_DEL_DEVICE: - dev->pwr_domain = NULL; + dev->pm_domain = NULL; break; } return 0; diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index ad367c4..c562481 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -278,11 +278,11 @@ int pm_runtime_clk_resume(struct device *dev) * * For this function to work, @nb must be a member of an object of type * struct pm_clk_notifier_block containing all of the requisite data. - * Specifically, the pwr_domain member of that object is copied to the device's - * pwr_domain field and its con_ids member is used to populate the device's list + * Specifically, the pm_domain member of that object is copied to the device's + * pm_domain field and its con_ids member is used to populate the device's list * of runtime PM clocks, depending on @action. * - * If the device's pwr_domain field is already populated with a value different + * If the device's pm_domain field is already populated with a value different * from the one stored in the struct pm_clk_notifier_block object, the function * does nothing. */ @@ -300,14 +300,14 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, switch (action) { case BUS_NOTIFY_ADD_DEVICE: - if (dev->pwr_domain) + if (dev->pm_domain) break; error = pm_runtime_clk_init(dev); if (error) break; - dev->pwr_domain = clknb->pwr_domain; + dev->pm_domain = clknb->pm_domain; if (clknb->con_ids[0]) { for (con_id = clknb->con_ids; *con_id; con_id++) pm_runtime_clk_add(dev, *con_id); @@ -317,10 +317,10 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, break; case BUS_NOTIFY_DEL_DEVICE: - if (dev->pwr_domain != clknb->pwr_domain) + if (dev->pm_domain != clknb->pm_domain) break; - dev->pwr_domain = NULL; + dev->pm_domain = NULL; pm_runtime_clk_destroy(dev); break; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 06f09bf..85b591a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "EARLY power domain "); - error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + error = pm_noirq_op(dev, &dev->pm_domain->ops, state); } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "EARLY type "); error = pm_noirq_op(dev, dev->type->pm, state); @@ -521,9 +521,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (!dev->power.is_suspended) goto Unlock; - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pwr_domain->ops, state); + error = pm_op(dev, &dev->pm_domain->ops, state); goto End; } @@ -641,10 +641,10 @@ static void device_complete(struct device *dev, pm_message_t state) { device_lock(dev); - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "completing power domain "); - if (dev->pwr_domain->ops.complete) - 
dev->pwr_domain->ops.complete(dev); + if (dev->pm_domain->ops.complete) + dev->pm_domain->ops.complete(dev); } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "completing type "); if (dev->type->pm->complete) @@ -744,9 +744,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) { int error; - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "LATE power domain "); - error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + error = pm_noirq_op(dev, &dev->pm_domain->ops, state); if (error) return error; } else if (dev->type && dev->type->pm) { @@ -853,9 +853,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Unlock; } - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pwr_domain->ops, state); + error = pm_op(dev, &dev->pm_domain->ops, state); goto End; } @@ -982,11 +982,11 @@ static int device_prepare(struct device *dev, pm_message_t state) device_lock(dev); - if (dev->pwr_domain) { + if (dev->pm_domain) { pm_dev_dbg(dev, state, "preparing power domain "); - if (dev->pwr_domain->ops.prepare) - error = dev->pwr_domain->ops.prepare(dev); - suspend_report_result(dev->pwr_domain->ops.prepare, error); + if (dev->pm_domain->ops.prepare) + error = dev->pm_domain->ops.prepare(dev); + suspend_report_result(dev->pm_domain->ops.prepare, error); if (error) goto End; } else if (dev->type && dev->type->pm) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0d4587b..5f5c423 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -213,8 +213,8 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_idle; + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_idle; else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_idle; else if (dev->class && dev->class->pm) @@ -374,8 +374,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_suspend; + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_suspend; else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_suspend; else if (dev->class && dev->class->pm) @@ -573,8 +573,8 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); - if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_resume; + if (dev->pm_domain) + callback = dev->pm_domain->ops.runtime_resume; else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_resume; else if (dev->class && dev->class->pm) diff --git a/include/linux/device.h b/include/linux/device.h index e4f62d8..160d4dd 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -516,7 +516,7 @@ struct device_dma_parameters { * minimizes board-specific #ifdefs in drivers. * @power: For device power management. * See Documentation/power/devices.txt for details. - * @pwr_domain: Provide callbacks that are executed during system suspend, + * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. * @numa_node: NUMA node this device is close to. 
@@ -567,7 +567,7 @@ struct device { void *platform_data; /* Platform specific data, device core doesn't touch it */ struct dev_pm_info power; - struct dev_power_domain *pwr_domain; + struct dev_pm_domain *pm_domain; #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 411e4f4..e396320 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting(struct device *dev); * hibernation, system resume and during runtime PM transitions along with * subsystem-level and driver-level callbacks. */ -struct dev_power_domain { +struct dev_pm_domain { struct dev_pm_ops ops; }; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 878cf84..ef91904 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -247,7 +247,7 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) struct pm_clk_notifier_block { struct notifier_block nb; - struct dev_power_domain *pwr_domain; + struct dev_pm_domain *pm_domain; char *con_ids[]; }; -- cgit v0.10.2 From dc6e4e56e6ef473a696a1ab24f80b79b9aceb92d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 Jun 2011 01:53:04 +0200 Subject: PM: subsys_data in struct dev_pm_info need not depend on RM_RUNTIME The subsys_data field of struct dev_pm_info, introduced by commit 1d2b71f61b6a10216274e27b717becf9ae101fc7 (PM / Runtime: Add subsystem data field to struct dev_pm_info), is going to be used even if CONFIG_PM_RUNTIME is not set, so move it from under the #ifdef. Signed-off-by: Rafael J. Wysocki diff --git a/include/linux/pm.h b/include/linux/pm.h index e396320..7e8f0763 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -461,8 +461,8 @@ struct dev_pm_info { unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; - void *subsys_data; /* Owned by the subsystem. */ #endif + void *subsys_data; /* Owned by the subsystem. */ }; extern void update_pm_runtime_accounting(struct device *dev); -- cgit v0.10.2 From f721889ff65afa6243c463832c74dee3bed418d5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:12:45 +0200 Subject: PM / Domains: Support for generic I/O PM domains (v8) Introduce common headers, helper functions and callbacks allowing platforms to use simple generic power domains for runtime power management. Introduce struct generic_pm_domain to be used for representing power domains that each contain a number of devices and may be parent domains or subdomains with respect to other power domains. Among other things, this structure includes callbacks to be provided by platforms for performing specific tasks related to power management (i.e. ->stop_device() may disable a device's clocks, while ->start_device() may enable them, ->power_off() is supposed to remove power from the entire power domain and ->power_on() is supposed to restore it). Introduce functions that can be used as power domain runtime PM callbacks, pm_genpd_runtime_suspend() and pm_genpd_runtime_resume(), as well as helper functions for the initialization of a power domain represented by a struct generic_power_domain object, adding a device to or removing a device from it and adding or removing subdomains. Introduce configuration option CONFIG_PM_GENERIC_DOMAINS to be selected by the platforms that want to use the new code. Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman Reviewed-by: Kevin Hilman diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 3647e11..2639ae7 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c new file mode 100644 index 0000000..fd31be3 --- /dev/null +++ b/drivers/base/power/domain.c @@ -0,0 +1,494 @@ +/* + * drivers/base/power/domain.c - Common code related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PM_RUNTIME + +static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) +{ + if (!WARN_ON(genpd->sd_count == 0)) + genpd->sd_count--; +} + +/** + * __pm_genpd_save_device - Save the pre-suspend state of a device. + * @dle: Device list entry of the device to save the state of. + * @genpd: PM domain the device belongs to. + */ +static int __pm_genpd_save_device(struct dev_list_entry *dle, + struct generic_pm_domain *genpd) +{ + struct device *dev = dle->dev; + struct device_driver *drv = dev->driver; + int ret = 0; + + if (dle->need_restore) + return 0; + + if (drv && drv->pm && drv->pm->runtime_suspend) { + if (genpd->start_device) + genpd->start_device(dev); + + ret = drv->pm->runtime_suspend(dev); + + if (genpd->stop_device) + genpd->stop_device(dev); + } + + if (!ret) + dle->need_restore = true; + + return ret; +} + +/** + * __pm_genpd_restore_device - Restore the pre-suspend state of a device. + * @dle: Device list entry of the device to restore the state of. + * @genpd: PM domain the device belongs to. + */ +static void __pm_genpd_restore_device(struct dev_list_entry *dle, + struct generic_pm_domain *genpd) +{ + struct device *dev = dle->dev; + struct device_driver *drv = dev->driver; + + if (!dle->need_restore) + return; + + if (drv && drv->pm && drv->pm->runtime_resume) { + if (genpd->start_device) + genpd->start_device(dev); + + drv->pm->runtime_resume(dev); + + if (genpd->stop_device) + genpd->stop_device(dev); + } + + dle->need_restore = false; +} + +/** + * pm_genpd_poweroff - Remove power from a given PM domain. + * @genpd: PM domain to power down. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, run the runtime suspend callbacks provided by all of + * the @genpd's devices' drivers and remove power from @genpd. 
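+ *
+ * Return 0 if @genpd is already off or has been powered off successfully,
+ * -EBUSY if some of its devices are still in use or any of its subdomains are
+ * powered on, -EAGAIN if the domain governor does not allow the power-off,
+ * or the error code returned by a failing driver runtime suspend callback.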
+ */ +static int pm_genpd_poweroff(struct generic_pm_domain *genpd) +{ + struct generic_pm_domain *parent; + struct dev_list_entry *dle; + unsigned int not_suspended; + int ret; + + if (genpd->power_is_off) + return 0; + + if (genpd->sd_count > 0) + return -EBUSY; + + not_suspended = 0; + list_for_each_entry(dle, &genpd->dev_list, node) + if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) + not_suspended++; + + if (not_suspended > genpd->in_progress) + return -EBUSY; + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + list_for_each_entry_reverse(dle, &genpd->dev_list, node) { + ret = __pm_genpd_save_device(dle, genpd); + if (ret) + goto err_dev; + } + + if (genpd->power_off) + genpd->power_off(genpd); + + genpd->power_is_off = true; + + parent = genpd->parent; + if (parent) { + genpd_sd_counter_dec(parent); + if (parent->sd_count == 0) + queue_work(pm_wq, &parent->power_off_work); + } + + return 0; + + err_dev: + list_for_each_entry_continue(dle, &genpd->dev_list, node) + __pm_genpd_restore_device(dle, genpd); + + return ret; +} + +/** + * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. + * @work: Work structure used for scheduling the execution of this function. + */ +static void genpd_power_off_work_fn(struct work_struct *work) +{ + struct generic_pm_domain *genpd; + + genpd = container_of(work, struct generic_pm_domain, power_off_work); + + if (genpd->parent) + mutex_lock(&genpd->parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + pm_genpd_poweroff(genpd); + mutex_unlock(&genpd->lock); + if (genpd->parent) + mutex_unlock(&genpd->parent->lock); +} + +/** + * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. + * @dev: Device to suspend. + * + * Carry out a runtime suspend of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_runtime_suspend(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(dev->pm_domain)) + return -EINVAL; + + genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain); + + if (genpd->parent) + mutex_lock(&genpd->parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + + if (genpd->stop_device) { + int ret = genpd->stop_device(dev); + if (ret) + goto out; + } + genpd->in_progress++; + pm_genpd_poweroff(genpd); + genpd->in_progress--; + + out: + mutex_unlock(&genpd->lock); + if (genpd->parent) + mutex_unlock(&genpd->parent->lock); + + return 0; +} + +/** + * pm_genpd_poweron - Restore power to a given PM domain and its parents. + * @genpd: PM domain to power up. + * + * Restore power to @genpd and all of its parents so that it is possible to + * resume a device belonging to it. 
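+ *
+ * Any parent domains that are powered off are powered on first, starting from
+ * the topmost one, before power is restored to @genpd itself.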
+ */ +static int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + int ret = 0; + + start: + if (genpd->parent) + mutex_lock(&genpd->parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + + if (!genpd->power_is_off) + goto out; + + if (genpd->parent && genpd->parent->power_is_off) { + mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->parent->lock); + + ret = pm_genpd_poweron(genpd->parent); + if (ret) + return ret; + + goto start; + } + + if (genpd->power_on) { + int ret = genpd->power_on(genpd); + if (ret) + goto out; + } + + genpd->power_is_off = false; + if (genpd->parent) + genpd->parent->sd_count++; + + out: + mutex_unlock(&genpd->lock); + if (genpd->parent) + mutex_unlock(&genpd->parent->lock); + + return ret; +} + +/** + * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. + * @dev: Device to resume. + * + * Carry out a runtime resume of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_runtime_resume(struct device *dev) +{ + struct generic_pm_domain *genpd; + struct dev_list_entry *dle; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(dev->pm_domain)) + return -EINVAL; + + genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain); + + ret = pm_genpd_poweron(genpd); + if (ret) + return ret; + + mutex_lock(&genpd->lock); + + list_for_each_entry(dle, &genpd->dev_list, node) { + if (dle->dev == dev) { + __pm_genpd_restore_device(dle, genpd); + break; + } + } + + if (genpd->start_device) + genpd->start_device(dev); + + mutex_unlock(&genpd->lock); + + return 0; +} + +#else + +static inline void genpd_power_off_work_fn(struct work_struct *work) {} + +#define pm_genpd_runtime_suspend NULL +#define pm_genpd_runtime_resume NULL + +#endif /* CONFIG_PM_RUNTIME */ + +/** + * pm_genpd_add_device - Add a device to an I/O PM domain. + * @genpd: PM domain to add the device to. + * @dev: Device to be added. + */ +int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) +{ + struct dev_list_entry *dle; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) + return -EINVAL; + + mutex_lock(&genpd->lock); + + if (genpd->power_is_off) { + ret = -EINVAL; + goto out; + } + + list_for_each_entry(dle, &genpd->dev_list, node) + if (dle->dev == dev) { + ret = -EINVAL; + goto out; + } + + dle = kzalloc(sizeof(*dle), GFP_KERNEL); + if (!dle) { + ret = -ENOMEM; + goto out; + } + + dle->dev = dev; + dle->need_restore = false; + list_add_tail(&dle->node, &genpd->dev_list); + + spin_lock_irq(&dev->power.lock); + dev->pm_domain = &genpd->domain; + spin_unlock_irq(&dev->power.lock); + + out: + mutex_unlock(&genpd->lock); + + return ret; +} + +/** + * pm_genpd_remove_device - Remove a device from an I/O PM domain. + * @genpd: PM domain to remove the device from. + * @dev: Device to be removed. 
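+ *
+ * Return 0 on success, or -EINVAL if the arguments are invalid or @dev is not
+ * a member of @genpd.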
+ */
+int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = -EINVAL;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ mutex_lock(&genpd->lock);
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev != dev)
+ continue;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ list_del(&dle->node);
+ kfree(dle);
+
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&genpd->lock);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @new_subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+ return -EINVAL;
+
+ mutex_lock(&genpd->lock);
+
+ if (genpd->power_is_off && !new_subdomain->power_is_off) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain == new_subdomain) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+ new_subdomain->parent = genpd;
+ if (!new_subdomain->power_is_off)
+ genpd->sd_count++;
+
+ mutex_unlock(&new_subdomain->lock);
+
+ out:
+ mutex_unlock(&genpd->lock);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @target: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+ return -EINVAL;
+
+ mutex_lock(&genpd->lock);
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain != target)
+ continue;
+
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ list_del(&subdomain->sd_node);
+ subdomain->parent = NULL;
+ if (!subdomain->power_is_off)
+ genpd_sd_counter_dec(genpd);
+
+ mutex_unlock(&subdomain->lock);
+
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&genpd->lock);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial value of the domain's power_is_off field.
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ if (IS_ERR_OR_NULL(genpd))
+ return;
+
+ INIT_LIST_HEAD(&genpd->sd_node);
+ genpd->parent = NULL;
+ INIT_LIST_HEAD(&genpd->dev_list);
+ INIT_LIST_HEAD(&genpd->sd_list);
+ mutex_init(&genpd->lock);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ genpd->in_progress = 0;
+ genpd->sd_count = 0;
+ genpd->power_is_off = is_off;
+ genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
new file mode 100644
index 0000000..b1a22c6
--- /dev/null
+++ b/include/linux/pm_domain.h
@@ -0,0 +1,78 @@
+/*
+ * pm_domain.h - Definitions and headers related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_DOMAIN_H
+#define _LINUX_PM_DOMAIN_H
+
+#include
+
+struct dev_power_governor {
+ bool (*power_down_ok)(struct dev_pm_domain *domain);
+};
+
+struct generic_pm_domain {
+ struct dev_pm_domain domain; /* PM domain operations */
+ struct list_head sd_node; /* Node in the parent's subdomain list */
+ struct generic_pm_domain *parent; /* Parent PM domain */
+ struct list_head sd_list; /* List of subdomains */
+ struct list_head dev_list; /* List of devices */
+ struct mutex lock;
+ struct dev_power_governor *gov;
+ struct work_struct power_off_work;
+ unsigned int in_progress; /* Number of devices being suspended now */
+ unsigned int sd_count; /* Number of subdomains with power "on" */
+ bool power_is_off; /* Whether or not power has been removed */
+ int (*power_off)(struct generic_pm_domain *domain);
+ int (*power_on)(struct generic_pm_domain *domain);
+ int (*start_device)(struct device *dev);
+ int (*stop_device)(struct device *dev);
+};
+
+struct dev_list_entry {
+ struct list_head node;
+ struct device *dev;
+ bool need_restore;
+};
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev);
+extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev);
+extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target);
+extern void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
+#else
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_sd)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off) {}
+#endif
+
+#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24..e83ac25 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -227,3 +227,7 @@ config PM_OPP
config PM_RUNTIME_CLK
def_bool y
depends on
PM_RUNTIME && HAVE_CLK + +config PM_GENERIC_DOMAINS + bool + depends on PM -- cgit v0.10.2 From e5291928839877f8e73c2643ee1d3fe0bcdcaf5c Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:12:59 +0200 Subject: PM: Introduce generic "noirq" callback routines for subsystems (v2) Introduce generic "noirq" power management callback routines for subsystems in addition to the "regular" generic PM callback routines. The new routines will be used, among other things, for implementing system-wide PM transitions support for generic PM domains. Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index b24875b..4b011b1 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -606,32 +606,60 @@ driver/base/power/generic_ops.c: callback provided by its driver and return its result, or return 0 if not defined + int pm_generic_suspend_noirq(struct device *dev); + - if pm_runtime_suspended(dev) returns "false", invoke the ->suspend_noirq() + callback provided by the device's driver and return its result, or return + 0 if not defined + int pm_generic_resume(struct device *dev); - invoke the ->resume() callback provided by the driver of this device and, if successful, change the device's runtime PM status to 'active' + int pm_generic_resume_noirq(struct device *dev); + - invoke the ->resume_noirq() callback provided by the driver of this device + int pm_generic_freeze(struct device *dev); - if the device has not been suspended at run time, invoke the ->freeze() callback provided by its driver and return its result, or return 0 if not defined + int pm_generic_freeze_noirq(struct device *dev); + - if pm_runtime_suspended(dev) returns "false", invoke the ->freeze_noirq() + callback provided by the device's driver and return its result, or return + 0 if not defined + int pm_generic_thaw(struct device *dev); - if the device has not been suspended at run time, invoke the ->thaw() callback provided by its driver and return its result, or return 0 if not defined + int pm_generic_thaw_noirq(struct device *dev); + - if pm_runtime_suspended(dev) returns "false", invoke the ->thaw_noirq() + callback provided by the device's driver and return its result, or return + 0 if not defined + int pm_generic_poweroff(struct device *dev); - if the device has not been suspended at run time, invoke the ->poweroff() callback provided by its driver and return its result, or return 0 if not defined + int pm_generic_poweroff_noirq(struct device *dev); + - if pm_runtime_suspended(dev) returns "false", run the ->poweroff_noirq() + callback provided by the device's driver and return its result, or return + 0 if not defined + int pm_generic_restore(struct device *dev); - invoke the ->restore() callback provided by the driver of this device and, if successful, change the device's runtime PM status to 'active' + int pm_generic_restore_noirq(struct device *dev); + - invoke the ->restore_noirq() callback provided by the device's driver + These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(), -->runtime_resume(), ->suspend(), ->resume(), ->freeze(), ->thaw(), ->poweroff(), -or ->restore() callback pointers in the subsystem-level dev_pm_ops structures. 
+->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(), +->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(), +->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() callback +pointers in the subsystem-level dev_pm_ops structures. If a subsystem wishes to use all of them at the same time, it can simply assign the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index cb3bb36..9508df7 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev) * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. * @dev: Device to handle. * @event: PM transition of the system under way. + * @bool: Whether or not this is the "noirq" stage. * * If the device has not been suspended at run time, execute the * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and * return its error code. Otherwise, return zero. */ -static int __pm_generic_call(struct device *dev, int event) +static int __pm_generic_call(struct device *dev, int event, bool noirq) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int (*callback)(struct device *); @@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event) switch (event) { case PM_EVENT_SUSPEND: - callback = pm->suspend; + callback = noirq ? pm->suspend_noirq : pm->suspend; break; case PM_EVENT_FREEZE: - callback = pm->freeze; + callback = noirq ? pm->freeze_noirq : pm->freeze; break; case PM_EVENT_HIBERNATE: - callback = pm->poweroff; + callback = noirq ? pm->poweroff_noirq : pm->poweroff; break; case PM_EVENT_THAW: - callback = pm->thaw; + callback = noirq ? pm->thaw_noirq : pm->thaw; break; default: callback = NULL; @@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event) } /** + * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend_noirq(struct device *dev) +{ + return __pm_generic_call(dev, PM_EVENT_SUSPEND, true); +} +EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); + +/** * pm_generic_suspend - Generic suspend callback for subsystems. * @dev: Device to suspend. */ int pm_generic_suspend(struct device *dev) { - return __pm_generic_call(dev, PM_EVENT_SUSPEND); + return __pm_generic_call(dev, PM_EVENT_SUSPEND, false); } EXPORT_SYMBOL_GPL(pm_generic_suspend); /** + * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze_noirq(struct device *dev) +{ + return __pm_generic_call(dev, PM_EVENT_FREEZE, true); +} +EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); + +/** * pm_generic_freeze - Generic freeze callback for subsystems. * @dev: Device to freeze. */ int pm_generic_freeze(struct device *dev) { - return __pm_generic_call(dev, PM_EVENT_FREEZE); + return __pm_generic_call(dev, PM_EVENT_FREEZE, false); } EXPORT_SYMBOL_GPL(pm_generic_freeze); /** + * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff_noirq(struct device *dev) +{ + return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true); +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); + +/** * pm_generic_poweroff - Generic poweroff callback for subsystems. * @dev: Device to handle. 
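+ *
+ * If the device has not been suspended at run time, execute the ->poweroff()
+ * callback provided by its driver and return its result, or return 0 if it is
+ * not defined.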
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW);
+ return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
* __pm_generic_resume - Generic resume/restore callback for subsystems.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
*
* Execute the resume/restore callback provided by the @dev's driver, if
* defined. If it returns 0, change the device's runtime PM status to 'active'.
* Return the callback's error code.
*/
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
switch (event) {
case PM_EVENT_RESUME:
- callback = pm->resume;
+ callback = noirq ? pm->resume_noirq : pm->resume;
break;
case PM_EVENT_RESTORE:
- callback = pm->restore;
+ callback = noirq ? pm->restore_noirq : pm->restore;
break;
default:
callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;
ret = callback(dev);
- if (!ret && pm_runtime_enabled(dev)) {
+ if (!ret && !noirq && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
}
/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME);
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
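+ *
+ * Invoke the ->restore() callback provided by the driver of this device and,
+ * if successful, change the device's runtime PM status to 'active'.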
*/ int pm_generic_restore(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESTORE); + return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); } EXPORT_SYMBOL_GPL(pm_generic_restore); @@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = { #ifdef CONFIG_PM_SLEEP .prepare = pm_generic_prepare, .suspend = pm_generic_suspend, + .suspend_noirq = pm_generic_suspend_noirq, .resume = pm_generic_resume, + .resume_noirq = pm_generic_resume_noirq, .freeze = pm_generic_freeze, + .freeze_noirq = pm_generic_freeze_noirq, .thaw = pm_generic_thaw, + .thaw_noirq = pm_generic_thaw_noirq, .poweroff = pm_generic_poweroff, + .poweroff_noirq = pm_generic_poweroff_noirq, .restore = pm_generic_restore, + .restore_noirq = pm_generic_restore_noirq, .complete = pm_generic_complete, #endif #ifdef CONFIG_PM_RUNTIME diff --git a/include/linux/pm.h b/include/linux/pm.h index 7e8f0763..f7c84c9 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -553,11 +553,17 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -- cgit v0.10.2 From 5248051b9afb6684cd817b2fbdaefa5063761dab Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:10 +0200 Subject: PM / Domains: Move code from under #ifdef CONFIG_PM_RUNTIME (v2) There is some code in drivers/base/power/domain.c that will be useful for both runtime PM and system-wide power transitions, so make it depend on CONFIG_PM instead of CONFIG_PM_RUNTIME. Signed-off-by: Rafael J. Wysocki Reviewed-by: Kevin Hilman diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index fd31be3..f14ba32 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -14,7 +14,15 @@ #include #include -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM + +static struct generic_pm_domain *dev_to_genpd(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev->pm_domain)) + return ERR_PTR(-EINVAL); + + return container_of(dev->pm_domain, struct generic_pm_domain, domain); +} static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) { @@ -23,6 +31,58 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) } /** + * pm_genpd_poweron - Restore power to a given PM domain and its parents. + * @genpd: PM domain to power up. + * + * Restore power to @genpd and all of its parents so that it is possible to + * resume a device belonging to it. 
+ */ +static int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + int ret = 0; + + start: + if (genpd->parent) + mutex_lock(&genpd->parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + + if (!genpd->power_is_off) + goto out; + + if (genpd->parent && genpd->parent->power_is_off) { + mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->parent->lock); + + ret = pm_genpd_poweron(genpd->parent); + if (ret) + return ret; + + goto start; + } + + if (genpd->power_on) { + int ret = genpd->power_on(genpd); + if (ret) + goto out; + } + + genpd->power_is_off = false; + if (genpd->parent) + genpd->parent->sd_count++; + + out: + mutex_unlock(&genpd->lock); + if (genpd->parent) + mutex_unlock(&genpd->parent->lock); + + return ret; +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_RUNTIME + +/** * __pm_genpd_save_device - Save the pre-suspend state of a device. * @dle: Device list entry of the device to save the state of. * @genpd: PM domain the device belongs to. @@ -174,11 +234,10 @@ static int pm_genpd_runtime_suspend(struct device *dev) dev_dbg(dev, "%s()\n", __func__); - if (IS_ERR_OR_NULL(dev->pm_domain)) + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) return -EINVAL; - genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain); - if (genpd->parent) mutex_lock(&genpd->parent->lock); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); @@ -201,54 +260,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) } /** - * pm_genpd_poweron - Restore power to a given PM domain and its parents. - * @genpd: PM domain to power up. - * - * Restore power to @genpd and all of its parents so that it is possible to - * resume a device belonging to it. - */ -static int pm_genpd_poweron(struct generic_pm_domain *genpd) -{ - int ret = 0; - - start: - if (genpd->parent) - mutex_lock(&genpd->parent->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); - - if (!genpd->power_is_off) - goto out; - - if (genpd->parent && genpd->parent->power_is_off) { - mutex_unlock(&genpd->lock); - mutex_unlock(&genpd->parent->lock); - - ret = pm_genpd_poweron(genpd->parent); - if (ret) - return ret; - - goto start; - } - - if (genpd->power_on) { - int ret = genpd->power_on(genpd); - if (ret) - goto out; - } - - genpd->power_is_off = false; - if (genpd->parent) - genpd->parent->sd_count++; - - out: - mutex_unlock(&genpd->lock); - if (genpd->parent) - mutex_unlock(&genpd->parent->lock); - - return ret; -} - -/** * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. * @dev: Device to resume. * @@ -264,11 +275,10 @@ static int pm_genpd_runtime_resume(struct device *dev) dev_dbg(dev, "%s()\n", __func__); - if (IS_ERR_OR_NULL(dev->pm_domain)) + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) return -EINVAL; - genpd = container_of(dev->pm_domain, struct generic_pm_domain, domain); - ret = pm_genpd_poweron(genpd); if (ret) return ret; -- cgit v0.10.2 From 596ba34bcd2978ee9823cc1d84df230576f8ffb9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:19 +0200 Subject: PM / Domains: System-wide transitions support for generic domains (v5) Make generic PM domains support system-wide power transitions (system suspend and hibernation). Add suspend, resume, freeze, thaw, poweroff and restore callbacks to be associated with struct generic_pm_domain objects and make pm_genpd_init() use them as appropriate. The new callbacks do nothing for devices belonging to power domains that were powered down at run time (before the transition). 
For the other devices the action carried out depends on the type of the transition. During system suspend the power domain .suspend() callback executes pm_generic_suspend() for the device, while the PM domain .suspend_noirq() callback runs pm_generic_suspend_noirq() for it, stops it and eventually removes power from the PM domain it belongs to (after all devices in the domain have been stopped and its subdomains have been powered off). During system resume the PM domain .resume_noirq() callback restores power to the PM domain (when executed for it first time), starts the device and executes pm_generic_resume_noirq() for it, while the .resume() callback executes pm_generic_resume() for the device. Finally, the .complete() callback executes pm_runtime_idle() for the device which should put it back into the suspended state if its runtime PM usage count is equal to zero at that time. The actions carried out during hibernation and resume from it are analogous to the ones described above. Signed-off-by: Rafael J. Wysocki Reviewed-by: Kevin Hilman diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index f14ba32..33086e9 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -21,7 +21,7 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) if (IS_ERR_OR_NULL(dev->pm_domain)) return ERR_PTR(-EINVAL); - return container_of(dev->pm_domain, struct generic_pm_domain, domain); + return pd_to_genpd(dev->pm_domain); } static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) @@ -46,7 +46,8 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd) mutex_lock(&genpd->parent->lock); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); - if (!genpd->power_is_off) + if (!genpd->power_is_off + || (genpd->prepared_count > 0 && genpd->suspend_power_off)) goto out; if (genpd->parent && genpd->parent->power_is_off) { @@ -155,7 +156,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) unsigned int not_suspended; int ret; - if (genpd->power_is_off) + if (genpd->power_is_off || genpd->prepared_count > 0) return 0; if (genpd->sd_count > 0) @@ -260,6 +261,27 @@ static int pm_genpd_runtime_suspend(struct device *dev) } /** + * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. + * @dev: Device to resume. + * @genpd: PM domain the device belongs to. + */ +static void __pm_genpd_runtime_resume(struct device *dev, + struct generic_pm_domain *genpd) +{ + struct dev_list_entry *dle; + + list_for_each_entry(dle, &genpd->dev_list, node) { + if (dle->dev == dev) { + __pm_genpd_restore_device(dle, genpd); + break; + } + } + + if (genpd->start_device) + genpd->start_device(dev); +} + +/** * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. * @dev: Device to resume. 
* @@ -270,7 +292,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - struct dev_list_entry *dle; int ret; dev_dbg(dev, "%s()\n", __func__); @@ -284,17 +305,7 @@ static int pm_genpd_runtime_resume(struct device *dev) return ret; mutex_lock(&genpd->lock); - - list_for_each_entry(dle, &genpd->dev_list, node) { - if (dle->dev == dev) { - __pm_genpd_restore_device(dle, genpd); - break; - } - } - - if (genpd->start_device) - genpd->start_device(dev); - + __pm_genpd_runtime_resume(dev, genpd); mutex_unlock(&genpd->lock); return 0; @@ -303,12 +314,493 @@ static int pm_genpd_runtime_resume(struct device *dev) #else static inline void genpd_power_off_work_fn(struct work_struct *work) {} +static inline void __pm_genpd_runtime_resume(struct device *dev, + struct generic_pm_domain *genpd) {} #define pm_genpd_runtime_suspend NULL #define pm_genpd_runtime_resume NULL #endif /* CONFIG_PM_RUNTIME */ +#ifdef CONFIG_PM_SLEEP + +/** + * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. + * @genpd: PM domain to power off, if possible. + * + * Check if the given PM domain can be powered off (during system suspend or + * hibernation) and do that if so. Also, in that case propagate to its parent. + * + * This function is only called in "noirq" stages of system power transitions, + * so it need not acquire locks (all of the "noirq" callbacks are executed + * sequentially, so it is guaranteed that it will never run twice in parallel). + */ +static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) +{ + struct generic_pm_domain *parent = genpd->parent; + + if (genpd->power_is_off) + return; + + if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) + return; + + if (genpd->power_off) + genpd->power_off(genpd); + + genpd->power_is_off = true; + if (parent) { + genpd_sd_counter_dec(parent); + pm_genpd_sync_poweroff(parent); + } +} + +/** + * pm_genpd_prepare - Start power transition of a device in a PM domain. + * @dev: Device to start the transition of. + * + * Start a power transition of a device (during a system-wide power transition) + * under the assumption that its pm_domain field points to the domain member of + * an object of type struct generic_pm_domain representing a PM domain + * consisting of I/O devices. + */ +static int pm_genpd_prepare(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + mutex_lock(&genpd->lock); + + if (genpd->prepared_count++ == 0) + genpd->suspend_power_off = genpd->power_is_off; + + if (genpd->suspend_power_off) { + mutex_unlock(&genpd->lock); + return 0; + } + + /* + * If the device is in the (runtime) "suspended" state, call + * .start_device() for it, if defined. + */ + if (pm_runtime_suspended(dev)) + __pm_genpd_runtime_resume(dev, genpd); + + /* + * Do not check if runtime resume is pending at this point, because it + * has been taken care of already and if pm_genpd_poweron() ran at this + * point as a result of the check, it would deadlock. + */ + __pm_runtime_disable(dev, false); + + mutex_unlock(&genpd->lock); + + return pm_generic_prepare(dev); +} + +/** + * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. + * @dev: Device to suspend. 
+ * + * Suspend a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a PM domain consisting of I/O devices. + */ +static int pm_genpd_suspend(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); +} + +/** + * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. + * @dev: Device to suspend. + * + * Carry out a late suspend of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int pm_genpd_suspend_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + ret = pm_generic_suspend_noirq(dev); + if (ret) + return ret; + + if (genpd->stop_device) + genpd->stop_device(dev); + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + + return 0; +} + +/** + * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. + * @dev: Device to resume. + * + * Carry out an early resume of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_resume_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + pm_genpd_poweron(genpd); + genpd->suspended_count--; + if (genpd->start_device) + genpd->start_device(dev); + + return pm_generic_resume_noirq(dev); +} + +/** + * pm_genpd_resume - Resume a device belonging to an I/O power domain. + * @dev: Device to resume. + * + * Resume a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_resume(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); +} + +/** + * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. + * @dev: Device to freeze. + * + * Freeze a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. 
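+ *
+ * Return -EINVAL if @dev does not belong to a generic PM domain, 0 if the
+ * domain was powered off when the system transition started, or the result
+ * of pm_generic_freeze() otherwise.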
+ */ +static int pm_genpd_freeze(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); +} + +/** + * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. + * @dev: Device to freeze. + * + * Carry out a late freeze of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_freeze_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + ret = pm_generic_freeze_noirq(dev); + if (ret) + return ret; + + if (genpd->stop_device) + genpd->stop_device(dev); + + return 0; +} + +/** + * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. + * @dev: Device to thaw. + * + * Carry out an early thaw of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_thaw_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + if (genpd->start_device) + genpd->start_device(dev); + + return pm_generic_thaw_noirq(dev); +} + +/** + * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. + * @dev: Device to thaw. + * + * Thaw a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_thaw(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); +} + +/** + * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. + * @dev: Device to suspend. + * + * Power off a device under the assumption that its pm_domain field points to + * the domain member of an object of type struct generic_pm_domain representing + * a PM domain consisting of I/O devices. + */ +static int pm_genpd_dev_poweroff(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); +} + +/** + * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. + * @dev: Device to suspend. + * + * Carry out a late powering off of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
+ */ +static int pm_genpd_dev_poweroff_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->suspend_power_off) + return 0; + + ret = pm_generic_poweroff_noirq(dev); + if (ret) + return ret; + + if (genpd->stop_device) + genpd->stop_device(dev); + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + + return 0; +} + +/** + * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. + * @dev: Device to resume. + * + * Carry out an early restore of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int pm_genpd_restore_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * Since all of the "noirq" callbacks are executed sequentially, it is + * guaranteed that this function will never run twice in parallel for + * the same PM domain, so it is not necessary to use locking here. + */ + genpd->power_is_off = true; + if (genpd->suspend_power_off) { + /* + * The boot kernel might put the domain into the power on state, + * so make sure it really is powered off. + */ + if (genpd->power_off) + genpd->power_off(genpd); + return 0; + } + + pm_genpd_poweron(genpd); + genpd->suspended_count--; + if (genpd->start_device) + genpd->start_device(dev); + + return pm_generic_restore_noirq(dev); +} + +/** + * pm_genpd_restore - Restore a device belonging to an I/O power domain. + * @dev: Device to resume. + * + * Restore a device under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static int pm_genpd_restore(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); +} + +/** + * pm_genpd_complete - Complete power transition of a device in a power domain. + * @dev: Device to complete the transition of. + * + * Complete a power transition of a device (during a system-wide power + * transition) under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. 
+ */ +static void pm_genpd_complete(struct device *dev) +{ + struct generic_pm_domain *genpd; + bool run_complete; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return; + + mutex_lock(&genpd->lock); + + run_complete = !genpd->suspend_power_off; + if (--genpd->prepared_count == 0) + genpd->suspend_power_off = false; + + mutex_unlock(&genpd->lock); + + if (run_complete) { + pm_generic_complete(dev); + pm_runtime_enable(dev); + } +} + +#else + +#define pm_genpd_prepare NULL +#define pm_genpd_suspend NULL +#define pm_genpd_suspend_noirq NULL +#define pm_genpd_resume_noirq NULL +#define pm_genpd_resume NULL +#define pm_genpd_freeze NULL +#define pm_genpd_freeze_noirq NULL +#define pm_genpd_thaw_noirq NULL +#define pm_genpd_thaw NULL +#define pm_genpd_dev_poweroff_noirq NULL +#define pm_genpd_dev_poweroff NULL +#define pm_genpd_restore_noirq NULL +#define pm_genpd_restore NULL +#define pm_genpd_complete NULL + +#endif /* CONFIG_PM_SLEEP */ + /** * pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. @@ -331,6 +823,11 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) goto out; } + if (genpd->prepared_count > 0) { + ret = -EAGAIN; + goto out; + } + list_for_each_entry(dle, &genpd->dev_list, node) if (dle->dev == dev) { ret = -EINVAL; @@ -346,6 +843,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) dle->dev = dev; dle->need_restore = false; list_add_tail(&dle->node, &genpd->dev_list); + genpd->device_count++; spin_lock_irq(&dev->power.lock); dev->pm_domain = &genpd->domain; @@ -375,6 +873,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, mutex_lock(&genpd->lock); + if (genpd->prepared_count > 0) { + ret = -EAGAIN; + goto out; + } + list_for_each_entry(dle, &genpd->dev_list, node) { if (dle->dev != dev) continue; @@ -383,6 +886,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, dev->pm_domain = NULL; spin_unlock_irq(&dev->power.lock); + genpd->device_count--; list_del(&dle->node); kfree(dle); @@ -390,6 +894,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, break; } + out: mutex_unlock(&genpd->lock); return ret; @@ -498,7 +1003,23 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->in_progress = 0; genpd->sd_count = 0; genpd->power_is_off = is_off; + genpd->device_count = 0; + genpd->suspended_count = 0; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; + genpd->domain.ops.prepare = pm_genpd_prepare; + genpd->domain.ops.suspend = pm_genpd_suspend; + genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; + genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; + genpd->domain.ops.resume = pm_genpd_resume; + genpd->domain.ops.freeze = pm_genpd_freeze; + genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; + genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; + genpd->domain.ops.thaw = pm_genpd_thaw; + genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; + genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; + genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; + genpd->domain.ops.restore = pm_genpd_restore; + genpd->domain.ops.complete = pm_genpd_complete; } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b1a22c6..7961b0d 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -11,6 +11,9 @@ #include 
+#define GPD_IN_SUSPEND 1 +#define GPD_POWER_OFF 2 + struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); }; @@ -27,12 +30,21 @@ struct generic_pm_domain { unsigned int in_progress; /* Number of devices being suspended now */ unsigned int sd_count; /* Number of subdomains with power "on" */ bool power_is_off; /* Whether or not power has been removed */ + unsigned int device_count; /* Number of devices */ + unsigned int suspended_count; /* System suspend device counter */ + unsigned int prepared_count; /* Suspend counter of prepared devices */ + bool suspend_power_off; /* Power status before system suspend */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); int (*start_device)(struct device *dev); int (*stop_device)(struct device *dev); }; +static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) +{ + return container_of(pd, struct generic_pm_domain, domain); +} + struct dev_list_entry { struct list_head node; struct device *dev; -- cgit v0.10.2 From d4f2d87a8b46c14c4307c690c92bd08229f66ecf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:29 +0200 Subject: PM / Domains: Wakeup devices support for system sleep transitions There is the problem how to handle devices set up to wake up the system from sleep states during system-wide power transitions. In some cases, those devices can be turned off entirely, because the wakeup signals will be generated on their behalf anyway. In some other cases, they will generate wakeup signals if their clocks are stopped, but only if power is not removed from them. Finally, in some cases, they can only generate wakeup signals if power is not removed from them and their clocks are enabled. To allow platform-specific code to decide whether or not to put wakeup devices (and their PM domains) into low-power state during system-wide transitions, such as system suspend, introduce a new generic PM domain callback, .active_wakeup(), that will be used during the "noirq" phase of system suspend and hibernation (after image creation) to decide what to do with wakeup devices. Specifically, if this callback is present and returns "true", the generic PM domain code will not execute .stop_device() for the given wakeup device and its PM domain won't be powered off. Signed-off-by: Rafael J. 
Wysocki Acked-by: Kevin Hilman diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 33086e9..1aed94c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -450,6 +450,10 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (ret) return ret; + if (device_may_wakeup(dev) + && genpd->active_wakeup && genpd->active_wakeup(dev)) + return 0; + if (genpd->stop_device) genpd->stop_device(dev); @@ -670,6 +674,10 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev) if (ret) return ret; + if (device_may_wakeup(dev) + && genpd->active_wakeup && genpd->active_wakeup(dev)) + return 0; + if (genpd->stop_device) genpd->stop_device(dev); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 7961b0d..98491ee 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -38,6 +38,7 @@ struct generic_pm_domain { int (*power_on)(struct generic_pm_domain *domain); int (*start_device)(struct device *dev); int (*stop_device)(struct device *dev); + bool (*active_wakeup)(struct device *dev); }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) -- cgit v0.10.2 From b7b95920aa2e89e655afe9913ee0e55855ceda90 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:37 +0200 Subject: PM: Allow the clocks management code to be used during system suspend The common clocks management code in drivers/base/power/clock_ops.c is going to be used during system-wide power transitions as well as for runtime PM, so it shouldn't depend on CONFIG_PM_RUNTIME. However, the suspend/resume functions provided by it for CONFIG_PM_RUNTIME unset, to be used during system-wide power transitions, should not behave in the same way as their counterparts defined for CONFIG_PM_RUNTIME set, because in that case the clocks are managed differently at run time. The names of the functions still contain the word "runtime" after this change, but that is going to be modified by a separate patch later. Signed-off-by: Rafael J. Wysocki Reviewed-by: Kevin Hilman diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index c562481..2fb9c12 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -15,7 +15,7 @@ #include #include -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct pm_runtime_clk_data { struct list_head clock_list; @@ -191,6 +191,10 @@ void pm_runtime_clk_destroy(struct device *dev) kfree(prd); } +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_RUNTIME + /** * pm_runtime_clk_acquire - Acquire a device clock. * @dev: Device whose clock is to be acquired. @@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, #else /* !CONFIG_PM_RUNTIME */ +#ifdef CONFIG_PM + +/** + * pm_runtime_clk_suspend - Disable clocks in a device's PM clock list. + * @dev: Device to disable the clocks for. + */ +int pm_runtime_clk_suspend(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + /* If there is no driver, the clocks are already disabled. */ + if (!prd || !dev->driver) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry_reverse(ce, &prd->clock_list, node) + clk_disable(ce->clk); + + mutex_unlock(&prd->lock); + + return 0; +} + +/** + * pm_runtime_clk_resume - Enable clocks in a device's PM clock list. + * @dev: Device to enable the clocks for. 
+ */ +int pm_runtime_clk_resume(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + /* If there is no driver, the clocks should remain disabled. */ + if (!prd || !dev->driver) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry(ce, &prd->clock_list, node) + clk_enable(ce->clk); + + mutex_unlock(&prd->lock); + + return 0; +} + +#endif /* CONFIG_PM */ + /** * enable_clock - Enable a device clock. * @dev: Device whose clock is to be enabled. diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index ef91904..1bd5063 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -251,7 +251,7 @@ struct pm_clk_notifier_block { char *con_ids[]; }; -#ifdef CONFIG_PM_RUNTIME_CLK +#ifdef CONFIG_PM_CLK extern int pm_runtime_clk_init(struct device *dev); extern void pm_runtime_clk_destroy(struct device *dev); extern int pm_runtime_clk_add(struct device *dev, const char *con_id); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index e83ac25..7b856b3 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -224,9 +224,9 @@ config PM_OPP implementations a ready to use framework to manage OPPs. For more information, read -config PM_RUNTIME_CLK +config PM_CLK def_bool y - depends on PM_RUNTIME && HAVE_CLK + depends on PM && HAVE_CLK config PM_GENERIC_DOMAINS bool -- cgit v0.10.2 From 3d5c30367cbc0c55c93bb158e824e00badc7ddc4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:44 +0200 Subject: PM: Rename clock management functions The common PM clock management functions may be used for system suspend/resume as well as for runtime PM, so rename them accordingly. Modify kerneldoc comments describing these functions and kernel messages printed by them, so that they refer to power management in general rather than to runtime PM. Signed-off-by: Rafael J.
Wysocki Reviewed-by: Kevin Hilman diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 212f331..943072d 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev) if (ret) return ret; - ret = pm_runtime_clk_suspend(dev); + ret = pm_clk_suspend(dev); if (ret) { pm_generic_runtime_resume(dev); return ret; @@ -45,7 +45,7 @@ static int omap1_pm_runtime_resume(struct device *dev) { dev_dbg(dev, "%s\n", __func__); - pm_runtime_clk_resume(dev); + pm_clk_resume(dev); return pm_generic_runtime_resume(dev); } @@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void) if (!cpu_class_is_omap1()) return -ENODEV; - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 99802d2..2bcde1c 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -30,8 +30,8 @@ static int default_platform_runtime_idle(struct device *dev) static struct dev_pm_domain default_pm_domain = { .ops = { - .runtime_suspend = pm_runtime_clk_suspend, - .runtime_resume = pm_runtime_clk_resume, + .runtime_suspend = pm_clk_suspend, + .runtime_resume = pm_clk_resume, .runtime_idle = default_platform_runtime_idle, USE_PLATFORM_PM_SLEEP_OPS }, @@ -52,7 +52,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = { static int __init sh_pm_runtime_init(void) { - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(sh_pm_runtime_init); diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 2fb9c12..a846b2f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -17,7 +17,7 @@ #ifdef CONFIG_PM -struct pm_runtime_clk_data { +struct pm_clk_data { struct list_head clock_list; struct mutex lock; }; @@ -36,25 +36,25 @@ struct pm_clock_entry { enum pce_status status; }; -static struct pm_runtime_clk_data *__to_prd(struct device *dev) +static struct pm_clk_data *__to_pcd(struct device *dev) { return dev ? dev->power.subsys_data : NULL; } /** - * pm_runtime_clk_add - Start using a device clock for runtime PM. - * @dev: Device whose clock is going to be used for runtime PM. + * pm_clk_add - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. * @con_id: Connection ID of the clock. * * Add the clock represented by @con_id to the list of clocks used for - * the runtime PM of @dev. + * the power management of @dev. */ -int pm_runtime_clk_add(struct device *dev, const char *con_id) +int pm_clk_add(struct device *dev, const char *con_id) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; - if (!prd) + if (!pcd) return -EINVAL; ce = kzalloc(sizeof(*ce), GFP_KERNEL); @@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id) } } - mutex_lock(&prd->lock); - list_add_tail(&ce->node, &prd->clock_list); - mutex_unlock(&prd->lock); + mutex_lock(&pcd->lock); + list_add_tail(&ce->node, &pcd->clock_list); + mutex_unlock(&pcd->lock); return 0; } /** - * __pm_runtime_clk_remove - Destroy runtime PM clock entry. - * @ce: Runtime PM clock entry to destroy. 
+ * __pm_clk_remove - Destroy PM clock entry. + * @ce: PM clock entry to destroy. * - * This routine must be called under the mutex protecting the runtime PM list - * of clocks corresponding the the @ce's device. + * This routine must be called under the mutex protecting the PM list of clocks + * corresponding the the @ce's device. */ -static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) +static void __pm_clk_remove(struct pm_clock_entry *ce) { if (!ce) return; @@ -108,87 +108,87 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) } /** - * pm_runtime_clk_remove - Stop using a device clock for runtime PM. - * @dev: Device whose clock should not be used for runtime PM any more. + * pm_clk_remove - Stop using a device clock for power management. + * @dev: Device whose clock should not be used for PM any more. * @con_id: Connection ID of the clock. * * Remove the clock represented by @con_id from the list of clocks used for - * the runtime PM of @dev. + * the power management of @dev. */ -void pm_runtime_clk_remove(struct device *dev, const char *con_id) +void pm_clk_remove(struct device *dev, const char *con_id) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; - if (!prd) + if (!pcd) return; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry(ce, &prd->clock_list, node) { + list_for_each_entry(ce, &pcd->clock_list, node) { if (!con_id && !ce->con_id) { - __pm_runtime_clk_remove(ce); + __pm_clk_remove(ce); break; } else if (!con_id || !ce->con_id) { continue; } else if (!strcmp(con_id, ce->con_id)) { - __pm_runtime_clk_remove(ce); + __pm_clk_remove(ce); break; } } - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); } /** - * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. - * @dev: Device to initialize the list of runtime PM clocks for. + * pm_clk_init - Initialize a device's list of power management clocks. + * @dev: Device to initialize the list of PM clocks for. * - * Allocate a struct pm_runtime_clk_data object, initialize its lock member and + * Allocate a struct pm_clk_data object, initialize its lock member and * make the @dev's power.subsys_data field point to it. */ -int pm_runtime_clk_init(struct device *dev) +int pm_clk_init(struct device *dev) { - struct pm_runtime_clk_data *prd; + struct pm_clk_data *pcd; - prd = kzalloc(sizeof(*prd), GFP_KERNEL); - if (!prd) { - dev_err(dev, "Not enough memory fo runtime PM data.\n"); + pcd = kzalloc(sizeof(*pcd), GFP_KERNEL); + if (!pcd) { + dev_err(dev, "Not enough memory for PM clock data.\n"); return -ENOMEM; } - INIT_LIST_HEAD(&prd->clock_list); - mutex_init(&prd->lock); - dev->power.subsys_data = prd; + INIT_LIST_HEAD(&pcd->clock_list); + mutex_init(&pcd->lock); + dev->power.subsys_data = pcd; return 0; } /** - * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. - * @dev: Device to destroy the list of runtime PM clocks for. + * pm_clk_destroy - Destroy a device's list of power management clocks. + * @dev: Device to destroy the list of PM clocks for. * * Clear the @dev's power.subsys_data field, remove the list of clock entries - * from the struct pm_runtime_clk_data object pointed to by it before and free + * from the struct pm_clk_data object pointed to by it before and free * that object. 
*/ -void pm_runtime_clk_destroy(struct device *dev) +void pm_clk_destroy(struct device *dev) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce, *c; - if (!prd) + if (!pcd) return; dev->power.subsys_data = NULL; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) - __pm_runtime_clk_remove(ce); + list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) + __pm_clk_remove(ce); - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); - kfree(prd); + kfree(pcd); } #endif /* CONFIG_PM */ @@ -196,11 +196,11 @@ void pm_runtime_clk_destroy(struct device *dev) #ifdef CONFIG_PM_RUNTIME /** - * pm_runtime_clk_acquire - Acquire a device clock. + * pm_clk_acquire - Acquire a device clock. * @dev: Device whose clock is to be acquired. * @con_id: Connection ID of the clock. */ -static void pm_runtime_clk_acquire(struct device *dev, +static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) { ce->clk = clk_get(dev, ce->con_id); @@ -213,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev, } /** - * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. + * pm_clk_suspend - Disable clocks in a device's PM clock list. * @dev: Device to disable the clocks for. */ -int pm_runtime_clk_suspend(struct device *dev) +int pm_clk_suspend(struct device *dev) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; dev_dbg(dev, "%s()\n", __func__); - if (!prd) + if (!pcd) return 0; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry_reverse(ce, &prd->clock_list, node) { + list_for_each_entry_reverse(ce, &pcd->clock_list, node) { if (ce->status == PCE_STATUS_NONE) - pm_runtime_clk_acquire(dev, ce); + pm_clk_acquire(dev, ce); if (ce->status < PCE_STATUS_ERROR) { clk_disable(ce->clk); @@ -238,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev) } } - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); return 0; } /** - * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. + * pm_clk_resume - Enable clocks in a device's PM clock list. * @dev: Device to enable the clocks for. */ -int pm_runtime_clk_resume(struct device *dev) +int pm_clk_resume(struct device *dev) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; dev_dbg(dev, "%s()\n", __func__); - if (!prd) + if (!pcd) return 0; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry(ce, &prd->clock_list, node) { + list_for_each_entry(ce, &pcd->clock_list, node) { if (ce->status == PCE_STATUS_NONE) - pm_runtime_clk_acquire(dev, ce); + pm_clk_acquire(dev, ce); if (ce->status < PCE_STATUS_ERROR) { clk_enable(ce->clk); @@ -269,13 +269,13 @@ int pm_runtime_clk_resume(struct device *dev) } } - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); return 0; } /** - * pm_runtime_clk_notify - Notify routine for device addition and removal. + * pm_clk_notify - Notify routine for device addition and removal. * @nb: Notifier block object this function is a member of. * @action: Operation being carried out by the caller. * @data: Device the routine is being run for. @@ -284,13 +284,13 @@ int pm_runtime_clk_resume(struct device *dev) * struct pm_clk_notifier_block containing all of the requisite data. 
* Specifically, the pm_domain member of that object is copied to the device's * pm_domain field and its con_ids member is used to populate the device's list - * of runtime PM clocks, depending on @action. + * of PM clocks, depending on @action. * * If the device's pm_domain field is already populated with a value different * from the one stored in the struct pm_clk_notifier_block object, the function * does nothing. */ -static int pm_runtime_clk_notify(struct notifier_block *nb, +static int pm_clk_notify(struct notifier_block *nb, unsigned long action, void *data) { struct pm_clk_notifier_block *clknb; @@ -307,16 +307,16 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, if (dev->pm_domain) break; - error = pm_runtime_clk_init(dev); + error = pm_clk_init(dev); if (error) break; dev->pm_domain = clknb->pm_domain; if (clknb->con_ids[0]) { for (con_id = clknb->con_ids; *con_id; con_id++) - pm_runtime_clk_add(dev, *con_id); + pm_clk_add(dev, *con_id); } else { - pm_runtime_clk_add(dev, NULL); + pm_clk_add(dev, NULL); } break; @@ -325,7 +325,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, break; dev->pm_domain = NULL; - pm_runtime_clk_destroy(dev); + pm_clk_destroy(dev); break; } @@ -337,51 +337,51 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, #ifdef CONFIG_PM /** - * pm_runtime_clk_suspend - Disable clocks in a device's PM clock list. + * pm_clk_suspend - Disable clocks in a device's PM clock list. * @dev: Device to disable the clocks for. */ -int pm_runtime_clk_suspend(struct device *dev) +int pm_clk_suspend(struct device *dev) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; dev_dbg(dev, "%s()\n", __func__); /* If there is no driver, the clocks are already disabled. */ - if (!prd || !dev->driver) + if (!pcd || !dev->driver) return 0; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry_reverse(ce, &prd->clock_list, node) + list_for_each_entry_reverse(ce, &pcd->clock_list, node) clk_disable(ce->clk); - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); return 0; } /** - * pm_runtime_clk_resume - Enable clocks in a device's PM clock list. + * pm_clk_resume - Enable clocks in a device's PM clock list. * @dev: Device to enable the clocks for. */ -int pm_runtime_clk_resume(struct device *dev) +int pm_clk_resume(struct device *dev) { - struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce; dev_dbg(dev, "%s()\n", __func__); /* If there is no driver, the clocks should remain disabled. */ - if (!prd || !dev->driver) + if (!pcd || !dev->driver) return 0; - mutex_lock(&prd->lock); + mutex_lock(&pcd->lock); - list_for_each_entry(ce, &prd->clock_list, node) + list_for_each_entry(ce, &pcd->clock_list, node) clk_enable(ce->clk); - mutex_unlock(&prd->lock); + mutex_unlock(&pcd->lock); return 0; } @@ -423,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id) } /** - * pm_runtime_clk_notify - Notify routine for device addition and removal. + * pm_clk_notify - Notify routine for device addition and removal. * @nb: Notifier block object this function is a member of. * @action: Operation being carried out by the caller. * @data: Device the routine is being run for. @@ -433,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id) * Specifically, the con_ids member of that object is used to enable or disable * the device's clocks, depending on @action. 
*/ -static int pm_runtime_clk_notify(struct notifier_block *nb, +static int pm_clk_notify(struct notifier_block *nb, unsigned long action, void *data) { struct pm_clk_notifier_block *clknb; @@ -469,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, #endif /* !CONFIG_PM_RUNTIME */ /** - * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. + * pm_clk_add_notifier - Add bus type notifier for power management clocks. * @bus: Bus type to add the notifier to. * @clknb: Notifier to be added to the given bus type. * * The nb member of @clknb is not expected to be initialized and its - * notifier_call member will be replaced with pm_runtime_clk_notify(). However, + * notifier_call member will be replaced with pm_clk_notify(). However, * the remaining members of @clknb should be populated prior to calling this * routine. */ -void pm_runtime_clk_add_notifier(struct bus_type *bus, +void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb) { if (!bus || !clknb) return; - clknb->nb.notifier_call = pm_runtime_clk_notify; + clknb->nb.notifier_call = pm_clk_notify; bus_register_notifier(bus, &clknb->nb); } diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 1bd5063..dfb8539 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -252,36 +252,36 @@ struct pm_clk_notifier_block { }; #ifdef CONFIG_PM_CLK -extern int pm_runtime_clk_init(struct device *dev); -extern void pm_runtime_clk_destroy(struct device *dev); -extern int pm_runtime_clk_add(struct device *dev, const char *con_id); -extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); -extern int pm_runtime_clk_suspend(struct device *dev); -extern int pm_runtime_clk_resume(struct device *dev); +extern int pm_clk_init(struct device *dev); +extern void pm_clk_destroy(struct device *dev); +extern int pm_clk_add(struct device *dev, const char *con_id); +extern void pm_clk_remove(struct device *dev, const char *con_id); +extern int pm_clk_suspend(struct device *dev); +extern int pm_clk_resume(struct device *dev); #else -static inline int pm_runtime_clk_init(struct device *dev) +static inline int pm_clk_init(struct device *dev) { return -EINVAL; } -static inline void pm_runtime_clk_destroy(struct device *dev) +static inline void pm_clk_destroy(struct device *dev) { } -static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) +static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } -static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) +static inline void pm_clk_remove(struct device *dev, const char *con_id) { } -#define pm_runtime_clock_suspend NULL -#define pm_runtime_clock_resume NULL +#define pm_clk_suspend NULL +#define pm_clk_resume NULL #endif #ifdef CONFIG_HAVE_CLK -extern void pm_runtime_clk_add_notifier(struct bus_type *bus, +extern void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb); #else -static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, +static inline void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb) { } -- cgit v0.10.2 From e3e0109138376bb262b8ecf33bad0586fa131925 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:13:56 +0200 Subject: ARM / shmobile: Support for I/O power domains for SH7372 (v9) Use the generic power domains support introduced by the previous patch to implement support for power domains on SH7372. 
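For reference, here is a minimal sketch (not part of any patch in this series) of how a platform might plug a hardware power island into the interfaces introduced above. The foo_* names and the platform device are hypothetical; pm_genpd_init(), pm_genpd_add_device(), pm_clk_init(), pm_clk_add(), pm_clk_suspend(), pm_clk_resume() and the power_on/power_off/start_device/stop_device/active_wakeup callbacks are the interfaces added by the preceding patches, and the SH7372 code in the diff below follows the same pattern.

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

/* Hypothetical power island, modeled as one generic PM domain. */
static struct generic_pm_domain foo_pd;

static int foo_pd_power_off(struct generic_pm_domain *genpd)
{
	/* Platform-specific: remove power from the island here. */
	return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *genpd)
{
	/* Platform-specific: restore power to the island here. */
	return 0;
}

static bool foo_pd_active_wakeup(struct device *dev)
{
	/* Keep the domain powered for wakeup devices during system sleep. */
	return true;
}

static void foo_setup_pm_domain(struct platform_device *pdev)
{
	/* Register the domain, assuming it starts out powered on. */
	pm_genpd_init(&foo_pd, NULL, false);
	foo_pd.power_off = foo_pd_power_off;
	foo_pd.power_on = foo_pd_power_on;
	/* Stop/start devices by gating the clocks on their PM clock lists. */
	foo_pd.stop_device = pm_clk_suspend;
	foo_pd.start_device = pm_clk_resume;
	foo_pd.active_wakeup = foo_pd_active_wakeup;

	/* Give the device a PM clock list for stop_device/start_device. */
	if (!pdev->dev.power.subsys_data) {
		pm_clk_init(&pdev->dev);
		pm_clk_add(&pdev->dev, NULL);
	}

	/* Bind the device to the domain. */
	pm_genpd_add_device(&foo_pd, &pdev->dev);
}

Passing false as the is_off argument registers the domain as initially powered, which matches the SH7372 code below calling pd_power_up() right after pm_genpd_init().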
Signed-off-by: Rafael J. Wysocki Acked-by: Paul Mundt diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9adc278..e04fa9d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -642,6 +642,7 @@ config ARCH_SHMOBILE select NO_IOPORT select SPARSE_IRQ select MULTI_IRQ_HANDLER + select PM_GENERIC_DOMAINS if PM help Support for Renesas's SH-Mobile and R-Mobile ARM platforms. diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 3802f2a..f93b1cb 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1582,6 +1582,10 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); + sh7372_init_pm_domain(&sh7372_a4lc); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); + hdmi_init_pm_clock(); sh7372_pm_init(); } diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index df20d76..495013c 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -12,6 +12,7 @@ #define __ASM_SH7372_H__ #include +#include /* * Pin Function Controller: @@ -470,4 +471,27 @@ extern struct clk sh7372_fsibck_clk; extern struct clk sh7372_fsidiva_clk; extern struct clk sh7372_fsidivb_clk; +struct platform_device; + +struct sh7372_pm_domain { + struct generic_pm_domain genpd; + unsigned int bit_shift; +}; + +static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct sh7372_pm_domain, genpd); +} + +#ifdef CONFIG_PM +extern struct sh7372_pm_domain sh7372_a4lc; + +extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); +extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev); +#else +#define sh7372_init_pm_domain(pd) do { } while(0) +#define sh7372_add_device_to_domain(pd, pdev) do { } while(0) +#endif /* CONFIG_PM */ + #endif /* __ASM_SH7372_H__ */ diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 8e4aadf..e17f124 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -15,16 +15,118 @@ #include #include #include +#include +#include +#include #include #include #include #include +#include #define SMFRAM 0xe6a70000 #define SYSTBCR 0xe6150024 #define SBAR 0xe6180020 #define APARMBAREA 0xe6f10020 +#define SPDCR 0xe6180008 +#define SWUCR 0xe6180014 +#define PSTR 0xe6180080 + +#define PSTR_RETRIES 100 +#define PSTR_DELAY_US 10 + +#ifdef CONFIG_PM + +static int pd_power_down(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + + if (__raw_readl(PSTR) & mask) { + unsigned int retry_count; + + __raw_writel(mask, SPDCR); + + for (retry_count = PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SPDCR) & mask)) + break; + cpu_relax(); + } + } + + pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return 0; +} + +static int pd_power_up(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + unsigned int retry_count; + int ret = 0; + + if (__raw_readl(PSTR) & mask) + goto out; + + __raw_writel(mask, SWUCR); + + for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { + if 
(!(__raw_readl(SWUCR) & mask)) + goto out; + if (retry_count > PSTR_RETRIES) + udelay(PSTR_DELAY_US); + else + cpu_relax(); + } + if (__raw_readl(SWUCR) & mask) + ret = -EIO; + + out: + pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return ret; +} + +static bool pd_active_wakeup(struct device *dev) +{ + return true; +} + +void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) +{ + struct generic_pm_domain *genpd = &sh7372_pd->genpd; + + pm_genpd_init(genpd, NULL, false); + genpd->stop_device = pm_clk_suspend; + genpd->start_device = pm_clk_resume; + genpd->active_wakeup = pd_active_wakeup; + genpd->power_off = pd_power_down; + genpd->power_on = pd_power_up; + pd_power_up(&sh7372_pd->genpd); +} + +void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (!dev->power.subsys_data) { + pm_clk_init(dev); + pm_clk_add(dev, NULL); + } + pm_genpd_add_device(&sh7372_pd->genpd, dev); +} + +struct sh7372_pm_domain sh7372_a4lc = { + .bit_shift = 1, +}; + +#endif /* CONFIG_PM */ + static void sh7372_enter_core_standby(void) { void __iomem *smfram = (void __iomem *)SMFRAM; -- cgit v0.10.2 From 96f7934e448fd52644b8862ea12761e1524f3bf3 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 1 Jul 2011 22:14:34 +0200 Subject: ARM: mach-shmobile: sh7372 A4LC support for AP4EVB The AP4EVB board is also using a sh7372 SoC, so tie in A4LC support on that board as well. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 803bc6e..a26f895 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1408,6 +1408,9 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + hdmi_init_pm_clock(); fsi_init_pm_clock(); sh7372_pm_init(); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index f93b1cb..cc1ccd8 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1582,7 +1582,6 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); - sh7372_init_pm_domain(&sh7372_a4lc); sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index cd807ee..ec1ae8c 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -841,6 +841,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { + sh7372_init_pm_domain(&sh7372_a4lc); + platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); -- cgit v0.10.2 From 33afebf3da2e8c26faebb1b271c810f937e35788 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 1 Jul 2011 22:14:45 +0200 Subject: ARM: mach-shmobile: sh7372 A3RV support Add support for the sh7372 A3RV power domain and hook up the VPU device. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index 495013c..e935b5e 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -485,6 +485,7 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern struct sh7372_pm_domain sh7372_a4lc; +extern struct sh7372_pm_domain sh7372_a3rv; extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index e17f124..d467c86 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -125,6 +125,10 @@ struct sh7372_pm_domain sh7372_a4lc = { .bit_shift = 1, }; +struct sh7372_pm_domain sh7372_a3rv = { + .bit_shift = 6, +}; + #endif /* CONFIG_PM */ static void sh7372_enter_core_standby(void) diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index ec1ae8c..14a497f 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -842,12 +842,15 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { sh7372_init_pm_domain(&sh7372_a4lc); + sh7372_init_pm_domain(&sh7372_a3rv); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); + + sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); } void __init sh7372_add_early_devices(void) -- cgit v0.10.2 From 082517aa21c60c06bd2a4c287aab5e1b3d8c7c22 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 1 Jul 2011 22:14:53 +0200 Subject: ARM: mach-shmobile: sh7372 A3RI support Add support for the sh7372 A3RI power domain. This domain contains the ISP hardware block, but there is no driver available. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index e935b5e..b99f3d6 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -486,6 +486,7 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern struct sh7372_pm_domain sh7372_a4lc; extern struct sh7372_pm_domain sh7372_a3rv; +extern struct sh7372_pm_domain sh7372_a3ri; extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index d467c86..48c3b91 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -129,6 +129,10 @@ struct sh7372_pm_domain sh7372_a3rv = { .bit_shift = 6, }; +struct sh7372_pm_domain sh7372_a3ri = { + .bit_shift = 8, +}; + #endif /* CONFIG_PM */ static void sh7372_enter_core_standby(void) diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 14a497f..d558333 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -843,6 +843,7 @@ void __init sh7372_add_standard_devices(void) { sh7372_init_pm_domain(&sh7372_a4lc); sh7372_init_pm_domain(&sh7372_a3rv); + sh7372_init_pm_domain(&sh7372_a3ri); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); -- cgit v0.10.2 From c47586b6d36ef2d5d7dc39afc44b75e31bc1a671 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 1 Jul 2011 22:15:01 +0200 Subject: ARM: mach-shmobile: sh7372 A3SG support Add support for the sh7372 A3SG power domain. This domain contains the SGX hardware block, but there is no open source driver available. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index b99f3d6..1aed9da 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -487,6 +487,7 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) extern struct sh7372_pm_domain sh7372_a4lc; extern struct sh7372_pm_domain sh7372_a3rv; extern struct sh7372_pm_domain sh7372_a3ri; +extern struct sh7372_pm_domain sh7372_a3sg; extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 48c3b91..474a15a 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -133,6 +133,10 @@ struct sh7372_pm_domain sh7372_a3ri = { .bit_shift = 8, }; +struct sh7372_pm_domain sh7372_a3sg = { + .bit_shift = 13, +}; + #endif /* CONFIG_PM */ static void sh7372_enter_core_standby(void) diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index d558333..b219cea 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -844,6 +844,7 @@ void __init sh7372_add_standard_devices(void) sh7372_init_pm_domain(&sh7372_a4lc); sh7372_init_pm_domain(&sh7372_a3rv); sh7372_init_pm_domain(&sh7372_a3ri); + sh7372_init_pm_domain(&sh7372_a3sg); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); -- cgit v0.10.2 From 455716e9b12ba93e93181ac88bef62e4eb5ac66c Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:29:05 +0200 Subject: PM / Runtime: Update documentation of interactions with system sleep The documents describing the interactions between runtime PM and system sleep generally refer to the model in which the system sleep state is entered through a global firmware or hardware operation. As a result, some recommendations given in there are not entirely suitable for systems in which this is not the case. Update the documentation to take the existence of those systems into account. Signed-off-by: Rafael J. Wysocki Reviewed-by: Kevin Hilman diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 85c6f98..3384d59 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt @@ -604,7 +604,7 @@ state temporarily, for example so that its system wakeup capability can be disabled. This all depends on the hardware and the design of the subsystem and device driver in question. -During system-wide resume from a sleep state it's best to put devices into the -full-power state, as explained in Documentation/power/runtime_pm.txt. Refer to -that document for more information regarding this particular issue as well as +During system-wide resume from a sleep state it's easiest to put devices into +the full-power state, as explained in Documentation/power/runtime_pm.txt. Refer +to that document for more information regarding this particular issue as well as for information on the device runtime power management framework in general. diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 4b011b1..513c52e 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -553,9 +553,9 @@ suspend routine). It may be necessary to resume the device and suspend it again in order to do so. 
The same is true if the driver uses different power levels or other settings for run-time suspend and system sleep. -During system resume, devices generally should be brought back to full power, -even if they were suspended before the system sleep began. There are several -reasons for this, including: +During system resume, the simplest approach is to bring all devices back to full +power, even if they had been suspended before the system suspend began. There +are several reasons for this, including: * The device might need to switch power levels, wake-up settings, etc. @@ -572,16 +572,27 @@ reasons for this, including: * Even though the device was suspended, if its usage counter was > 0 then most likely it would need a run-time resume in the near future anyway. - * Always going back to full power is simplest. - -If the device was suspended before the sleep began, then its run-time PM status -will have to be updated to reflect the actual post-system sleep status. The way -to do this is: +If the device had been suspended before the system suspend began and it's +brought back to full power during resume, then its run-time PM status will have +to be updated to reflect the actual post-system sleep status. The way to do +this is: pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); +On some systems, however, system sleep is not entered through a global firmware +or hardware operation. Instead, all hardware components are put into low-power +states directly by the kernel in a coordinated way. Then, the system sleep +state effectively follows from the states the hardware components end up in +and the system is woken up from that state by a hardware interrupt or a similar +mechanism entirely under the kernel's control. As a result, the kernel never +gives control away and the states of all devices during resume are precisely +known to it. If that is the case and none of the situations listed above takes +place (in particular, if the system is not waking up from hibernation), it may +be more efficient to leave the devices that had been suspended before the system +suspend began in the suspended state. + 7. Generic subsystem callbacks Subsystems may wish to conserve code space by using the set of generic power -- cgit v0.10.2 From 632e270e01d8a1ee9e8ea56c83028727f17b1d17 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Jul 2011 22:29:15 +0200 Subject: PM / Runtime: Return special error code if runtime PM is disabled Some callers of pm_runtime_get_sync() and other runtime PM helper functions, scsi_autopm_get_host() and scsi_autopm_get_device() in particular, need to distinguish error codes returned when runtime PM is disabled (i.e. power.disable_depth is nonzero for the given device) from error codes returned in other situations. For this reason, make the runtime PM helper functions return -EACCES when power.disable_depth is nonzero and ensure that this error code won't be returned by them in any other circumstances. Modify scsi_autopm_get_host() and scsi_autopm_get_device() to check the error code returned by pm_runtime_get_sync() and ignore -EACCES. Signed-off-by: Rafael J. 
Wysocki diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 513c52e..0ec3d61 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -291,7 +291,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: - execute the subsystem-level suspend callback for the device; returns 0 on success, 1 if the device's run-time PM status was already 'suspended', or error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt - to suspend the device again in future + to suspend the device again in future and -EACCES means that + 'power.disable_depth' is different from 0 int pm_runtime_autosuspend(struct device *dev); - same as pm_runtime_suspend() except that the autosuspend delay is taken @@ -304,7 +305,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: success, 1 if the device's run-time PM status was already 'active' or error code on failure, where -EAGAIN means it may be safe to attempt to resume the device again in future, but 'power.runtime_error' should be - checked additionally + checked additionally, and -EACCES means that 'power.disable_depth' is + different from 0 int pm_request_idle(struct device *dev); - submit a request to execute the subsystem-level idle callback for the diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 5f5c423..ee99025 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev) if (dev->power.runtime_error) retval = -EINVAL; - else if (atomic_read(&dev->power.usage_count) > 0 - || dev->power.disable_depth > 0) + else if (dev->power.disable_depth > 0) + retval = -EACCES; + else if (atomic_read(&dev->power.usage_count) > 0) retval = -EAGAIN; else if (!pm_children_suspended(dev)) retval = -EBUSY; @@ -262,7 +263,7 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) spin_lock_irq(&dev->power.lock); } dev->power.runtime_error = retval; - return retval; + return retval != -EACCES ? retval : -EIO; } /** @@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags) if (dev->power.runtime_error) retval = -EINVAL; else if (dev->power.disable_depth > 0) - retval = -EAGAIN; + retval = -EACCES; if (retval) goto out; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index d70e91a..d82a023a 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev) int err; err = pm_runtime_get_sync(&sdev->sdev_gendev); - if (err < 0) + if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&sdev->sdev_gendev); - else if (err > 0) + else err = 0; return err; } @@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost) int err; err = pm_runtime_get_sync(&shost->shost_gendev); - if (err < 0) + if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&shost->shost_gendev); - else if (err > 0) + else err = 0; return err; } -- cgit v0.10.2 From eea3fc0357eb89d0b2d1af37bdfb83eb4076a542 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 6 Jul 2011 10:51:40 +0200 Subject: PCI / PM: Detect early wakeup in pci_pm_prepare() A subsequent patch is going to move the invocation of pm_runtime_barrier() from dpm_prepare() to __device_suspend(). Consequently, early wakeup events resulting from runtime resume requests for wakeup devices queued up right before system suspend will only be detected after all of the subsystem-level .prepare() callbacks have run. 
However, the PCI bus type calls pm_runtime_get_sync() from its pci_pm_prepare() callback routine, so it would destroy the early wakeup events information regarding PCI devices. To prevent this from happening, add an early wakeup detection mechanism, analogous to the one currently in dpm_prepare(), to pci_pm_prepare(). Signed-off-by: Rafael J. Wysocki Acked-by: Jesse Barnes diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 46767c5..12d1e81 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "pci.h" struct pci_dynid { @@ -616,6 +617,21 @@ static int pci_pm_prepare(struct device *dev) int error = 0; /* + * If a PCI device configured to wake up the system from sleep states + * has been suspended at run time and there's a resume request pending + * for it, this is equivalent to the device signaling wakeup, so the + * system suspend operation should be aborted. + */ + pm_runtime_get_noresume(dev); + if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) + pm_wakeup_event(dev, 0); + + if (pm_wakeup_pending()) { + pm_runtime_put_sync(dev); + return -EBUSY; + } + + /* * PCI devices suspended at run time need to be resumed at this * point, because in general it is necessary to reconfigure them for * system suspend. Namely, if the device is supposed to wake up the @@ -624,7 +640,7 @@ static int pci_pm_prepare(struct device *dev) * system from the sleep state, we'll have to prevent it from signaling * wake-up. */ - pm_runtime_get_sync(dev); + pm_runtime_resume(dev); if (drv && drv->pm && drv->pm->prepare) error = drv->pm->prepare(dev); -- cgit v0.10.2 From 1e2ef05bb8cf851a694d38e9170c89e7ff052741 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 6 Jul 2011 10:51:58 +0200 Subject: PM: Limit race conditions between runtime PM and system sleep (v2) One of the roles of the PM core is to prevent different PM callbacks executed for the same device object from racing with each other. Unfortunately, after commit e8665002477f0278f84f898145b1f141ba26ee26 (PM: Allow pm_runtime_suspend() to succeed during system suspend) runtime PM callbacks may be executed concurrently with system suspend/resume callbacks for the same device. The main reason for commit e8665002477f0278f84f898145b1f141ba26ee26 was that some subsystems and device drivers wanted to use runtime PM helpers, pm_runtime_suspend() and pm_runtime_put_sync() in particular, for carrying out the suspend of devices in their .suspend() callbacks. However, as it's been determined recently, there are multiple reasons not to do so, including: * The caller really doesn't control the runtime PM usage counters, because user space can access them through sysfs and effectively block runtime PM. That means using pm_runtime_suspend() or pm_runtime_get_sync() to suspend devices during system suspend may or may not work. * If a driver calls pm_runtime_suspend() from its .suspend() callback, it causes the subsystem's .runtime_suspend() callback to be executed, which leads to the call sequence: subsys->suspend(dev) driver->suspend(dev) pm_runtime_suspend(dev) subsys->runtime_suspend(dev) recursive from the subsystem's point of view. For some subsystems that may actually work (e.g. the platform bus type), but for some it will fail in a rather spectacular fashion (e.g. PCI). In each case it means a layering violation.
* Both the subsystem and the driver can provide .suspend_noirq() callbacks for system suspend that can do whatever the .runtime_suspend() callbacks do just fine, so it really isn't necessary to call pm_runtime_suspend() during system suspend. * The runtime PM's handling of wakeup devices is usually different from the system suspend's one, so .runtime_suspend() may simply be inappropriate for system suspend. * System suspend is supposed to work even if CONFIG_PM_RUNTIME is unset. * The runtime PM workqueue is frozen before system suspend, so if whatever the driver is going to do during system suspend depends on it, that simply won't work. Still, there is a good reason to allow pm_runtime_resume() to succeed during system suspend and resume (for instance, some subsystems and device drivers may legitimately use it to ensure that their devices are in full-power states before suspending them). Moreover, there is no reason to prevent runtime PM callbacks from being executed in parallel with the system suspend/resume .prepare() and .complete() callbacks and the code removed by commit e8665002477f0278f84f898145b1f141ba26ee26 went too far in this respect. On the other hand, runtime PM callbacks, including .runtime_resume(), must not be executed during system suspend's "late" stage of suspending devices and during system resume's "early" device resume stage. Taking all of the above into consideration, make the PM core acquire a runtime PM reference to every device and resume it if there's a runtime PM resume request pending right before executing the subsystem-level .suspend() callback for it. Make the PM core drop references to all devices right after executing the subsystem-level .resume() callbacks for them. Additionally, make the PM core disable the runtime PM framework for all devices during system suspend, after executing the subsystem-level .suspend() callbacks for them, and enable the runtime PM framework for all devices during system resume, right before executing the subsystem-level .resume() callbacks for them. Signed-off-by: Rafael J. Wysocki Acked-by: Kevin Hilman diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 0ec3d61..d50dd1a 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -583,6 +583,13 @@ this is: pm_runtime_set_active(dev); pm_runtime_enable(dev); +The PM core always increments the run-time usage counter before calling the +->suspend() callback and decrements it after calling the ->resume() callback. +Hence disabling run-time PM temporarily like this will not cause any runtime +suspend attempts to be permanently lost. If the usage count goes to zero +following the return of the ->resume() callback, the ->runtime_idle() callback +will be invoked as usual. + On some systems, however, system sleep is not entered through a global firmware or hardware operation. Instead, all hardware components are put into low-power states directly by the kernel in a coordinated way. Then, the system sleep @@ -595,6 +602,20 @@ place (in particular, if the system is not waking up from hibernation), it may be more efficient to leave the devices that had been suspended before the system suspend began in the suspended state. 
+The PM core does its best to reduce the probability of race conditions between +the runtime PM and system suspend/resume (and hibernation) callbacks by carrying +out the following operations: + + * During system suspend it calls pm_runtime_get_noresume() and + pm_runtime_barrier() for every device right before executing the + subsystem-level .suspend() callback for it. In addition to that it calls + pm_runtime_disable() for every device right after executing the + subsystem-level .suspend() callback for it. + + * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync() + for every device right before and right after executing the subsystem-level + .resume() callback for it, respectively. + 7. Generic subsystem callbacks Subsystems may wish to conserve code space by using the set of generic power diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 85b591a..a854591 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -505,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) static int device_resume(struct device *dev, pm_message_t state, bool async) { int error = 0; + bool put = false; TRACE_DEVICE(dev); TRACE_RESUME(0); @@ -521,6 +522,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (!dev->power.is_suspended) goto Unlock; + pm_runtime_enable(dev); + put = true; + if (dev->pm_domain) { pm_dev_dbg(dev, state, "power domain "); error = pm_op(dev, &dev->pm_domain->ops, state); @@ -563,6 +567,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) complete_all(&dev->power.completion); TRACE_RESUME(error); + + if (put) + pm_runtime_put_sync(dev); + return error; } @@ -843,16 +851,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) int error = 0; dpm_wait_for_children(dev, async); - device_lock(dev); if (async_error) - goto Unlock; + return 0; + + pm_runtime_get_noresume(dev); + if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) + pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { + pm_runtime_put_sync(dev); async_error = -EBUSY; - goto Unlock; + return 0; } + device_lock(dev); + if (dev->pm_domain) { pm_dev_dbg(dev, state, "power domain "); error = pm_op(dev, &dev->pm_domain->ops, state); @@ -890,12 +904,15 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: dev->power.is_suspended = !error; - Unlock: device_unlock(dev); complete_all(&dev->power.completion); - if (error) + if (error) { + pm_runtime_put_sync(dev); async_error = error; + } else if (dev->power.is_suspended) { + __pm_runtime_disable(dev, false); + } return error; } @@ -1035,13 +1052,7 @@ int dpm_prepare(pm_message_t state) get_device(dev); mutex_unlock(&dpm_list_mtx); - pm_runtime_get_noresume(dev); - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); - - pm_runtime_put_sync(dev); - error = pm_wakeup_pending() ? - -EBUSY : device_prepare(dev, state); + error = device_prepare(dev, state); mutex_lock(&dpm_list_mtx); if (error) { -- cgit v0.10.2 From e358bad75ff13210f5211cac9f93d76170d43f89 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 6 Jul 2011 10:52:06 +0200 Subject: PM / Runtime: Improve documentation of enable, disable and barrier The runtime PM documentation in Documentation/power/runtime_pm.txt doesn't say that pm_runtime_enable() and pm_runtime_disable() work by operating on power.disable_depth, which is wrong, because the possibility of nesting disables doesn't follow from the description of these functions. Also, there is no description of pm_runtime_barrier() at all in the document, which is confusing. Improve the documentation by fixing those issues. Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index d50dd1a..ca15bbb 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -369,17 +369,27 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: pm_runtime_autosuspend(dev) and return its result void pm_runtime_enable(struct device *dev); - - enable the run-time PM helper functions to run the device bus type's - run-time PM callbacks described in Section 2 + - decrement the device's 'power.disable_depth' field; if that field is equal + to zero, the run-time PM helper functions can execute subsystem-level + callbacks described in Section 2 for the device int pm_runtime_disable(struct device *dev); - - prevent the run-time PM helper functions from running subsystem-level - run-time PM callbacks for the device, make sure that all of the pending + - increment the device's 'power.disable_depth' field (if the value of that + field was previously zero, this prevents subsystem-level runtime PM + callbacks from being run for the device), make sure that all of the pending run-time PM operations on the device are either completed or canceled; returns 1 if there was a resume request pending and it was necessary to execute the subsystem-level resume callback for the device to satisfy that request, otherwise 0 is returned + int pm_runtime_barrier(struct device *dev); + - check if there's a resume request pending for the device and resume it + (synchronously) in that case, cancel any other pending runtime PM requests + regarding it and wait for all runtime PM operations on it in progress to + complete; returns 1 if there was a resume request pending and it was + necessary to execute the subsystem-level resume callback for the device to + satisfy that request, otherwise 0 is returned + void pm_suspend_ignore_children(struct device *dev, bool enable); - set/unset the power.ignore_children flag of the device -- cgit v0.10.2 From 62052ab1d1a456f5f62f8b753e12d10ca1a83604 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 6 Jul 2011 10:52:13 +0200 Subject: PM / Runtime: Replace "run-time" with "runtime" in documentation The runtime PM documentation and kerneldoc comments sometimes spell "runtime" with a dash (i.e. "run-time"). Replace all of those instances with "runtime" to make the naming consistent. Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index ca15bbb..40e47c7 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -1,39 +1,39 @@ -Run-time Power Management Framework for I/O Devices +Runtime Power Management Framework for I/O Devices (C) 2009-2011 Rafael J. Wysocki , Novell Inc. (C) 2010 Alan Stern 1. 
Introduction -Support for run-time power management (run-time PM) of I/O devices is provided +Support for runtime power management (runtime PM) of I/O devices is provided at the power management core (PM core) level by means of: * The power management workqueue pm_wq in which bus types and device drivers can put their PM-related work items. It is strongly recommended that pm_wq be - used for queuing all work items related to run-time PM, because this allows + used for queuing all work items related to runtime PM, because this allows them to be synchronized with system-wide power transitions (suspend to RAM, hibernation and resume from system sleep states). pm_wq is declared in include/linux/pm_runtime.h and defined in kernel/power/main.c. -* A number of run-time PM fields in the 'power' member of 'struct device' (which +* A number of runtime PM fields in the 'power' member of 'struct device' (which is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can - be used for synchronizing run-time PM operations with one another. + be used for synchronizing runtime PM operations with one another. -* Three device run-time PM callbacks in 'struct dev_pm_ops' (defined in +* Three device runtime PM callbacks in 'struct dev_pm_ops' (defined in include/linux/pm.h). * A set of helper functions defined in drivers/base/power/runtime.c that can be - used for carrying out run-time PM operations in such a way that the + used for carrying out runtime PM operations in such a way that the synchronization between them is taken care of by the PM core. Bus types and device drivers are encouraged to use these functions. -The run-time PM callbacks present in 'struct dev_pm_ops', the device run-time PM +The runtime PM callbacks present in 'struct dev_pm_ops', the device runtime PM fields of 'struct dev_pm_info' and the core helper functions provided for -run-time PM are described below. +runtime PM are described below. -2. Device Run-time PM Callbacks +2. Device Runtime PM Callbacks -There are three device run-time PM callbacks defined in 'struct dev_pm_ops': +There are three device runtime PM callbacks defined in 'struct dev_pm_ops': struct dev_pm_ops { ... @@ -72,11 +72,11 @@ knows what to do to handle the device). not mean that the device has been put into a low power state. It is supposed to mean, however, that the device will not process data and will not communicate with the CPU(s) and RAM until the subsystem-level resume - callback is executed for it. The run-time PM status of a device after + callback is executed for it. The runtime PM status of a device after successful execution of the subsystem-level suspend callback is 'suspended'. * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN, - the device's run-time PM status is 'active', which means that the device + the device's runtime PM status is 'active', which means that the device _must_ be fully operational afterwards. * If the subsystem-level suspend callback returns an error code different @@ -104,7 +104,7 @@ the device). * Once the subsystem-level resume callback has completed successfully, the PM core regards the device as fully operational, which means that the device - _must_ be able to complete I/O operations as needed. The run-time PM status + _must_ be able to complete I/O operations as needed. The runtime PM status of the device is then 'active'. * If the subsystem-level resume callback returns an error code, the PM core @@ -130,7 +130,7 @@ device in that case. 
The value returned by this callback is ignored by the PM core. The helper functions provided by the PM core, described in Section 4, guarantee -that the following constraints are met with respect to the bus type's run-time +that the following constraints are met with respect to the bus type's runtime PM callbacks: (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute @@ -142,7 +142,7 @@ PM callbacks: (2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active' devices (i.e. the PM core will only execute ->runtime_idle() or - ->runtime_suspend() for the devices the run-time PM status of which is + ->runtime_suspend() for the devices the runtime PM status of which is 'active'). (3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device @@ -151,7 +151,7 @@ PM callbacks: flag of which is set. (4) ->runtime_resume() can only be executed for 'suspended' devices (i.e. the - PM core will only execute ->runtime_resume() for the devices the run-time + PM core will only execute ->runtime_resume() for the devices the runtime PM status of which is 'suspended'). Additionally, the helper functions provided by the PM core obey the following @@ -171,9 +171,9 @@ rules: scheduled requests to execute the other callbacks for the same device, except for scheduled autosuspends. -3. Run-time PM Device Fields +3. Runtime PM Device Fields -The following device run-time PM fields are present in 'struct dev_pm_info', as +The following device runtime PM fields are present in 'struct dev_pm_info', as defined in include/linux/pm.h: struct timer_list suspend_timer; @@ -205,7 +205,7 @@ defined in include/linux/pm.h: unsigned int disable_depth; - used for disabling the helper funcions (they work normally if this is - equal to zero); the initial value of it is 1 (i.e. run-time PM is + equal to zero); the initial value of it is 1 (i.e. runtime PM is initially disabled for all devices) unsigned int runtime_error; @@ -229,10 +229,10 @@ defined in include/linux/pm.h: suspend to complete; means "start a resume as soon as you've suspended" unsigned int run_wake; - - set if the device is capable of generating run-time wake-up events + - set if the device is capable of generating runtime wake-up events enum rpm_status runtime_status; - - the run-time PM status of the device; this field's initial value is + - the runtime PM status of the device; this field's initial value is RPM_SUSPENDED, which means that each device is initially regarded by the PM core as 'suspended', regardless of its real hardware status @@ -243,7 +243,7 @@ defined in include/linux/pm.h: and pm_runtime_forbid() helper functions unsigned int no_callbacks; - - indicates that the device does not use the run-time PM callbacks (see + - indicates that the device does not use the runtime PM callbacks (see Section 8); it may be modified only by the pm_runtime_no_callbacks() helper function @@ -270,16 +270,16 @@ defined in include/linux/pm.h: All of the above fields are members of the 'power' member of 'struct device'. -4. Run-time PM Device Helper Functions +4. 
Runtime PM Device Helper Functions -The following run-time PM helper functions are defined in +The following runtime PM helper functions are defined in drivers/base/power/runtime.c and include/linux/pm_runtime.h: void pm_runtime_init(struct device *dev); - - initialize the device run-time PM fields in 'struct dev_pm_info' + - initialize the device runtime PM fields in 'struct dev_pm_info' void pm_runtime_remove(struct device *dev); - - make sure that the run-time PM of the device will be disabled after + - make sure that the runtime PM of the device will be disabled after removing the device from device hierarchy int pm_runtime_idle(struct device *dev); @@ -289,7 +289,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: int pm_runtime_suspend(struct device *dev); - execute the subsystem-level suspend callback for the device; returns 0 on - success, 1 if the device's run-time PM status was already 'suspended', or + success, 1 if the device's runtime PM status was already 'suspended', or error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt to suspend the device again in future and -EACCES means that 'power.disable_depth' is different from 0 @@ -302,7 +302,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: int pm_runtime_resume(struct device *dev); - execute the subsystem-level resume callback for the device; returns 0 on - success, 1 if the device's run-time PM status was already 'active' or + success, 1 if the device's runtime PM status was already 'active' or error code on failure, where -EAGAIN means it may be safe to attempt to resume the device again in future, but 'power.runtime_error' should be checked additionally, and -EACCES means that 'power.disable_depth' is @@ -323,7 +323,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: device in future, where 'delay' is the time to wait before queuing up a suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work item is queued up immediately); returns 0 on success, 1 if the device's PM - run-time status was already 'suspended', or error code if the request + runtime status was already 'suspended', or error code if the request hasn't been scheduled (or queued up if 'delay' is 0); if the execution of ->runtime_suspend() is already scheduled and not yet expired, the new value of 'delay' will be used as the time to wait @@ -331,7 +331,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: int pm_request_resume(struct device *dev); - submit a request to execute the subsystem-level resume callback for the device (the request is represented by a work item in pm_wq); returns 0 on - success, 1 if the device's run-time PM status was already 'active', or + success, 1 if the device's runtime PM status was already 'active', or error code if the request hasn't been queued up void pm_runtime_get_noresume(struct device *dev); @@ -370,14 +370,14 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: void pm_runtime_enable(struct device *dev); - decrement the device's 'power.disable_depth' field; if that field is equal - to zero, the run-time PM helper functions can execute subsystem-level + to zero, the runtime PM helper functions can execute subsystem-level callbacks described in Section 2 for the device int pm_runtime_disable(struct device *dev); - increment the device's 'power.disable_depth' field (if the value of that field was previously zero, this prevents subsystem-level runtime PM callbacks from being run for the device), make sure that all of the 
pending - run-time PM operations on the device are either completed or canceled; + runtime PM operations on the device are either completed or canceled; returns 1 if there was a resume request pending and it was necessary to execute the subsystem-level resume callback for the device to satisfy that request, otherwise 0 is returned @@ -394,7 +394,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: - set/unset the power.ignore_children flag of the device int pm_runtime_set_active(struct device *dev); - - clear the device's 'power.runtime_error' flag, set the device's run-time + - clear the device's 'power.runtime_error' flag, set the device's runtime PM status to 'active' and update its parent's counter of 'active' children as appropriate (it is only valid to use this function if 'power.runtime_error' is set or 'power.disable_depth' is greater than @@ -402,7 +402,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: which is not active and the 'power.ignore_children' flag of which is unset void pm_runtime_set_suspended(struct device *dev); - - clear the device's 'power.runtime_error' flag, set the device's run-time + - clear the device's 'power.runtime_error' flag, set the device's runtime PM status to 'suspended' and update its parent's counter of 'active' children as appropriate (it is only valid to use this function if 'power.runtime_error' is set or 'power.disable_depth' is greater than @@ -423,7 +423,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: effectively prevent the device from being power managed at run time) void pm_runtime_no_callbacks(struct device *dev); - - set the power.no_callbacks flag for the device and remove the run-time + - set the power.no_callbacks flag for the device and remove the runtime PM attributes from /sys/devices/.../power (or prevent them from being added when the device is registered) @@ -443,7 +443,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); - set the power.autosuspend_delay value to 'delay' (expressed in - milliseconds); if 'delay' is negative then run-time suspends are + milliseconds); if 'delay' is negative then runtime suspends are prevented unsigned long pm_runtime_autosuspend_expiration(struct device *dev); @@ -482,35 +482,35 @@ pm_runtime_resume() pm_runtime_get_sync() pm_runtime_put_sync_suspend() -5. Run-time PM Initialization, Device Probing and Removal +5. Runtime PM Initialization, Device Probing and Removal -Initially, the run-time PM is disabled for all devices, which means that the -majority of the run-time PM helper funtions described in Section 4 will return +Initially, the runtime PM is disabled for all devices, which means that the +majority of the runtime PM helper funtions described in Section 4 will return -EAGAIN until pm_runtime_enable() is called for the device. -In addition to that, the initial run-time PM status of all devices is +In addition to that, the initial runtime PM status of all devices is 'suspended', but it need not reflect the actual physical state of the device. Thus, if the device is initially active (i.e. it is able to process I/O), its -run-time PM status must be changed to 'active', with the help of +runtime PM status must be changed to 'active', with the help of pm_runtime_set_active(), before pm_runtime_enable() is called for the device. 
-However, if the device has a parent and the parent's run-time PM is enabled, +However, if the device has a parent and the parent's runtime PM is enabled, calling pm_runtime_set_active() for the device will affect the parent, unless the parent's 'power.ignore_children' flag is set. Namely, in that case the parent won't be able to suspend at run time, using the PM core's helper functions, as long as the child's status is 'active', even if the child's -run-time PM is still disabled (i.e. pm_runtime_enable() hasn't been called for +runtime PM is still disabled (i.e. pm_runtime_enable() hasn't been called for the child yet or pm_runtime_disable() has been called for it). For this reason, once pm_runtime_set_active() has been called for the device, pm_runtime_enable() -should be called for it too as soon as reasonably possible or its run-time PM +should be called for it too as soon as reasonably possible or its runtime PM status should be changed back to 'suspended' with the help of pm_runtime_set_suspended(). -If the default initial run-time PM status of the device (i.e. 'suspended') +If the default initial runtime PM status of the device (i.e. 'suspended') reflects the actual state of the device, its bus type's or its driver's ->probe() callback will likely need to wake it up using one of the PM core's helper functions described in Section 4. In that case, pm_runtime_resume() -should be used. Of course, for this purpose the device's run-time PM has to be +should be used. Of course, for this purpose the device's runtime PM has to be enabled earlier by calling pm_runtime_enable(). If the device bus type's or driver's ->probe() callback runs @@ -541,29 +541,29 @@ The user space can effectively disallow the driver of the device to power manage it at run time by changing the value of its /sys/devices/.../power/control attribute to "on", which causes pm_runtime_forbid() to be called. In principle, this mechanism may also be used by the driver to effectively turn off the -run-time power management of the device until the user space turns it on. -Namely, during the initialization the driver can make sure that the run-time PM +runtime power management of the device until the user space turns it on. +Namely, during the initialization the driver can make sure that the runtime PM status of the device is 'active' and call pm_runtime_forbid(). It should be noted, however, that if the user space has already intentionally changed the value of /sys/devices/.../power/control to "auto" to allow the driver to power manage the device at run time, the driver may confuse it by using pm_runtime_forbid() this way. -6. Run-time PM and System Sleep +6. Runtime PM and System Sleep -Run-time PM and system sleep (i.e., system suspend and hibernation, also known +Runtime PM and system sleep (i.e., system suspend and hibernation, also known as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of ways. If a device is active when a system sleep starts, everything is straightforward. But what should happen if the device is already suspended? -The device may have different wake-up settings for run-time PM and system sleep. -For example, remote wake-up may be enabled for run-time suspend but disallowed +The device may have different wake-up settings for runtime PM and system sleep. +For example, remote wake-up may be enabled for runtime suspend but disallowed for system sleep (device_may_wakeup(dev) returns 'false'). 
When this happens, the subsystem-level system suspend callback is responsible for changing the device's wake-up setting (it may leave that to the device driver's system suspend routine). It may be necessary to resume the device and suspend it again in order to do so. The same is true if the driver uses different power levels -or other settings for run-time suspend and system sleep. +or other settings for runtime suspend and system sleep. During system resume, the simplest approach is to bring all devices back to full power, even if they had been suspended before the system suspend began. There @@ -582,10 +582,10 @@ are several reasons for this, including: * The device might need to be reset. * Even though the device was suspended, if its usage counter was > 0 then most - likely it would need a run-time resume in the near future anyway. + likely it would need a runtime resume in the near future anyway. If the device had been suspended before the system suspend began and it's -brought back to full power during resume, then its run-time PM status will have +brought back to full power during resume, then its runtime PM status will have to be updated to reflect the actual post-system sleep status. The way to do this is: @@ -593,9 +593,9 @@ this is: pm_runtime_set_active(dev); pm_runtime_enable(dev); -The PM core always increments the run-time usage counter before calling the +The PM core always increments the runtime usage counter before calling the ->suspend() callback and decrements it after calling the ->resume() callback. -Hence disabling run-time PM temporarily like this will not cause any runtime +Hence disabling runtime PM temporarily like this will not cause any runtime suspend attempts to be permanently lost. If the usage count goes to zero following the return of the ->resume() callback, the ->runtime_idle() callback will be invoked as usual. @@ -710,8 +710,8 @@ the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its dev_pm_ops structure pointer. Device drivers that wish to use the same function as a system suspend, freeze, -poweroff and run-time suspend callback, and similarly for system resume, thaw, -restore, and run-time resume, can achieve this with the help of the +poweroff and runtime suspend callback, and similarly for system resume, thaw, +restore, and runtime resume, can achieve this with the help of the UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its last argument to NULL). @@ -721,7 +721,7 @@ Some "devices" are only logical sub-devices of their parent and cannot be power-managed on their own. (The prototype example is a USB interface. Entire USB devices can go into low-power mode or send wake-up requests, but neither is possible for individual interfaces.) The drivers for these devices have no -need of run-time PM callbacks; if the callbacks did exist, ->runtime_suspend() +need of runtime PM callbacks; if the callbacks did exist, ->runtime_suspend() and ->runtime_resume() would always return 0 without doing anything else and ->runtime_idle() would always call pm_runtime_suspend(). @@ -729,7 +729,7 @@ Subsystems can tell the PM core about these devices by calling pm_runtime_no_callbacks(). This should be done after the device structure is initialized and before it is registered (although after device registration is also okay). The routine will set the device's power.no_callbacks flag and -prevent the non-debugging run-time PM sysfs attributes from being created. 
+prevent the non-debugging runtime PM sysfs attributes from being created. When power.no_callbacks is set, the PM core will not invoke the ->runtime_idle(), ->runtime_suspend(), or ->runtime_resume() callbacks. @@ -737,7 +737,7 @@ Instead it will assume that suspends and resumes always succeed and that idle devices should be suspended. As a consequence, the PM core will never directly inform the device's subsystem -or driver about run-time power changes. Instead, the driver for the device's +or driver about runtime power changes. Instead, the driver for the device's parent must take responsibility for telling the device's driver when the parent's power state changes. @@ -748,13 +748,13 @@ A device should be put in a low-power state only when there's some reason to think it will remain in that state for a substantial time. A common heuristic says that a device which hasn't been used for a while is liable to remain unused; following this advice, drivers should not allow devices to be suspended -at run-time until they have been inactive for some minimum period. Even when +at runtime until they have been inactive for some minimum period. Even when the heuristic ends up being non-optimal, it will still prevent devices from "bouncing" too rapidly between low-power and full-power states. The term "autosuspend" is an historical remnant. It doesn't mean that the device is automatically suspended (the subsystem or driver still has to call -the appropriate PM routines); rather it means that run-time suspends will +the appropriate PM routines); rather it means that runtime suspends will automatically be delayed until the desired period of inactivity has elapsed. Inactivity is determined based on the power.last_busy field. Drivers should diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index ee99025..be7b982 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1,5 +1,5 @@ /* - * drivers/base/power/runtime.c - Helper functions for device run-time PM + * drivers/base/power/runtime.c - Helper functions for device runtime PM * * Copyright (c) 2009 Rafael J. Wysocki , Novell Inc. * Copyright (C) 2010 Alan Stern @@ -159,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev) * @dev: Device to notify the bus type about. * @rpmflags: Flag bits. * - * Check if the device's run-time PM status allows it to be suspended. If + * Check if the device's runtime PM status allows it to be suspended. If * another idle notification has been started earlier, return immediately. If * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise * run the ->runtime_idle() callback directly. @@ -267,11 +267,11 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) } /** - * rpm_suspend - Carry out run-time suspend of given device. + * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. * @rpmflags: Flag bits. * - * Check if the device's run-time PM status allows it to be suspended. If + * Check if the device's runtime PM status allows it to be suspended. If * another suspend has been started earlier, either return immediately or wait * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a * pending idle notification. If the RPM_ASYNC flag is set then queue a @@ -430,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags) } /** - * rpm_resume - Carry out run-time resume of given device. + * rpm_resume - Carry out runtime resume of given device. 
* @dev: Device to resume. * @rpmflags: Flag bits. * - * Check if the device's run-time PM status allows it to be resumed. Cancel + * Check if the device's runtime PM status allows it to be resumed. Cancel * any scheduled or pending requests. If another resume has been started * earlier, either return immediately or wait for it to finish, depending on the * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in @@ -551,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags) spin_lock(&parent->power.lock); /* - * We can resume if the parent's run-time PM is disabled or it + * We can resume if the parent's runtime PM is disabled or it * is set to ignore children. */ if (!parent->power.disable_depth @@ -615,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags) } /** - * pm_runtime_work - Universal run-time PM work function. + * pm_runtime_work - Universal runtime PM work function. * @work: Work structure used for scheduling the execution of this function. * * Use @work to get the device object the work is to be done for, determine what - * is to be done and execute the appropriate run-time PM function. + * is to be done and execute the appropriate runtime PM function. */ static void pm_runtime_work(struct work_struct *work) { @@ -718,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) EXPORT_SYMBOL_GPL(pm_schedule_suspend); /** - * __pm_runtime_idle - Entry point for run-time idle operations. + * __pm_runtime_idle - Entry point for runtime idle operations. * @dev: Device to send idle notification for. * @rpmflags: Flag bits. * @@ -747,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_idle); /** - * __pm_runtime_suspend - Entry point for run-time put/suspend operations. + * __pm_runtime_suspend - Entry point for runtime put/suspend operations. * @dev: Device to suspend. * @rpmflags: Flag bits. * @@ -776,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_suspend); /** - * __pm_runtime_resume - Entry point for run-time resume operations. + * __pm_runtime_resume - Entry point for runtime resume operations. * @dev: Device to resume. * @rpmflags: Flag bits. * @@ -802,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** - * __pm_runtime_set_status - Set run-time PM status of a device. + * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. - * @status: New run-time PM status of the device. + * @status: New runtime PM status of the device. * - * If run-time PM of the device is disabled or its power.runtime_error field is + * If runtime PM of the device is disabled or its power.runtime_error field is * different from zero, the status may be changed either to RPM_ACTIVE, or to * RPM_SUSPENDED, as long as that reflects the actual state of the device. * However, if the device has a parent and the parent is not active, and the @@ -852,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) /* * It is invalid to put an active child under a parent that is - * not active, has run-time PM enabled and the + * not active, has runtime PM enabled and the * 'power.ignore_children' flag unset. */ if (!parent->power.disable_depth @@ -886,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status); * @dev: Device to handle. 
* * Flush all pending requests for the device from pm_wq and wait for all - * run-time PM operations involving the device in progress to complete. + * runtime PM operations involving the device in progress to complete. * * Should be called under dev->power.lock with interrupts disabled. */ @@ -934,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev) * Prevent the device from being suspended by incrementing its usage counter and * if there's a pending resume request for the device, wake the device up. * Next, make sure that all pending requests for the device have been flushed - * from pm_wq and wait for all run-time PM operations involving the device in + * from pm_wq and wait for all runtime PM operations involving the device in * progress to complete. * * Return value: @@ -964,18 +964,18 @@ int pm_runtime_barrier(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_barrier); /** - * __pm_runtime_disable - Disable run-time PM of a device. + * __pm_runtime_disable - Disable runtime PM of a device. * @dev: Device to handle. * @check_resume: If set, check if there's a resume request for the device. * * Increment power.disable_depth for the device and if was zero previously, - * cancel all pending run-time PM requests for the device and wait for all + * cancel all pending runtime PM requests for the device and wait for all * operations in progress to complete. The device can be either active or - * suspended after its run-time PM has been disabled. + * suspended after its runtime PM has been disabled. * * If @check_resume is set and there's a resume request pending when * __pm_runtime_disable() is called and power.disable_depth is zero, the - * function will wake up the device before disabling its run-time PM. + * function will wake up the device before disabling its runtime PM. */ void __pm_runtime_disable(struct device *dev, bool check_resume) { @@ -988,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) /* * Wake up the device if there's a resume request pending, because that - * means there probably is some I/O to process and disabling run-time PM + * means there probably is some I/O to process and disabling runtime PM * shouldn't prevent the device from processing the I/O. */ if (check_resume && dev->power.request_pending @@ -1013,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) EXPORT_SYMBOL_GPL(__pm_runtime_disable); /** - * pm_runtime_enable - Enable run-time PM of a device. + * pm_runtime_enable - Enable runtime PM of a device. * @dev: Device to handle. */ void pm_runtime_enable(struct device *dev) @@ -1032,7 +1032,7 @@ void pm_runtime_enable(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_enable); /** - * pm_runtime_forbid - Block run-time PM of a device. + * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. * * Increase the device's usage count and clear its power.runtime_auto flag, @@ -1055,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_forbid); /** - * pm_runtime_allow - Unblock run-time PM of a device. + * pm_runtime_allow - Unblock runtime PM of a device. * @dev: Device to handle. * * Decrease the device's usage count and set its power.runtime_auto flag. @@ -1076,12 +1076,12 @@ void pm_runtime_allow(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_allow); /** - * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. + * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. * @dev: Device to handle. 
* * Set the power.no_callbacks flag, which tells the PM core that this - * device is power-managed through its parent and has no run-time PM - * callbacks of its own. The run-time sysfs attributes will be removed. + * device is power-managed through its parent and has no runtime PM + * callbacks of its own. The runtime sysfs attributes will be removed. */ void pm_runtime_no_callbacks(struct device *dev) { @@ -1157,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use) * @delay: Value of the new delay in milliseconds. * * Set the device's power.autosuspend_delay value. If it changes to negative - * and the power.use_autosuspend flag is set, prevent run-time suspends. If it - * changes the other way, allow run-time suspends. + * and the power.use_autosuspend flag is set, prevent runtime suspends. If it + * changes the other way, allow runtime suspends. */ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) { @@ -1178,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); * @dev: Device to handle. * @use: New value for use_autosuspend. * - * Set the device's power.use_autosuspend flag, and allow or prevent run-time + * Set the device's power.use_autosuspend flag, and allow or prevent runtime * suspends as needed. */ void __pm_runtime_use_autosuspend(struct device *dev, bool use) @@ -1195,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use) EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); /** - * pm_runtime_init - Initialize run-time PM fields in given device object. + * pm_runtime_init - Initialize runtime PM fields in given device object. * @dev: Device object to initialize. */ void pm_runtime_init(struct device *dev) -- cgit v0.10.2 From 69c843b45eb3b8f267019e6a05860c9c48337419 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 6 Jul 2011 10:52:23 +0200 Subject: PM / Runtime: Prevent runtime_resume from racing with probe This patch (as1475) adds device_lock() and device_unlock() calls to the store methods for the power/control and power/autosuspend_delay_ms sysfs attribute files. We don't want badly timed writes to these files to cause runtime_resume callbacks to occur while a driver is being probed for a device. Signed-off-by: Alan Stern Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a9f5b89..942d6a7 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr, cp = memchr(buf, '\n', n); if (cp) len = cp - buf; + device_lock(dev); if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) pm_runtime_allow(dev); else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) pm_runtime_forbid(dev); else - return -EINVAL; + n = -EINVAL; + device_unlock(dev); return n; } @@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) return -EINVAL; + device_lock(dev); pm_runtime_set_autosuspend_delay(dev, delay); + device_unlock(dev); return n; } -- cgit v0.10.2 From 2cffff1281a74714c9e035322077ec52ffb1f838 Mon Sep 17 00:00:00 2001 From: ShuoX Liu Date: Fri, 8 Jul 2011 20:53:55 +0200 Subject: PM / Runtime: Consistent utilization of deferred_resume dev->power.deferred_resume is used as a bool typically, so change one assignment to false from 0, like other places. 
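The device_lock()-based serialization added above to the power/control and power/autosuspend_delay_ms store methods can be mirrored by drivers whose own sysfs attributes trigger runtime PM activity from their store methods. The following is a minimal sketch, not part of any patch in this series; the attribute name, the my_drvdata structure and its wake_interval_ms field are hypothetical.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

struct my_drvdata {				/* hypothetical driver data */
	unsigned long wake_interval_ms;
};

static ssize_t wake_interval_ms_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct my_drvdata *data;
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	/*
	 * device_lock() keeps this store method from running while the
	 * bus type is probing or removing the device, so the runtime
	 * resume below cannot race with ->probe() or ->remove().
	 */
	device_lock(dev);
	data = dev_get_drvdata(dev);
	if (!data) {
		device_unlock(dev);
		return -ENODEV;
	}

	ret = pm_runtime_get_sync(dev);		/* wake the device up */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the reference taken above */
		device_unlock(dev);
		return ret;
	}

	data->wake_interval_ms = val;
	pm_runtime_put(dev);
	device_unlock(dev);
	return count;
}
static DEVICE_ATTR(wake_interval_ms, 0200, NULL, wake_interval_ms_store);

As with the power/control case above, the point of taking device_lock() here is simply that probe, remove and the store method become mutually exclusive for the same device.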
Signed-off-by: ShuoX Liu diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index be7b982..8dc247c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -389,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_ACTIVE); - dev->power.deferred_resume = 0; + dev->power.deferred_resume = false; if (retval == -EAGAIN || retval == -EBUSY) dev->power.runtime_error = 0; else -- cgit v0.10.2 From a41b64665a79b8be980d450f6e5b9354d7511700 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Sun, 10 Jul 2011 10:11:57 +0200 Subject: ARM: mach-shmobile: sh7372: make sure that fsi is peripheral of spu2 FSI act as peripheral circuits of the SPU2. Signed-off-by: Kuninori Morimoto Acked-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index a26f895..4226db6 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1414,6 +1414,7 @@ static void __init ap4evb_init(void) hdmi_init_pm_clock(); fsi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init ap4evb_timer_init(void) diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index cc1ccd8..362b883 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1587,6 +1587,7 @@ static void __init mackerel_init(void) hdmi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init mackerel_timer_init(void) diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index c0800d8..91f5779 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = { CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), + CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]), }; void __init sh7372_clock_init(void) -- cgit v0.10.2 From c1ba5bb5608664b37a3b1ac647e0dd67d46ddc99 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Sun, 10 Jul 2011 10:12:08 +0200 Subject: ARM: mach-shmobile: sh7372 A4MP support Add support for the sh7372 A4MP power domain and hook up the FSI/SPU2 device. Signed-off-by: Kuninori Morimoto Acked-by: Magnus Damm Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 4226db6..b473b8e 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1410,6 +1410,7 @@ static void __init ap4evb_init(void) sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device); sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); hdmi_init_pm_clock(); fsi_init_pm_clock(); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 362b883..5b36b6c 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1584,6 +1584,7 @@ static void __init mackerel_init(void) sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); hdmi_init_pm_clock(); sh7372_pm_init(); diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index 1aed9da..f3ceced 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -485,6 +485,7 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern struct sh7372_pm_domain sh7372_a4lc; +extern struct sh7372_pm_domain sh7372_a4mp; extern struct sh7372_pm_domain sh7372_a3rv; extern struct sh7372_pm_domain sh7372_a3ri; extern struct sh7372_pm_domain sh7372_a3sg; diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 474a15a..f3bd95f 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -125,6 +125,10 @@ struct sh7372_pm_domain sh7372_a4lc = { .bit_shift = 1, }; +struct sh7372_pm_domain sh7372_a4mp = { + .bit_shift = 2, +}; + struct sh7372_pm_domain sh7372_a3rv = { .bit_shift = 6, }; diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index b219cea..9b0cd43 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -842,6 +842,7 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { sh7372_init_pm_domain(&sh7372_a4lc); + sh7372_init_pm_domain(&sh7372_a4mp); sh7372_init_pm_domain(&sh7372_a3rv); sh7372_init_pm_domain(&sh7372_a3ri); sh7372_init_pm_domain(&sh7372_a3sg); @@ -853,6 +854,8 @@ void __init sh7372_add_standard_devices(void) ARRAY_SIZE(sh7372_late_devices)); sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); } void __init sh7372_add_early_devices(void) -- cgit v0.10.2 From d24771dec9c62945a5d1c6a37e7a04f5c2a2ae6f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sun, 10 Jul 2011 10:38:22 +0200 Subject: ARM: mach-shmobile: sh7372 D4 support Add support for the sh7372 D4 power domain. This power domain contains the Coresight-ETM hardware block. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index f3ceced..ce595ce 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -486,6 +486,7 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) #ifdef CONFIG_PM extern struct sh7372_pm_domain sh7372_a4lc; extern struct sh7372_pm_domain sh7372_a4mp; +extern struct sh7372_pm_domain sh7372_d4; extern struct sh7372_pm_domain sh7372_a3rv; extern struct sh7372_pm_domain sh7372_a3ri; extern struct sh7372_pm_domain sh7372_a3sg; diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index f3bd95f..a8e0de1 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -129,6 +129,10 @@ struct sh7372_pm_domain sh7372_a4mp = { .bit_shift = 2, }; +struct sh7372_pm_domain sh7372_d4 = { + .bit_shift = 3, +}; + struct sh7372_pm_domain sh7372_a3rv = { .bit_shift = 6, }; diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 9b0cd43..79f0413 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -843,6 +843,7 @@ void __init sh7372_add_standard_devices(void) { sh7372_init_pm_domain(&sh7372_a4lc); sh7372_init_pm_domain(&sh7372_a4mp); + sh7372_init_pm_domain(&sh7372_d4); sh7372_init_pm_domain(&sh7372_a3rv); sh7372_init_pm_domain(&sh7372_a3ri); sh7372_init_pm_domain(&sh7372_a3sg); -- cgit v0.10.2 From 999a4d2a4da0527567e4b17d4da0782509358a83 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sun, 10 Jul 2011 10:38:34 +0200 Subject: ARM: mach-shmobile: Runtime PM late init callback Add a mach-shmobile specific callback for SoC-specific code to hook into. By having the late_initcall() in a common place we can have multi-SoC/board support in the same kernel binary. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 06aecb3..73a76d7 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -12,6 +12,7 @@ extern struct platform_suspend_ops shmobile_suspend_ops; struct cpuidle_device; extern void (*shmobile_cpuidle_modes[])(void); extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); +extern void (*shmobile_runtime_pm_late_init)(void); extern void sh7367_init_irq(void); extern void sh7367_add_early_devices(void); diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 2bcde1c..2f6ded5 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -56,3 +56,13 @@ static int __init sh_pm_runtime_init(void) return 0; } core_initcall(sh_pm_runtime_init); + +void (*shmobile_runtime_pm_late_init)(void); + +static int __init sh_pm_runtime_late_init(void) +{ + if (shmobile_runtime_pm_late_init) + shmobile_runtime_pm_late_init(); + return 0; +} +late_initcall(sh_pm_runtime_late_init); -- cgit v0.10.2 From b9416f03fd9f809225a90f63b2fbc86f90010eab Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sun, 10 Jul 2011 10:38:53 +0200 Subject: ARM: mach-shmobile: sh7372 late pm domain off Add sh7372 specific code to power down unused pm domains. This should really be replaced by some generic PM core code IMO, but until that happens this patch makes sure we don't waste power by leaving unused power domains on. 
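As a usage note, the shmobile_runtime_pm_late_init hook introduced above is only consulted from a late_initcall(), namely sh_pm_runtime_late_init(), so SoC code has to assign it from something that runs earlier in the boot sequence. The sketch below is hypothetical (the soc_* names are made up); the sh7372 patch that follows does the equivalent by assigning the hook from sh7372_init_pm_domain().

#include <linux/init.h>
#include <mach/common.h>

static void soc_pm_late_init(void)
{
	/*
	 * Runs after all driver-registering initcalls, so resources that
	 * nothing has claimed by now (for example an unused power domain)
	 * can be powered down here.
	 */
}

static int __init soc_pm_setup(void)
{
	/* ... set up the SoC's power domains and clocks ... */
	shmobile_runtime_pm_late_init = soc_pm_late_init;
	return 0;
}
core_initcall(soc_pm_setup);

Keeping the late_initcall() itself in common code means that only the SoC which actually boots assigns the pointer, which is what allows multi-SoC/board kernels, as the changelog above points out.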
Signed-off-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index a8e0de1..9ad3b61 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -96,6 +96,17 @@ static bool pd_active_wakeup(struct device *dev) return true; } +static void sh7372_late_pm_domain_off(void) +{ + /* request power down of unused pm domains */ + queue_work(pm_wq, &sh7372_a4lc.genpd.power_off_work); + queue_work(pm_wq, &sh7372_a4mp.genpd.power_off_work); + queue_work(pm_wq, &sh7372_d4.genpd.power_off_work); + queue_work(pm_wq, &sh7372_a3rv.genpd.power_off_work); + queue_work(pm_wq, &sh7372_a3ri.genpd.power_off_work); + queue_work(pm_wq, &sh7372_a3sg.genpd.power_off_work); +} + void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) { struct generic_pm_domain *genpd = &sh7372_pd->genpd; @@ -107,6 +118,8 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) genpd->power_off = pd_power_down; genpd->power_on = pd_power_up; pd_power_up(&sh7372_pd->genpd); + + shmobile_runtime_pm_late_init = sh7372_late_pm_domain_off; } void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, -- cgit v0.10.2 From 18b4f3f5d058b590e7189027eeb5d897742ade0a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sun, 10 Jul 2011 10:39:14 +0200 Subject: PM / Domains: Export pm_genpd_poweron() in header Allow SoC-specific code to call pm_genpd_poweron(). Signed-off-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1aed94c..1f1a7d8 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -37,7 +37,7 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) * Restore power to @genpd and all of its parents so that it is possible to * resume a device belonging to it. */ -static int pm_genpd_poweron(struct generic_pm_domain *genpd) +int pm_genpd_poweron(struct generic_pm_domain *genpd) { int ret = 0; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 98491ee..14fb095 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -63,6 +63,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_poweron(struct generic_pm_domain *genpd); #else static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) @@ -86,6 +87,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, } static inline void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) {} +static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + return -ENOSYS; +} #endif #endif /* _LINUX_PM_DOMAIN_H */ -- cgit v0.10.2 From 775b8ae8707592af9275b8b216c2bf056b3f5d82 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sun, 10 Jul 2011 10:39:32 +0200 Subject: ARM: mach-shmobile: sh7372 A3RV requires A4LC Add a power domain workaround for the VPU and A3RV on sh7372. The sh7372 data sheet mentions that the VPU is located in the A3RV power domain. The A3RV power domain is not related to A4LC in any way, but testing shows that unless A3RV _and_ A4LC are powered on the VPU test program will bomb out. This issue may be caused by a more or less undocumented dependency on the MERAM block that happens to be located in A4LC. 
So now we know that the out-of-reset requirement of the VPU is that the MERAM is powered on. This patch adds a workaround for A3RV to make sure A4LC is powered on - this so we can use the VPU even though the LCDCs are in blanking state and A4LC is supposed to be off. Signed-off-by: Magnus Damm Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 9ad3b61..71400ea 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -91,6 +91,36 @@ static int pd_power_up(struct generic_pm_domain *genpd) return ret; } +static int pd_power_up_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_up(genpd); + + /* force A4LC on after A3RV has been requested on */ + pm_genpd_poweron(&sh7372_a4lc.genpd); + + return ret; +} + +static int pd_power_down_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_down(genpd); + + /* try to power down A4LC after A3RV is requested off */ + pm_genpd_poweron(&sh7372_a4lc.genpd); + queue_work(pm_wq, &sh7372_a4lc.genpd.power_off_work); + + return ret; +} + +static int pd_power_down_a4lc(struct generic_pm_domain *genpd) +{ + /* only power down A4LC if A3RV is off */ + if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) + return pd_power_down(genpd); + + return 0; +} + static bool pd_active_wakeup(struct device *dev) { return true; @@ -115,9 +145,18 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) genpd->stop_device = pm_clk_suspend; genpd->start_device = pm_clk_resume; genpd->active_wakeup = pd_active_wakeup; - genpd->power_off = pd_power_down; - genpd->power_on = pd_power_up; - pd_power_up(&sh7372_pd->genpd); + + if (sh7372_pd == &sh7372_a4lc) { + genpd->power_off = pd_power_down_a4lc; + genpd->power_on = pd_power_up; + } else if (sh7372_pd == &sh7372_a3rv) { + genpd->power_off = pd_power_down_a3rv; + genpd->power_on = pd_power_up_a3rv; + } else { + genpd->power_off = pd_power_down; + genpd->power_on = pd_power_up; + } + genpd->power_on(&sh7372_pd->genpd); shmobile_runtime_pm_late_init = sh7372_late_pm_domain_off; } -- cgit v0.10.2 From 6f00ff78278fd5d6ac110b6903ee042af2d6af91 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:10 +0200 Subject: PM / Domains: Set device state to "active" during system resume The runtime PM status of devices in a power domain that is not powered off in pm_genpd_complete() should be set to "active", because those devices are operational at this point. Some of them may not be in use, though, so make pm_genpd_complete() call pm_runtime_idle() in addition to pm_runtime_set_active() for each of them. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1f1a7d8..0e7e91b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -786,7 +786,9 @@ static void pm_genpd_complete(struct device *dev) if (run_complete) { pm_generic_complete(dev); + pm_runtime_set_active(dev); pm_runtime_enable(dev); + pm_runtime_idle(dev); } } -- cgit v0.10.2 From b6c10c84665912985d0bf9b6ae8ce19fc4298d9f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:21 +0200 Subject: PM / Domains: Make failing pm_genpd_prepare() clean up properly If pm_generic_prepare() in pm_genpd_prepare() returns error code, the PM domains counter of "prepared" devices should be decremented and its suspend_power_off flag should be reset if this counter drops down to zero. 
Otherwise, the PM domain runtime PM code will not handle the domain correctly (it will permanently think that system suspend is in progress). Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0e7e91b..9a20d93 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -367,6 +367,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) static int pm_genpd_prepare(struct device *dev) { struct generic_pm_domain *genpd; + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -400,7 +401,16 @@ static int pm_genpd_prepare(struct device *dev) mutex_unlock(&genpd->lock); - return pm_generic_prepare(dev); + ret = pm_generic_prepare(dev); + if (ret) { + mutex_lock(&genpd->lock); + + if (--genpd->prepared_count == 0) + genpd->suspend_power_off = false; + + mutex_unlock(&genpd->lock); + } + return ret; } /** -- cgit v0.10.2 From 17b75eca7683d4942f4d8d00563fd15f37c39589 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:29 +0200 Subject: PM / Domains: Do not execute device callbacks under locks Currently, the .start_device() and .stop_device() callbacks from struct generic_pm_domain() as well as the device drivers' runtime PM callbacks used by the generic PM domains code are executed under the generic PM domain lock. This, unfortunately, is prone to deadlocks, for example if a device and its parent are boths members of the same PM domain. For this reason, it would be better if the PM domains code didn't execute device callbacks under the lock. Rework the locking in the generic PM domains code so that the lock is dropped for the execution of device callbacks. To this end, introduce PM domains states reflecting the current status of a PM domain and such that the PM domain lock cannot be acquired if the status is GPD_STATE_BUSY. Make threads attempting to acquire a PM domain's lock wait until the status changes to either GPD_STATE_ACTIVE or GPD_STATE_POWER_OFF. This change by itself doesn't fix the deadlock problem mentioned above, but the mechanism introduced by it will be used for for this purpose by a subsequent patch. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 9a20d93..d06f3bb 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #ifdef CONFIG_PM @@ -30,6 +32,34 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) genpd->sd_count--; } +static void genpd_acquire_lock(struct generic_pm_domain *genpd) +{ + DEFINE_WAIT(wait); + + mutex_lock(&genpd->lock); + /* + * Wait for the domain to transition into either the active, + * or the power off state. + */ + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (genpd->status != GPD_STATE_BUSY) + break; + mutex_unlock(&genpd->lock); + + schedule(); + + mutex_lock(&genpd->lock); + } + finish_wait(&genpd->status_wait_queue, &wait); +} + +static void genpd_release_lock(struct generic_pm_domain *genpd) +{ + mutex_unlock(&genpd->lock); +} + /** * pm_genpd_poweron - Restore power to a given PM domain and its parents. * @genpd: PM domain to power up. 
@@ -39,22 +69,50 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) */ int pm_genpd_poweron(struct generic_pm_domain *genpd) { + struct generic_pm_domain *parent = genpd->parent; + DEFINE_WAIT(wait); int ret = 0; start: - if (genpd->parent) - mutex_lock(&genpd->parent->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + if (parent) { + mutex_lock(&parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + } else { + mutex_lock(&genpd->lock); + } + /* + * Wait for the domain to transition into either the active, + * or the power off state. + */ + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (genpd->status != GPD_STATE_BUSY) + break; + mutex_unlock(&genpd->lock); + if (parent) + mutex_unlock(&parent->lock); + + schedule(); - if (!genpd->power_is_off + if (parent) { + mutex_lock(&parent->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + } else { + mutex_lock(&genpd->lock); + } + } + finish_wait(&genpd->status_wait_queue, &wait); + + if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) goto out; - if (genpd->parent && genpd->parent->power_is_off) { + if (parent && parent->status != GPD_STATE_ACTIVE) { mutex_unlock(&genpd->lock); - mutex_unlock(&genpd->parent->lock); + mutex_unlock(&parent->lock); - ret = pm_genpd_poweron(genpd->parent); + ret = pm_genpd_poweron(parent); if (ret) return ret; @@ -67,14 +125,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) goto out; } - genpd->power_is_off = false; - if (genpd->parent) - genpd->parent->sd_count++; + genpd->status = GPD_STATE_ACTIVE; + if (parent) + parent->sd_count++; out: mutex_unlock(&genpd->lock); - if (genpd->parent) - mutex_unlock(&genpd->parent->lock); + if (parent) + mutex_unlock(&parent->lock); return ret; } @@ -90,6 +148,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) */ static int __pm_genpd_save_device(struct dev_list_entry *dle, struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) { struct device *dev = dle->dev; struct device_driver *drv = dev->driver; @@ -98,6 +157,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle, if (dle->need_restore) return 0; + mutex_unlock(&genpd->lock); + if (drv && drv->pm && drv->pm->runtime_suspend) { if (genpd->start_device) genpd->start_device(dev); @@ -108,6 +169,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle, genpd->stop_device(dev); } + mutex_lock(&genpd->lock); + if (!ret) dle->need_restore = true; @@ -121,6 +184,7 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle, */ static void __pm_genpd_restore_device(struct dev_list_entry *dle, struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) { struct device *dev = dle->dev; struct device_driver *drv = dev->driver; @@ -128,6 +192,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, if (!dle->need_restore) return; + mutex_unlock(&genpd->lock); + if (drv && drv->pm && drv->pm->runtime_resume) { if (genpd->start_device) genpd->start_device(dev); @@ -138,6 +204,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, genpd->stop_device(dev); } + mutex_lock(&genpd->lock); + dle->need_restore = false; } @@ -150,13 +218,14 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, * the @genpd's devices' drivers and remove power from @genpd. 
*/ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) + __releases(&genpd->lock) __acquires(&genpd->lock) { struct generic_pm_domain *parent; struct dev_list_entry *dle; unsigned int not_suspended; int ret; - if (genpd->power_is_off || genpd->prepared_count > 0) + if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0) return 0; if (genpd->sd_count > 0) @@ -175,22 +244,36 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) return -EAGAIN; } + genpd->status = GPD_STATE_BUSY; + list_for_each_entry_reverse(dle, &genpd->dev_list, node) { ret = __pm_genpd_save_device(dle, genpd); if (ret) goto err_dev; } + mutex_unlock(&genpd->lock); + + parent = genpd->parent; + if (parent) { + genpd_acquire_lock(parent); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + } else { + mutex_lock(&genpd->lock); + } + if (genpd->power_off) genpd->power_off(genpd); - genpd->power_is_off = true; + genpd->status = GPD_STATE_POWER_OFF; + wake_up_all(&genpd->status_wait_queue); - parent = genpd->parent; if (parent) { genpd_sd_counter_dec(parent); if (parent->sd_count == 0) queue_work(pm_wq, &parent->power_off_work); + + genpd_release_lock(parent); } return 0; @@ -199,6 +282,9 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) list_for_each_entry_continue(dle, &genpd->dev_list, node) __pm_genpd_restore_device(dle, genpd); + genpd->status = GPD_STATE_ACTIVE; + wake_up_all(&genpd->status_wait_queue); + return ret; } @@ -212,13 +298,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - if (genpd->parent) - mutex_lock(&genpd->parent->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + genpd_acquire_lock(genpd); pm_genpd_poweroff(genpd); - mutex_unlock(&genpd->lock); - if (genpd->parent) - mutex_unlock(&genpd->parent->lock); + genpd_release_lock(genpd); } /** @@ -239,23 +321,17 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->parent) - mutex_lock(&genpd->parent->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); - if (genpd->stop_device) { int ret = genpd->stop_device(dev); if (ret) - goto out; + return ret; } + + genpd_acquire_lock(genpd); genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; - - out: - mutex_unlock(&genpd->lock); - if (genpd->parent) - mutex_unlock(&genpd->parent->lock); + genpd_release_lock(genpd); return 0; } @@ -276,9 +352,6 @@ static void __pm_genpd_runtime_resume(struct device *dev, break; } } - - if (genpd->start_device) - genpd->start_device(dev); } /** @@ -304,9 +377,15 @@ static int pm_genpd_runtime_resume(struct device *dev) if (ret) return ret; - mutex_lock(&genpd->lock); + genpd_acquire_lock(genpd); + genpd->status = GPD_STATE_BUSY; __pm_genpd_runtime_resume(dev, genpd); - mutex_unlock(&genpd->lock); + genpd->status = GPD_STATE_ACTIVE; + wake_up_all(&genpd->status_wait_queue); + genpd_release_lock(genpd); + + if (genpd->start_device) + genpd->start_device(dev); return 0; } @@ -339,7 +418,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) { struct generic_pm_domain *parent = genpd->parent; - if (genpd->power_is_off) + if (genpd->status == GPD_STATE_POWER_OFF) return; if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) @@ -348,7 +427,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) if (genpd->power_off) genpd->power_off(genpd); - genpd->power_is_off = true; + genpd->status = 
GPD_STATE_POWER_OFF; if (parent) { genpd_sd_counter_dec(parent); pm_genpd_sync_poweroff(parent); @@ -375,32 +454,41 @@ static int pm_genpd_prepare(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - mutex_lock(&genpd->lock); + /* + * If a wakeup request is pending for the device, it should be woken up + * at this point and a system wakeup event should be reported if it's + * set up to wake up the system from sleep states. + */ + pm_runtime_get_noresume(dev); + if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) + pm_wakeup_event(dev, 0); + + if (pm_wakeup_pending()) { + pm_runtime_put_sync(dev); + return -EBUSY; + } + + genpd_acquire_lock(genpd); if (genpd->prepared_count++ == 0) - genpd->suspend_power_off = genpd->power_is_off; + genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; + + genpd_release_lock(genpd); if (genpd->suspend_power_off) { - mutex_unlock(&genpd->lock); + pm_runtime_put_noidle(dev); return 0; } /* - * If the device is in the (runtime) "suspended" state, call - * .start_device() for it, if defined. - */ - if (pm_runtime_suspended(dev)) - __pm_genpd_runtime_resume(dev, genpd); - - /* - * Do not check if runtime resume is pending at this point, because it - * has been taken care of already and if pm_genpd_poweron() ran at this - * point as a result of the check, it would deadlock. + * The PM domain must be in the GPD_STATE_ACTIVE state at this point, + * so pm_genpd_poweron() will return immediately, but if the device + * is suspended (e.g. it's been stopped by .stop_device()), we need + * to make it operational. */ + pm_runtime_resume(dev); __pm_runtime_disable(dev, false); - mutex_unlock(&genpd->lock); - ret = pm_generic_prepare(dev); if (ret) { mutex_lock(&genpd->lock); @@ -409,7 +497,10 @@ static int pm_genpd_prepare(struct device *dev) genpd->suspend_power_off = false; mutex_unlock(&genpd->lock); + pm_runtime_enable(dev); } + + pm_runtime_put_sync(dev); return ret; } @@ -726,7 +817,7 @@ static int pm_genpd_restore_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. 
*/ - genpd->power_is_off = true; + genpd->status = GPD_STATE_POWER_OFF; if (genpd->suspend_power_off) { /* * The boot kernel might put the domain into the power on state, @@ -836,9 +927,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; - mutex_lock(&genpd->lock); + genpd_acquire_lock(genpd); - if (genpd->power_is_off) { + if (genpd->status == GPD_STATE_POWER_OFF) { ret = -EINVAL; goto out; } @@ -870,7 +961,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) spin_unlock_irq(&dev->power.lock); out: - mutex_unlock(&genpd->lock); + genpd_release_lock(genpd); return ret; } @@ -891,7 +982,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; - mutex_lock(&genpd->lock); + genpd_acquire_lock(genpd); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -915,7 +1006,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, } out: - mutex_unlock(&genpd->lock); + genpd_release_lock(genpd); return ret; } @@ -934,9 +1025,19 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) return -EINVAL; - mutex_lock(&genpd->lock); + start: + genpd_acquire_lock(genpd); + mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); - if (genpd->power_is_off && !new_subdomain->power_is_off) { + if (new_subdomain->status != GPD_STATE_POWER_OFF + && new_subdomain->status != GPD_STATE_ACTIVE) { + mutex_unlock(&new_subdomain->lock); + genpd_release_lock(genpd); + goto start; + } + + if (genpd->status == GPD_STATE_POWER_OFF + && new_subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; goto out; } @@ -948,17 +1049,14 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, } } - mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); - list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); new_subdomain->parent = genpd; - if (!subdomain->power_is_off) + if (subdomain->status != GPD_STATE_POWER_OFF) genpd->sd_count++; - mutex_unlock(&new_subdomain->lock); - out: - mutex_unlock(&genpd->lock); + mutex_unlock(&new_subdomain->lock); + genpd_release_lock(genpd); return ret; } @@ -977,7 +1075,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) return -EINVAL; - mutex_lock(&genpd->lock); + start: + genpd_acquire_lock(genpd); list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { if (subdomain != target) @@ -985,9 +1084,16 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); + if (subdomain->status != GPD_STATE_POWER_OFF + && subdomain->status != GPD_STATE_ACTIVE) { + mutex_unlock(&subdomain->lock); + genpd_release_lock(genpd); + goto start; + } + list_del(&subdomain->sd_node); subdomain->parent = NULL; - if (!subdomain->power_is_off) + if (subdomain->status != GPD_STATE_POWER_OFF) genpd_sd_counter_dec(genpd); mutex_unlock(&subdomain->lock); @@ -996,7 +1102,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - mutex_unlock(&genpd->lock); + genpd_release_lock(genpd); return ret; } @@ -1022,7 +1128,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd, INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); genpd->in_progress = 0; genpd->sd_count = 0; - genpd->power_is_off = is_off; + genpd->status = is_off ? 
GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; + init_waitqueue_head(&genpd->status_wait_queue); genpd->device_count = 0; genpd->suspended_count = 0; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 14fb095..c71457c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -11,8 +11,11 @@ #include -#define GPD_IN_SUSPEND 1 -#define GPD_POWER_OFF 2 +enum gpd_status { + GPD_STATE_ACTIVE = 0, /* PM domain is active */ + GPD_STATE_BUSY, /* Something is happening to the PM domain */ + GPD_STATE_POWER_OFF, /* PM domain is off */ +}; struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); @@ -29,7 +32,8 @@ struct generic_pm_domain { struct work_struct power_off_work; unsigned int in_progress; /* Number of devices being suspended now */ unsigned int sd_count; /* Number of subdomains with power "on" */ - bool power_is_off; /* Whether or not power has been removed */ + enum gpd_status status; /* Current state of the domain */ + wait_queue_head_t status_wait_queue; unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ -- cgit v0.10.2 From c6d22b37263607ba5aeeb2e11169fa65caa29bee Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:36 +0200 Subject: PM / Domains: Allow callbacks to execute all runtime PM helpers A deadlock may occur if one of the PM domains' .start_device() or .stop_device() callbacks or a device driver's .runtime_suspend() or .runtime_resume() callback executed by the core generic PM domain code uses a "wrong" runtime PM helper function. This happens, for example, if .runtime_resume() from one device's driver calls pm_runtime_resume() for another device in the same PM domain. A similar situation may take place if a device's parent is in the same PM domain, in which case the runtime PM framework may execute pm_genpd_runtime_resume() automatically for the parent (if it is suspended at the moment). This, of course, is undesirable, so the generic PM domains code should be modified to prevent it from happening. The runtime PM framework guarantees that pm_genpd_runtime_suspend() and pm_genpd_runtime_resume() won't be executed in parallel for the same device, so the generic PM domains code need not worry about those cases. Still, it needs to prevent the other possible race conditions between pm_genpd_runtime_suspend(), pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff() from happening and it needs to avoid deadlocks at the same time. To this end, modify the generic PM domains code to relax synchronization rules so that: * pm_genpd_poweron() doesn't wait for the PM domain status to change from GPD_STATE_BUSY. If it finds that the status is not GPD_STATE_POWER_OFF, it returns without powering the domain on (it may modify the status depending on the circumstances). * pm_genpd_poweroff() returns as soon as it finds that the PM domain's status changed from GPD_STATE_BUSY after it's released the PM domain's lock. * pm_genpd_runtime_suspend() doesn't wait for the PM domain status to change from GPD_STATE_BUSY after executing the domain's .stop_device() callback and executes pm_genpd_poweroff() only if pm_genpd_runtime_resume() is not executed in parallel. 
* pm_genpd_runtime_resume() doesn't wait for the PM domain status to change from GPD_STATE_BUSY after executing pm_genpd_poweron() and sets the domain's status to GPD_STATE_BUSY and increments its counter of resuming devices (introduced by this change) immediately after acquiring the lock. The counter of resuming devices is then decremented after executing __pm_genpd_runtime_resume() for the device and the domain's status is reset to GPD_STATE_ACTIVE (unless there are more resuming devices in the domain, in which case the status remains GPD_STATE_BUSY). This way, for example, if a device driver's .runtime_resume() callback executes pm_runtime_resume() for another device in the same PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume() invoked by the runtime PM framework will not block and it will see that there's nothing to do for it. Next, the PM domain's lock will be acquired without waiting for its status to change from GPD_STATE_BUSY and the device driver's .runtime_resume() callback will be executed. In turn, if pm_runtime_suspend() is executed by one device driver's .runtime_resume() callback for another device in the same PM domain, pm_genpd_poweroff() executed by pm_genpd_runtime_suspend() invoked by the runtime PM framework as a result will notice that one of the devices in the domain is being resumed, so it will return immediately. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index d06f3bb..7e6cc8a 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -44,7 +44,8 @@ static void genpd_acquire_lock(struct generic_pm_domain *genpd) for (;;) { prepare_to_wait(&genpd->status_wait_queue, &wait, TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_BUSY) + if (genpd->status == GPD_STATE_ACTIVE + || genpd->status == GPD_STATE_POWER_OFF) break; mutex_unlock(&genpd->lock); @@ -60,6 +61,12 @@ static void genpd_release_lock(struct generic_pm_domain *genpd) mutex_unlock(&genpd->lock); } +static void genpd_set_active(struct generic_pm_domain *genpd) +{ + if (genpd->resume_count == 0) + genpd->status = GPD_STATE_ACTIVE; +} + /** * pm_genpd_poweron - Restore power to a given PM domain and its parents. * @genpd: PM domain to power up. @@ -75,42 +82,24 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) start: if (parent) { - mutex_lock(&parent->lock); + genpd_acquire_lock(parent); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); } else { mutex_lock(&genpd->lock); } - /* - * Wait for the domain to transition into either the active, - * or the power off state. 
- */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_BUSY) - break; - mutex_unlock(&genpd->lock); - if (parent) - mutex_unlock(&parent->lock); - - schedule(); - - if (parent) { - mutex_lock(&parent->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); - } else { - mutex_lock(&genpd->lock); - } - } - finish_wait(&genpd->status_wait_queue, &wait); if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) goto out; + if (genpd->status != GPD_STATE_POWER_OFF) { + genpd_set_active(genpd); + goto out; + } + if (parent && parent->status != GPD_STATE_ACTIVE) { mutex_unlock(&genpd->lock); - mutex_unlock(&parent->lock); + genpd_release_lock(parent); ret = pm_genpd_poweron(parent); if (ret) @@ -125,14 +114,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) goto out; } - genpd->status = GPD_STATE_ACTIVE; + genpd_set_active(genpd); if (parent) parent->sd_count++; out: mutex_unlock(&genpd->lock); if (parent) - mutex_unlock(&parent->lock); + genpd_release_lock(parent); return ret; } @@ -210,6 +199,20 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, } /** + * genpd_abort_poweroff - Check if a PM domain power off should be aborted. + * @genpd: PM domain to check. + * + * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during + * a "power off" operation, which means that a "power on" has occured in the + * meantime, or if its resume_count field is different from zero, which means + * that one of its devices has been resumed in the meantime. + */ +static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) +{ + return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; +} + +/** * pm_genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. * @@ -223,9 +226,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) struct generic_pm_domain *parent; struct dev_list_entry *dle; unsigned int not_suspended; - int ret; + int ret = 0; - if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0) + start: + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) System suspend is in progress. + * (3) One of the domain's devices is being resumed right now. + */ + if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 + || genpd->resume_count > 0) return 0; if (genpd->sd_count > 0) @@ -239,34 +250,54 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (not_suspended > genpd->in_progress) return -EBUSY; + if (genpd->poweroff_task) { + /* + * Another instance of pm_genpd_poweroff() is executing + * callbacks, so tell it to start over and return. 
+ */ + genpd->status = GPD_STATE_REPEAT; + return 0; + } + if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } genpd->status = GPD_STATE_BUSY; + genpd->poweroff_task = current; list_for_each_entry_reverse(dle, &genpd->dev_list, node) { ret = __pm_genpd_save_device(dle, genpd); if (ret) goto err_dev; - } - mutex_unlock(&genpd->lock); + if (genpd_abort_poweroff(genpd)) + goto out; + + if (genpd->status == GPD_STATE_REPEAT) { + genpd->poweroff_task = NULL; + goto start; + } + } parent = genpd->parent; if (parent) { + mutex_unlock(&genpd->lock); + genpd_acquire_lock(parent); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); - } else { - mutex_lock(&genpd->lock); + + if (genpd_abort_poweroff(genpd)) { + genpd_release_lock(parent); + goto out; + } } if (genpd->power_off) genpd->power_off(genpd); genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); if (parent) { genpd_sd_counter_dec(parent); @@ -276,16 +307,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_release_lock(parent); } - return 0; + out: + genpd->poweroff_task = NULL; + wake_up_all(&genpd->status_wait_queue); + return ret; err_dev: list_for_each_entry_continue(dle, &genpd->dev_list, node) __pm_genpd_restore_device(dle, genpd); - genpd->status = GPD_STATE_ACTIVE; - wake_up_all(&genpd->status_wait_queue); - - return ret; + genpd_set_active(genpd); + goto out; } /** @@ -327,11 +359,11 @@ static int pm_genpd_runtime_suspend(struct device *dev) return ret; } - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return 0; } @@ -365,6 +397,7 @@ static void __pm_genpd_runtime_resume(struct device *dev, static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; + DEFINE_WAIT(wait); int ret; dev_dbg(dev, "%s()\n", __func__); @@ -377,12 +410,31 @@ static int pm_genpd_runtime_resume(struct device *dev) if (ret) return ret; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); genpd->status = GPD_STATE_BUSY; + genpd->resume_count++; + for (;;) { + prepare_to_wait(&genpd->status_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + /* + * If current is the powering off task, we have been called + * reentrantly from one of the device callbacks, so we should + * not wait. + */ + if (!genpd->poweroff_task || genpd->poweroff_task == current) + break; + mutex_unlock(&genpd->lock); + + schedule(); + + mutex_lock(&genpd->lock); + } + finish_wait(&genpd->status_wait_queue, &wait); __pm_genpd_runtime_resume(dev, genpd); - genpd->status = GPD_STATE_ACTIVE; + genpd->resume_count--; + genpd_set_active(genpd); wake_up_all(&genpd->status_wait_queue); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->start_device) genpd->start_device(dev); @@ -1130,6 +1182,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->sd_count = 0; genpd->status = is_off ? 
GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; init_waitqueue_head(&genpd->status_wait_queue); + genpd->poweroff_task = NULL; + genpd->resume_count = 0; genpd->device_count = 0; genpd->suspended_count = 0; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index c71457c..feb80af 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -14,6 +14,7 @@ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_BUSY, /* Something is happening to the PM domain */ + GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ GPD_STATE_POWER_OFF, /* PM domain is off */ }; @@ -34,6 +35,8 @@ struct generic_pm_domain { unsigned int sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ wait_queue_head_t status_wait_queue; + struct task_struct *poweroff_task; /* Powering off task */ + unsigned int resume_count; /* Number of devices being resumed */ unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ -- cgit v0.10.2 From 697a7f3727b53c7d4c927948bbe1f6afc4fabfde Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:48 +0200 Subject: PM / Domains: Do not restore all devices on power off error Since every device in a PM domain has its own need_restore flag, which is set by __pm_genpd_save_device(), there's no need to walk the domain's device list and restore all devices on an error from one of the drivers' .runtime_suspend() callbacks. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7e6cc8a..7b20801 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -269,8 +269,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) list_for_each_entry_reverse(dle, &genpd->dev_list, node) { ret = __pm_genpd_save_device(dle, genpd); - if (ret) - goto err_dev; + if (ret) { + genpd_set_active(genpd); + goto out; + } if (genpd_abort_poweroff(genpd)) goto out; @@ -311,13 +313,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd->poweroff_task = NULL; wake_up_all(&genpd->status_wait_queue); return ret; - - err_dev: - list_for_each_entry_continue(dle, &genpd->dev_list, node) - __pm_genpd_restore_device(dle, genpd); - - genpd_set_active(genpd); - goto out; } /** -- cgit v0.10.2 From 4ecd6e651dd25ebbf0cc53c68162c0ab08641725 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:39:57 +0200 Subject: PM / Domains: Improve handling of wakeup devices during system suspend Kevin points out that if there's a device that can wake up the system from sleep states, but it doesn't generate wakeup signals by itself (they are generated on its behalf by other parts of the system) and it currently is not enabled to wake up the system (that is, device_may_wakeup() returns "false" for it), we may need to change its wakeup settings during system suspend (for example, the device might have been configured to signal remote wakeup from the system's working state, as needed by runtime PM). Therefore the generic PM domains code should invoke the system suspend callbacks provided by the device's driver, which it doesn't do if the PM domain is powered off during the system suspend's "prepare" stage. This is a valid point. 
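The decision being described can be summarized as follows (a sketch, not the patch itself; the domain_active_wakeup flag stands in for the genpd->active_wakeup() callback used by the resume_needed() helper added in the diff below):

	#include <linux/pm_wakeup.h>	/* device_can_wakeup(), device_may_wakeup() */

	/* should the device be runtime-resumed before system suspend? */
	static bool needs_resume(struct device *dev, bool domain_active_wakeup)
	{
		if (!device_can_wakeup(dev))
			return false;			/* nothing to reconfigure */

		if (device_may_wakeup(dev))
			return domain_active_wakeup;	/* must stay active to wake the system */

		/* not enabled for system wakeup: its wakeup setup may need changing */
		return !domain_active_wakeup;
	}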
Moreover, this code also should make sure that system wakeup devices that are enabled to wake up the system from sleep states and have to remain active for this purpose are not suspended while the system is in a sleep state. To avoid the above issues, make the generic PM domains' .prepare() routine, pm_genpd_prepare(), force runtime resume of devices whose system wakeup settings may need to be changed during system suspend or that should remain active while the system is in a sleep state to be able to wake it up from that state. Reported-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7b20801..b6e29ff 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -482,6 +482,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) } /** + * resume_needed - Check whether to resume a device before system suspend. + * @dev: Device to check. + * @genpd: PM domain the device belongs to. + * + * There are two cases in which a device that can wake up the system from sleep + * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled + * to wake up the system and it has to remain active for this purpose while the + * system is in the sleep state and (2) if the device is not enabled to wake up + * the system from sleep states and it generally doesn't generate wakeup signals + * by itself (those signals are generated on its behalf by other parts of the + * system). In the latter case it may be necessary to reconfigure the device's + * wakeup settings during system suspend, because it may have been set up to + * signal remote wakeup from the system's working state as needed by runtime PM. + * Return 'true' in either of the above cases. + */ +static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) +{ + bool active_wakeup; + + if (!device_can_wakeup(dev)) + return false; + + active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); + return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; +} + +/** * pm_genpd_prepare - Start power transition of a device in a PM domain. * @dev: Device to start the transition of. * @@ -515,6 +542,9 @@ static int pm_genpd_prepare(struct device *dev) return -EBUSY; } + if (resume_needed(dev, genpd)) + pm_runtime_resume(dev); + genpd_acquire_lock(genpd); if (genpd->prepared_count++ == 0) -- cgit v0.10.2 From 56375fd420f851944960bd53dbb08d674f4d9406 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 12 Jul 2011 00:40:03 +0200 Subject: PM / Domains: Queue up power off work only if it is not pending In theory it is possible that pm_genpd_poweroff() for two different subdomains of the same parent domain will attempt to queue up the execution of pm_genpd_poweroff() for the parent twice in a row. This would lead to unpleasant consequences, so prevent it from happening by checking if genpd->power_off_work is pending before attempting to queue it up. Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index b6e29ff..c3e4e29 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -213,6 +213,19 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) } /** + * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * @genpd: PM domait to power off. + * + * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * before. 
+ */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + if (!work_pending(&genpd->power_off_work)) + queue_work(pm_wq, &genpd->power_off_work); +} + +/** * pm_genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. * @@ -304,7 +317,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (parent) { genpd_sd_counter_dec(parent); if (parent->sd_count == 0) - queue_work(pm_wq, &parent->power_off_work); + genpd_queue_power_off_work(parent); genpd_release_lock(parent); } -- cgit v0.10.2 From f3393b62f157cc87f8d78247e97b87778dc077b8 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 12 Jul 2011 11:17:09 +0200 Subject: PM / Runtime: Add new helper function: pm_runtime_status_suspended() This boolean function simply returns whether or not the runtime status of the device is 'suspended'. Unlike pm_runtime_suspended(), this function returns the runtime status whether or not runtime PM for the device has been disabled or not. Also add entry to Documentation/power/runtime.txt Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 40e47c7..14dd3c6 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -412,6 +412,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: - return true if the device's runtime PM status is 'suspended' and its 'power.disable_depth' field is equal to zero, or false otherwise + bool pm_runtime_status_suspended(struct device *dev); + - return true if the device's runtime PM status is 'suspended' + void pm_runtime_allow(struct device *dev); - set the power.runtime_auto flag for the device and decrease its usage counter (used by the /sys/devices/.../power/control interface to diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index dfb8539..daac05d 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -82,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev) && !dev->power.disable_depth; } +static inline bool pm_runtime_status_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED; +} + static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -130,6 +135,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } -- cgit v0.10.2 From 256a5435975e344b975f89c5434aa6f6eeb03fa1 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 12 Jul 2011 22:48:03 +0200 Subject: OMAP: PM: omap_device: conditionally use PM domain runtime helpers Only build and use the runtime PM helper functions only when runtime PM is actually enabled. Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index d21579b..f7d2ff7 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od) return 0; } +#ifdef CONFIG_PM_RUNTIME static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -563,12 +564,12 @@ static int _od_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } +#endif static struct dev_pm_domain omap_device_pm_domain = { .ops = { - .runtime_suspend = _od_runtime_suspend, - .runtime_idle = _od_runtime_idle, - .runtime_resume = _od_runtime_resume, + SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, + _od_runtime_idle) USE_PLATFORM_PM_SLEEP_OPS } }; -- cgit v0.10.2 From c03f007a8bf0e092caeb6856a5c8a850df10b974 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 12 Jul 2011 22:48:19 +0200 Subject: OMAP: PM: omap_device: add system PM methods for PM domain handling In the omap_device PM domain callbacks, use omap_device idle/enable to automatically manage device idle states during system suspend/resume. If an omap_device has not already been runtime suspended, the ->suspend_noirq() method of the PM domain will use omap_device_idle() to idle the HW after calling the driver's ->runtime_suspend() callback. Similarily, upon resume, if the device was suspended during ->suspend_noirq(), the ->resume_noirq() method of the PM domain will use omap_device_enable() to enable the HW and then call the driver's ->runtime_resume() callback. If a device has already been runtime suspended, the noirq methods of the PM domain leave the device runtime suspended by default. However, if a driver needs to runtime resume a device during suspend (for example, to change its wakeup settings), it may do so using pm_runtime_get* in it's ->suspend() callback. Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h index e4c349f..bc36d05 100644 --- a/arch/arm/plat-omap/include/plat/omap_device.h +++ b/arch/arm/plat-omap/include/plat/omap_device.h @@ -44,6 +44,9 @@ extern struct device omap_device_parent; #define OMAP_DEVICE_STATE_IDLE 2 #define OMAP_DEVICE_STATE_SHUTDOWN 3 +/* omap_device.flags values */ +#define OMAP_DEVICE_SUSPENDED BIT(0) + /** * struct omap_device - omap_device wrapper for platform_devices * @pdev: platform_device @@ -73,6 +76,7 @@ struct omap_device { s8 pm_lat_level; u8 hwmods_cnt; u8 _state; + u8 flags; }; /* Device driver interface (call via platform_data fn ptrs) */ diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index f7d2ff7..b93cfdc 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -566,11 +566,47 @@ static int _od_runtime_resume(struct device *dev) } #endif +#ifdef CONFIG_SUSPEND +static int _od_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + int ret; + + ret = pm_generic_suspend_noirq(dev); + + if (!ret && !pm_runtime_status_suspended(dev)) { + if (pm_generic_runtime_suspend(dev) == 0) { + omap_device_idle(pdev); + od->flags |= OMAP_DEVICE_SUSPENDED; + } + } + + return ret; +} + +static int _od_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + + if ((od->flags & OMAP_DEVICE_SUSPENDED) && + !pm_runtime_status_suspended(dev)) { + od->flags &= ~OMAP_DEVICE_SUSPENDED; + omap_device_enable(pdev); + pm_generic_runtime_resume(dev); + } + + return pm_generic_resume_noirq(dev); +} +#endif + static struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, _od_runtime_idle) USE_PLATFORM_PM_SLEEP_OPS + SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) } }; -- cgit v0.10.2 From 80c6d1e65c25fbf198b463cfaec015820fb9bcdc Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 12 Jul 2011 22:48:29 +0200 Subject: OMAP: PM: omap_device: add API to disable idle on suspend By default, omap_devices will be automatically idled on suspend (and re-enabled on resume.) Using this new API, device init code can disable this feature if desired. NOTE: any driver/device that has been runtime PM converted should not be using this API. Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. 
Wysocki diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h index bc36d05..ee405b36 100644 --- a/arch/arm/plat-omap/include/plat/omap_device.h +++ b/arch/arm/plat-omap/include/plat/omap_device.h @@ -46,6 +46,7 @@ extern struct device omap_device_parent; /* omap_device.flags values */ #define OMAP_DEVICE_SUSPENDED BIT(0) +#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1) /** * struct omap_device - omap_device wrapper for platform_devices @@ -121,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od); int omap_device_disable_clocks(struct omap_device *od); int omap_device_enable_clocks(struct omap_device *od); +static inline void omap_device_disable_idle_on_suspend(struct omap_device *od) +{ + od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND; +} /* * Entries should be kept in latency order ascending diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index b93cfdc..2526fa3 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -573,6 +573,9 @@ static int _od_suspend_noirq(struct device *dev) struct omap_device *od = to_omap_device(pdev); int ret; + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return pm_generic_suspend_noirq(dev); + ret = pm_generic_suspend_noirq(dev); if (!ret && !pm_runtime_status_suspended(dev)) { @@ -590,6 +593,9 @@ static int _od_resume_noirq(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return pm_generic_resume_noirq(dev); + if ((od->flags & OMAP_DEVICE_SUSPENDED) && !pm_runtime_status_suspended(dev)) { od->flags &= ~OMAP_DEVICE_SUSPENDED; -- cgit v0.10.2 From c8c9fda5069456eb9e0e403c19764b2e257802e1 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 12 Jul 2011 22:48:42 +0200 Subject: OMAP: PM: disable idle on suspend for GPIO and UART Until these drivers are runtime PM converted, their device power states are managed by calling custom driver hooks late in the idle/suspend path. Therefore, do not let the suspend/resume core code automatically idle these devices since they will be managed manually by the OMAP PM core very late in the idle/suspend path. Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c index 9529842..48e5ece 100644 --- a/arch/arm/mach-omap2/gpio.c +++ b/arch/arm/mach-omap2/gpio.c @@ -87,6 +87,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) return PTR_ERR(od); } + omap_device_disable_idle_on_suspend(od); + gpio_bank_count++; return 0; } diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 1ac361b..466fc722 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata) WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n", name, oh->name); + omap_device_disable_idle_on_suspend(od); oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); uart->irq = oh->mpu_irqs[0].irq; -- cgit v0.10.2 From 5125bbf3880755419eff68672623cde49c4f31e8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 13 Jul 2011 12:31:52 +0200 Subject: PM / Domains: Introduce function to power off all unused PM domains Add a new function pm_genpd_poweroff_unused() queuing up the execution of pm_genpd_poweroff() for every initialized generic PM domain. 
Calling it will cause every generic PM domain without devices in use to be powered off. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index c3e4e29..c2c537d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -16,6 +16,9 @@ #include #include +static LIST_HEAD(gpd_list); +static DEFINE_MUTEX(gpd_list_lock); + #ifdef CONFIG_PM static struct generic_pm_domain *dev_to_genpd(struct device *dev) @@ -1241,4 +1244,22 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; genpd->domain.ops.restore = pm_genpd_restore; genpd->domain.ops.complete = pm_genpd_complete; + mutex_lock(&gpd_list_lock); + list_add(&genpd->gpd_list_node, &gpd_list); + mutex_unlock(&gpd_list_lock); +} + +/** + * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. + */ +void pm_genpd_poweroff_unused(void) +{ + struct generic_pm_domain *genpd; + + mutex_lock(&gpd_list_lock); + + list_for_each_entry(genpd, &gpd_list, gpd_list_node) + genpd_queue_power_off_work(genpd); + + mutex_unlock(&gpd_list_lock); } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index feb80af..3e4f3d3 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -24,6 +24,7 @@ struct dev_power_governor { struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ + struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head sd_node; /* Node in the parent's subdomain list */ struct generic_pm_domain *parent; /* Parent PM domain */ struct list_head sd_list; /* List of dubdomains */ @@ -71,6 +72,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); extern int pm_genpd_poweron(struct generic_pm_domain *genpd); +extern void pm_genpd_poweroff_unused(void); #else static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) @@ -98,6 +100,7 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) { return -ENOSYS; } +static inline void pm_genpd_poweroff_unused(void) {} #endif #endif /* _LINUX_PM_DOMAIN_H */ -- cgit v0.10.2 From 796204142a98b6e0e71b494e808d1b6ee62cc75f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 13 Jul 2011 12:32:07 +0200 Subject: ARM / shmobile: Use pm_genpd_poweroff_unused() Make shmobile use pm_genpd_poweroff_unused() instead of the open-coded powering off PM domains without devices in use. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 73a76d7..06aecb3 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -12,7 +12,6 @@ extern struct platform_suspend_ops shmobile_suspend_ops; struct cpuidle_device; extern void (*shmobile_cpuidle_modes[])(void); extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); -extern void (*shmobile_runtime_pm_late_init)(void); extern void sh7367_init_irq(void); extern void sh7367_add_early_devices(void); diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 71400ea..f47281a 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -126,17 +126,6 @@ static bool pd_active_wakeup(struct device *dev) return true; } -static void sh7372_late_pm_domain_off(void) -{ - /* request power down of unused pm domains */ - queue_work(pm_wq, &sh7372_a4lc.genpd.power_off_work); - queue_work(pm_wq, &sh7372_a4mp.genpd.power_off_work); - queue_work(pm_wq, &sh7372_d4.genpd.power_off_work); - queue_work(pm_wq, &sh7372_a3rv.genpd.power_off_work); - queue_work(pm_wq, &sh7372_a3ri.genpd.power_off_work); - queue_work(pm_wq, &sh7372_a3sg.genpd.power_off_work); -} - void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) { struct generic_pm_domain *genpd = &sh7372_pd->genpd; @@ -157,8 +146,6 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) genpd->power_on = pd_power_up; } genpd->power_on(&sh7372_pd->genpd); - - shmobile_runtime_pm_late_init = sh7372_late_pm_domain_off; } void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 2f6ded5..6ec454e 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -57,12 +58,9 @@ static int __init sh_pm_runtime_init(void) } core_initcall(sh_pm_runtime_init); -void (*shmobile_runtime_pm_late_init)(void); - static int __init sh_pm_runtime_late_init(void) { - if (shmobile_runtime_pm_late_init) - shmobile_runtime_pm_late_init(); + pm_genpd_poweroff_unused(); return 0; } late_initcall(sh_pm_runtime_late_init); -- cgit v0.10.2 From 0bc5b2debb832191a42baea7ff59d2ca6ce9f7d5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 14 Jul 2011 20:59:07 +0200 Subject: ARM / shmobile: Use genpd_queue_power_off_work() Make pd_power_down_a3rv() use genpd_queue_power_off_work() to queue up the powering off of the A4LC domain to avoid queuing it up when it is pending. Signed-off-by: Rafael J. 
Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index f47281a..0b07138 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -107,7 +107,7 @@ static int pd_power_down_a3rv(struct generic_pm_domain *genpd) /* try to power down A4LC after A3RV is requested off */ pm_genpd_poweron(&sh7372_a4lc.genpd); - queue_work(pm_wq, &sh7372_a4lc.genpd.power_off_work); + genpd_queue_power_off_work(&sh7372_a4lc.genpd); return ret; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index c2c537d..00ed4f3 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -222,7 +222,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) * Queue up the execution of pm_genpd_poweroff() unless it's already been done * before. */ -static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { if (!work_pending(&genpd->power_off_work)) queue_work(pm_wq, &genpd->power_off_work); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 3e4f3d3..21097cb 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -73,6 +73,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); extern int pm_genpd_poweron(struct generic_pm_domain *genpd); extern void pm_genpd_poweroff_unused(void); +extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); #else static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) @@ -101,6 +102,7 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) return -ENOSYS; } static inline void pm_genpd_poweroff_unused(void) {} +static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} #endif #endif /* _LINUX_PM_DOMAIN_H */ -- cgit v0.10.2 From d28054020f97c7c9f15327a53945f0f40ffc5d7a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 14 Jul 2011 20:59:20 +0200 Subject: PM / Domains: Take .power_off() error code into account Currently pm_genpd_poweroff() discards error codes returned by the PM domain's .power_off() callback, because it's safer to always regard the domain as inaccessible to drivers after a failing .power_off(). Still, there are situations in which the low-level code may want to indicate that it doesn't want to power off the domain, so allow it to do that by returning -EBUSY from .power_off(). Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 00ed4f3..be8714a 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -312,8 +312,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } } - if (genpd->power_off) - genpd->power_off(genpd); + if (genpd->power_off) { + ret = genpd->power_off(genpd); + if (ret == -EBUSY) { + genpd_set_active(genpd); + if (parent) + genpd_release_lock(parent); + + goto out; + } + } genpd->status = GPD_STATE_POWER_OFF; -- cgit v0.10.2 From 5ca80817e231723f1399bff495854ba2171103ca Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Thu, 14 Jul 2011 20:59:35 +0200 Subject: ARM / shmobile: Return -EBUSY from A4LC power off if A3RV is active Since the A4LC should only be powered off if the A3RV is off, make the A4LC's power down routine return -EBUSY if A3RV is not off to indicate to the core that it doesn't want to power off the domain in that case. This will cause the core to regard A4LC as active, so the pm_genpd_poweron() in pd_power_down_a3rv() is not necessary any more. Signed-off-by: Rafael J. Wysocki Acked-by: Magnus Damm diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 0b07138..933fb41 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -106,7 +106,6 @@ static int pd_power_down_a3rv(struct generic_pm_domain *genpd) int ret = pd_power_down(genpd); /* try to power down A4LC after A3RV is requested off */ - pm_genpd_poweron(&sh7372_a4lc.genpd); genpd_queue_power_off_work(&sh7372_a4lc.genpd); return ret; @@ -118,7 +117,7 @@ static int pd_power_down_a4lc(struct generic_pm_domain *genpd) if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) return pd_power_down(genpd); - return 0; + return -EBUSY; } static bool pd_active_wakeup(struct device *dev) -- cgit v0.10.2 From 99f381d3549432a250fe846a2a82d61a032804b0 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Fri, 10 Jun 2011 20:24:57 +0200 Subject: PM / OPP: Introduce function to free cpufreq table cpufreq table allocated by opp_init_cpufreq_table is better freed by OPP layer itself. This allows future modifications to the table handling to be transparent to the users. Signed-off-by: Nishanth Menon Acked-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt index 5ae70a12..3035d00 100644 --- a/Documentation/power/opp.txt +++ b/Documentation/power/opp.txt @@ -321,6 +321,8 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with addition to CONFIG_PM as power management feature is required to dynamically scale voltage and frequency in a system. +opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table + 7. Data Structures ================== Typically an SoC contains multiple voltage domains which are variable. 
Each diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 56a6899..5cc1232 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev, return 0; } + +/** + * opp_free_cpufreq_table() - free the cpufreq table + * @dev: device for which we do this operation + * @table: table to free + * + * Free up the table allocated by opp_init_cpufreq_table + */ +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + if (!table) + return; + + kfree(*table); + *table = NULL; +} #endif /* CONFIG_CPU_FREQ */ diff --git a/include/linux/opp.h b/include/linux/opp.h index 5449945..7020e97 100644 --- a/include/linux/opp.h +++ b/include/linux/opp.h @@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq) #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) int opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table); +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); #else static inline int opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table) { return -EINVAL; } + +static inline +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ +} #endif /* CONFIG_CPU_FREQ */ #endif /* __LINUX_OPP_H__ */ -- cgit v0.10.2 From 3b5fe85252326217cd96f24a7bda4460d8f71bee Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Sun, 12 Jun 2011 15:57:05 +0200 Subject: PM / Suspend: Add .suspend_again() callback to suspend_ops A system or a device may need to control suspend/wakeup events. It may want to wake the system up after a predefined amount of time, or at a predefined event decided while entering suspend, in order to do polling or delayed work. It may then want to enter suspend again if its own wakeup condition was the only wakeup reason and there are no outstanding events; that way it neither wakes up userspace nor unrelated devices unnecessarily, and the system stays suspended as long as possible, saving power. Waking the system up after a specified time is easily achieved with an RTC. However, entering suspend again immediately, without involving userland and unrelated devices, requires additional support in the suspend framework. The need arises from two use cases: 1. Monitoring a critical device status without an interrupt that can wake up the system (in-suspend polling). An example is ambient temperature monitoring that needs to shut down the system, or a specific device function, if it gets too hot or too cold. The temperature of a specific device may need to be monitored as well; e.g., a charger monitors the battery temperature in order to stop charging if it overheats. 2. Executing critical "delayed work" while suspended. A driver or a system/board may have delayed work (or something similar) that it wants to execute at a requested time. For example, some chargers want to check the battery voltage some time (e.g., 30 seconds) after the battery has been fully charged and the charger has stopped. The charger then restarts charging if the voltage has dropped by more than a threshold, which is smaller than the "restart-charger" voltage, the threshold at which charging is restarted regardless of the time passed. This patch adds a "suspend_again" callback to struct platform_suspend_ops and lets that callback return true if the system is required to enter suspend again after the current instance of wakeup.
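A minimal sketch of how a platform might wire up the new callback is shown below; the board name, the charger-polling helper and the enter() stub are hypothetical and only illustrate the hook added by this patch, they are not taken from the patch itself:

#include <linux/suspend.h>

/* Hypothetical platform-specific check, e.g. "does the charger still need polling?". */
static bool example_board_needs_more_polling(void)
{
	return false;
}

static int example_board_suspend_enter(suspend_state_t state)
{
	/* Platform-specific code to enter the sleep state would go here. */
	return 0;
}

/*
 * Returning true makes suspend_devices_and_enter() loop back into
 * suspend_enter() instead of resuming userspace, provided the suspend
 * sequence succeeded and no other wakeup source is pending.
 */
static bool example_board_suspend_again(void)
{
	return example_board_needs_more_polling();
}

static const struct platform_suspend_ops example_board_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.enter		= example_board_suspend_enter,
	.suspend_again	= example_board_suspend_again,
};

/* Registered once at init time: suspend_set_ops(&example_board_suspend_ops); */
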
A device-wise suspend_again, implemented in dev_pm_ops or syscore, is not added because: a) the suspend_again feature is usually a platform-wide decision and controls the behavior of the whole platform, and b) very few devices are related to the use cases for suspend_again; only chargers and temperature sensors have been mentioned so far. With a suspend_again callback registered in the struct platform_suspend_ops passed to suspend_set_ops() by the platform, the suspend framework in kernel/power/suspend.c tries to enter suspend again by looping over suspend_enter() as long as suspend_again returns true and there have been no errors in the suspend sequence and no pending wakeups (as reported by pm_wakeup_pending()). Tested on Exynos4-NURI. [rjw: Fixed up kerneldoc comment for suspend_enter().] Signed-off-by: MyungJoo Ham Signed-off-by: Kyungmin Park Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 083ffea..e1e3742 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t; * @enter() and @wake(), even if any of them fails. It is executed after * a failing @prepare. * + * @suspend_again: Returns whether the system should suspend again (true) or + * not (false). If the platform wants to poll sensors or execute some + * code during suspended without invoking userspace and most of devices, + * suspend_again callback is the place assuming that periodic-wakeup or + * alarm-wakeup is already setup. This allows to execute some codes while + * being kept suspended in the view of userland and devices. + * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or * the transition to the sleep state has been aborted. @@ -113,6 +120,7 @@ struct platform_suspend_ops { int (*enter)(suspend_state_t state); void (*wake)(void); void (*finish)(void); + bool (*suspend_again)(void); void (*end)(void); void (*recover)(void); }; diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 1c41ba2..b6762f4 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -126,12 +126,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void) } /** - * suspend_enter - enter the desired system sleep state. - * @state: state to enter + * suspend_enter - enter the desired system sleep state. + * @state: State to enter + * @wakeup: Returns information that suspend should not be entered again. * - * This function should be called after devices have been suspended. + * This function should be called after devices have been suspended. 
*/ -static int suspend_enter(suspend_state_t state) +static int suspend_enter(suspend_state_t state, bool *wakeup) { int error; @@ -165,7 +166,8 @@ static int suspend_enter(suspend_state_t state) error = syscore_suspend(); if (!error) { - if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { + *wakeup = pm_wakeup_pending(); + if (!(suspend_test(TEST_CORE) || *wakeup)) { error = suspend_ops->enter(state); events_check_enabled = false; } @@ -199,6 +201,7 @@ static int suspend_enter(suspend_state_t state) int suspend_devices_and_enter(suspend_state_t state) { int error; + bool wakeup = false; if (!suspend_ops) return -ENOSYS; @@ -220,7 +223,10 @@ int suspend_devices_and_enter(suspend_state_t state) if (suspend_test(TEST_DEVICES)) goto Recover_platform; - error = suspend_enter(state); + do { + error = suspend_enter(state, &wakeup); + } while (!error && !wakeup + && suspend_ops->suspend_again && suspend_ops->suspend_again()); Resume_devices: suspend_test_start(); -- cgit v0.10.2 From a5e4fd8783a2bec861ecf1138cdc042269ff59aa Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Mon, 27 Jun 2011 01:01:07 +0200 Subject: PM / Suspend: Export suspend_set_ops, suspend_valid_only_mem Some platforms wish to implement their PM core suspend code as modules. To do so, these functions need to be exported to modules. [rjw: Replaced EXPORT_SYMBOL with EXPORT_SYMBOL_GPL] Reported-by: Jean Pihet Signed-off-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index b6762f4..b6b71ad 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops) suspend_ops = ops; mutex_unlock(&pm_mutex); } +EXPORT_SYMBOL_GPL(suspend_set_ops); bool valid_state(suspend_state_t state) { @@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state) { return state == PM_SUSPEND_MEM; } +EXPORT_SYMBOL_GPL(suspend_valid_only_mem); static int suspend_test(int level) { -- cgit v0.10.2 From 1d8047a6f7973470bb1de4606a6e00c0bbee3cc6 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 27 Jun 2011 01:01:16 +0200 Subject: PM: Add "RTC" to PM trace time stamps to avoid confusion Some users are apparently confused by dmesg output from read_magic_time(), which looks like "real" time and date. Add the "RTC" string to time stamps printed by read_magic_time() to avoid that confusion. Reported-by: Justin P. Mattock Signed-off-by: Rafael J. Wysocki diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index c80e138..af10abe 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -112,7 +112,7 @@ static unsigned int read_magic_time(void) unsigned int val; get_rtc_time(&time); - pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", + pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n", time.tm_hour, time.tm_min, time.tm_sec, time.tm_mon + 1, time.tm_mday, time.tm_year % 100); val = time.tm_year; /* 100 years */ -- cgit v0.10.2 From f0c077a8b7f9dce590c760a7b2f3c417dffa52d1 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 8 Jul 2011 20:53:36 +0200 Subject: PM: Improve error code of pm_notifier_call_chain() This enables pm_notifier_call_chain() to get the actual error code from the callback, rather than always assuming -EINVAL, by converting all PM notifier calls to return an error code encapsulated with notifier_from_errno(). Signed-off-by: Akinobu Mita Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 548708c..a7346ab 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb, return NOTIFY_OK; /* interrupted by signal */ - return NOTIFY_BAD; + return notifier_from_errno(err); case PM_POST_SUSPEND: /* diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 12ef912..11312f4 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -258,13 +258,13 @@ static int vmwdt_suspend(void) if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { pr_err("The system cannot be suspended while the watchdog" " is in use\n"); - return NOTIFY_BAD; + return notifier_from_errno(-EBUSY); } if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { clear_bit(VMWDT_OPEN, &vmwdt_is_open); pr_err("The system cannot be suspended while the watchdog" " is running\n"); - return NOTIFY_BAD; + return notifier_from_errno(-EBUSY); } return NOTIFY_DONE; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index c47b25f..92d7324 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, mutex_unlock(&css->mutex); continue; } - if (__chsc_do_secm(css, 0)) - ret = NOTIFY_BAD; + ret = __chsc_do_secm(css, 0); + ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } break; @@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, mutex_unlock(&css->mutex); continue; } - if (__chsc_do_secm(css, 1)) - ret = NOTIFY_BAD; + ret = __chsc_do_secm(css, 1); + ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } /* search for subchannels, which appeared during hibernation */ diff --git a/kernel/power/main.c b/kernel/power/main.c index 2981af4..6c601f8 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier); int pm_notifier_call_chain(unsigned long val) { - return (blocking_notifier_call_chain(&pm_chain_head, val, NULL) - == NOTIFY_BAD) ? -EINVAL : 0; + int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL); + + return notifier_to_errno(ret); } /* If set, devices may be suspended and resumed asynchronously. */ -- cgit v0.10.2
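
To illustrate the convention the last patch relies on, here is a minimal sketch of a PM notifier that reports a specific error code; the driver name, the busy flag and the init hook are hypothetical and are not taken from any of the patches above:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static bool example_device_busy;	/* assumed driver state */

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		/*
		 * Return the encapsulated error instead of NOTIFY_BAD so
		 * that pm_notifier_call_chain() can hand -EBUSY back to
		 * its caller via notifier_to_errno().
		 */
		if (example_device_busy)
			return notifier_from_errno(-EBUSY);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notifier,
};

static int __init example_pm_notifier_init(void)
{
	return register_pm_notifier(&example_pm_nb);
}
late_initcall(example_pm_notifier_init);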