author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 09:18:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 09:18:39 -0400
commit     7e0bb71e75020348bee523720a0c2f04cc72f540 (patch)
tree       1a22d65bbce34e8cc0f82c543c9486ffb58332f7 /drivers
parent     b9e2780d576a010d4aba1e69f247170bf3718d6b (diff)
parent     0ab1e79b825a5cd8aeb3b34d89c9a89dea900056 (diff)
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (63 commits)
  PM / Clocks: Remove redundant NULL checks before kfree()
  PM / Documentation: Update docs about suspend and CPU hotplug
  ACPI / PM: Add Sony VGN-FW21E to nonvs blacklist.
  ARM: mach-shmobile: sh7372 A4R support (v4)
  ARM: mach-shmobile: sh7372 A3SP support (v4)
  PM / Sleep: Mark devices involved in wakeup signaling during suspend
  PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image
  PM / Hibernate: Do not initialize static and extern variables to 0
  PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too
  PM / Hibernate: Add resumedelay kernel param in addition to resumewait
  MAINTAINERS: Update linux-pm list address
  PM / ACPI: Blacklist Vaio VGN-FW520F machine known to require acpi_sleep=nonvs
  PM / ACPI: Blacklist Sony Vaio known to require acpi_sleep=nonvs
  PM / Hibernate: Add resumewait param to support MMC-like devices as resume file
  PM / Hibernate: Fix typo in a kerneldoc comment
  PM / Hibernate: Freeze kernel threads after preallocating memory
  PM: Update the policy on default wakeup settings
  PM / VT: Cleanup #if defined uglyness and fix compile error
  PM / Suspend: Off by one in pm_suspend()
  PM / Hibernate: Include storage keys in hibernation image on s390
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig  2
-rw-r--r--  drivers/Makefile  2
-rw-r--r--  drivers/acpi/processor_idle.c  2
-rw-r--r--  drivers/acpi/sleep.c  24
-rw-r--r--  drivers/base/power/Makefile  4
-rw-r--r--  drivers/base/power/clock_ops.c  127
-rw-r--r--  drivers/base/power/common.c  86
-rw-r--r--  drivers/base/power/domain.c  352
-rw-r--r--  drivers/base/power/main.c  42
-rw-r--r--  drivers/base/power/opp.c  30
-rw-r--r--  drivers/base/power/power.h  10
-rw-r--r--  drivers/base/power/qos.c  419
-rw-r--r--  drivers/base/power/runtime.c  127
-rw-r--r--  drivers/base/power/wakeup.c  4
-rw-r--r--  drivers/bluetooth/btusb.c  2
-rw-r--r--  drivers/cpuidle/cpuidle.c  2
-rw-r--r--  drivers/cpuidle/governors/ladder.c  2
-rw-r--r--  drivers/cpuidle/governors/menu.c  2
-rw-r--r--  drivers/devfreq/Kconfig  75
-rw-r--r--  drivers/devfreq/Makefile  5
-rw-r--r--  drivers/devfreq/devfreq.c  601
-rw-r--r--  drivers/devfreq/governor.h  24
-rw-r--r--  drivers/devfreq/governor_performance.c  29
-rw-r--r--  drivers/devfreq/governor_powersave.c  29
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c  88
-rw-r--r--  drivers/devfreq/governor_userspace.c  116
-rw-r--r--  drivers/hid/hid-picolcd.c  2
-rw-r--r--  drivers/hid/usbhid/hid-core.c  7
-rw-r--r--  drivers/media/video/via-camera.c  4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  2
-rw-r--r--  drivers/net/usb/usbnet.c  2
-rw-r--r--  drivers/net/wimax/i2400m/usb.c  4
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c  4
-rw-r--r--  drivers/tty/Kconfig  4
-rw-r--r--  drivers/usb/class/cdc-acm.c  2
-rw-r--r--  drivers/usb/class/cdc-wdm.c  6
-rw-r--r--  drivers/usb/core/driver.c  9
-rw-r--r--  drivers/usb/core/hcd.c  9
-rw-r--r--  drivers/usb/core/hub.c  15
-rw-r--r--  drivers/usb/serial/sierra.c  2
-rw-r--r--  drivers/usb/serial/usb_wwan.c  2
41 files changed, 1969 insertions(+), 311 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e73aaaee013..6268167a1bb 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -132,4 +132,6 @@ source "drivers/iommu/Kconfig"
 
 source "drivers/virt/Kconfig"
 
+source "drivers/devfreq/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e7afb3acbc6..755eaf7a728 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -129,3 +129,5 @@ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
 
 # Virtualization drivers
 obj-$(CONFIG_VIRT_DRIVERS)	+= virt/
+
+obj-$(CONFIG_PM_DEVFREQ)	+= devfreq/
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1..2e69e09ff03 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -37,7 +37,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 #include <linux/irqflags.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3ed80b2ca90..0e46faef1d3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -390,6 +390,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	{
 	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-FW21E",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
 	.ident = "Sony Vaio VGN-SR11M",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -444,6 +452,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-SR26GN_P",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-FW520F",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a37..81676dd1790 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP) += opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b97294e2d95..5f0f85d5c57 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 
 #ifdef CONFIG_PM
 
-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
 	PCE_STATUS_NONE = 0,
 	PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
 
-static struct pm_clk_data *__to_pcd(struct device *dev)
-{
-	return dev ? dev->power.subsys_data : NULL;
-}
-
 /**
  * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
 
 	pm_clk_acquire(dev, ce);
 
-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
 	return 0;
 }
 
@@ -114,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 		clk_put(ce->clk);
 	}
 
-	if (ce->con_id)
-		kfree(ce->con_id);
-
+	kfree(ce->con_id);
 	kfree(ce);
 }
 
@@ -130,15 +118,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (!con_id && !ce->con_id)
 			goto remove;
 		else if (!con_id || !ce->con_id)
@@ -147,12 +135,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 			goto remove;
 	}
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 	return;
 
  remove:
 	list_del(&ce->node);
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
 	__pm_clk_remove(ce);
 }
@@ -161,23 +149,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
-
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
 
-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -185,29 +177,28 @@ int pm_clk_init(struct device *dev)
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce, *c;
 	struct list_head list;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	dev->power.subsys_data = NULL;
 	INIT_LIST_HEAD(&list);
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
 		list_move(&ce->node, &list);
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
-	kfree(pcd);
+	dev_pm_put_subsys_data(dev);
 
 	list_for_each_entry_safe_reverse(ce, c, &list, node) {
 		list_del(&ce->node);
@@ -225,25 +216,25 @@ void pm_clk_destroy(struct device *dev)
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -254,25 +245,25 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
 			ce->status = PCE_STATUS_ENABLED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -310,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb,
 		if (dev->pm_domain)
 			break;
 
-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
 		if (error)
 			break;
 
@@ -345,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb,
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
 		clk_disable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -371,22 +362,22 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
 		clk_enable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
 
 	return 0;
 }
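
The rework above routes the pm_clk_* helpers through the shared struct pm_subsys_data: pm_clk_create() now allocates it via dev_pm_get_subsys_data() and pm_clk_destroy() drops the reference. A minimal consumer sketch of that API, assuming a hypothetical platform driver and a "fck" connection id (neither appears in this diff):

#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Allocates (or refcounts) pm_subsys_data and inits its clock_list. */
	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* Look up and track the device's functional clock by con_id. */
	ret = pm_clk_add(&pdev->dev, "fck");
	if (ret)
		pm_clk_destroy(&pdev->dev);	/* drops the subsys_data reference */

	return ret;
}

With entries registered this way, pm_clk_suspend() and pm_clk_resume() disable and re-enable every tracked clock under psd->lock, as in the notifier-based path above.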
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 00000000000..29820c39618
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
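
A sketch of how a subsystem might use the two helpers introduced here; the attach/detach wrappers are assumptions for illustration, only dev_pm_get_subsys_data() and dev_pm_put_subsys_data() come from this file:

#include <linux/device.h>
#include <linux/pm.h>

static int example_attach(struct device *dev)
{
	int ret = dev_pm_get_subsys_data(dev);

	if (ret < 0)
		return ret;	/* -ENOMEM */

	/* ret == 1: subsys_data newly created; ret == 0: refcount bumped. */
	return 0;
}

static void example_detach(struct device *dev)
{
	/* Frees power.subsys_data once the last reference is dropped. */
	dev_pm_put_subsys_data(dev);
}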
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407..6790cf7eba5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-			genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
- start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
- out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
 
 	return ret;
 }
 
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
  out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 }
 
 /**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
-/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume		NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its parent.
+ * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" stages of system power transitions,
  * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev == dev) {
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev == dev) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-	if (!dle) {
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dle->dev = dev;
-	dle->need_restore = false;
-	list_add_tail(&dle->node, &genpd->dev_list);
 	genpd->device_count++;
 
-	spin_lock_irq(&dev->power.lock);
 	dev->pm_domain = &genpd->domain;
-	spin_unlock_irq(&dev->power.lock);
+	dev_pm_get_subsys_data(dev);
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+	gpd_data->base.dev = dev;
+	gpd_data->need_restore = false;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
 	genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
 	int ret = -EINVAL;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev != dev)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev != dev)
 			continue;
 
-		spin_lock_irq(&dev->power.lock);
+		list_del_init(&pdd->list_node);
+		pdd->dev = NULL;
+		dev_pm_put_subsys_data(dev);
 		dev->pm_domain = NULL;
-		spin_unlock_irq(&dev->power.lock);
+		kfree(to_gpd_data(pdd));
 
 		genpd->device_count--;
-		list_del(&dle->node);
-		kfree(dle);
 
 		ret = 0;
 		break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-			   struct generic_pm_domain *new_subdomain)
+			   struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (new_subdomain->status != GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&new_subdomain->lock);
+	if (subdomain->status != GPD_STATE_POWER_OFF
+	    && subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&subdomain->lock);
 		genpd_release_lock(genpd);
 		goto start;
 	}
 
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
+	    && subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain == new_subdomain) {
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
-	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-	new_subdomain->parent = genpd;
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
 	if (subdomain->status != GPD_STATE_POWER_OFF)
-		genpd->sd_count++;
+		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&new_subdomain->lock);
+	mutex_unlock(&subdomain->lock);
 	genpd_release_lock(genpd);
 
 	return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-			      struct generic_pm_domain *target)
+			      struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = -EINVAL;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain != target)
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
 			continue;
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			goto start;
 		}
 
-		list_del(&subdomain->sd_node);
-		subdomain->parent = NULL;
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd))
 		return;
 
-	INIT_LIST_HEAD(&genpd->sd_node);
-	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	INIT_LIST_HEAD(&genpd->sd_list);
 	mutex_init(&genpd->lock);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
-	genpd->sd_count = 0;
+	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
 	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->poweroff_task = NULL;
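
With the gpd_link rework above, a domain may have several masters instead of a single ->parent. A setup sketch under the new model; the domain objects, callbacks, and the attached device are assumptions for illustration, the pm_genpd_* calls are the API from this file:

#include <linux/pm_domain.h>

static int soc_pd_power_on(struct generic_pm_domain *genpd)
{
	/* Platform-specific power switch would go here. */
	return 0;
}

static int soc_pd_power_off(struct generic_pm_domain *genpd)
{
	return 0;
}

static struct generic_pm_domain soc_master = {
	.power_on = soc_pd_power_on,
	.power_off = soc_pd_power_off,
};

static struct generic_pm_domain soc_slave = {
	.power_on = soc_pd_power_on,
	.power_off = soc_pd_power_off,
};

static void soc_setup_domains(struct device *dev)
{
	pm_genpd_init(&soc_master, NULL, false);	/* starts active */
	pm_genpd_init(&soc_slave, NULL, true);		/* starts powered off */

	/* Creates the gpd_link making soc_master a master of soc_slave. */
	pm_genpd_add_subdomain(&soc_master, &soc_slave);

	/* Allocates generic_pm_domain_data and subsys_data for the device. */
	pm_genpd_add_device(&soc_slave, dev);
}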
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a85459126bc..59f8ab23548 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list);
 LIST_HEAD(dpm_suspended_list);
 LIST_HEAD(dpm_noirq_list);
 
+struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
@@ -65,6 +66,7 @@ void device_pm_init(struct device *dev)
 	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
 	INIT_LIST_HEAD(&dev->power.entry);
+	dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -96,6 +98,7 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
+	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
101 104
@@ -109,6 +112,7 @@ void device_pm_remove(struct device *dev)
 		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
+	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
@@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state)
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_resume_noirq(dev, state);
-		if (error)
+		if (error) {
+			suspend_stats.failed_resume_noirq++;
+			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+			dpm_save_failed_dev(dev_name(dev));
 			pm_dev_err(dev, state, " early", error);
+		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
@@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state)
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_resume(dev, state, false);
-		if (error)
+		if (error) {
+			suspend_stats.failed_resume++;
+			dpm_save_failed_step(SUSPEND_RESUME);
+			dpm_save_failed_dev(dev_name(dev));
 			pm_dev_err(dev, state, "", error);
+		}
 
 		mutex_lock(&dpm_list_mtx);
 	}
@@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state)
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
+			suspend_stats.failed_suspend_noirq++;
+			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
@@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	}
 
  End:
-	dev->power.is_suspended = !error;
+	if (!error) {
+		dev->power.is_suspended = true;
+		if (dev->power.wakeup_path && dev->parent)
+			dev->parent->power.wakeup_path = true;
+	}
 
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie)
 	int error;
 
 	error = __device_suspend(dev, pm_transition, true);
-	if (error)
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, pm_transition, " async", error);
+	}
 
 	put_device(dev);
 }
@@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state)
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, "", error);
+			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
@@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state)
 	async_synchronize_full();
 	if (!error)
 		error = async_error;
-	if (!error)
+	if (error) {
+		suspend_stats.failed_suspend++;
+		dpm_save_failed_step(SUSPEND_SUSPEND);
+	} else
 		dpm_show_time(starttime, state, NULL);
 	return error;
 }
@@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
+	dev->power.wakeup_path = device_may_wakeup(dev);
+
 	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "preparing power domain ");
 		if (dev->pm_domain->ops.prepare)
@@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state)
 	int error;
 
 	error = dpm_prepare(state);
-	if (!error)
+	if (error) {
+		suspend_stats.failed_prepare++;
+		dpm_save_failed_step(SUSPEND_PREPARE);
+	} else
 		error = dpm_suspend(state);
 	return error;
 }
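
The wakeup_path changes above record, at prepare time, whether each device may wake the system and propagate the flag up to parents as children suspend, so domain code can keep a wakeup path powered. On the driver side the flag follows the standard wakeup API; a sketch, with the probe function being an assumption:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct device *dev)
{
	/*
	 * Mark the device wakeup-capable and enable wakeup, so that
	 * device_prepare() sets dev->power.wakeup_path and
	 * __device_suspend() propagates it to dev->parent.
	 */
	device_init_wakeup(dev, true);
	return 0;
}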
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index b23de185cb0..434a6c01167 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -73,6 +73,7 @@ struct opp {
  * RCU usage: nodes are not modified in the list of device_opp,
  * however addition is possible and is secured by dev_opp_list_lock
  * @dev:	device pointer
+ * @head:	notifier head to notify the OPP availability changes.
  * @opp_list:	list of opps
  *
  * This is an internal data structure maintaining the link to opps attached to
@@ -83,6 +84,7 @@ struct device_opp {
 	struct list_head node;
 
 	struct device *dev;
+	struct srcu_notifier_head head;
 	struct list_head opp_list;
 };
 
88 90
@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	}
 
 	dev_opp->dev = dev;
+	srcu_init_notifier_head(&dev_opp->head);
 	INIT_LIST_HEAD(&dev_opp->opp_list);
 
 	/* Secure the device list modification */
409 /* Secure the device list modification */ 412 /* Secure the device list modification */
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
428 list_add_rcu(&new_opp->node, head); 431 list_add_rcu(&new_opp->node, head);
429 mutex_unlock(&dev_opp_list_lock); 432 mutex_unlock(&dev_opp_list_lock);
430 433
434 /*
 435 * Notify of changes in the availability of the operable
436 * frequency/voltage list.
437 */
438 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
431 return 0; 439 return 0;
432} 440}
433 441
@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
504 mutex_unlock(&dev_opp_list_lock); 512 mutex_unlock(&dev_opp_list_lock);
505 synchronize_rcu(); 513 synchronize_rcu();
506 514
 515 /* Notify of the change in OPP availability */
516 if (availability_req)
517 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
518 new_opp);
519 else
520 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
521 new_opp);
522
507 /* clean up old opp */ 523 /* clean up old opp */
508 new_opp = opp; 524 new_opp = opp;
509 goto out; 525 goto out;
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev,
643 *table = NULL; 659 *table = NULL;
644} 660}
645#endif /* CONFIG_CPU_FREQ */ 661#endif /* CONFIG_CPU_FREQ */
662
663/**
664 * opp_get_notifier() - find notifier_head of the device with opp
665 * @dev: device pointer used to lookup device OPPs.
666 */
667struct srcu_notifier_head *opp_get_notifier(struct device *dev)
668{
669 struct device_opp *dev_opp = find_device_opp(dev);
670
671 if (IS_ERR(dev_opp))
672 return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
673
674 return &dev_opp->head;
675}
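
For illustration, a driver interested in these events could subscribe roughly as follows; a minimal sketch with hypothetical names my_opp_notify/my_register, using the OPP_EVENT_* values and the srcu notifier head introduced above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/opp.h>

/* called with OPP_EVENT_ADD/ENABLE/DISABLE; data is the affected opp */
static int my_opp_notify(struct notifier_block *nb,
			 unsigned long event, void *data)
{
	/* re-evaluate the usable frequency list here */
	return NOTIFY_OK;
}

static struct notifier_block my_opp_nb = {
	.notifier_call = my_opp_notify,
};

static int my_register(struct device *dev)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &my_opp_nb);
}
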
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f2a25f18fde..9bf62323aaf 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,5 @@
1#include <linux/pm_qos.h>
2
1#ifdef CONFIG_PM_RUNTIME 3#ifdef CONFIG_PM_RUNTIME
2 4
3extern void pm_runtime_init(struct device *dev); 5extern void pm_runtime_init(struct device *dev);
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
35static inline void device_pm_init(struct device *dev) 37static inline void device_pm_init(struct device *dev)
36{ 38{
37 spin_lock_init(&dev->power.lock); 39 spin_lock_init(&dev->power.lock);
40 dev->power.power_state = PMSG_INVALID;
38 pm_runtime_init(dev); 41 pm_runtime_init(dev);
39} 42}
40 43
44static inline void device_pm_add(struct device *dev)
45{
46 dev_pm_qos_constraints_init(dev);
47}
48
41static inline void device_pm_remove(struct device *dev) 49static inline void device_pm_remove(struct device *dev)
42{ 50{
51 dev_pm_qos_constraints_destroy(dev);
43 pm_runtime_remove(dev); 52 pm_runtime_remove(dev);
44} 53}
45 54
46static inline void device_pm_add(struct device *dev) {}
47static inline void device_pm_move_before(struct device *deva, 55static inline void device_pm_move_before(struct device *deva,
48 struct device *devb) {} 56 struct device *devb) {}
49static inline void device_pm_move_after(struct device *deva, 57static inline void device_pm_move_after(struct device *deva,
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 00000000000..91e06141738
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,419 @@
1/*
2 * Devices PM QoS constraints management
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * This module exposes the interface to kernel space for specifying
12 * per-device PM QoS dependencies. It provides infrastructure for registration
13 * of:
14 *
 15 * Dependents on a QoS value: register requests
 16 * Watchers of QoS value: get notified when target QoS value changes
17 *
18 * This QoS design is best effort based. Dependents register their QoS needs.
19 * Watchers register to keep track of the current QoS needs of the system.
20 * Watchers can register different types of notification callbacks:
21 * . a per-device notification callback using the dev_pm_qos_*_notifier API.
22 * The notification chain data is stored in the per-device constraint
23 * data struct.
24 * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
25 * API. The notification chain data is stored in a static variable.
26 *
27 * Note about the per-device constraint data struct allocation:
 28 * . The per-device constraints data struct ptr is stored into the device
29 * dev_pm_info.
30 * . To minimize the data usage by the per-device constraints, the data struct
31 * is only allocated at the first call to dev_pm_qos_add_request.
32 * . The data is later free'd when the device is removed from the system.
33 * . A global mutex protects the constraints users from the data being
34 * allocated and free'd.
35 */
36
37#include <linux/pm_qos.h>
38#include <linux/spinlock.h>
39#include <linux/slab.h>
40#include <linux/device.h>
41#include <linux/mutex.h>
42
43
44static DEFINE_MUTEX(dev_pm_qos_mtx);
45
46static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
47
48/**
49 * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
50 * @dev: Device to get the PM QoS constraint value for.
51 */
52s32 dev_pm_qos_read_value(struct device *dev)
53{
54 struct pm_qos_constraints *c;
55 unsigned long flags;
56 s32 ret = 0;
57
58 spin_lock_irqsave(&dev->power.lock, flags);
59
60 c = dev->power.constraints;
61 if (c)
62 ret = pm_qos_read_value(c);
63
64 spin_unlock_irqrestore(&dev->power.lock, flags);
65
66 return ret;
67}
68
69/*
70 * apply_constraint
71 * @req: constraint request to apply
72 * @action: action to perform add/update/remove, of type enum pm_qos_req_action
73 * @value: defines the qos request
74 *
75 * Internal function to update the constraints list using the PM QoS core
76 * code and if needed call the per-device and the global notification
77 * callbacks
78 */
79static int apply_constraint(struct dev_pm_qos_request *req,
80 enum pm_qos_req_action action, int value)
81{
82 int ret, curr_value;
83
84 ret = pm_qos_update_target(req->dev->power.constraints,
85 &req->node, action, value);
86
87 if (ret) {
88 /* Call the global callbacks if needed */
89 curr_value = pm_qos_read_value(req->dev->power.constraints);
90 blocking_notifier_call_chain(&dev_pm_notifiers,
91 (unsigned long)curr_value,
92 req);
93 }
94
95 return ret;
96}
97
98/*
99 * dev_pm_qos_constraints_allocate
100 * @dev: device to allocate data for
101 *
 102 * Called at the first call to add_request, for constraint data allocation.
103 * Must be called with the dev_pm_qos_mtx mutex held
104 */
105static int dev_pm_qos_constraints_allocate(struct device *dev)
106{
107 struct pm_qos_constraints *c;
108 struct blocking_notifier_head *n;
109
110 c = kzalloc(sizeof(*c), GFP_KERNEL);
111 if (!c)
112 return -ENOMEM;
113
114 n = kzalloc(sizeof(*n), GFP_KERNEL);
115 if (!n) {
116 kfree(c);
117 return -ENOMEM;
118 }
119 BLOCKING_INIT_NOTIFIER_HEAD(n);
120
121 plist_head_init(&c->list);
122 c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
123 c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
124 c->type = PM_QOS_MIN;
125 c->notifiers = n;
126
127 spin_lock_irq(&dev->power.lock);
128 dev->power.constraints = c;
129 spin_unlock_irq(&dev->power.lock);
130
131 return 0;
132}
133
134/**
 135 * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
136 * @dev: target device
137 *
138 * Called from the device PM subsystem during device insertion under
139 * device_pm_lock().
140 */
141void dev_pm_qos_constraints_init(struct device *dev)
142{
143 mutex_lock(&dev_pm_qos_mtx);
144 dev->power.constraints = NULL;
145 dev->power.power_state = PMSG_ON;
146 mutex_unlock(&dev_pm_qos_mtx);
147}
148
149/**
150 * dev_pm_qos_constraints_destroy
151 * @dev: target device
152 *
153 * Called from the device PM subsystem on device removal under device_pm_lock().
154 */
155void dev_pm_qos_constraints_destroy(struct device *dev)
156{
157 struct dev_pm_qos_request *req, *tmp;
158 struct pm_qos_constraints *c;
159
160 mutex_lock(&dev_pm_qos_mtx);
161
162 dev->power.power_state = PMSG_INVALID;
163 c = dev->power.constraints;
164 if (!c)
165 goto out;
166
167 /* Flush the constraints list for the device */
168 plist_for_each_entry_safe(req, tmp, &c->list, node) {
169 /*
170 * Update constraints list and call the notification
171 * callbacks if needed
172 */
173 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
174 memset(req, 0, sizeof(*req));
175 }
176
177 spin_lock_irq(&dev->power.lock);
178 dev->power.constraints = NULL;
179 spin_unlock_irq(&dev->power.lock);
180
181 kfree(c->notifiers);
182 kfree(c);
183
184 out:
185 mutex_unlock(&dev_pm_qos_mtx);
186}
187
188/**
189 * dev_pm_qos_add_request - inserts new qos request into the list
190 * @dev: target device for the constraint
191 * @req: pointer to a preallocated handle
192 * @value: defines the qos request
193 *
194 * This function inserts a new entry in the device constraints list of
195 * requested qos performance characteristics. It recomputes the aggregate
196 * QoS expectations of parameters and initializes the dev_pm_qos_request
197 * handle. Caller needs to save this handle for later use in updates and
198 * removal.
199 *
200 * Returns 1 if the aggregated constraint value has changed,
201 * 0 if the aggregated constraint value has not changed,
202 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 203 * to allocate the data structures, -ENODEV if the device has just been removed
204 * from the system.
205 */
206int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
207 s32 value)
208{
209 int ret = 0;
210
 211	if (!dev || !req) /* guard against callers passing in NULL */
212 return -EINVAL;
213
214 if (dev_pm_qos_request_active(req)) {
215 WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
216 "added request\n");
217 return -EINVAL;
218 }
219
220 req->dev = dev;
221
222 mutex_lock(&dev_pm_qos_mtx);
223
224 if (!dev->power.constraints) {
225 if (dev->power.power_state.event == PM_EVENT_INVALID) {
226 /* The device has been removed from the system. */
227 req->dev = NULL;
228 ret = -ENODEV;
229 goto out;
230 } else {
231 /*
232 * Allocate the constraints data on the first call to
233 * add_request, i.e. only if the data is not already
234 * allocated and if the device has not been removed.
235 */
236 ret = dev_pm_qos_constraints_allocate(dev);
237 }
238 }
239
240 if (!ret)
241 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
242
243 out:
244 mutex_unlock(&dev_pm_qos_mtx);
245
246 return ret;
247}
248EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
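
As a usage sketch (hypothetical driver code; the latency values 500 and 1000 are illustrative), the preallocated handle must stay alive until the request is removed:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_req;	/* must outlive the request */

static int my_constrain(struct device *dev)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, &my_req, 500);
	if (ret < 0)
		return ret;	/* -EINVAL, -ENOMEM or -ENODEV */

	/* ... later, relax the constraint ... */
	dev_pm_qos_update_request(&my_req, 1000);

	/* ... and drop it when no longer needed */
	dev_pm_qos_remove_request(&my_req);
	return 0;
}
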
249
250/**
251 * dev_pm_qos_update_request - modifies an existing qos request
252 * @req : handle to list element holding a dev_pm_qos request to use
253 * @new_value: defines the qos request
254 *
255 * Updates an existing dev PM qos request along with updating the
256 * target value.
257 *
258 * Attempts are made to make this code callable on hot code paths.
259 *
260 * Returns 1 if the aggregated constraint value has changed,
261 * 0 if the aggregated constraint value has not changed,
262 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
263 * removed from the system
264 */
265int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
266 s32 new_value)
267{
268 int ret = 0;
269
 270	if (!req) /* guard against callers passing in NULL */
271 return -EINVAL;
272
273 if (!dev_pm_qos_request_active(req)) {
274 WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
275 "unknown object\n");
276 return -EINVAL;
277 }
278
279 mutex_lock(&dev_pm_qos_mtx);
280
281 if (req->dev->power.constraints) {
282 if (new_value != req->node.prio)
283 ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
284 new_value);
285 } else {
286 /* Return if the device has been removed */
287 ret = -ENODEV;
288 }
289
290 mutex_unlock(&dev_pm_qos_mtx);
291 return ret;
292}
293EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
294
295/**
 296 * dev_pm_qos_remove_request - removes an existing qos request
297 * @req: handle to request list element
298 *
299 * Will remove pm qos request from the list of constraints and
300 * recompute the current target value. Call this on slow code paths.
301 *
302 * Returns 1 if the aggregated constraint value has changed,
303 * 0 if the aggregated constraint value has not changed,
304 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
305 * removed from the system
306 */
307int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
308{
309 int ret = 0;
310
 311	if (!req) /* guard against callers passing in NULL */
312 return -EINVAL;
313
314 if (!dev_pm_qos_request_active(req)) {
315 WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
316 "unknown object\n");
317 return -EINVAL;
318 }
319
320 mutex_lock(&dev_pm_qos_mtx);
321
322 if (req->dev->power.constraints) {
323 ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
324 PM_QOS_DEFAULT_VALUE);
325 memset(req, 0, sizeof(*req));
326 } else {
327 /* Return if the device has been removed */
328 ret = -ENODEV;
329 }
330
331 mutex_unlock(&dev_pm_qos_mtx);
332 return ret;
333}
334EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
335
336/**
337 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
338 * of per-device PM QoS constraints
339 *
340 * @dev: target device for the constraint
341 * @notifier: notifier block managed by caller.
342 *
343 * Will register the notifier into a notification chain that gets called
344 * upon changes to the target value for the device.
345 */
346int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
347{
348 int retval = 0;
349
350 mutex_lock(&dev_pm_qos_mtx);
351
352 /* Silently return if the constraints object is not present. */
353 if (dev->power.constraints)
354 retval = blocking_notifier_chain_register(
355 dev->power.constraints->notifiers,
356 notifier);
357
358 mutex_unlock(&dev_pm_qos_mtx);
359 return retval;
360}
361EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
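
A per-device watcher might look like the sketch below (illustrative names; given the silent-return behavior above, registration only takes effect once the device's constraints object exists, i.e. after the first dev_pm_qos_add_request() on the device). The value passed to the callback is assumed to be the new aggregate target:

#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int my_qos_notify(struct notifier_block *nb,
			 unsigned long value, void *data)
{
	/* react to the new aggregate constraint in "value" */
	return NOTIFY_OK;
}

static struct notifier_block my_qos_nb = {
	.notifier_call = my_qos_notify,
};

/* after the first request has been added on the device: */
/*	dev_pm_qos_add_notifier(dev, &my_qos_nb);	*/
/* and on teardown: */
/*	dev_pm_qos_remove_notifier(dev, &my_qos_nb);	*/
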
362
363/**
364 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
365 * of per-device PM QoS constraints
366 *
367 * @dev: target device for the constraint
368 * @notifier: notifier block to be removed.
369 *
370 * Will remove the notifier from the notification chain that gets called
371 * upon changes to the target value.
372 */
373int dev_pm_qos_remove_notifier(struct device *dev,
374 struct notifier_block *notifier)
375{
376 int retval = 0;
377
378 mutex_lock(&dev_pm_qos_mtx);
379
380 /* Silently return if the constraints object is not present. */
381 if (dev->power.constraints)
382 retval = blocking_notifier_chain_unregister(
383 dev->power.constraints->notifiers,
384 notifier);
385
386 mutex_unlock(&dev_pm_qos_mtx);
387 return retval;
388}
389EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
390
391/**
392 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
393 * target value of the PM QoS constraints for any device
394 *
395 * @notifier: notifier block managed by caller.
396 *
397 * Will register the notifier into a notification chain that gets called
398 * upon changes to the target value for any device.
399 */
400int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
401{
402 return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
403}
404EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
405
406/**
407 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
408 * target value of PM QoS constraints for any device
409 *
410 * @notifier: notifier block to be removed.
411 *
412 * Will remove the notifier from the notification chain that gets called
413 * upon changes to the target value for any device.
414 */
415int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
416{
417 return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
418}
419EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b807..6bb3aafa85e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/pm_runtime.h> 11#include <linux/pm_runtime.h>
12#include <trace/events/rpm.h>
12#include "power.h" 13#include "power.h"
13 14
14static int rpm_resume(struct device *dev, int rpmflags); 15static int rpm_resume(struct device *dev, int rpmflags);
@@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
155} 156}
156 157
157/** 158/**
159 * __rpm_callback - Run a given runtime PM callback for a given device.
160 * @cb: Runtime PM callback to run.
161 * @dev: Device to run the callback for.
162 */
163static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
164 __releases(&dev->power.lock) __acquires(&dev->power.lock)
165{
166 int retval;
167
168 if (dev->power.irq_safe)
169 spin_unlock(&dev->power.lock);
170 else
171 spin_unlock_irq(&dev->power.lock);
172
173 retval = cb(dev);
174
175 if (dev->power.irq_safe)
176 spin_lock(&dev->power.lock);
177 else
178 spin_lock_irq(&dev->power.lock);
179
180 return retval;
181}
182
183/**
158 * rpm_idle - Notify device bus type if the device can be suspended. 184 * rpm_idle - Notify device bus type if the device can be suspended.
159 * @dev: Device to notify the bus type about. 185 * @dev: Device to notify the bus type about.
160 * @rpmflags: Flag bits. 186 * @rpmflags: Flag bits.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
171 int (*callback)(struct device *); 197 int (*callback)(struct device *);
172 int retval; 198 int retval;
173 199
200 trace_rpm_idle(dev, rpmflags);
174 retval = rpm_check_suspend_allowed(dev); 201 retval = rpm_check_suspend_allowed(dev);
175 if (retval < 0) 202 if (retval < 0)
176 ; /* Conditions are wrong. */ 203 ; /* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
225 else 252 else
226 callback = NULL; 253 callback = NULL;
227 254
228 if (callback) { 255 if (callback)
229 if (dev->power.irq_safe) 256 __rpm_callback(callback, dev);
230 spin_unlock(&dev->power.lock);
231 else
232 spin_unlock_irq(&dev->power.lock);
233
234 callback(dev);
235
236 if (dev->power.irq_safe)
237 spin_lock(&dev->power.lock);
238 else
239 spin_lock_irq(&dev->power.lock);
240 }
241 257
242 dev->power.idle_notification = false; 258 dev->power.idle_notification = false;
243 wake_up_all(&dev->power.wait_queue); 259 wake_up_all(&dev->power.wait_queue);
244 260
245 out: 261 out:
262 trace_rpm_return_int(dev, _THIS_IP_, retval);
246 return retval; 263 return retval;
247} 264}
248 265
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
252 * @dev: Device to run the callback for. 269 * @dev: Device to run the callback for.
253 */ 270 */
254static int rpm_callback(int (*cb)(struct device *), struct device *dev) 271static int rpm_callback(int (*cb)(struct device *), struct device *dev)
255 __releases(&dev->power.lock) __acquires(&dev->power.lock)
256{ 272{
257 int retval; 273 int retval;
258 274
259 if (!cb) 275 if (!cb)
260 return -ENOSYS; 276 return -ENOSYS;
261 277
262 if (dev->power.irq_safe) { 278 retval = __rpm_callback(cb, dev);
263 retval = cb(dev);
264 } else {
265 spin_unlock_irq(&dev->power.lock);
266
267 retval = cb(dev);
268 279
269 spin_lock_irq(&dev->power.lock);
270 }
271 dev->power.runtime_error = retval; 280 dev->power.runtime_error = retval;
272 return retval != -EACCES ? retval : -EIO; 281 return retval != -EACCES ? retval : -EIO;
273} 282}
@@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
277 * @dev: Device to suspend. 286 * @dev: Device to suspend.
278 * @rpmflags: Flag bits. 287 * @rpmflags: Flag bits.
279 * 288 *
280 * Check if the device's runtime PM status allows it to be suspended. If 289 * Check if the device's runtime PM status allows it to be suspended.
281 * another suspend has been started earlier, either return immediately or wait 290 * Cancel a pending idle notification, autosuspend or suspend. If
282 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a 291 * another suspend has been started earlier, either return immediately
283 * pending idle notification. If the RPM_ASYNC flag is set then queue a 292 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
284 * suspend request; otherwise run the ->runtime_suspend() callback directly. 293 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
285 * If a deferred resume was requested while the callback was running then carry 294 * otherwise run the ->runtime_suspend() callback directly. When
286 * it out; otherwise send an idle notification for the device (if the suspend 295 * ->runtime_suspend succeeded, if a deferred resume was requested while
287 * failed) or for its parent (if the suspend succeeded). 296 * the callback was running then carry it out, otherwise send an idle
297 * notification for its parent (if the suspend succeeded and both
298 * ignore_children of parent->power and irq_safe of dev->power are not set).
288 * 299 *
289 * This function must be called under dev->power.lock with interrupts disabled. 300 * This function must be called under dev->power.lock with interrupts disabled.
290 */ 301 */
@@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
295 struct device *parent = NULL; 306 struct device *parent = NULL;
296 int retval; 307 int retval;
297 308
298 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 309 trace_rpm_suspend(dev, rpmflags);
299 310
300 repeat: 311 repeat:
301 retval = rpm_check_suspend_allowed(dev); 312 retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
347 goto out; 358 goto out;
348 } 359 }
349 360
361 if (dev->power.irq_safe) {
362 spin_unlock(&dev->power.lock);
363
364 cpu_relax();
365
366 spin_lock(&dev->power.lock);
367 goto repeat;
368 }
369
350 /* Wait for the other suspend running in parallel with us. */ 370 /* Wait for the other suspend running in parallel with us. */
351 for (;;) { 371 for (;;) {
352 prepare_to_wait(&dev->power.wait_queue, &wait, 372 prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -400,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags)
400 dev->power.runtime_error = 0; 420 dev->power.runtime_error = 0;
401 else 421 else
402 pm_runtime_cancel_pending(dev); 422 pm_runtime_cancel_pending(dev);
403 } else { 423 wake_up_all(&dev->power.wait_queue);
424 goto out;
425 }
404 no_callback: 426 no_callback:
405 __update_runtime_status(dev, RPM_SUSPENDED); 427 __update_runtime_status(dev, RPM_SUSPENDED);
406 pm_runtime_deactivate_timer(dev); 428 pm_runtime_deactivate_timer(dev);
407 429
408 if (dev->parent) { 430 if (dev->parent) {
409 parent = dev->parent; 431 parent = dev->parent;
410 atomic_add_unless(&parent->power.child_count, -1, 0); 432 atomic_add_unless(&parent->power.child_count, -1, 0);
411 }
412 } 433 }
413 wake_up_all(&dev->power.wait_queue); 434 wake_up_all(&dev->power.wait_queue);
414 435
@@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
430 } 451 }
431 452
432 out: 453 out:
433 dev_dbg(dev, "%s returns %d\n", __func__, retval); 454 trace_rpm_return_int(dev, _THIS_IP_, retval);
434 455
435 return retval; 456 return retval;
436} 457}
@@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
459 struct device *parent = NULL; 480 struct device *parent = NULL;
460 int retval = 0; 481 int retval = 0;
461 482
462 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 483 trace_rpm_resume(dev, rpmflags);
463 484
464 repeat: 485 repeat:
465 if (dev->power.runtime_error) 486 if (dev->power.runtime_error)
@@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
496 goto out; 517 goto out;
497 } 518 }
498 519
520 if (dev->power.irq_safe) {
521 spin_unlock(&dev->power.lock);
522
523 cpu_relax();
524
525 spin_lock(&dev->power.lock);
526 goto repeat;
527 }
528
499 /* Wait for the operation carried out in parallel with us. */ 529 /* Wait for the operation carried out in parallel with us. */
500 for (;;) { 530 for (;;) {
501 prepare_to_wait(&dev->power.wait_queue, &wait, 531 prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
615 spin_lock_irq(&dev->power.lock); 645 spin_lock_irq(&dev->power.lock);
616 } 646 }
617 647
618 dev_dbg(dev, "%s returns %d\n", __func__, retval); 648 trace_rpm_return_int(dev, _THIS_IP_, retval);
619 649
620 return retval; 650 return retval;
621} 651}
@@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
732 * return immediately if it is larger than zero. Then carry out an idle 762 * return immediately if it is larger than zero. Then carry out an idle
733 * notification, either synchronous or asynchronous. 763 * notification, either synchronous or asynchronous.
734 * 764 *
735 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 765 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
766 * or if pm_runtime_irq_safe() has been called.
736 */ 767 */
737int __pm_runtime_idle(struct device *dev, int rpmflags) 768int __pm_runtime_idle(struct device *dev, int rpmflags)
738{ 769{
739 unsigned long flags; 770 unsigned long flags;
740 int retval; 771 int retval;
741 772
773 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
774
742 if (rpmflags & RPM_GET_PUT) { 775 if (rpmflags & RPM_GET_PUT) {
743 if (!atomic_dec_and_test(&dev->power.usage_count)) 776 if (!atomic_dec_and_test(&dev->power.usage_count))
744 return 0; 777 return 0;
@@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
761 * return immediately if it is larger than zero. Then carry out a suspend, 794 * return immediately if it is larger than zero. Then carry out a suspend,
762 * either synchronous or asynchronous. 795 * either synchronous or asynchronous.
763 * 796 *
764 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 797 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
798 * or if pm_runtime_irq_safe() has been called.
765 */ 799 */
766int __pm_runtime_suspend(struct device *dev, int rpmflags) 800int __pm_runtime_suspend(struct device *dev, int rpmflags)
767{ 801{
768 unsigned long flags; 802 unsigned long flags;
769 int retval; 803 int retval;
770 804
805 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
806
771 if (rpmflags & RPM_GET_PUT) { 807 if (rpmflags & RPM_GET_PUT) {
772 if (!atomic_dec_and_test(&dev->power.usage_count)) 808 if (!atomic_dec_and_test(&dev->power.usage_count))
773 return 0; 809 return 0;
@@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
789 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then 825 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
790 * carry out a resume, either synchronous or asynchronous. 826 * carry out a resume, either synchronous or asynchronous.
791 * 827 *
792 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 828 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
829 * or if pm_runtime_irq_safe() has been called.
793 */ 830 */
794int __pm_runtime_resume(struct device *dev, int rpmflags) 831int __pm_runtime_resume(struct device *dev, int rpmflags)
795{ 832{
796 unsigned long flags; 833 unsigned long flags;
797 int retval; 834 int retval;
798 835
836 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
837
799 if (rpmflags & RPM_GET_PUT) 838 if (rpmflags & RPM_GET_PUT)
800 atomic_inc(&dev->power.usage_count); 839 atomic_inc(&dev->power.usage_count);
801 840
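
The net effect of the irq_safe handling above: once pm_runtime_irq_safe() has been called for a device, the synchronous helpers become legal in atomic context, at the cost of the cpu_relax() busy wait shown earlier. A hedged sketch (my_irq_handler is hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

/* in probe(): pm_runtime_irq_safe(dev); */

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct device *dev = dev_id;

	pm_runtime_get_sync(dev);	/* no might_sleep(): irq_safe is set */
	/* ... service the hardware ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
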
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 84f7c7d5a09..14ee07e9cc4 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
276 * 276 *
277 * By default, most devices should leave wakeup disabled. The exceptions are 277 * By default, most devices should leave wakeup disabled. The exceptions are
278 * devices that everyone expects to be wakeup sources: keyboards, power buttons, 278 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
279 * possibly network interfaces, etc. 279 * possibly network interfaces, etc. Also, devices that don't generate their
280 * own wakeup requests but merely forward requests from one bus to another
281 * (like PCI bridges) should have wakeup enabled by default.
280 */ 282 */
281int device_init_wakeup(struct device *dev, bool enable) 283int device_init_wakeup(struct device *dev, bool enable)
282{ 284{
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 675246a6f7e..f9b726091ad 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1118,7 +1118,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
1118 return 0; 1118 return 0;
1119 1119
1120 spin_lock_irq(&data->txlock); 1120 spin_lock_irq(&data->txlock);
1121 if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { 1121 if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
1122 set_bit(BTUSB_SUSPENDING, &data->flags); 1122 set_bit(BTUSB_SUSPENDING, &data->flags);
1123 spin_unlock_irq(&data->txlock); 1123 spin_unlock_irq(&data->txlock);
1124 } else { 1124 } else {
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d4c54237288..0df01411009 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -12,7 +12,7 @@
12#include <linux/mutex.h> 12#include <linux/mutex.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/notifier.h> 14#include <linux/notifier.h>
15#include <linux/pm_qos_params.h> 15#include <linux/pm_qos.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/cpuidle.h> 17#include <linux/cpuidle.h>
18#include <linux/ktime.h> 18#include <linux/ktime.h>
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 12c98900dcf..f62fde21e96 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -14,7 +14,7 @@
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/cpuidle.h> 16#include <linux/cpuidle.h>
17#include <linux/pm_qos_params.h> 17#include <linux/pm_qos.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/jiffies.h> 19#include <linux/jiffies.h>
20 20
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c47f3d09c1e..3600f1955e4 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/cpuidle.h> 14#include <linux/cpuidle.h>
15#include <linux/pm_qos_params.h> 15#include <linux/pm_qos.h>
16#include <linux/time.h> 16#include <linux/time.h>
17#include <linux/ktime.h> 17#include <linux/ktime.h>
18#include <linux/hrtimer.h> 18#include <linux/hrtimer.h>
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
new file mode 100644
index 00000000000..643b055ed3c
--- /dev/null
+++ b/drivers/devfreq/Kconfig
@@ -0,0 +1,75 @@
1config ARCH_HAS_DEVFREQ
2 bool
3 depends on ARCH_HAS_OPP
4 help
5 Denotes that the architecture supports DEVFREQ. If the architecture
6 supports multiple OPP entries per device and the frequency of the
7 devices with OPPs may be altered dynamically, the architecture
 8 can support DEVFREQ.
9
10menuconfig PM_DEVFREQ
11 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
12 depends on PM_OPP && ARCH_HAS_DEVFREQ
13 help
14 With OPP support, a device may have a list of frequencies and
15 voltages available. DEVFREQ, a generic DVFS framework can be
16 registered for a device with OPP support in order to let the
17 governor provided to DEVFREQ choose an operating frequency
18 based on the OPP's list and the policy given with DEVFREQ.
19
20 Each device may have its own governor and policy. DEVFREQ can
21 reevaluate the device state periodically and/or based on the
22 OPP list changes (each frequency/voltage pair in OPP may be
23 disabled or enabled).
24
25 Like some CPUs with CPUFREQ, a device may have multiple clocks.
26 However, because the clock frequencies of a single device are
27 determined by the single device's state, an instance of DEVFREQ
28 is attached to a single device and returns a "representative"
 29 clock frequency from the device's OPP table, which is likewise
 30 attached one-to-one to a device. The device registering DEVFREQ
 31 takes the responsibility to "interpret" the frequency listed in
 32 OPP and to set every one of its clocks accordingly with the
 33 "target" callback given to DEVFREQ.
34
35if PM_DEVFREQ
36
37comment "DEVFREQ Governors"
38
39config DEVFREQ_GOV_SIMPLE_ONDEMAND
40 bool "Simple Ondemand"
41 help
42 Chooses frequency based on the recent load on the device. Works
 43 similarly to the ONDEMAND governor of CPUFREQ. A device with
44 Simple-Ondemand should be able to provide busy/total counter
45 values that imply the usage rate. A device may provide tuned
46 values to the governor with data field at devfreq_add_device().
47
48config DEVFREQ_GOV_PERFORMANCE
49 bool "Performance"
50 help
 51 Sets the frequency to the maximum available frequency.
 52 This governor always returns UINT_MAX as frequency so that
 53 the DEVFREQ framework selects the highest frequency available
 54 at any time.
55
56config DEVFREQ_GOV_POWERSAVE
57 bool "Powersave"
58 help
 59 Sets the frequency to the minimum available frequency.
 60 This governor always returns 0 as frequency so that
 61 the DEVFREQ framework selects the lowest frequency available
 62 at any time.
63
64config DEVFREQ_GOV_USERSPACE
65 bool "Userspace"
66 help
 67 Sets the frequency to the value specified by the user.
 68 This governor returns the user configured frequency if there
 69 has been an input to the userspace/set_freq node of the devfreq
 70 device in sysfs. Otherwise, the governor does not change the
 71 frequency given at initialization.
72
73comment "DEVFREQ Drivers"
74
75endif # PM_DEVFREQ
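
To make the help text concrete, a driver would hook into the framework roughly as below; a minimal sketch against the interfaces added by this series, with the my_* names, the 200 MHz rate, the 100 ms period and the busy/total numbers all made up:

#include <linux/devfreq.h>

static int my_target(struct device *dev, unsigned long *freq)
{
	/* program every clock of the device from the single
	   "representative" frequency chosen by the governor */
	return 0;
}

static int my_get_dev_status(struct device *dev,
			     struct devfreq_dev_status *stat)
{
	stat->busy_time = 30;		/* read from device counters */
	stat->total_time = 100;
	stat->current_frequency = 200000000;
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq = 200000000,	/* 200 MHz */
	.polling_ms = 100,
	.target = my_target,
	.get_dev_status = my_get_dev_status,
};

/* in probe():
 *	struct devfreq *df = devfreq_add_device(dev, &my_profile,
 *					&devfreq_simple_ondemand, NULL);
 */
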
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
new file mode 100644
index 00000000000..4564a89e970
--- /dev/null
+++ b/drivers/devfreq/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
2obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
3obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
4obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
5obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
new file mode 100644
index 00000000000..5d15b812377
--- /dev/null
+++ b/drivers/devfreq/devfreq.c
@@ -0,0 +1,601 @@
1/*
2 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3 * for Non-CPU Devices.
4 *
5 * Copyright (C) 2011 Samsung Electronics
6 * MyungJoo Ham <myungjoo.ham@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/opp.h>
20#include <linux/devfreq.h>
21#include <linux/workqueue.h>
22#include <linux/platform_device.h>
23#include <linux/list.h>
24#include <linux/printk.h>
25#include <linux/hrtimer.h>
26#include "governor.h"
27
28struct class *devfreq_class;
29
30/*
31 * devfreq_work periodically monitors every registered device.
 32 * The polling interval is determined by the minimum polling period
 33 * among all polling devfreq devices, with a resolution (and minimum)
 34 * of one jiffy.
35 */
36static bool polling;
37static struct workqueue_struct *devfreq_wq;
38static struct delayed_work devfreq_work;
39
 40/* devfreq entry devfreq_monitor is using; its removal must wait */
41static struct devfreq *wait_remove_device;
42
43/* The list of all device-devfreq */
44static LIST_HEAD(devfreq_list);
45static DEFINE_MUTEX(devfreq_list_lock);
46
47/**
48 * find_device_devfreq() - find devfreq struct using device pointer
49 * @dev: device pointer used to lookup device devfreq.
50 *
51 * Search the list of device devfreqs and return the matched device's
52 * devfreq info. devfreq_list_lock should be held by the caller.
53 */
54static struct devfreq *find_device_devfreq(struct device *dev)
55{
56 struct devfreq *tmp_devfreq;
57
58 if (unlikely(IS_ERR_OR_NULL(dev))) {
59 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
60 return ERR_PTR(-EINVAL);
61 }
62 WARN(!mutex_is_locked(&devfreq_list_lock),
63 "devfreq_list_lock must be locked.");
64
65 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
66 if (tmp_devfreq->dev.parent == dev)
67 return tmp_devfreq;
68 }
69
70 return ERR_PTR(-ENODEV);
71}
72
73/**
74 * update_devfreq() - Reevaluate the device and configure frequency.
75 * @devfreq: the devfreq instance.
76 *
77 * Note: Lock devfreq->lock before calling update_devfreq
78 * This function is exported for governors.
79 */
80int update_devfreq(struct devfreq *devfreq)
81{
82 unsigned long freq;
83 int err = 0;
84
85 if (!mutex_is_locked(&devfreq->lock)) {
86 WARN(true, "devfreq->lock must be locked by the caller.\n");
87 return -EINVAL;
88 }
89
90 /* Reevaluate the proper frequency */
91 err = devfreq->governor->get_target_freq(devfreq, &freq);
92 if (err)
93 return err;
94
95 err = devfreq->profile->target(devfreq->dev.parent, &freq);
96 if (err)
97 return err;
98
99 devfreq->previous_freq = freq;
100 return err;
101}
102
103/**
104 * devfreq_notifier_call() - Notify that the device frequency requirements
 105 * have been changed outside of the devfreq framework.
 106 * @nb: the notifier_block (supposed to be devfreq->nb)
 107 * @type: not used
 108 * @devp: not used
109 *
110 * Called by a notifier that uses devfreq->nb.
111 */
112static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
113 void *devp)
114{
115 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
116 int ret;
117
118 mutex_lock(&devfreq->lock);
119 ret = update_devfreq(devfreq);
120 mutex_unlock(&devfreq->lock);
121
122 return ret;
123}
124
125/**
126 * _remove_devfreq() - Remove devfreq from the device.
127 * @devfreq: the devfreq struct
128 * @skip: skip calling device_unregister().
129 *
130 * Note that the caller should lock devfreq->lock before calling
131 * this. _remove_devfreq() will unlock it and free devfreq
132 * internally. devfreq_list_lock should be locked by the caller
 133 * as well (not released at return).
134 *
135 * Lock usage:
136 * devfreq->lock: locked before call.
137 * unlocked at return (and freed)
138 * devfreq_list_lock: locked before call.
139 * kept locked at return.
140 * if devfreq is centrally polled.
141 *
142 * Freed memory:
143 * devfreq
144 */
145static void _remove_devfreq(struct devfreq *devfreq, bool skip)
146{
147 if (!mutex_is_locked(&devfreq->lock)) {
148 WARN(true, "devfreq->lock must be locked by the caller.\n");
149 return;
150 }
151 if (!devfreq->governor->no_central_polling &&
152 !mutex_is_locked(&devfreq_list_lock)) {
153 WARN(true, "devfreq_list_lock must be locked by the caller.\n");
154 return;
155 }
156
157 if (devfreq->being_removed)
158 return;
159
160 devfreq->being_removed = true;
161
162 if (devfreq->profile->exit)
163 devfreq->profile->exit(devfreq->dev.parent);
164
165 if (devfreq->governor->exit)
166 devfreq->governor->exit(devfreq);
167
168 if (!skip && get_device(&devfreq->dev)) {
169 device_unregister(&devfreq->dev);
170 put_device(&devfreq->dev);
171 }
172
173 if (!devfreq->governor->no_central_polling)
174 list_del(&devfreq->node);
175
176 mutex_unlock(&devfreq->lock);
177 mutex_destroy(&devfreq->lock);
178
179 kfree(devfreq);
180}
181
182/**
183 * devfreq_dev_release() - Callback for struct device to release the device.
184 * @dev: the devfreq device
185 *
 186 * This calls _remove_devfreq() if it has not been called already.
187 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
188 * well as by others unregistering the device.
189 */
190static void devfreq_dev_release(struct device *dev)
191{
192 struct devfreq *devfreq = to_devfreq(dev);
193 bool central_polling = !devfreq->governor->no_central_polling;
194
195 /*
196 * If devfreq_dev_release() was called by device_unregister() of
197 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
198 * being_removed is already set. This also partially checks the case
199 * where devfreq_dev_release() is called from a thread other than
 200 * the one that called _remove_devfreq(); however, that case is
 201 * dealt with completely by the second being_removed check below.
 202 *
 203 * Because being_removed is never unset,
 204 * we do not need to worry about race conditions on
 205 * being_removed.
206 */
207 if (devfreq->being_removed)
208 return;
209
210 if (central_polling)
211 mutex_lock(&devfreq_list_lock);
212
213 mutex_lock(&devfreq->lock);
214
215 /*
216 * Check being_removed flag again for the case where
217 * devfreq_dev_release() was called in a thread other than the one
 218 * that possibly called _remove_devfreq().
219 */
220 if (devfreq->being_removed) {
221 mutex_unlock(&devfreq->lock);
222 goto out;
223 }
224
 225	/* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
226 _remove_devfreq(devfreq, true);
227
228out:
229 if (central_polling)
230 mutex_unlock(&devfreq_list_lock);
231}
232
233/**
234 * devfreq_monitor() - Periodically poll devfreq objects.
235 * @work: the work struct used to run devfreq_monitor periodically.
236 *
237 */
238static void devfreq_monitor(struct work_struct *work)
239{
240 static unsigned long last_polled_at;
241 struct devfreq *devfreq, *tmp;
242 int error;
243 unsigned long jiffies_passed;
244 unsigned long next_jiffies = ULONG_MAX, now = jiffies;
245 struct device *dev;
246
247 /* Initially last_polled_at = 0, polling every device at bootup */
248 jiffies_passed = now - last_polled_at;
249 last_polled_at = now;
250 if (jiffies_passed == 0)
251 jiffies_passed = 1;
252
253 mutex_lock(&devfreq_list_lock);
254 list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
255 mutex_lock(&devfreq->lock);
256 dev = devfreq->dev.parent;
257
258 /* Do not remove tmp for a while */
259 wait_remove_device = tmp;
260
261 if (devfreq->governor->no_central_polling ||
262 devfreq->next_polling == 0) {
263 mutex_unlock(&devfreq->lock);
264 continue;
265 }
266 mutex_unlock(&devfreq_list_lock);
267
268 /*
 269 * Reduce next_polling further if devfreq_wq took an extra
 270 * delay (i.e., the CPU has been idle).
271 */
272 if (devfreq->next_polling <= jiffies_passed) {
273 error = update_devfreq(devfreq);
274
275 /* Remove a devfreq with an error. */
276 if (error && error != -EAGAIN) {
277
278 dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
279 error, devfreq->governor->name);
280
281 /*
282 * Unlock devfreq before locking the list
283 * in order to avoid deadlock with
284 * find_device_devfreq or others
285 */
286 mutex_unlock(&devfreq->lock);
287 mutex_lock(&devfreq_list_lock);
288 /* Check if devfreq is already removed */
289 if (IS_ERR(find_device_devfreq(dev)))
290 continue;
291 mutex_lock(&devfreq->lock);
292 /* This unlocks devfreq->lock and free it */
293 _remove_devfreq(devfreq, false);
294 continue;
295 }
296 devfreq->next_polling = devfreq->polling_jiffies;
297 } else {
298 devfreq->next_polling -= jiffies_passed;
299 }
300
301 if (devfreq->next_polling)
302 next_jiffies = (next_jiffies > devfreq->next_polling) ?
303 devfreq->next_polling : next_jiffies;
304
305 mutex_unlock(&devfreq->lock);
306 mutex_lock(&devfreq_list_lock);
307 }
308 wait_remove_device = NULL;
309 mutex_unlock(&devfreq_list_lock);
310
311 if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
312 polling = true;
313 queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
314 } else {
315 polling = false;
316 }
317}
318
319/**
320 * devfreq_add_device() - Add devfreq feature to the device
321 * @dev: the device to add devfreq feature.
322 * @profile: device-specific profile to run devfreq.
323 * @governor: the policy to choose frequency.
324 * @data: private data for the governor. The devfreq framework does not
325 * touch this value.
326 */
327struct devfreq *devfreq_add_device(struct device *dev,
328 struct devfreq_dev_profile *profile,
329 const struct devfreq_governor *governor,
330 void *data)
331{
332 struct devfreq *devfreq;
333 int err = 0;
334
335 if (!dev || !profile || !governor) {
336 dev_err(dev, "%s: Invalid parameters.\n", __func__);
337 return ERR_PTR(-EINVAL);
338 }
339
340
341 if (!governor->no_central_polling) {
342 mutex_lock(&devfreq_list_lock);
343 devfreq = find_device_devfreq(dev);
344 mutex_unlock(&devfreq_list_lock);
345 if (!IS_ERR(devfreq)) {
346 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
347 err = -EINVAL;
348 goto out;
349 }
350 }
351
352 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
353 if (!devfreq) {
354 dev_err(dev, "%s: Unable to create devfreq for the device\n",
355 __func__);
356 err = -ENOMEM;
357 goto out;
358 }
359
360 mutex_init(&devfreq->lock);
361 mutex_lock(&devfreq->lock);
362 devfreq->dev.parent = dev;
363 devfreq->dev.class = devfreq_class;
364 devfreq->dev.release = devfreq_dev_release;
365 devfreq->profile = profile;
366 devfreq->governor = governor;
367 devfreq->previous_freq = profile->initial_freq;
368 devfreq->data = data;
369 devfreq->next_polling = devfreq->polling_jiffies
370 = msecs_to_jiffies(devfreq->profile->polling_ms);
371 devfreq->nb.notifier_call = devfreq_notifier_call;
372
 373	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
374 err = device_register(&devfreq->dev);
375 if (err) {
376 put_device(&devfreq->dev);
377 goto err_dev;
378 }
379
380 if (governor->init)
381 err = governor->init(devfreq);
382 if (err)
383 goto err_init;
384
385 mutex_unlock(&devfreq->lock);
386
387 if (governor->no_central_polling)
388 goto out;
389
390 mutex_lock(&devfreq_list_lock);
391
392 list_add(&devfreq->node, &devfreq_list);
393
394 if (devfreq_wq && devfreq->next_polling && !polling) {
395 polling = true;
396 queue_delayed_work(devfreq_wq, &devfreq_work,
397 devfreq->next_polling);
398 }
399 mutex_unlock(&devfreq_list_lock);
400 goto out;
401err_init:
402 device_unregister(&devfreq->dev);
403err_dev:
404 mutex_unlock(&devfreq->lock);
405 kfree(devfreq);
406out:
407 if (err)
408 return ERR_PTR(err);
409 else
410 return devfreq;
411}
412
413/**
414 * devfreq_remove_device() - Remove devfreq feature from a device.
415 * @devfreq the devfreq instance to be removed
416 */
417int devfreq_remove_device(struct devfreq *devfreq)
418{
419 if (!devfreq)
420 return -EINVAL;
421
422 if (!devfreq->governor->no_central_polling) {
423 mutex_lock(&devfreq_list_lock);
424 while (wait_remove_device == devfreq) {
425 mutex_unlock(&devfreq_list_lock);
426 schedule();
427 mutex_lock(&devfreq_list_lock);
428 }
429 }
430
431 mutex_lock(&devfreq->lock);
432 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
433
434 if (!devfreq->governor->no_central_polling)
435 mutex_unlock(&devfreq_list_lock);
436
437 return 0;
438}
439
440static ssize_t show_governor(struct device *dev,
441 struct device_attribute *attr, char *buf)
442{
443 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
444}
445
446static ssize_t show_freq(struct device *dev,
447 struct device_attribute *attr, char *buf)
448{
449 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
450}
451
452static ssize_t show_polling_interval(struct device *dev,
453 struct device_attribute *attr, char *buf)
454{
455 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
456}
457
458static ssize_t store_polling_interval(struct device *dev,
459 struct device_attribute *attr,
460 const char *buf, size_t count)
461{
462 struct devfreq *df = to_devfreq(dev);
463 unsigned int value;
464 int ret;
465
466 ret = sscanf(buf, "%u", &value);
467 if (ret != 1)
468 goto out;
469
470 mutex_lock(&df->lock);
471 df->profile->polling_ms = value;
472 df->next_polling = df->polling_jiffies
473 = msecs_to_jiffies(value);
474 mutex_unlock(&df->lock);
475
476 ret = count;
477
478 if (df->governor->no_central_polling)
479 goto out;
480
481 mutex_lock(&devfreq_list_lock);
482 if (df->next_polling > 0 && !polling) {
483 polling = true;
484 queue_delayed_work(devfreq_wq, &devfreq_work,
485 df->next_polling);
486 }
487 mutex_unlock(&devfreq_list_lock);
488out:
489 return ret;
490}
491
492static ssize_t show_central_polling(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 return sprintf(buf, "%d\n",
496 !to_devfreq(dev)->governor->no_central_polling);
497}
498
499static struct device_attribute devfreq_attrs[] = {
500 __ATTR(governor, S_IRUGO, show_governor, NULL),
501 __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
502 __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
503 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
504 store_polling_interval),
505 { },
506};
507
508/**
509 * devfreq_start_polling() - Initialize data structure for devfreq framework and
510 * start polling registered devfreq devices.
511 */
512static int __init devfreq_start_polling(void)
513{
514 mutex_lock(&devfreq_list_lock);
515 polling = false;
516 devfreq_wq = create_freezable_workqueue("devfreq_wq");
517 INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
518 mutex_unlock(&devfreq_list_lock);
519
520 devfreq_monitor(&devfreq_work.work);
521 return 0;
522}
523late_initcall(devfreq_start_polling);
524
525static int __init devfreq_init(void)
526{
527 devfreq_class = class_create(THIS_MODULE, "devfreq");
528 if (IS_ERR(devfreq_class)) {
529 pr_err("%s: couldn't create class\n", __FILE__);
530 return PTR_ERR(devfreq_class);
531 }
532 devfreq_class->dev_attrs = devfreq_attrs;
533 return 0;
534}
535subsys_initcall(devfreq_init);
536
537static void __exit devfreq_exit(void)
538{
539 class_destroy(devfreq_class);
540}
541module_exit(devfreq_exit);
542
543/*
 544 * The following are helper functions for devfreq user device drivers with
545 * OPP framework.
546 */
547
548/**
549 * devfreq_recommended_opp() - Helper function to get proper OPP for the
550 * freq value given to target callback.
 551 * @dev: the devfreq user device (parent of devfreq)
 552 * @freq: the frequency given to the target function
553 *
554 */
555struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
556{
557 struct opp *opp = opp_find_freq_ceil(dev, freq);
558
559 if (opp == ERR_PTR(-ENODEV))
560 opp = opp_find_freq_floor(dev, freq);
561 return opp;
562}
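
A target() callback would typically round the requested value through this helper before programming the hardware; a sketch assuming a hypothetical my_program_hw helper and the opp_get_freq()/opp_get_voltage() accessors from linux/opp.h (the OPP library's RCU locking rules are elided for brevity):

/* hypothetical helper that sets the device's clock and regulator */
static int my_program_hw(struct device *dev, unsigned long hz,
			 unsigned long uv);

static int my_opp_target(struct device *dev, unsigned long *freq)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* *freq now holds the frequency of the chosen OPP */
	return my_program_hw(dev, opp_get_freq(opp), opp_get_voltage(opp));
}
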
563
564/**
565 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 566 * of any changes in the OPP
 567 * availability
 568 * @dev: the devfreq user device (parent of devfreq)
 569 * @devfreq: the devfreq object
570 */
571int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
572{
573 struct srcu_notifier_head *nh = opp_get_notifier(dev);
574
575 if (IS_ERR(nh))
576 return PTR_ERR(nh);
577 return srcu_notifier_chain_register(nh, &devfreq->nb);
578}
579
580/**
581 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 582 * notified of changes in the OPP
 583 * availability.
 584 * @dev: the devfreq user device (parent of devfreq)
 585 * @devfreq: the devfreq object
 586 *
 587 * In the exit() callback of devfreq_dev_profile, this must be called if
 588 * devfreq_recommended_opp is used.
589 */
590int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
591{
592 struct srcu_notifier_head *nh = opp_get_notifier(dev);
593
594 if (IS_ERR(nh))
595 return PTR_ERR(nh);
596 return srcu_notifier_chain_unregister(nh, &devfreq->nb);
597}
598
599MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
600MODULE_DESCRIPTION("devfreq class support");
601MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
new file mode 100644
index 00000000000..ea7f13c58de
--- /dev/null
+++ b/drivers/devfreq/governor.h
@@ -0,0 +1,24 @@
1/*
2 * governor.h - internal header for devfreq governors.
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This header is for devfreq governors in drivers/devfreq/
12 */
13
14#ifndef _GOVERNOR_H
15#define _GOVERNOR_H
16
17#include <linux/devfreq.h>
18
19#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
20
21/* Caution: devfreq->lock must be locked before calling update_devfreq */
22extern int update_devfreq(struct devfreq *devfreq);
23
24#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
new file mode 100644
index 00000000000..c0596b29176
--- /dev/null
+++ b/drivers/devfreq/governor_performance.c
@@ -0,0 +1,29 @@
1/*
2 * linux/drivers/devfreq/governor_performance.c
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/devfreq.h>
13
14static int devfreq_performance_func(struct devfreq *df,
15 unsigned long *freq)
16{
17 /*
 18	 * the target callback should be able to get the floor value,
 19	 * as stated in devfreq.h
20 */
21 *freq = UINT_MAX;
22 return 0;
23}
24
25const struct devfreq_governor devfreq_performance = {
26 .name = "performance",
27 .get_target_freq = devfreq_performance_func,
28 .no_central_polling = true,
29};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
new file mode 100644
index 00000000000..2483a85a266
--- /dev/null
+++ b/drivers/devfreq/governor_powersave.c
@@ -0,0 +1,29 @@
1/*
2 * linux/drivers/devfreq/governor_powersave.c
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/devfreq.h>
13
14static int devfreq_powersave_func(struct devfreq *df,
15 unsigned long *freq)
16{
17 /*
 18	 * the target callback should be able to get the ceiling value,
 19	 * as stated in devfreq.h
20 */
21 *freq = 0;
22 return 0;
23}
24
25const struct devfreq_governor devfreq_powersave = {
26 .name = "powersave",
27 .get_target_freq = devfreq_powersave_func,
28 .no_central_polling = true,
29};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
new file mode 100644
index 00000000000..efad8dcf902
--- /dev/null
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -0,0 +1,88 @@
+/*
+ * linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD	(90)
+#define DFSO_DOWNDIFFERENTIAL	(5)
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct devfreq_dev_status stat;
+	int err = df->profile->get_dev_status(df->dev.parent, &stat);
+	unsigned long long a, b;
+	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL;
+	struct devfreq_simple_ondemand_data *data = df->data;
+
+	if (err)
+		return err;
+
+	if (data) {
+		if (data->upthreshold)
+			dfso_upthreshold = data->upthreshold;
+		if (data->downdifferential)
+			dfso_downdifferential = data->downdifferential;
+	}
+	if (dfso_upthreshold > 100 ||
+	    dfso_upthreshold < dfso_downdifferential)
+		return -EINVAL;
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat.total_time == 0) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Prevent overflow */
+	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
+		stat.busy_time >>= 7;
+		stat.total_time >>= 7;
+	}
+
+	/* Set MAX if it's busy enough */
+	if (stat.busy_time * 100 >
+	    stat.total_time * dfso_upthreshold) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Set MAX if we do not know the initial frequency */
+	if (stat.current_frequency == 0) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Keep the current frequency */
+	if (stat.busy_time * 100 >
+	    stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
+		*freq = stat.current_frequency;
+		return 0;
+	}
+
+	/* Set the desired frequency based on the load */
+	a = stat.busy_time;
+	a *= stat.current_frequency;
+	b = div_u64(a, stat.total_time);
+	b *= 100;
+	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
+	*freq = (unsigned long) b;
+
+	return 0;
+}
+
+const struct devfreq_governor devfreq_simple_ondemand = {
+	.name = "simple_ondemand",
+	.get_target_freq = devfreq_simple_ondemand_func,
+};
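
To make the final computation concrete, take hypothetical numbers (not from this patch): busy_time = 60, total_time = 100, current_frequency = 200000 kHz, with the default tunables (upthreshold 90, downdifferential 5). The load is 60%, which fails both the 90% "set MAX" test and the 85% "keep current" test, so the scaling branch runs:

	a = 60 * 200000                     = 12000000
	b = div_u64(12000000, 100)          = 120000
	b = 120000 * 100                    = 12000000
	b = div_u64(12000000, 90 - 5 / 2)   = 12000000 / 88 ~= 136363

Note the integer division: 5 / 2 yields 2, so the divisor is 88. The governor aims the device at a load of upthreshold - downdifferential / 2 (88% here), and the resulting request of roughly 136363 kHz is then rounded to an actual operating point by the driver's target callback.
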
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
new file mode 100644
index 00000000000..4f8b563da78
--- /dev/null
+++ b/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,116 @@
+/*
+ * linux/drivers/devfreq/governor_userspace.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include "governor.h"
+
+struct userspace_data {
+	unsigned long user_frequency;
+	bool valid;
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+{
+	struct userspace_data *data = df->data;
+
+	if (!data->valid)
+		*freq = df->previous_freq; /* No user freq specified yet */
+	else
+		*freq = data->user_frequency;
+	return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	unsigned long wanted;
+	int err = 0;
+
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	sscanf(buf, "%lu", &wanted);
+	data->user_frequency = wanted;
+	data->valid = true;
+	err = update_devfreq(devfreq);
+	if (err == 0)
+		err = count;
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	if (data->valid)
+		err = sprintf(buf, "%lu\n", data->user_frequency);
+	else
+		err = sprintf(buf, "undefined\n");
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static struct attribute *dev_entries[] = {
+	&dev_attr_set_freq.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_group = {
+	.name	= "userspace",
+	.attrs	= dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)
+{
+	int err = 0;
+	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+					      GFP_KERNEL);
+
+	if (!data) {
+		err = -ENOMEM;
+		goto out;
+	}
+	data->valid = false;
+	devfreq->data = data;
+
+	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+	return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)
+{
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	kfree(devfreq->data);
+	devfreq->data = NULL;
+}
+
+const struct devfreq_governor devfreq_userspace = {
+	.name = "userspace",
+	.get_target_freq = devfreq_userspace_func,
+	.init = userspace_init,
+	.exit = userspace_exit,
+	.no_central_polling = true,
+};
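
Wiring this governor up takes two steps. In the kernel, a driver passes devfreq_userspace when registering with devfreq_add_device(); in the sketch below, foo_profile is a hypothetical struct devfreq_dev_profile and error handling is abbreviated:

	struct devfreq *df;

	df = devfreq_add_device(dev, &foo_profile, &devfreq_userspace, NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

From user space, assuming the usual devfreq class device node, a write such as "echo 266000 > /sys/class/devfreq/<dev>/userspace/set_freq" then takes effect immediately through update_devfreq(); until the first write, devfreq_userspace_func() simply reports previous_freq.
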
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 9d8710f8bc7..1782693819f 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev,
 #ifdef CONFIG_PM
 static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
 {
-	if (message.event & PM_EVENT_AUTO)
+	if (PMSG_IS_AUTO(message))
 		return 0;
 
 	picolcd_suspend_backlight(hid_get_drvdata(hdev));
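
This hunk and the similar ones below are mechanical conversions: each open-coded "message.event & PM_EVENT_AUTO" test becomes the PMSG_IS_AUTO() helper that this series introduces in include/linux/pm.h. The behavior is unchanged; the test is merely centralized. The helper should look roughly like:

	#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
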
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 77e705c2209..b403fcef0b8 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1332,7 +1332,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
 		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
 		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1367,7 +1367,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 		return -EIO;
 	}
 
-	if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
+	if (!ignoreled && PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&usbhid->lock);
 		if (test_bit(HID_LED_ON, &usbhid->iofl)) {
 			spin_unlock_irq(&usbhid->lock);
@@ -1380,8 +1380,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	hid_cancel_delayed_stuff(usbhid);
 	hid_cease_io(usbhid);
 
-	if ((message.event & PM_EVENT_AUTO) &&
-			test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+	if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
 		/* lost race against keypresses */
 		status = hid_start_in(hid);
 		if (status < 0)
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index bb7f17f2a33..cbf13d09b4a 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -21,7 +21,7 @@
 #include <media/videobuf-dma-sg.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/via-core.h>
 #include <linux/via-gpio.h>
 #include <linux/via_i2c.h>
@@ -69,7 +69,7 @@ struct via_camera {
 	struct mutex lock;
 	enum viacam_opstate opstate;
 	unsigned long flags;
-	struct pm_qos_request_list qos_request;
+	struct pm_qos_request qos_request;
 	/*
 	 * GPIO info for power/reset management
 	 */
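
The via-camera hunks are the first of several fallout fixes from the PM QoS rework elsewhere in this merge: the header moves from <linux/pm_qos_params.h> to <linux/pm_qos.h>, and struct pm_qos_request_list is renamed struct pm_qos_request. The call sequence around such a request is unchanged; as a sketch (the 50 usec latency bound is made up for illustration):

	struct pm_qos_request qos_request;	/* was: struct pm_qos_request_list */

	pm_qos_add_request(&qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
	/* ... latency-sensitive capture runs here ... */
	pm_qos_update_request(&qos_request, PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&qos_request);
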
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 680312710a7..a855db1ad24 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -47,7 +47,7 @@
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index cdb958875ba..7d6082160bc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1476,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
 	if (!dev->suspend_count++) {
 		spin_lock_irq(&dev->txq.lock);
 		/* don't autosuspend while transmitting */
-		if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
 			spin_unlock_irq(&dev->txq.lock);
 			return -EBUSY;
 		} else {
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 298f2b0b631..9a644d052f1 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
  *
  * As well, the device might refuse going to sleep for whichever
  * reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We check PM_EVENT_AUTO to see if the
+ * we *can't* fail. We check PMSG_IS_AUTO to see if the
  * suspend call comes from the USB stack or from the system and act
  * in consequence.
  *
@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
 	struct i2400m *i2400m = &i2400mu->i2400m;
 
 #ifdef CONFIG_PM
-	if (pm_msg.event & PM_EVENT_AUTO)
+	if (PMSG_IS_AUTO(pm_msg))
 		is_autosuspend = 1;
 #endif
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index ef9ad79d1bf..127e9c63bea 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -161,7 +161,7 @@ that only one external action is invoked at a time.
 #include <linux/firmware.h>
 #include <linux/acpi.h>
 #include <linux/ctype.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 
 #include <net/lib80211.h>
 
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
 
-static struct pm_qos_request_list ipw2100_pm_qos_req;
+static struct pm_qos_request ipw2100_pm_qos_req;
 
 /* Debugging stuff */
 #ifdef CONFIG_IPW2100_DEBUG
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index f462fa5f937..33175504bb3 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -60,6 +60,10 @@ config VT_CONSOLE
 
 	  If unsure, say Y.
 
+config VT_CONSOLE_SLEEP
+	def_bool y
+	depends on VT_CONSOLE && PM_SLEEP
+
 config HW_CONSOLE
 	bool
 	depends on VT && !UML
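
VT_CONSOLE_SLEEP is a derived symbol: def_bool with no prompt means it is never set by hand, it simply evaluates VT_CONSOLE && PM_SLEEP. That lets the PM core test one config option instead of an #if defined(CONFIG_VT) && defined(...) chain, per the VT console cleanup elsewhere in this series. Consumers would then follow the usual stub pattern, sketched here with illustrative signatures (the real declarations live in include/linux/suspend.h):

	#ifdef CONFIG_VT_CONSOLE_SLEEP
	extern int pm_prepare_console(void);
	extern void pm_restore_console(void);
	#else
	static inline int pm_prepare_console(void) { return 0; }
	static inline void pm_restore_console(void) {}
	#endif
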
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3ec6699ab72..6960715c506 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1305,7 +1305,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 	struct acm *acm = usb_get_intfdata(intf);
 	int cnt;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		int b;
 
 		spin_lock_irq(&acm->write_lock);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 1d26a7135dd..efe684908c1 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -798,11 +798,11 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 	dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
 
 	/* if this is an autosuspend the caller does the locking */
-	if (!(message.event & PM_EVENT_AUTO))
+	if (!PMSG_IS_AUTO(message))
 		mutex_lock(&desc->lock);
 	spin_lock_irq(&desc->iuspin);
 
-	if ((message.event & PM_EVENT_AUTO) &&
+	if (PMSG_IS_AUTO(message) &&
 	    (test_bit(WDM_IN_USE, &desc->flags)
 	     || test_bit(WDM_RESPONDING, &desc->flags))) {
 		spin_unlock_irq(&desc->iuspin);
@@ -815,7 +815,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 		kill_urbs(desc);
 		cancel_work_sync(&desc->rxwork);
 	}
-	if (!(message.event & PM_EVENT_AUTO))
+	if (!PMSG_IS_AUTO(message))
 		mutex_unlock(&desc->lock);
 
 	return rv;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index adf5ca8a239..3b029a0a478 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
 	/* Non-root devices on a full/low-speed bus must wait for their
 	 * companion high-speed root hub, in case a handoff is needed.
 	 */
-	if (!(msg.event & PM_EVENT_AUTO) && udev->parent &&
-			udev->bus->hs_companion)
+	if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion)
 		device_pm_wait_for_dev(&udev->dev,
 			&udev->bus->hs_companion->root_hub->dev);
 
@@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev,
 
 	if (driver->suspend) {
 		status = driver->suspend(intf, msg);
-		if (status && !(msg.event & PM_EVENT_AUTO))
+		if (status && !PMSG_IS_AUTO(msg))
 			dev_err(&intf->dev, "%s error %d\n",
 				"suspend", status);
 	} else {
@@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 			status = usb_suspend_interface(udev, intf, msg);
 
 			/* Ignore errors during system sleep transitions */
-			if (!(msg.event & PM_EVENT_AUTO))
+			if (!PMSG_IS_AUTO(msg))
 				status = 0;
 			if (status != 0)
 				break;
@@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 		status = usb_suspend_device(udev, msg);
 
 		/* Again, ignore errors during system sleep transitions */
-		if (!(msg.event & PM_EVENT_AUTO))
+		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 	}
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index b3b7d062906..13222d352a6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1975,8 +1975,9 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 	int status;
 	int old_state = hcd->state;
 
-	dev_dbg(&rhdev->dev, "bus %s%s\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
+	dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n",
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""),
+			rhdev->do_remote_wakeup);
 	if (HCD_DEAD(hcd)) {
 		dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
 		return 0;
@@ -2011,8 +2012,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 	int status;
 	int old_state = hcd->state;
 
-	dev_dbg(&rhdev->dev, "usb %s%s\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
+	dev_dbg(&rhdev->dev, "usb %sresume\n",
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""));
 	if (HCD_DEAD(hcd)) {
 		dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
 		return 0;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d6cc8324934..96f05b29c9a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2369,8 +2369,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	int port1 = udev->portnum;
 	int status;
 
-	// dev_dbg(hub->intfdev, "suspend port %d\n", port1);
-
 	/* enable remote wakeup when appropriate; this lets the device
 	 * wake up the upstream hub (including maybe the root hub).
 	 *
@@ -2387,7 +2385,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 			dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
 					status);
 			/* bail if autosuspend is requested */
-			if (msg.event & PM_EVENT_AUTO)
+			if (PMSG_IS_AUTO(msg))
 				return status;
 		}
 	}
@@ -2416,12 +2414,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 				USB_CTRL_SET_TIMEOUT);
 
 		/* System sleep transitions should never fail */
-		if (!(msg.event & PM_EVENT_AUTO))
+		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 	} else {
 		/* device has up to 10 msec to fully suspend */
-		dev_dbg(&udev->dev, "usb %ssuspend\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+		dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""),
+			udev->do_remote_wakeup);
 		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 		msleep(10);
 	}
@@ -2572,7 +2571,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 	} else {
 		/* drive resume for at least 20 msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""));
 		msleep(25);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
@@ -2679,7 +2678,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
 		udev = hdev->children [port1-1];
 		if (udev && udev->can_submit) {
 			dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
-			if (msg.event & PM_EVENT_AUTO)
+			if (PMSG_IS_AUTO(msg))
 				return -EBUSY;
 		}
 	}
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index d5d136a53b6..b18179bda0d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
 	struct sierra_intf_private *intfdata;
 	int b;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		intfdata = serial->private;
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index e4fad5e643d..d555ca9567b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
 
 	dbg("%s entered", __func__);
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
 		spin_unlock_irq(&intfdata->susp_lock);