author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 13:15:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 13:15:51 -0400
commit		c7c66c0cb0c77b1a8edf09bca57d922312d58030 (patch)
tree		77277103c5f16aa4dee64978a060933d92e14776
parent		9f3938346a5c1fa504647670edb5fea5756cfb00 (diff)
parent		98e8bdafeb4728a6af7bbcbcc3984967d1cf2bc1 (diff)
Merge tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates for 3.4 from Rafael Wysocki:
"Assorted extensions and fixes including:
* Introduction of early/late suspend/hibernation device callbacks.
* Generic PM domains extensions and fixes.
* devfreq updates from Axel Lin and MyungJoo Ham.
* Device PM QoS updates.
* Fixes of concurrency problems with wakeup sources.
* System suspend and hibernation fixes."
* tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (43 commits)
PM / Domains: Check domain status during hibernation restore of devices
PM / devfreq: add relation of recommended frequency.
PM / shmobile: Make MTU2 driver use pm_genpd_dev_always_on()
PM / shmobile: Make CMT driver use pm_genpd_dev_always_on()
PM / shmobile: Make TMU driver use pm_genpd_dev_always_on()
PM / Domains: Introduce "always on" device flag
PM / Domains: Fix hibernation restore of devices, v2
PM / Domains: Fix handling of wakeup devices during system resume
sh_mmcif / PM: Use PM QoS latency constraint
tmio_mmc / PM: Use PM QoS latency constraint
PM / QoS: Make it possible to expose PM QoS latency constraints
PM / Sleep: JBD and JBD2 missing set_freezable()
PM / Domains: Fix include for PM_GENERIC_DOMAINS=n case
PM / Freezer: Remove references to TIF_FREEZE in comments
PM / Sleep: Add more wakeup source initialization routines
PM / Hibernate: Enable usermodehelpers in hibernate() error path
PM / Sleep: Make __pm_stay_awake() delete wakeup source timers
PM / Sleep: Fix race conditions related to wakeup source timer function
PM / Sleep: Fix possible infinite loop during wakeup source destruction
PM / Hibernate: print physical addresses consistently with other parts of kernel
...
53 files changed, 1452 insertions(+), 611 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 8ffbc25376a0..840f7d64d483 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -165,3 +165,21 @@ Description:
 
 		Not all drivers support this attribute. If it isn't supported,
 		attempts to read or write it will yield I/O errors.
+
+What:		/sys/devices/.../power/pm_qos_resume_latency_us
+Date:		March 2012
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/devices/.../power/pm_qos_resume_latency_us attribute
+		contains the PM QoS resume latency limit for the given device,
+		which is the maximum allowed time it can take to resume the
+		device, after it has been suspended at run time, from a resume
+		request to the moment the device will be ready to process I/O,
+		in microseconds. If it is equal to 0, however, the PM QoS
+		resume latency may be arbitrary.
+
+		Not all drivers support this attribute. If it isn't supported,
+		it is not present.
+
+		This attribute has no effect on system-wide suspend/resume and
+		hibernation.
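
The limit is written and read as a plain decimal number of microseconds. As a
minimal user-space sketch (the device path below is a made-up example, and the
attribute only exists for drivers that expose the constraint):

	/* Hedged sketch: set and read back a device's PM QoS resume latency
	 * limit via sysfs.  The platform device path is hypothetical.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *attr =
			"/sys/devices/platform/example-dev/power/pm_qos_resume_latency_us";
		char buf[32];
		FILE *f;

		f = fopen(attr, "w");
		if (!f) {
			perror("fopen");	/* driver does not expose the limit */
			return 1;
		}
		fprintf(f, "%d\n", 100);	/* allow at most 100 us resume latency */
		fclose(f);

		f = fopen(attr, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("resume latency limit: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}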
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
new file mode 100644
index 000000000000..6528e215c5fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -0,0 +1,21 @@
+* Samsung Exynos Power Domains
+
+Exynos processors include support for multiple power domains which are used
+to gate power to one or more peripherals on the processor.
+
+Required Properties:
+- compatible: should be one of the following.
+    * samsung,exynos4210-pd - for exynos4210 type power domain.
+- reg: physical base address of the controller and length of memory mapped
+    region.
+
+Optional Properties:
+- samsung,exynos4210-pd-off: specifies that the power domain is in a turned-off
+    state during boot and remains turned off until explicitly turned on.
+
+Example:
+
+	lcd0: power-domain-lcd0 {
+		compatible = "samsung,exynos4210-pd";
+		reg = <0x10023C00 0x10>;
+	};
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 20af7def23c8..872815cd41d3 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -96,6 +96,12 @@ struct dev_pm_ops {
 	int (*thaw)(struct device *dev);
 	int (*poweroff)(struct device *dev);
 	int (*restore)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*poweroff_late)(struct device *dev);
+	int (*restore_early)(struct device *dev);
 	int (*suspend_noirq)(struct device *dev);
 	int (*resume_noirq)(struct device *dev);
 	int (*freeze_noirq)(struct device *dev);
@@ -305,7 +311,7 @@ Entering System Suspend
 -----------------------
 When the system goes into the standby or memory sleep state, the phases are:
 
-	prepare, suspend, suspend_noirq.
+	prepare, suspend, suspend_late, suspend_noirq.
 
 1. The prepare phase is meant to prevent races by preventing new devices
    from being registered; the PM core would never know that all the
@@ -324,7 +330,12 @@ When the system goes into the standby or memory sleep state, the phases are:
    appropriate low-power state, depending on the bus type the device is on,
    and they may enable wakeup events.
 
-3. The suspend_noirq phase occurs after IRQ handlers have been disabled,
+3. For a number of devices it is convenient to split suspend into the
+   "quiesce device" and "save device state" phases, in which case
+   suspend_late is meant to do the latter. It is always executed after
+   runtime power management has been disabled for all devices.
+
+4. The suspend_noirq phase occurs after IRQ handlers have been disabled,
    which means that the driver's interrupt handler will not be called while
    the callback method is running. The methods should save the values of
    the device's registers that weren't saved previously and finally put the
@@ -359,7 +370,7 @@ Leaving System Suspend
 ----------------------
 When resuming from standby or memory sleep, the phases are:
 
-	resume_noirq, resume, complete.
+	resume_noirq, resume_early, resume, complete.
 
 1. The resume_noirq callback methods should perform any actions needed
    before the driver's interrupt handlers are invoked. This generally
@@ -375,14 +386,18 @@ When resuming from standby or memory sleep, the phases are:
    device driver's ->pm.resume_noirq() method to perform device-specific
    actions.
 
-2. The resume methods should bring the the device back to its operating
+2. The resume_early methods should prepare devices for the execution of
+   the resume methods. This generally involves undoing the actions of the
+   preceding suspend_late phase.
+
+3. The resume methods should bring the device back to its operating
    state, so that it can perform normal I/O. This generally involves
    undoing the actions of the suspend phase.
 
-3. The complete phase uses only a bus callback. The method should undo the
-   actions of the prepare phase. Note, however, that new children may be
-   registered below the device as soon as the resume callbacks occur; it's
-   not necessary to wait until the complete phase.
+4. The complete phase should undo the actions of the prepare phase. Note,
+   however, that new children may be registered below the device as soon as
+   the resume callbacks occur; it's not necessary to wait until the
+   complete phase.
 
 At the end of these phases, drivers should be as functional as they were before
 suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
@@ -429,8 +444,8 @@ an image of the system memory while everything is stable, reactivate all
 devices (thaw), write the image to permanent storage, and finally shut down the
 system (poweroff). The phases used to accomplish this are:
 
-	prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
-	prepare, poweroff, poweroff_noirq
+	prepare, freeze, freeze_late, freeze_noirq, thaw_noirq, thaw_early,
+	thaw, complete, prepare, poweroff, poweroff_late, poweroff_noirq
 
 1. The prepare phase is discussed in the "Entering System Suspend" section
    above.
@@ -441,7 +456,11 @@ system (poweroff). The phases used to accomplish this are:
    save time it's best not to do so. Also, the device should not be
    prepared to generate wakeup events.
 
-3. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
+3. The freeze_late phase is analogous to the suspend_late phase described
+   above, except that the device should not be put in a low-power state and
+   should not be allowed to generate wakeup events.
+
+4. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
    above, except again that the device should not be put in a low-power
    state and should not be allowed to generate wakeup events.
 
@@ -449,15 +468,19 @@ At this point the system image is created. All devices should be inactive and
 the contents of memory should remain undisturbed while this happens, so that the
 image forms an atomic snapshot of the system state.
 
-4. The thaw_noirq phase is analogous to the resume_noirq phase discussed
+5. The thaw_noirq phase is analogous to the resume_noirq phase discussed
    above. The main difference is that its methods can assume the device is
    in the same state as at the end of the freeze_noirq phase.
 
-5. The thaw phase is analogous to the resume phase discussed above. Its
+6. The thaw_early phase is analogous to the resume_early phase described
+   above. Its methods should undo the actions of the preceding
+   freeze_late phase, if necessary.
+
+7. The thaw phase is analogous to the resume phase discussed above. Its
    methods should bring the device back to an operating state, so that it
    can be used for saving the image if necessary.
 
-6. The complete phase is discussed in the "Leaving System Suspend" section
+8. The complete phase is discussed in the "Leaving System Suspend" section
    above.
 
 At this point the system image is saved, and the devices then need to be
@@ -465,16 +488,19 @@ prepared for the upcoming system shutdown. This is much like suspending them
 before putting the system into the standby or memory sleep state, and the phases
 are similar.
 
-7. The prepare phase is discussed above.
+9. The prepare phase is discussed above.
+
+10. The poweroff phase is analogous to the suspend phase.
 
-8. The poweroff phase is analogous to the suspend phase.
+11. The poweroff_late phase is analogous to the suspend_late phase.
 
-9. The poweroff_noirq phase is analogous to the suspend_noirq phase.
+12. The poweroff_noirq phase is analogous to the suspend_noirq phase.
 
-The poweroff and poweroff_noirq callbacks should do essentially the same things
-as the suspend and suspend_noirq callbacks. The only notable difference is that
-they need not store the device register values, because the registers should
-already have been stored during the freeze or freeze_noirq phases.
+The poweroff, poweroff_late and poweroff_noirq callbacks should do essentially
+the same things as the suspend, suspend_late and suspend_noirq callbacks,
+respectively. The only notable difference is that they need not store the
+device register values, because the registers should already have been stored
+during the freeze, freeze_late or freeze_noirq phases.
 
 
 Leaving Hibernation
@@ -518,22 +544,25 @@ To achieve this, the image kernel must restore the devices' pre-hibernation
 functionality. The operation is much like waking up from the memory sleep
 state, although it involves different phases:
 
-	restore_noirq, restore, complete
+	restore_noirq, restore_early, restore, complete
 
 1. The restore_noirq phase is analogous to the resume_noirq phase.
 
-2. The restore phase is analogous to the resume phase.
+2. The restore_early phase is analogous to the resume_early phase.
+
+3. The restore phase is analogous to the resume phase.
 
-3. The complete phase is discussed above.
+4. The complete phase is discussed above.
 
-The main difference from resume[_noirq] is that restore[_noirq] must assume the
-device has been accessed and reconfigured by the boot loader or the boot kernel.
-Consequently the state of the device may be different from the state remembered
-from the freeze and freeze_noirq phases. The device may even need to be reset
-and completely re-initialized. In many cases this difference doesn't matter, so
-the resume[_noirq] and restore[_norq] method pointers can be set to the same
-routines. Nevertheless, different callback pointers are used in case there is a
-situation where it actually matters.
+The main difference from resume[_early|_noirq] is that restore[_early|_noirq]
+must assume the device has been accessed and reconfigured by the boot loader or
+the boot kernel. Consequently the state of the device may be different from the
+state remembered from the freeze, freeze_late and freeze_noirq phases. The
+device may even need to be reset and completely re-initialized. In many cases
+this difference doesn't matter, so the resume[_early|_noirq] and
+restore[_early|_noirq] method pointers can be set to the same routines.
+Nevertheless, different callback pointers are used in case there is a situation
+where it actually does matter.
 
 
 Device Power Management Domains
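
The callbacks documented above slot into struct dev_pm_ops next to the
existing ones. As a hedged, driver-side sketch of the quiesce/save split these
phases describe (the foo_* names are hypothetical, not part of this patch set):

	/* Illustrative sketch only: a driver splitting "quiesce" (suspend)
	 * from "save state" (suspend_late), per the phases documented above.
	 */
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device: stop I/O, disable DMA */
		return 0;
	}

	static int foo_suspend_late(struct device *dev)
	{
		/* save register state; runtime PM is already disabled here */
		return 0;
	}

	static int foo_resume_early(struct device *dev)
	{
		/* undo suspend_late: restore register state */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* undo suspend: restart I/O */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.suspend_late	= foo_suspend_late,
		.resume_early	= foo_resume_early,
		.resume		= foo_resume,
	};

Pairing suspend with resume and suspend_late with resume_early keeps each
phase's work symmetric with the phase that undoes it.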
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index ebd7490ef1df..ec715cd78fbb 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -63,6 +63,27 @@ devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
 have been frozen leave __refrigerator() and continue running.
 
+
+Rationale behind the functions dealing with freezing and thawing of tasks:
+-------------------------------------------------------------------------
+
+freeze_processes():
+  - freezes only userspace tasks
+
+freeze_kernel_threads():
+  - freezes all tasks (including kernel threads) because we can't freeze
+    kernel threads without freezing userspace tasks
+
+thaw_kernel_threads():
+  - thaws only kernel threads; this is particularly useful if we need to do
+    anything special in between thawing of kernel threads and thawing of
+    userspace tasks, or if we want to postpone the thawing of userspace tasks
+
+thaw_processes():
+  - thaws all tasks (including kernel threads) because we can't thaw userspace
+    tasks without thawing kernel threads
+
+
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default. However, a kernel thread may clear
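
For context, the usual pattern for a kernel thread that opts into freezing is
the existing set_freezable()/try_to_freeze() loop; a generic sketch (not code
from this merge, assumes linux/freezer.h and linux/kthread.h):

	/* Sketch of a freezable kernel thread: set_freezable() clears
	 * PF_NOFREEZE, and try_to_freeze() parks the thread in the
	 * refrigerator whenever freezing is in progress.
	 */
	static int foo_thread(void *data)
	{
		set_freezable();

		while (!kthread_should_stop()) {
			try_to_freeze();

			/* ... do one unit of work ... */

			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}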
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 5d602f68a0e8..dfad6538b273 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -34,6 +34,7 @@ config CPU_EXYNOS4210
 	select ARM_CPU_SUSPEND if PM
 	select S5P_PM if PM
 	select S5P_SLEEP if PM
+	select PM_GENERIC_DOMAINS
 	help
 	  Enable EXYNOS4210 CPU support
 
@@ -74,11 +75,6 @@ config EXYNOS4_SETUP_FIMD0
 	help
 	  Common setup code for FIMD0.
 
-config EXYNOS4_DEV_PD
-	bool
-	help
-	  Compile in platform device definitions for Power Domain
-
 config EXYNOS4_DEV_SYSMMU
 	bool
 	help
@@ -195,7 +191,6 @@ config MACH_SMDKV310
 	select EXYNOS4_DEV_AHCI
 	select SAMSUNG_DEV_KEYPAD
 	select EXYNOS4_DEV_DMA
-	select EXYNOS4_DEV_PD
 	select SAMSUNG_DEV_PWM
 	select EXYNOS4_DEV_USB_OHCI
 	select EXYNOS4_DEV_SYSMMU
@@ -243,7 +238,6 @@ config MACH_UNIVERSAL_C210
 	select S5P_DEV_ONENAND
 	select S5P_DEV_TV
 	select EXYNOS4_DEV_DMA
-	select EXYNOS4_DEV_PD
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_I2C1
 	select EXYNOS4_SETUP_I2C3
@@ -277,7 +271,6 @@ config MACH_NURI
 	select S5P_DEV_USB_EHCI
 	select S5P_SETUP_MIPIPHY
 	select EXYNOS4_DEV_DMA
-	select EXYNOS4_DEV_PD
 	select EXYNOS4_SETUP_FIMC
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_I2C1
@@ -310,7 +303,6 @@ config MACH_ORIGEN
 	select SAMSUNG_DEV_BACKLIGHT
 	select SAMSUNG_DEV_PWM
 	select EXYNOS4_DEV_DMA
-	select EXYNOS4_DEV_PD
 	select EXYNOS4_DEV_USB_OHCI
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_SDHCI
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 5fc202cdfdb6..d9191f9a7af8 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CPU_EXYNOS4210)	+= clock-exynos4210.o
 obj-$(CONFIG_SOC_EXYNOS4212)	+= clock-exynos4212.o
 
 obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 
 obj-$(CONFIG_ARCH_EXYNOS4)	+= pmu.o
@@ -45,7 +46,6 @@ obj-$(CONFIG_MACH_EXYNOS4_DT)	+= mach-exynos4-dt.o
 
 obj-$(CONFIG_ARCH_EXYNOS4)	+= dev-audio.o
 obj-$(CONFIG_EXYNOS4_DEV_AHCI)	+= dev-ahci.o
-obj-$(CONFIG_EXYNOS4_DEV_PD)	+= dev-pd.o
 obj-$(CONFIG_EXYNOS4_DEV_SYSMMU)	+= dev-sysmmu.o
 obj-$(CONFIG_EXYNOS4_DEV_DWMCI)	+= dev-dwmci.o
 obj-$(CONFIG_EXYNOS4_DEV_DMA)	+= dma.o
diff --git a/arch/arm/mach-exynos/dev-pd.c b/arch/arm/mach-exynos/dev-pd.c
deleted file mode 100644
index 3273f25d6a75..000000000000
--- a/arch/arm/mach-exynos/dev-pd.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* linux/arch/arm/mach-exynos4/dev-pd.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * EXYNOS4 - Power Domain support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-
-#include <mach/regs-pmu.h>
-
-#include <plat/pd.h>
-
-static int exynos4_pd_enable(struct device *dev)
-{
-	struct samsung_pd_info *pdata = dev->platform_data;
-	u32 timeout;
-
-	__raw_writel(S5P_INT_LOCAL_PWR_EN, pdata->base);
-
-	/* Wait max 1ms */
-	timeout = 10;
-	while ((__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN)
-		!= S5P_INT_LOCAL_PWR_EN) {
-		if (timeout == 0) {
-			printk(KERN_ERR "Power domain %s enable failed.\n",
-				dev_name(dev));
-			return -ETIMEDOUT;
-		}
-		timeout--;
-		udelay(100);
-	}
-
-	return 0;
-}
-
-static int exynos4_pd_disable(struct device *dev)
-{
-	struct samsung_pd_info *pdata = dev->platform_data;
-	u32 timeout;
-
-	__raw_writel(0, pdata->base);
-
-	/* Wait max 1ms */
-	timeout = 10;
-	while (__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN) {
-		if (timeout == 0) {
-			printk(KERN_ERR "Power domain %s disable failed.\n",
-				dev_name(dev));
-			return -ETIMEDOUT;
-		}
-		timeout--;
-		udelay(100);
-	}
-
-	return 0;
-}
-
-struct platform_device exynos4_device_pd[] = {
-	{
-		.name		= "samsung-pd",
-		.id		= 0,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_MFC_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 1,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_G3D_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 2,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_LCD0_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 3,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_LCD1_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 4,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_TV_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 5,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_CAM_CONF,
-			},
-		},
-	}, {
-		.name		= "samsung-pd",
-		.id		= 6,
-		.dev = {
-			.platform_data = &(struct samsung_pd_info) {
-				.enable		= exynos4_pd_enable,
-				.disable	= exynos4_pd_disable,
-				.base		= S5P_PMU_GPS_CONF,
-			},
-		},
-	},
-};
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 435261f83f46..aa37179d776c 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1263,9 +1263,6 @@ static struct platform_device *nuri_devices[] __initdata = {
 	&s5p_device_mfc,
 	&s5p_device_mfc_l,
 	&s5p_device_mfc_r,
-	&exynos4_device_pd[PD_MFC],
-	&exynos4_device_pd[PD_LCD0],
-	&exynos4_device_pd[PD_CAM],
 	&s5p_device_fimc_md,
 
 	/* NURI Devices */
@@ -1315,14 +1312,6 @@ static void __init nuri_machine_init(void)
 
 	/* Last */
 	platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
-	s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
-	s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
-
-	s5p_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_mipi_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
 }
 
 MACHINE_START(NURI, "NURI")
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 0679b8ad2d1e..fa5c4a59b0aa 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -621,13 +621,6 @@ static struct platform_device *origen_devices[] __initdata = {
 	&s5p_device_mfc_r,
 	&s5p_device_mixer,
 	&exynos4_device_ohci,
-	&exynos4_device_pd[PD_LCD0],
-	&exynos4_device_pd[PD_TV],
-	&exynos4_device_pd[PD_G3D],
-	&exynos4_device_pd[PD_LCD1],
-	&exynos4_device_pd[PD_CAM],
-	&exynos4_device_pd[PD_GPS],
-	&exynos4_device_pd[PD_MFC],
 	&origen_device_gpiokeys,
 	&origen_lcd_hv070wsa,
 };
@@ -695,13 +688,6 @@ static void __init origen_machine_init(void)
 
 	platform_add_devices(origen_devices, ARRAY_SIZE(origen_devices));
 
-	s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
-
-	s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
-	s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
-
-	s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
-
 	samsung_bl_set(&origen_bl_gpio_info, &origen_bl_data);
 }
 
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index b2c5557f50e4..5258b8563676 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -277,13 +277,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
 	&s5p_device_mfc,
 	&s5p_device_mfc_l,
 	&s5p_device_mfc_r,
-	&exynos4_device_pd[PD_MFC],
-	&exynos4_device_pd[PD_G3D],
-	&exynos4_device_pd[PD_LCD0],
-	&exynos4_device_pd[PD_LCD1],
-	&exynos4_device_pd[PD_CAM],
-	&exynos4_device_pd[PD_TV],
-	&exynos4_device_pd[PD_GPS],
 	&exynos4_device_spdif,
 	&exynos4_device_sysmmu,
 	&samsung_asoc_dma,
@@ -336,10 +329,6 @@ static void s5p_tv_setup(void)
 	WARN_ON(gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug"));
 	s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
 	s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
-
-	/* setup dependencies between TV devices */
-	s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
-	s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
 }
 
 static void __init smdkv310_map_io(void)
@@ -379,7 +368,6 @@ static void __init smdkv310_machine_init(void)
 	clk_xusbxti.rate = 24000000;
 
 	platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
-	s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
 }
 
 MACHINE_START(SMDKV310, "SMDKV310")
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 38939956c34f..b2d495b31094 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -971,7 +971,6 @@ static struct platform_device *universal_devices[] __initdata = {
 	&s3c_device_i2c5,
 	&s5p_device_i2c_hdmiphy,
 	&hdmi_fixed_voltage,
-	&exynos4_device_pd[PD_TV],
 	&s5p_device_hdmi,
 	&s5p_device_sdo,
 	&s5p_device_mixer,
@@ -984,9 +983,6 @@ static struct platform_device *universal_devices[] __initdata = {
 	&s5p_device_mfc,
 	&s5p_device_mfc_l,
 	&s5p_device_mfc_r,
-	&exynos4_device_pd[PD_MFC],
-	&exynos4_device_pd[PD_LCD0],
-	&exynos4_device_pd[PD_CAM],
 	&cam_i_core_fixed_reg_dev,
 	&cam_s_if_fixed_reg_dev,
 	&s5p_device_fimc_md,
@@ -1005,10 +1001,6 @@ void s5p_tv_setup(void)
 	gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug");
 	s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
 	s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
-
-	/* setup dependencies between TV devices */
-	s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
-	s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
 }
 
 static void __init universal_reserve(void)
@@ -1042,15 +1034,6 @@ static void __init universal_machine_init(void)
 
 	/* Last */
 	platform_add_devices(universal_devices, ARRAY_SIZE(universal_devices));
-
-	s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
-	s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
-
-	s5p_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
-	s5p_device_mipi_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
 }
 
 MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
new file mode 100644
index 000000000000..0b04af2b13cc
--- /dev/null
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -0,0 +1,195 @@
+/*
+ * Exynos Generic power domain support.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Implementation of Exynos specific power domain control which is used in
+ * conjunction with runtime-pm. Support for both device-tree and non-device-tree
+ * based power domain support is included.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_domain.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+
+#include <mach/regs-pmu.h>
+#include <plat/devs.h>
+
+/*
+ * Exynos specific wrapper around the generic power domain
+ */
+struct exynos_pm_domain {
+	void __iomem *base;
+	char const *name;
+	bool is_off;
+	struct generic_pm_domain pd;
+};
+
+static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+	struct exynos_pm_domain *pd;
+	void __iomem *base;
+	u32 timeout, pwr;
+	char *op;
+
+	pd = container_of(domain, struct exynos_pm_domain, pd);
+	base = pd->base;
+
+	pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
+	__raw_writel(pwr, base);
+
+	/* Wait max 1ms */
+	timeout = 10;
+
+	while ((__raw_readl(base + 0x4) & S5P_INT_LOCAL_PWR_EN) != pwr) {
+		if (!timeout) {
+			op = (power_on) ? "enable" : "disable";
+			pr_err("Power domain %s %s failed\n", domain->name, op);
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		cpu_relax();
+		usleep_range(80, 100);
+	}
+	return 0;
+}
+
+static int exynos_pd_power_on(struct generic_pm_domain *domain)
+{
+	return exynos_pd_power(domain, true);
+}
+
+static int exynos_pd_power_off(struct generic_pm_domain *domain)
+{
+	return exynos_pd_power(domain, false);
+}
+
+#define EXYNOS_GPD(PD, BASE, NAME)			\
+static struct exynos_pm_domain PD = {			\
+	.base = (void __iomem *)BASE,			\
+	.name = NAME,					\
+	.pd = {						\
+		.power_off = exynos_pd_power_off,	\
+		.power_on = exynos_pd_power_on,		\
+	},						\
+}
+
+#ifdef CONFIG_OF
+static __init int exynos_pm_dt_parse_domains(void)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+		struct exynos_pm_domain *pd;
+
+		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+		if (!pd) {
+			pr_err("%s: failed to allocate memory for domain\n",
+					__func__);
+			return -ENOMEM;
+		}
+
+		if (of_get_property(np, "samsung,exynos4210-pd-off", NULL))
+			pd->is_off = true;
+		pd->name = np->name;
+		pd->base = of_iomap(np, 0);
+		pd->pd.power_off = exynos_pd_power_off;
+		pd->pd.power_on = exynos_pd_power_on;
+		pd->pd.of_node = np;
+		pm_genpd_init(&pd->pd, NULL, false);
+	}
+	return 0;
+}
+#else
+static __init int exynos_pm_dt_parse_domains(void)
+{
+	return 0;
+}
+#endif /* CONFIG_OF */
+
+static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
+						struct exynos_pm_domain *pd)
+{
+	if (pdev->dev.bus) {
+		if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+			pr_info("%s: error in adding %s device to %s power domain\n",
+				__func__, dev_name(&pdev->dev), pd->name);
+	}
+}
+
+EXYNOS_GPD(exynos4_pd_mfc, S5P_PMU_MFC_CONF, "pd-mfc");
+EXYNOS_GPD(exynos4_pd_g3d, S5P_PMU_G3D_CONF, "pd-g3d");
+EXYNOS_GPD(exynos4_pd_lcd0, S5P_PMU_LCD0_CONF, "pd-lcd0");
+EXYNOS_GPD(exynos4_pd_lcd1, S5P_PMU_LCD1_CONF, "pd-lcd1");
+EXYNOS_GPD(exynos4_pd_tv, S5P_PMU_TV_CONF, "pd-tv");
+EXYNOS_GPD(exynos4_pd_cam, S5P_PMU_CAM_CONF, "pd-cam");
+EXYNOS_GPD(exynos4_pd_gps, S5P_PMU_GPS_CONF, "pd-gps");
+
+static struct exynos_pm_domain *exynos4_pm_domains[] = {
+	&exynos4_pd_mfc,
+	&exynos4_pd_g3d,
+	&exynos4_pd_lcd0,
+	&exynos4_pd_lcd1,
+	&exynos4_pd_tv,
+	&exynos4_pd_cam,
+	&exynos4_pd_gps,
+};
+
+static __init int exynos4_pm_init_power_domain(void)
+{
+	int idx;
+
+	if (of_have_populated_dt())
+		return exynos_pm_dt_parse_domains();
+
+	for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
+		pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
+				exynos4_pm_domains[idx]->is_off);
+
+#ifdef CONFIG_S5P_DEV_FIMD0
+	exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
+#endif
+#ifdef CONFIG_S5P_DEV_TV
+	exynos_pm_add_dev_to_genpd(&s5p_device_hdmi, &exynos4_pd_tv);
+	exynos_pm_add_dev_to_genpd(&s5p_device_mixer, &exynos4_pd_tv);
+#endif
+#ifdef CONFIG_S5P_DEV_MFC
+	exynos_pm_add_dev_to_genpd(&s5p_device_mfc, &exynos4_pd_mfc);
+#endif
+#ifdef CONFIG_S5P_DEV_FIMC0
+	exynos_pm_add_dev_to_genpd(&s5p_device_fimc0, &exynos4_pd_cam);
+#endif
+#ifdef CONFIG_S5P_DEV_FIMC1
+	exynos_pm_add_dev_to_genpd(&s5p_device_fimc1, &exynos4_pd_cam);
+#endif
+#ifdef CONFIG_S5P_DEV_FIMC2
+	exynos_pm_add_dev_to_genpd(&s5p_device_fimc2, &exynos4_pd_cam);
+#endif
+#ifdef CONFIG_S5P_DEV_FIMC3
+	exynos_pm_add_dev_to_genpd(&s5p_device_fimc3, &exynos4_pd_cam);
+#endif
+#ifdef CONFIG_S5P_DEV_CSIS0
+	exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis0, &exynos4_pd_cam);
+#endif
+#ifdef CONFIG_S5P_DEV_CSIS1
+	exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis1, &exynos4_pd_cam);
+#endif
+	return 0;
+}
+arch_initcall(exynos4_pm_init_power_domain);
+
+static __init int exynos_pm_late_initcall(void)
+{
+	pm_genpd_poweroff_unused();
+	return 0;
+}
+late_initcall(exynos_pm_late_initcall);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index a83cf51fc099..cccf91b8fae1 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -1043,6 +1043,8 @@ void __init sh7372_add_standard_devices(void)
 	sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device);
 	sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device);
 	sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &tmu00_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &tmu01_device);
 }
 
 void __init sh7372_add_early_devices(void)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f76623cbe263..5d56931a15b3 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1234,8 +1234,7 @@ static int suspend(int vetoable)
 	struct apm_user	*as;
 
 	dpm_suspend_start(PMSG_SUSPEND);
-
-	dpm_suspend_noirq(PMSG_SUSPEND);
+	dpm_suspend_end(PMSG_SUSPEND);
 
 	local_irq_disable();
 	syscore_suspend();
@@ -1259,9 +1258,9 @@ static int suspend(int vetoable)
 	syscore_resume();
 	local_irq_enable();
 
-	dpm_resume_noirq(PMSG_RESUME);
-
+	dpm_resume_start(PMSG_RESUME);
 	dpm_resume_end(PMSG_RESUME);
+
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1276,7 @@ static void standby(void)
 {
 	int err;
 
-	dpm_suspend_noirq(PMSG_SUSPEND);
+	dpm_suspend_end(PMSG_SUSPEND);
 
 	local_irq_disable();
 	syscore_suspend();
@@ -1291,7 +1290,7 @@ static void standby(void)
 	syscore_resume();
 	local_irq_enable();
 
-	dpm_resume_noirq(PMSG_RESUME);
+	dpm_resume_start(PMSG_RESUME);
 }
 
 static apm_event_t get_event(void)
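
The helpers used above are introduced by this series; assuming dpm_suspend_end()
covers the suspend_late and suspend_noirq phases and dpm_resume_start() covers
resume_noirq and resume_early, the core ordering looks roughly like this sketch
(error handling omitted):

	/* Sketch of the ordering the APM code above relies on.  The phase
	 * coverage noted in the comments is an assumption from this series'
	 * description, not a quote of the implementation.
	 */
	static void sketch_platform_sleep(void)
	{
		dpm_suspend_start(PMSG_SUSPEND);	/* prepare + suspend */
		dpm_suspend_end(PMSG_SUSPEND);		/* suspend_late + suspend_noirq */

		local_irq_disable();
		syscore_suspend();
		/* ... enter the platform sleep state ... */
		syscore_resume();
		local_irq_enable();

		dpm_resume_start(PMSG_RESUME);		/* resume_noirq + resume_early */
		dpm_resume_end(PMSG_RESUME);		/* resume + complete */
	}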
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 978bbf7ac6af..73ce9fbe9839 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
366 | not_suspended = 0; | 366 | not_suspended = 0; |
367 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 367 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
368 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) | 368 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
369 | || pdd->dev->power.irq_safe)) | 369 | || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) |
370 | not_suspended++; | 370 | not_suspended++; |
371 | 371 | ||
372 | if (not_suspended > genpd->in_progress) | 372 | if (not_suspended > genpd->in_progress) |
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
503 | 503 | ||
504 | might_sleep_if(!genpd->dev_irq_safe); | 504 | might_sleep_if(!genpd->dev_irq_safe); |
505 | 505 | ||
506 | if (dev_gpd_data(dev)->always_on) | ||
507 | return -EBUSY; | ||
508 | |||
506 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; | 509 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; |
507 | if (stop_ok && !stop_ok(dev)) | 510 | if (stop_ok && !stop_ok(dev)) |
508 | return -EBUSY; | 511 | return -EBUSY; |
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev) | |||
764 | 767 | ||
765 | genpd_acquire_lock(genpd); | 768 | genpd_acquire_lock(genpd); |
766 | 769 | ||
767 | if (genpd->prepared_count++ == 0) | 770 | if (genpd->prepared_count++ == 0) { |
771 | genpd->suspended_count = 0; | ||
768 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; | 772 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; |
773 | } | ||
769 | 774 | ||
770 | genpd_release_lock(genpd); | 775 | genpd_release_lock(genpd); |
771 | 776 | ||
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev) | |||
820 | } | 825 | } |
821 | 826 | ||
822 | /** | 827 | /** |
823 | * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. | 828 | * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. |
824 | * @dev: Device to suspend. | 829 | * @dev: Device to suspend. |
825 | * | 830 | * |
826 | * Carry out a late suspend of a device under the assumption that its | 831 | * Carry out a late suspend of a device under the assumption that its |
827 | * pm_domain field points to the domain member of an object of type | 832 | * pm_domain field points to the domain member of an object of type |
828 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | 833 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
829 | */ | 834 | */ |
830 | static int pm_genpd_suspend_noirq(struct device *dev) | 835 | static int pm_genpd_suspend_late(struct device *dev) |
831 | { | 836 | { |
832 | struct generic_pm_domain *genpd; | 837 | struct generic_pm_domain *genpd; |
833 | int ret; | ||
834 | 838 | ||
835 | dev_dbg(dev, "%s()\n", __func__); | 839 | dev_dbg(dev, "%s()\n", __func__); |
836 | 840 | ||
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev) | |||
838 | if (IS_ERR(genpd)) | 842 | if (IS_ERR(genpd)) |
839 | return -EINVAL; | 843 | return -EINVAL; |
840 | 844 | ||
841 | if (genpd->suspend_power_off) | 845 | return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); |
842 | return 0; | 846 | } |
843 | 847 | ||
844 | ret = genpd_suspend_late(genpd, dev); | 848 | /** |
845 | if (ret) | 849 | * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. |
846 | return ret; | 850 | * @dev: Device to suspend. |
851 | * | ||
852 | * Stop the device and remove power from the domain if all devices in it have | ||
853 | * been stopped. | ||
854 | */ | ||
855 | static int pm_genpd_suspend_noirq(struct device *dev) | ||
856 | { | ||
857 | struct generic_pm_domain *genpd; | ||
847 | 858 | ||
848 | if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) | 859 | dev_dbg(dev, "%s()\n", __func__); |
860 | |||
861 | genpd = dev_to_genpd(dev); | ||
862 | if (IS_ERR(genpd)) | ||
863 | return -EINVAL; | ||
864 | |||
865 | if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on | ||
866 | || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) | ||
849 | return 0; | 867 | return 0; |
850 | 868 | ||
851 | genpd_stop_dev(genpd, dev); | 869 | genpd_stop_dev(genpd, dev); |
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev) | |||
862 | } | 880 | } |
863 | 881 | ||
864 | /** | 882 | /** |
865 | * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. | 883 | * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. |
866 | * @dev: Device to resume. | 884 | * @dev: Device to resume. |
867 | * | 885 | * |
868 | * Carry out an early resume of a device under the assumption that its | 886 | * Restore power to the device's PM domain, if necessary, and start the device. |
869 | * pm_domain field points to the domain member of an object of type | ||
870 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
871 | * devices. | ||
872 | */ | 887 | */ |
873 | static int pm_genpd_resume_noirq(struct device *dev) | 888 | static int pm_genpd_resume_noirq(struct device *dev) |
874 | { | 889 | { |
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev) | |||
880 | if (IS_ERR(genpd)) | 895 | if (IS_ERR(genpd)) |
881 | return -EINVAL; | 896 | return -EINVAL; |
882 | 897 | ||
883 | if (genpd->suspend_power_off) | 898 | if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on |
899 | || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) | ||
884 | return 0; | 900 | return 0; |
885 | 901 | ||
886 | /* | 902 | /* |
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev) | |||
890 | */ | 906 | */ |
891 | pm_genpd_poweron(genpd); | 907 | pm_genpd_poweron(genpd); |
892 | genpd->suspended_count--; | 908 | genpd->suspended_count--; |
893 | genpd_start_dev(genpd, dev); | ||
894 | 909 | ||
895 | return genpd_resume_early(genpd, dev); | 910 | return genpd_start_dev(genpd, dev); |
896 | } | 911 | } |
897 | 912 | ||
898 | /** | 913 | /** |
899 | * pm_genpd_resume - Resume a device belonging to an I/O power domain. | 914 | * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. |
915 | * @dev: Device to resume. | ||
916 | * | ||
917 | * Carry out an early resume of a device under the assumption that its | ||
918 | * pm_domain field points to the domain member of an object of type | ||
919 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
920 | * devices. | ||
921 | */ | ||
922 | static int pm_genpd_resume_early(struct device *dev) | ||
923 | { | ||
924 | struct generic_pm_domain *genpd; | ||
925 | |||
926 | dev_dbg(dev, "%s()\n", __func__); | ||
927 | |||
928 | genpd = dev_to_genpd(dev); | ||
929 | if (IS_ERR(genpd)) | ||
930 | return -EINVAL; | ||
931 | |||
932 | return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); | ||
933 | } | ||
934 | |||
935 | /** | ||
936 | * pm_genpd_resume - Resume of device in an I/O PM domain. | ||
900 | * @dev: Device to resume. | 937 | * @dev: Device to resume. |
901 | * | 938 | * |
902 | * Resume a device under the assumption that its pm_domain field points to the | 939 | * Resume a device under the assumption that its pm_domain field points to the |
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev) | |||
917 | } | 954 | } |
918 | 955 | ||
919 | /** | 956 | /** |
920 | * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. | 957 | * pm_genpd_freeze - Freezing a device in an I/O PM domain. |
921 | * @dev: Device to freeze. | 958 | * @dev: Device to freeze. |
922 | * | 959 | * |
923 | * Freeze a device under the assumption that its pm_domain field points to the | 960 | * Freeze a device under the assumption that its pm_domain field points to the |
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev) | |||
938 | } | 975 | } |
939 | 976 | ||
940 | /** | 977 | /** |
941 | * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. | 978 | * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. |
979 | * @dev: Device to freeze. | ||
980 | * | ||
981 | * Carry out a late freeze of a device under the assumption that its | ||
982 | * pm_domain field points to the domain member of an object of type | ||
983 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
984 | * devices. | ||
985 | */ | ||
986 | static int pm_genpd_freeze_late(struct device *dev) | ||
987 | { | ||
988 | struct generic_pm_domain *genpd; | ||
989 | |||
990 | dev_dbg(dev, "%s()\n", __func__); | ||
991 | |||
992 | genpd = dev_to_genpd(dev); | ||
993 | if (IS_ERR(genpd)) | ||
994 | return -EINVAL; | ||
995 | |||
996 | return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); | ||
997 | } | ||
998 | |||
999 | /** | ||
1000 | * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. | ||
942 | * @dev: Device to freeze. | 1001 | * @dev: Device to freeze. |
943 | * | 1002 | * |
944 | * Carry out a late freeze of a device under the assumption that its | 1003 | * Carry out a late freeze of a device under the assumption that its |
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev) | |||
949 | static int pm_genpd_freeze_noirq(struct device *dev) | 1008 | static int pm_genpd_freeze_noirq(struct device *dev) |
950 | { | 1009 | { |
951 | struct generic_pm_domain *genpd; | 1010 | struct generic_pm_domain *genpd; |
952 | int ret; | ||
953 | 1011 | ||
954 | dev_dbg(dev, "%s()\n", __func__); | 1012 | dev_dbg(dev, "%s()\n", __func__); |
955 | 1013 | ||
@@ -957,20 +1015,33 @@ static int pm_genpd_freeze_noirq(struct device *dev) | |||
957 | if (IS_ERR(genpd)) | 1015 | if (IS_ERR(genpd)) |
958 | return -EINVAL; | 1016 | return -EINVAL; |
959 | 1017 | ||
960 | if (genpd->suspend_power_off) | 1018 | return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? |
961 | return 0; | 1019 | 0 : genpd_stop_dev(genpd, dev); |
1020 | } | ||
962 | 1021 | ||
963 | ret = genpd_freeze_late(genpd, dev); | 1022 | /** |
964 | if (ret) | 1023 | * pm_genpd_thaw_noirq - Early thaw of a device in an I/O PM domain. |
965 | return ret; | 1024 | * @dev: Device to thaw. |
1025 | * | ||
1026 | * Start the device, unless power was already removed from the domain | ||
1027 | * before the system transition. | ||
1028 | */ | ||
1029 | static int pm_genpd_thaw_noirq(struct device *dev) | ||
1030 | { | ||
1031 | struct generic_pm_domain *genpd; | ||
966 | 1032 | ||
967 | genpd_stop_dev(genpd, dev); | 1033 | dev_dbg(dev, "%s()\n", __func__); |
968 | 1034 | ||
969 | return 0; | 1035 | genpd = dev_to_genpd(dev); |
1036 | if (IS_ERR(genpd)) | ||
1037 | return -EINVAL; | ||
1038 | |||
1039 | return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? | ||
1040 | 0 : genpd_start_dev(genpd, dev); | ||
970 | } | 1041 | } |
971 | 1042 | ||
972 | /** | 1043 | /** |
973 | * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. | 1044 | * pm_genpd_thaw_early - Early thaw of a device in an I/O PM domain. |
974 | * @dev: Device to thaw. | 1045 | * @dev: Device to thaw. |
975 | * | 1046 | * |
976 | * Carry out an early thaw of a device under the assumption that its | 1047 | * Carry out an early thaw of a device under the assumption that its |
@@ -978,7 +1049,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) | |||
978 | * struct generic_pm_domain representing a power domain consisting of I/O | 1049 | * struct generic_pm_domain representing a power domain consisting of I/O |
979 | * devices. | 1050 | * devices. |
980 | */ | 1051 | */ |
981 | static int pm_genpd_thaw_noirq(struct device *dev) | 1052 | static int pm_genpd_thaw_early(struct device *dev) |
982 | { | 1053 | { |
983 | struct generic_pm_domain *genpd; | 1054 | struct generic_pm_domain *genpd; |
984 | 1055 | ||
@@ -988,12 +1059,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) | |||
988 | if (IS_ERR(genpd)) | 1059 | if (IS_ERR(genpd)) |
989 | return -EINVAL; | 1060 | return -EINVAL; |
990 | 1061 | ||
991 | if (genpd->suspend_power_off) | 1062 | return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); |
992 | return 0; | ||
993 | |||
994 | genpd_start_dev(genpd, dev); | ||
995 | |||
996 | return genpd_thaw_early(genpd, dev); | ||
997 | } | 1063 | } |
998 | 1064 | ||
999 | /** | 1065 | /** |
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev) | |||
1018 | } | 1084 | } |
1019 | 1085 | ||
1020 | /** | 1086 | /** |
1021 | * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. | 1087 | * pm_genpd_restore_noirq - Start of restore of a device in an I/O PM domain. |
1022 | * @dev: Device to resume. | 1088 | * @dev: Device to resume. |
1023 | * | 1089 | * |
1024 | * Carry out an early restore of a device under the assumption that its | 1090 | * Make sure the domain will be in the same power state as before the |
1025 | * pm_domain field points to the domain member of an object of type | 1091 | * hibernation the system is resuming from, and start the device if necessary. |
1026 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
1027 | * devices. | ||
1028 | */ | 1092 | */ |
1029 | static int pm_genpd_restore_noirq(struct device *dev) | 1093 | static int pm_genpd_restore_noirq(struct device *dev) |
1030 | { | 1094 | { |
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev) | |||
1040 | * Since all of the "noirq" callbacks are executed sequentially, it is | 1104 | * Since all of the "noirq" callbacks are executed sequentially, it is |
1041 | * guaranteed that this function will never run twice in parallel for | 1105 | * guaranteed that this function will never run twice in parallel for |
1042 | * the same PM domain, so it is not necessary to use locking here. | 1106 | * the same PM domain, so it is not necessary to use locking here. |
1107 | * | ||
1108 | * At this point suspended_count == 0 means we are being run for the | ||
1109 | * first time for the given domain in the present cycle. | ||
1043 | */ | 1110 | */ |
1044 | genpd->status = GPD_STATE_POWER_OFF; | 1111 | if (genpd->suspended_count++ == 0) { |
1045 | if (genpd->suspend_power_off) { | ||
1046 | /* | 1112 | /* |
1047 | * The boot kernel might put the domain into the power on state, | 1113 | * The boot kernel might put the domain into an arbitrary state, |
1048 | * so make sure it really is powered off. | 1114 | * so make it appear as powered off to pm_genpd_poweron(), so |
1115 | * that it powers the domain on in case it really was off. |
1049 | */ | 1116 | */ |
1050 | if (genpd->power_off) | 1117 | genpd->status = GPD_STATE_POWER_OFF; |
1051 | genpd->power_off(genpd); | 1118 | if (genpd->suspend_power_off) { |
1052 | return 0; | 1119 | /* |
1120 | * If the domain was off before the hibernation, make | ||
1121 | * sure it will be off going forward. | ||
1122 | */ | ||
1123 | if (genpd->power_off) | ||
1124 | genpd->power_off(genpd); | ||
1125 | |||
1126 | return 0; | ||
1127 | } | ||
1053 | } | 1128 | } |
1054 | 1129 | ||
1130 | if (genpd->suspend_power_off) | ||
1131 | return 0; | ||
1132 | |||
1055 | pm_genpd_poweron(genpd); | 1133 | pm_genpd_poweron(genpd); |
1056 | genpd->suspended_count--; | ||
1057 | genpd_start_dev(genpd, dev); | ||
1058 | 1134 | ||
1059 | return genpd_resume_early(genpd, dev); | 1135 | return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); |
1060 | } | 1136 | } |
1061 | 1137 | ||
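
The counting in pm_genpd_restore_noirq() above is easy to misread: because of the post-increment, only the first device of a given domain handled in the restore cycle (the one that sees suspended_count == 0) takes the branch that resets the state inherited from the boot kernel; later devices in the same domain skip it. A stand-alone sketch of that idiom, with invented names, not kernel code:

    #include <stdio.h>

    struct domain { unsigned int suspended_count; int powered_off; };

    /* First caller per cycle resets the domain; later callers skip it. */
    static void restore_one(struct domain *d, const char *dev)
    {
            if (d->suspended_count++ == 0) {
                    d->powered_off = 1;  /* distrust state left by the boot kernel */
                    printf("%s: reset domain state\n", dev);
            }
            printf("%s: power on + start\n", dev);
    }

    int main(void)
    {
            struct domain d = { 0, 0 };
            restore_one(&d, "dev0");     /* resets the domain state */
            restore_one(&d, "dev1");     /* skips the reset */
            return 0;
    }
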
1062 | /** | 1138 | /** |
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev) | |||
1099 | 1175 | ||
1100 | #define pm_genpd_prepare NULL | 1176 | #define pm_genpd_prepare NULL |
1101 | #define pm_genpd_suspend NULL | 1177 | #define pm_genpd_suspend NULL |
1178 | #define pm_genpd_suspend_late NULL | ||
1102 | #define pm_genpd_suspend_noirq NULL | 1179 | #define pm_genpd_suspend_noirq NULL |
1180 | #define pm_genpd_resume_early NULL | ||
1103 | #define pm_genpd_resume_noirq NULL | 1181 | #define pm_genpd_resume_noirq NULL |
1104 | #define pm_genpd_resume NULL | 1182 | #define pm_genpd_resume NULL |
1105 | #define pm_genpd_freeze NULL | 1183 | #define pm_genpd_freeze NULL |
1184 | #define pm_genpd_freeze_late NULL | ||
1106 | #define pm_genpd_freeze_noirq NULL | 1185 | #define pm_genpd_freeze_noirq NULL |
1186 | #define pm_genpd_thaw_early NULL | ||
1107 | #define pm_genpd_thaw_noirq NULL | 1187 | #define pm_genpd_thaw_noirq NULL |
1108 | #define pm_genpd_thaw NULL | 1188 | #define pm_genpd_thaw NULL |
1109 | #define pm_genpd_restore_noirq NULL | 1189 | #define pm_genpd_restore_noirq NULL |
@@ -1171,6 +1251,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1171 | } | 1251 | } |
1172 | 1252 | ||
1173 | /** | 1253 | /** |
1254 | * __pm_genpd_of_add_device - Add a device to an I/O PM domain. | ||
1255 | * @genpd_node: Device tree node pointer representing the PM domain to | ||
1256 | * which the device is added. | ||
1257 | * @dev: Device to be added. | ||
1258 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
1259 | */ | ||
1260 | int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, | ||
1261 | struct gpd_timing_data *td) | ||
1262 | { | ||
1263 | struct generic_pm_domain *genpd = NULL, *gpd; | ||
1264 | |||
1265 | dev_dbg(dev, "%s()\n", __func__); | ||
1266 | |||
1267 | if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) | ||
1268 | return -EINVAL; | ||
1269 | |||
1270 | mutex_lock(&gpd_list_lock); | ||
1271 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
1272 | if (gpd->of_node == genpd_node) { | ||
1273 | genpd = gpd; | ||
1274 | break; | ||
1275 | } | ||
1276 | } | ||
1277 | mutex_unlock(&gpd_list_lock); | ||
1278 | |||
1279 | if (!genpd) | ||
1280 | return -EINVAL; | ||
1281 | |||
1282 | return __pm_genpd_add_device(genpd, dev, td); | ||
1283 | } | ||
1284 | |||
1285 | /** | ||
1174 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. | 1286 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. |
1175 | * @genpd: PM domain to remove the device from. | 1287 | * @genpd: PM domain to remove the device from. |
1176 | * @dev: Device to be removed. | 1288 | * @dev: Device to be removed. |
@@ -1216,6 +1328,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1216 | } | 1328 | } |
1217 | 1329 | ||
1218 | /** | 1330 | /** |
1331 | * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. | ||
1332 | * @dev: Device to set/unset the flag for. | ||
1333 | * @val: The new value of the device's "always on" flag. | ||
1334 | */ | ||
1335 | void pm_genpd_dev_always_on(struct device *dev, bool val) | ||
1336 | { | ||
1337 | struct pm_subsys_data *psd; | ||
1338 | unsigned long flags; | ||
1339 | |||
1340 | spin_lock_irqsave(&dev->power.lock, flags); | ||
1341 | |||
1342 | psd = dev_to_psd(dev); | ||
1343 | if (psd && psd->domain_data) | ||
1344 | to_gpd_data(psd->domain_data)->always_on = val; | ||
1345 | |||
1346 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
1347 | } | ||
1348 | EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); | ||
1349 | |||
1350 | /** | ||
1219 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1351 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1220 | * @genpd: Master PM domain to add the subdomain to. | 1352 | * @genpd: Master PM domain to add the subdomain to. |
1221 | * @subdomain: Subdomain to be added. | 1353 | * @subdomain: Subdomain to be added. |
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev) | |||
1450 | { | 1582 | { |
1451 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; | 1583 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; |
1452 | 1584 | ||
1453 | return cb ? cb(dev) : pm_generic_suspend_noirq(dev); | 1585 | return cb ? cb(dev) : pm_generic_suspend_late(dev); |
1454 | } | 1586 | } |
1455 | 1587 | ||
1456 | /** | 1588 | /** |
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev) | |||
1461 | { | 1593 | { |
1462 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; | 1594 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; |
1463 | 1595 | ||
1464 | return cb ? cb(dev) : pm_generic_resume_noirq(dev); | 1596 | return cb ? cb(dev) : pm_generic_resume_early(dev); |
1465 | } | 1597 | } |
1466 | 1598 | ||
1467 | /** | 1599 | /** |
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev) | |||
1494 | { | 1626 | { |
1495 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; | 1627 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; |
1496 | 1628 | ||
1497 | return cb ? cb(dev) : pm_generic_freeze_noirq(dev); | 1629 | return cb ? cb(dev) : pm_generic_freeze_late(dev); |
1498 | } | 1630 | } |
1499 | 1631 | ||
1500 | /** | 1632 | /** |
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev) | |||
1505 | { | 1637 | { |
1506 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; | 1638 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; |
1507 | 1639 | ||
1508 | return cb ? cb(dev) : pm_generic_thaw_noirq(dev); | 1640 | return cb ? cb(dev) : pm_generic_thaw_early(dev); |
1509 | } | 1641 | } |
1510 | 1642 | ||
1511 | /** | 1643 | /** |
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1557 | genpd->poweroff_task = NULL; | 1689 | genpd->poweroff_task = NULL; |
1558 | genpd->resume_count = 0; | 1690 | genpd->resume_count = 0; |
1559 | genpd->device_count = 0; | 1691 | genpd->device_count = 0; |
1560 | genpd->suspended_count = 0; | ||
1561 | genpd->max_off_time_ns = -1; | 1692 | genpd->max_off_time_ns = -1; |
1562 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; | 1693 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; |
1563 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; | 1694 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; |
1564 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; | 1695 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; |
1565 | genpd->domain.ops.prepare = pm_genpd_prepare; | 1696 | genpd->domain.ops.prepare = pm_genpd_prepare; |
1566 | genpd->domain.ops.suspend = pm_genpd_suspend; | 1697 | genpd->domain.ops.suspend = pm_genpd_suspend; |
1698 | genpd->domain.ops.suspend_late = pm_genpd_suspend_late; | ||
1567 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; | 1699 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; |
1568 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; | 1700 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; |
1701 | genpd->domain.ops.resume_early = pm_genpd_resume_early; | ||
1569 | genpd->domain.ops.resume = pm_genpd_resume; | 1702 | genpd->domain.ops.resume = pm_genpd_resume; |
1570 | genpd->domain.ops.freeze = pm_genpd_freeze; | 1703 | genpd->domain.ops.freeze = pm_genpd_freeze; |
1704 | genpd->domain.ops.freeze_late = pm_genpd_freeze_late; | ||
1571 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; | 1705 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; |
1572 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; | 1706 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; |
1707 | genpd->domain.ops.thaw_early = pm_genpd_thaw_early; | ||
1573 | genpd->domain.ops.thaw = pm_genpd_thaw; | 1708 | genpd->domain.ops.thaw = pm_genpd_thaw; |
1574 | genpd->domain.ops.poweroff = pm_genpd_suspend; | 1709 | genpd->domain.ops.poweroff = pm_genpd_suspend; |
1710 | genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; | ||
1575 | genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; | 1711 | genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; |
1576 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; | 1712 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; |
1713 | genpd->domain.ops.restore_early = pm_genpd_resume_early; | ||
1577 | genpd->domain.ops.restore = pm_genpd_resume; | 1714 | genpd->domain.ops.restore = pm_genpd_resume; |
1578 | genpd->domain.ops.complete = pm_genpd_complete; | 1715 | genpd->domain.ops.complete = pm_genpd_complete; |
1579 | genpd->dev_ops.save_state = pm_genpd_default_save_state; | 1716 | genpd->dev_ops.save_state = pm_genpd_default_save_state; |
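
To see how the new hooks get wired up in practice, a hedged sketch of a platform registering a single domain; the power_on/power_off bodies are stubs and all names are invented:

    #include <linux/init.h>
    #include <linux/pm_domain.h>

    static int my_island_power_off(struct generic_pm_domain *genpd)
    {
            /* gate clocks / cut power to the island here */
            return 0;
    }

    static int my_island_power_on(struct generic_pm_domain *genpd)
    {
            /* restore power / ungate clocks here */
            return 0;
    }

    static struct generic_pm_domain my_island = {
            .power_off = my_island_power_off,
            .power_on  = my_island_power_on,
    };

    static void __init my_pm_init(void)
    {
            /* NULL governor, domain initially powered on */
            pm_genpd_init(&my_island, NULL, false);
    }
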
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 10bdd793f0bd..d03d290f31c2 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev) | |||
92 | } | 92 | } |
93 | 93 | ||
94 | /** | 94 | /** |
95 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | 95 | * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. |
96 | * @dev: Device to handle. | 96 | * @dev: Device to suspend. |
97 | * @event: PM transition of the system under way. | ||
98 | * @bool: Whether or not this is the "noirq" stage. | ||
99 | * | ||
100 | * Execute the PM callback corresponding to @event provided by the driver of | ||
101 | * @dev, if defined, and return its error code. Return 0 if the callback is | ||
102 | * not present. | ||
103 | */ | 97 | */ |
104 | static int __pm_generic_call(struct device *dev, int event, bool noirq) | 98 | int pm_generic_suspend_noirq(struct device *dev) |
105 | { | 99 | { |
106 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 100 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
107 | int (*callback)(struct device *); | ||
108 | |||
109 | if (!pm) | ||
110 | return 0; | ||
111 | |||
112 | switch (event) { | ||
113 | case PM_EVENT_SUSPEND: | ||
114 | callback = noirq ? pm->suspend_noirq : pm->suspend; | ||
115 | break; | ||
116 | case PM_EVENT_FREEZE: | ||
117 | callback = noirq ? pm->freeze_noirq : pm->freeze; | ||
118 | break; | ||
119 | case PM_EVENT_HIBERNATE: | ||
120 | callback = noirq ? pm->poweroff_noirq : pm->poweroff; | ||
121 | break; | ||
122 | case PM_EVENT_RESUME: | ||
123 | callback = noirq ? pm->resume_noirq : pm->resume; | ||
124 | break; | ||
125 | case PM_EVENT_THAW: | ||
126 | callback = noirq ? pm->thaw_noirq : pm->thaw; | ||
127 | break; | ||
128 | case PM_EVENT_RESTORE: | ||
129 | callback = noirq ? pm->restore_noirq : pm->restore; | ||
130 | break; | ||
131 | default: | ||
132 | callback = NULL; | ||
133 | break; | ||
134 | } | ||
135 | 101 | ||
136 | return callback ? callback(dev) : 0; | 102 | return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0; |
137 | } | 103 | } |
104 | EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); | ||
138 | 105 | ||
139 | /** | 106 | /** |
140 | * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. | 107 | * pm_generic_suspend_late - Generic suspend_late callback for subsystems. |
141 | * @dev: Device to suspend. | 108 | * @dev: Device to suspend. |
142 | */ | 109 | */ |
143 | int pm_generic_suspend_noirq(struct device *dev) | 110 | int pm_generic_suspend_late(struct device *dev) |
144 | { | 111 | { |
145 | return __pm_generic_call(dev, PM_EVENT_SUSPEND, true); | 112 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
113 | |||
114 | return pm && pm->suspend_late ? pm->suspend_late(dev) : 0; | ||
146 | } | 115 | } |
147 | EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); | 116 | EXPORT_SYMBOL_GPL(pm_generic_suspend_late); |
148 | 117 | ||
149 | /** | 118 | /** |
150 | * pm_generic_suspend - Generic suspend callback for subsystems. | 119 | * pm_generic_suspend - Generic suspend callback for subsystems. |
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); | |||
152 | */ | 121 | */ |
153 | int pm_generic_suspend(struct device *dev) | 122 | int pm_generic_suspend(struct device *dev) |
154 | { | 123 | { |
155 | return __pm_generic_call(dev, PM_EVENT_SUSPEND, false); | 124 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
125 | |||
126 | return pm && pm->suspend ? pm->suspend(dev) : 0; | ||
156 | } | 127 | } |
157 | EXPORT_SYMBOL_GPL(pm_generic_suspend); | 128 | EXPORT_SYMBOL_GPL(pm_generic_suspend); |
158 | 129 | ||
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend); | |||
162 | */ | 133 | */ |
163 | int pm_generic_freeze_noirq(struct device *dev) | 134 | int pm_generic_freeze_noirq(struct device *dev) |
164 | { | 135 | { |
165 | return __pm_generic_call(dev, PM_EVENT_FREEZE, true); | 136 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
137 | |||
138 | return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0; | ||
166 | } | 139 | } |
167 | EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); | 140 | EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); |
168 | 141 | ||
169 | /** | 142 | /** |
143 | * pm_generic_freeze_late - Generic freeze_late callback for subsystems. | ||
144 | * @dev: Device to freeze. | ||
145 | */ | ||
146 | int pm_generic_freeze_late(struct device *dev) | ||
147 | { | ||
148 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
149 | |||
150 | return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; | ||
151 | } | ||
152 | EXPORT_SYMBOL_GPL(pm_generic_freeze_late); | ||
153 | |||
154 | /** | ||
170 | * pm_generic_freeze - Generic freeze callback for subsystems. | 155 | * pm_generic_freeze - Generic freeze callback for subsystems. |
171 | * @dev: Device to freeze. | 156 | * @dev: Device to freeze. |
172 | */ | 157 | */ |
173 | int pm_generic_freeze(struct device *dev) | 158 | int pm_generic_freeze(struct device *dev) |
174 | { | 159 | { |
175 | return __pm_generic_call(dev, PM_EVENT_FREEZE, false); | 160 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
161 | |||
162 | return pm && pm->freeze ? pm->freeze(dev) : 0; | ||
176 | } | 163 | } |
177 | EXPORT_SYMBOL_GPL(pm_generic_freeze); | 164 | EXPORT_SYMBOL_GPL(pm_generic_freeze); |
178 | 165 | ||
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze); | |||
182 | */ | 169 | */ |
183 | int pm_generic_poweroff_noirq(struct device *dev) | 170 | int pm_generic_poweroff_noirq(struct device *dev) |
184 | { | 171 | { |
185 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true); | 172 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
173 | |||
174 | return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0; | ||
186 | } | 175 | } |
187 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); | 176 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); |
188 | 177 | ||
189 | /** | 178 | /** |
179 | * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems. | ||
180 | * @dev: Device to handle. | ||
181 | */ | ||
182 | int pm_generic_poweroff_late(struct device *dev) | ||
183 | { | ||
184 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
185 | |||
186 | return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0; | ||
187 | } | ||
188 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_late); | ||
189 | |||
190 | /** | ||
190 | * pm_generic_poweroff - Generic poweroff callback for subsystems. | 191 | * pm_generic_poweroff - Generic poweroff callback for subsystems. |
191 | * @dev: Device to handle. | 192 | * @dev: Device to handle. |
192 | */ | 193 | */ |
193 | int pm_generic_poweroff(struct device *dev) | 194 | int pm_generic_poweroff(struct device *dev) |
194 | { | 195 | { |
195 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false); | 196 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
197 | |||
198 | return pm && pm->poweroff ? pm->poweroff(dev) : 0; | ||
196 | } | 199 | } |
197 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); | 200 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); |
198 | 201 | ||
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff); | |||
202 | */ | 205 | */ |
203 | int pm_generic_thaw_noirq(struct device *dev) | 206 | int pm_generic_thaw_noirq(struct device *dev) |
204 | { | 207 | { |
205 | return __pm_generic_call(dev, PM_EVENT_THAW, true); | 208 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
209 | |||
210 | return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0; | ||
206 | } | 211 | } |
207 | EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); | 212 | EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); |
208 | 213 | ||
209 | /** | 214 | /** |
215 | * pm_generic_thaw_early - Generic thaw_early callback for subsystems. | ||
216 | * @dev: Device to thaw. | ||
217 | */ | ||
218 | int pm_generic_thaw_early(struct device *dev) | ||
219 | { | ||
220 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
221 | |||
222 | return pm && pm->thaw_early ? pm->thaw_early(dev) : 0; | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(pm_generic_thaw_early); | ||
225 | |||
226 | /** | ||
210 | * pm_generic_thaw - Generic thaw callback for subsystems. | 227 | * pm_generic_thaw - Generic thaw callback for subsystems. |
211 | * @dev: Device to thaw. | 228 | * @dev: Device to thaw. |
212 | */ | 229 | */ |
213 | int pm_generic_thaw(struct device *dev) | 230 | int pm_generic_thaw(struct device *dev) |
214 | { | 231 | { |
215 | return __pm_generic_call(dev, PM_EVENT_THAW, false); | 232 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
233 | |||
234 | return pm && pm->thaw ? pm->thaw(dev) : 0; | ||
216 | } | 235 | } |
217 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | 236 | EXPORT_SYMBOL_GPL(pm_generic_thaw); |
218 | 237 | ||
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw); | |||
222 | */ | 241 | */ |
223 | int pm_generic_resume_noirq(struct device *dev) | 242 | int pm_generic_resume_noirq(struct device *dev) |
224 | { | 243 | { |
225 | return __pm_generic_call(dev, PM_EVENT_RESUME, true); | 244 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
245 | |||
246 | return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0; | ||
226 | } | 247 | } |
227 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); | 248 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); |
228 | 249 | ||
229 | /** | 250 | /** |
251 | * pm_generic_resume_early - Generic resume_early callback for subsystems. | ||
252 | * @dev: Device to resume. | ||
253 | */ | ||
254 | int pm_generic_resume_early(struct device *dev) | ||
255 | { | ||
256 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
257 | |||
258 | return pm && pm->resume_early ? pm->resume_early(dev) : 0; | ||
259 | } | ||
260 | EXPORT_SYMBOL_GPL(pm_generic_resume_early); | ||
261 | |||
262 | /** | ||
230 | * pm_generic_resume - Generic resume callback for subsystems. | 263 | * pm_generic_resume - Generic resume callback for subsystems. |
231 | * @dev: Device to resume. | 264 | * @dev: Device to resume. |
232 | */ | 265 | */ |
233 | int pm_generic_resume(struct device *dev) | 266 | int pm_generic_resume(struct device *dev) |
234 | { | 267 | { |
235 | return __pm_generic_call(dev, PM_EVENT_RESUME, false); | 268 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
269 | |||
270 | return pm && pm->resume ? pm->resume(dev) : 0; | ||
236 | } | 271 | } |
237 | EXPORT_SYMBOL_GPL(pm_generic_resume); | 272 | EXPORT_SYMBOL_GPL(pm_generic_resume); |
238 | 273 | ||
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume); | |||
242 | */ | 277 | */ |
243 | int pm_generic_restore_noirq(struct device *dev) | 278 | int pm_generic_restore_noirq(struct device *dev) |
244 | { | 279 | { |
245 | return __pm_generic_call(dev, PM_EVENT_RESTORE, true); | 280 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
281 | |||
282 | return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0; | ||
246 | } | 283 | } |
247 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); | 284 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); |
248 | 285 | ||
249 | /** | 286 | /** |
287 | * pm_generic_restore_early - Generic restore_early callback for subsystems. | ||
288 | * @dev: Device to resume. | ||
289 | */ | ||
290 | int pm_generic_restore_early(struct device *dev) | ||
291 | { | ||
292 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
293 | |||
294 | return pm && pm->restore_early ? pm->restore_early(dev) : 0; | ||
295 | } | ||
296 | EXPORT_SYMBOL_GPL(pm_generic_restore_early); | ||
297 | |||
298 | /** | ||
250 | * pm_generic_restore - Generic restore callback for subsystems. | 299 | * pm_generic_restore - Generic restore callback for subsystems. |
251 | * @dev: Device to restore. | 300 | * @dev: Device to restore. |
252 | */ | 301 | */ |
253 | int pm_generic_restore(struct device *dev) | 302 | int pm_generic_restore(struct device *dev) |
254 | { | 303 | { |
255 | return __pm_generic_call(dev, PM_EVENT_RESTORE, false); | 304 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
305 | |||
306 | return pm && pm->restore ? pm->restore(dev) : 0; | ||
256 | } | 307 | } |
257 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 308 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
258 | 309 | ||
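
With suspend_late/resume_early (and their hibernation counterparts) now first-class phases, a driver can hook them directly from its dev_pm_ops instead of overloading the noirq callbacks. A minimal sketch with stub bodies:

    #include <linux/device.h>
    #include <linux/pm.h>

    static int my_suspend_late(struct device *dev)
    {
            /* runs after the regular suspend phase, with runtime PM disabled,
             * but before interrupts are turned off for the noirq phase */
            return 0;
    }

    static int my_resume_early(struct device *dev)
    {
            /* mirror image: after noirq resume, before regular resume */
            return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
            .suspend_late = my_suspend_late,
            .resume_early = my_resume_early,
            .freeze_late  = my_suspend_late,   /* hibernation analogues */
            .thaw_early   = my_resume_early,
    };
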
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e2cc3d2e0ecc..b462c0e341cb 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *); | |||
47 | LIST_HEAD(dpm_list); | 47 | LIST_HEAD(dpm_list); |
48 | LIST_HEAD(dpm_prepared_list); | 48 | LIST_HEAD(dpm_prepared_list); |
49 | LIST_HEAD(dpm_suspended_list); | 49 | LIST_HEAD(dpm_suspended_list); |
50 | LIST_HEAD(dpm_late_early_list); | ||
50 | LIST_HEAD(dpm_noirq_list); | 51 | LIST_HEAD(dpm_noirq_list); |
51 | 52 | ||
52 | struct suspend_stats suspend_stats; | 53 | struct suspend_stats suspend_stats; |
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) | |||
246 | } | 247 | } |
247 | 248 | ||
248 | /** | 249 | /** |
250 | * pm_late_early_op - Return the PM operation appropriate for given PM event. | ||
251 | * @ops: PM operations to choose from. | ||
252 | * @state: PM transition of the system being carried out. | ||
253 | * | ||
254 | * Return the "late"/"early" phase callback matching @state from @ops, if any. | ||
255 | */ | ||
256 | static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, | ||
257 | pm_message_t state) | ||
258 | { | ||
259 | switch (state.event) { | ||
260 | #ifdef CONFIG_SUSPEND | ||
261 | case PM_EVENT_SUSPEND: | ||
262 | return ops->suspend_late; | ||
263 | case PM_EVENT_RESUME: | ||
264 | return ops->resume_early; | ||
265 | #endif /* CONFIG_SUSPEND */ | ||
266 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
267 | case PM_EVENT_FREEZE: | ||
268 | case PM_EVENT_QUIESCE: | ||
269 | return ops->freeze_late; | ||
270 | case PM_EVENT_HIBERNATE: | ||
271 | return ops->poweroff_late; | ||
272 | case PM_EVENT_THAW: | ||
273 | case PM_EVENT_RECOVER: | ||
274 | return ops->thaw_early; | ||
275 | case PM_EVENT_RESTORE: | ||
276 | return ops->restore_early; | ||
277 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | ||
278 | } | ||
279 | |||
280 | return NULL; | ||
281 | } | ||
282 | |||
283 | /** | ||
249 | * pm_noirq_op - Return the PM operation appropriate for given PM event. | 284 | * pm_noirq_op - Return the PM operation appropriate for given PM event. |
250 | * @ops: PM operations to choose from. | 285 | * @ops: PM operations to choose from. |
251 | * @state: PM transition of the system being carried out. | 286 | * @state: PM transition of the system being carried out. |
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
374 | TRACE_RESUME(0); | 409 | TRACE_RESUME(0); |
375 | 410 | ||
376 | if (dev->pm_domain) { | 411 | if (dev->pm_domain) { |
377 | info = "EARLY power domain "; | 412 | info = "noirq power domain "; |
378 | callback = pm_noirq_op(&dev->pm_domain->ops, state); | 413 | callback = pm_noirq_op(&dev->pm_domain->ops, state); |
379 | } else if (dev->type && dev->type->pm) { | 414 | } else if (dev->type && dev->type->pm) { |
380 | info = "EARLY type "; | 415 | info = "noirq type "; |
381 | callback = pm_noirq_op(dev->type->pm, state); | 416 | callback = pm_noirq_op(dev->type->pm, state); |
382 | } else if (dev->class && dev->class->pm) { | 417 | } else if (dev->class && dev->class->pm) { |
383 | info = "EARLY class "; | 418 | info = "noirq class "; |
384 | callback = pm_noirq_op(dev->class->pm, state); | 419 | callback = pm_noirq_op(dev->class->pm, state); |
385 | } else if (dev->bus && dev->bus->pm) { | 420 | } else if (dev->bus && dev->bus->pm) { |
386 | info = "EARLY bus "; | 421 | info = "noirq bus "; |
387 | callback = pm_noirq_op(dev->bus->pm, state); | 422 | callback = pm_noirq_op(dev->bus->pm, state); |
388 | } | 423 | } |
389 | 424 | ||
390 | if (!callback && dev->driver && dev->driver->pm) { | 425 | if (!callback && dev->driver && dev->driver->pm) { |
391 | info = "EARLY driver "; | 426 | info = "noirq driver "; |
392 | callback = pm_noirq_op(dev->driver->pm, state); | 427 | callback = pm_noirq_op(dev->driver->pm, state); |
393 | } | 428 | } |
394 | 429 | ||
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
399 | } | 434 | } |
400 | 435 | ||
401 | /** | 436 | /** |
402 | * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. | 437 | * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. |
403 | * @state: PM transition of the system being carried out. | 438 | * @state: PM transition of the system being carried out. |
404 | * | 439 | * |
405 | * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and | 440 | * Call the "noirq" resume handlers for all devices in dpm_noirq_list and |
406 | * enable device drivers to receive interrupts. | 441 | * enable device drivers to receive interrupts. |
407 | */ | 442 | */ |
408 | void dpm_resume_noirq(pm_message_t state) | 443 | static void dpm_resume_noirq(pm_message_t state) |
409 | { | 444 | { |
410 | ktime_t starttime = ktime_get(); | 445 | ktime_t starttime = ktime_get(); |
411 | 446 | ||
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state) | |||
415 | int error; | 450 | int error; |
416 | 451 | ||
417 | get_device(dev); | 452 | get_device(dev); |
418 | list_move_tail(&dev->power.entry, &dpm_suspended_list); | 453 | list_move_tail(&dev->power.entry, &dpm_late_early_list); |
419 | mutex_unlock(&dpm_list_mtx); | 454 | mutex_unlock(&dpm_list_mtx); |
420 | 455 | ||
421 | error = device_resume_noirq(dev, state); | 456 | error = device_resume_noirq(dev, state); |
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state) | |||
423 | suspend_stats.failed_resume_noirq++; | 458 | suspend_stats.failed_resume_noirq++; |
424 | dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); | 459 | dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); |
425 | dpm_save_failed_dev(dev_name(dev)); | 460 | dpm_save_failed_dev(dev_name(dev)); |
461 | pm_dev_err(dev, state, " noirq", error); | ||
462 | } | ||
463 | |||
464 | mutex_lock(&dpm_list_mtx); | ||
465 | put_device(dev); | ||
466 | } | ||
467 | mutex_unlock(&dpm_list_mtx); | ||
468 | dpm_show_time(starttime, state, "noirq"); | ||
469 | resume_device_irqs(); | ||
470 | } | ||
471 | |||
472 | /** | ||
473 | * device_resume_early - Execute an "early resume" callback for given device. | ||
474 | * @dev: Device to handle. | ||
475 | * @state: PM transition of the system being carried out. | ||
476 | * | ||
477 | * Runtime PM is disabled for @dev while this function is being executed. | ||
478 | */ | ||
479 | static int device_resume_early(struct device *dev, pm_message_t state) | ||
480 | { | ||
481 | pm_callback_t callback = NULL; | ||
482 | char *info = NULL; | ||
483 | int error = 0; | ||
484 | |||
485 | TRACE_DEVICE(dev); | ||
486 | TRACE_RESUME(0); | ||
487 | |||
488 | if (dev->pm_domain) { | ||
489 | info = "early power domain "; | ||
490 | callback = pm_late_early_op(&dev->pm_domain->ops, state); | ||
491 | } else if (dev->type && dev->type->pm) { | ||
492 | info = "early type "; | ||
493 | callback = pm_late_early_op(dev->type->pm, state); | ||
494 | } else if (dev->class && dev->class->pm) { | ||
495 | info = "early class "; | ||
496 | callback = pm_late_early_op(dev->class->pm, state); | ||
497 | } else if (dev->bus && dev->bus->pm) { | ||
498 | info = "early bus "; | ||
499 | callback = pm_late_early_op(dev->bus->pm, state); | ||
500 | } | ||
501 | |||
502 | if (!callback && dev->driver && dev->driver->pm) { | ||
503 | info = "early driver "; | ||
504 | callback = pm_late_early_op(dev->driver->pm, state); | ||
505 | } | ||
506 | |||
507 | error = dpm_run_callback(callback, dev, state, info); | ||
508 | |||
509 | TRACE_RESUME(error); | ||
510 | return error; | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * dpm_resume_early - Execute "early resume" callbacks for all devices. | ||
515 | * @state: PM transition of the system being carried out. | ||
516 | */ | ||
517 | static void dpm_resume_early(pm_message_t state) | ||
518 | { | ||
519 | ktime_t starttime = ktime_get(); | ||
520 | |||
521 | mutex_lock(&dpm_list_mtx); | ||
522 | while (!list_empty(&dpm_late_early_list)) { | ||
523 | struct device *dev = to_device(dpm_late_early_list.next); | ||
524 | int error; | ||
525 | |||
526 | get_device(dev); | ||
527 | list_move_tail(&dev->power.entry, &dpm_suspended_list); | ||
528 | mutex_unlock(&dpm_list_mtx); | ||
529 | |||
530 | error = device_resume_early(dev, state); | ||
531 | if (error) { | ||
532 | suspend_stats.failed_resume_early++; | ||
533 | dpm_save_failed_step(SUSPEND_RESUME_EARLY); | ||
534 | dpm_save_failed_dev(dev_name(dev)); | ||
426 | pm_dev_err(dev, state, " early", error); | 535 | pm_dev_err(dev, state, " early", error); |
427 | } | 536 | } |
428 | 537 | ||
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state) | |||
431 | } | 540 | } |
432 | mutex_unlock(&dpm_list_mtx); | 541 | mutex_unlock(&dpm_list_mtx); |
433 | dpm_show_time(starttime, state, "early"); | 542 | dpm_show_time(starttime, state, "early"); |
434 | resume_device_irqs(); | ||
435 | } | 543 | } |
436 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 544 | |
545 | /** | ||
546 | * dpm_resume_start - Execute "noirq" and "early" device callbacks. | ||
547 | * @state: PM transition of the system being carried out. | ||
548 | */ | ||
549 | void dpm_resume_start(pm_message_t state) | ||
550 | { | ||
551 | dpm_resume_noirq(state); | ||
552 | dpm_resume_early(state); | ||
553 | } | ||
554 | EXPORT_SYMBOL_GPL(dpm_resume_start); | ||
437 | 555 | ||
438 | /** | 556 | /** |
439 | * device_resume - Execute "resume" callbacks for given device. | 557 | * device_resume - Execute "resume" callbacks for given device. |
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
716 | char *info = NULL; | 834 | char *info = NULL; |
717 | 835 | ||
718 | if (dev->pm_domain) { | 836 | if (dev->pm_domain) { |
719 | info = "LATE power domain "; | 837 | info = "noirq power domain "; |
720 | callback = pm_noirq_op(&dev->pm_domain->ops, state); | 838 | callback = pm_noirq_op(&dev->pm_domain->ops, state); |
721 | } else if (dev->type && dev->type->pm) { | 839 | } else if (dev->type && dev->type->pm) { |
722 | info = "LATE type "; | 840 | info = "noirq type "; |
723 | callback = pm_noirq_op(dev->type->pm, state); | 841 | callback = pm_noirq_op(dev->type->pm, state); |
724 | } else if (dev->class && dev->class->pm) { | 842 | } else if (dev->class && dev->class->pm) { |
725 | info = "LATE class "; | 843 | info = "noirq class "; |
726 | callback = pm_noirq_op(dev->class->pm, state); | 844 | callback = pm_noirq_op(dev->class->pm, state); |
727 | } else if (dev->bus && dev->bus->pm) { | 845 | } else if (dev->bus && dev->bus->pm) { |
728 | info = "LATE bus "; | 846 | info = "noirq bus "; |
729 | callback = pm_noirq_op(dev->bus->pm, state); | 847 | callback = pm_noirq_op(dev->bus->pm, state); |
730 | } | 848 | } |
731 | 849 | ||
732 | if (!callback && dev->driver && dev->driver->pm) { | 850 | if (!callback && dev->driver && dev->driver->pm) { |
733 | info = "LATE driver "; | 851 | info = "noirq driver "; |
734 | callback = pm_noirq_op(dev->driver->pm, state); | 852 | callback = pm_noirq_op(dev->driver->pm, state); |
735 | } | 853 | } |
736 | 854 | ||
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
738 | } | 856 | } |
739 | 857 | ||
740 | /** | 858 | /** |
741 | * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. | 859 | * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. |
742 | * @state: PM transition of the system being carried out. | 860 | * @state: PM transition of the system being carried out. |
743 | * | 861 | * |
744 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend | 862 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend |
745 | * handlers for all non-sysdev devices. | 863 | * handlers for all non-sysdev devices. |
746 | */ | 864 | */ |
747 | int dpm_suspend_noirq(pm_message_t state) | 865 | static int dpm_suspend_noirq(pm_message_t state) |
748 | { | 866 | { |
749 | ktime_t starttime = ktime_get(); | 867 | ktime_t starttime = ktime_get(); |
750 | int error = 0; | 868 | int error = 0; |
751 | 869 | ||
752 | suspend_device_irqs(); | 870 | suspend_device_irqs(); |
753 | mutex_lock(&dpm_list_mtx); | 871 | mutex_lock(&dpm_list_mtx); |
754 | while (!list_empty(&dpm_suspended_list)) { | 872 | while (!list_empty(&dpm_late_early_list)) { |
755 | struct device *dev = to_device(dpm_suspended_list.prev); | 873 | struct device *dev = to_device(dpm_late_early_list.prev); |
756 | 874 | ||
757 | get_device(dev); | 875 | get_device(dev); |
758 | mutex_unlock(&dpm_list_mtx); | 876 | mutex_unlock(&dpm_list_mtx); |
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state) | |||
761 | 879 | ||
762 | mutex_lock(&dpm_list_mtx); | 880 | mutex_lock(&dpm_list_mtx); |
763 | if (error) { | 881 | if (error) { |
764 | pm_dev_err(dev, state, " late", error); | 882 | pm_dev_err(dev, state, " noirq", error); |
765 | suspend_stats.failed_suspend_noirq++; | 883 | suspend_stats.failed_suspend_noirq++; |
766 | dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); | 884 | dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); |
767 | dpm_save_failed_dev(dev_name(dev)); | 885 | dpm_save_failed_dev(dev_name(dev)); |
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state) | |||
776 | if (error) | 894 | if (error) |
777 | dpm_resume_noirq(resume_event(state)); | 895 | dpm_resume_noirq(resume_event(state)); |
778 | else | 896 | else |
897 | dpm_show_time(starttime, state, "noirq"); | ||
898 | return error; | ||
899 | } | ||
900 | |||
901 | /** | ||
902 | * device_suspend_late - Execute a "late suspend" callback for given device. | ||
903 | * @dev: Device to handle. | ||
904 | * @state: PM transition of the system being carried out. | ||
905 | * | ||
906 | * Runtime PM is disabled for @dev while this function is being executed. | ||
907 | */ | ||
908 | static int device_suspend_late(struct device *dev, pm_message_t state) | ||
909 | { | ||
910 | pm_callback_t callback = NULL; | ||
911 | char *info = NULL; | ||
912 | |||
913 | if (dev->pm_domain) { | ||
914 | info = "late power domain "; | ||
915 | callback = pm_late_early_op(&dev->pm_domain->ops, state); | ||
916 | } else if (dev->type && dev->type->pm) { | ||
917 | info = "late type "; | ||
918 | callback = pm_late_early_op(dev->type->pm, state); | ||
919 | } else if (dev->class && dev->class->pm) { | ||
920 | info = "late class "; | ||
921 | callback = pm_late_early_op(dev->class->pm, state); | ||
922 | } else if (dev->bus && dev->bus->pm) { | ||
923 | info = "late bus "; | ||
924 | callback = pm_late_early_op(dev->bus->pm, state); | ||
925 | } | ||
926 | |||
927 | if (!callback && dev->driver && dev->driver->pm) { | ||
928 | info = "late driver "; | ||
929 | callback = pm_late_early_op(dev->driver->pm, state); | ||
930 | } | ||
931 | |||
932 | return dpm_run_callback(callback, dev, state, info); | ||
933 | } | ||
934 | |||
935 | /** | ||
936 | * dpm_suspend_late - Execute "late suspend" callbacks for all devices. | ||
937 | * @state: PM transition of the system being carried out. | ||
938 | */ | ||
939 | static int dpm_suspend_late(pm_message_t state) | ||
940 | { | ||
941 | ktime_t starttime = ktime_get(); | ||
942 | int error = 0; | ||
943 | |||
944 | mutex_lock(&dpm_list_mtx); | ||
945 | while (!list_empty(&dpm_suspended_list)) { | ||
946 | struct device *dev = to_device(dpm_suspended_list.prev); | ||
947 | |||
948 | get_device(dev); | ||
949 | mutex_unlock(&dpm_list_mtx); | ||
950 | |||
951 | error = device_suspend_late(dev, state); | ||
952 | |||
953 | mutex_lock(&dpm_list_mtx); | ||
954 | if (error) { | ||
955 | pm_dev_err(dev, state, " late", error); | ||
956 | suspend_stats.failed_suspend_late++; | ||
957 | dpm_save_failed_step(SUSPEND_SUSPEND_LATE); | ||
958 | dpm_save_failed_dev(dev_name(dev)); | ||
959 | put_device(dev); | ||
960 | break; | ||
961 | } | ||
962 | if (!list_empty(&dev->power.entry)) | ||
963 | list_move(&dev->power.entry, &dpm_late_early_list); | ||
964 | put_device(dev); | ||
965 | } | ||
966 | mutex_unlock(&dpm_list_mtx); | ||
967 | if (error) | ||
968 | dpm_resume_early(resume_event(state)); | ||
969 | else | ||
779 | dpm_show_time(starttime, state, "late"); | 970 | dpm_show_time(starttime, state, "late"); |
971 | |||
780 | return error; | 972 | return error; |
781 | } | 973 | } |
782 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 974 | |
975 | /** | ||
976 | * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. | ||
977 | * @state: PM transition of the system being carried out. | ||
978 | */ | ||
979 | int dpm_suspend_end(pm_message_t state) | ||
980 | { | ||
981 | int error = dpm_suspend_late(state); | ||
982 | |||
983 | return error ? : dpm_suspend_noirq(state); | ||
984 | } | ||
985 | EXPORT_SYMBOL_GPL(dpm_suspend_end); | ||
783 | 986 | ||
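
The per-phase entry points are now static; callers outside the PM core use the combined helpers instead. A hedged sketch of how a sleep sequence pairs them (the function is invented and error handling is trimmed):

    #include <linux/pm.h>

    /* Hypothetical core path: "late" then "noirq" on the way down,
     * "noirq" then "early" on the way back up. */
    static int my_enter_sleep(void)
    {
            int error = dpm_suspend_end(PMSG_SUSPEND);

            if (error)
                    return error;

            /* ... platform actually enters the sleep state here ... */

            dpm_resume_start(PMSG_RESUME);
            return 0;
    }
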
784 | /** | 987 | /** |
785 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | 988 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. |
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 9bf62323aaf3..eeb4bff9505c 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev); | |||
71 | extern void rpm_sysfs_remove(struct device *dev); | 71 | extern void rpm_sysfs_remove(struct device *dev); |
72 | extern int wakeup_sysfs_add(struct device *dev); | 72 | extern int wakeup_sysfs_add(struct device *dev); |
73 | extern void wakeup_sysfs_remove(struct device *dev); | 73 | extern void wakeup_sysfs_remove(struct device *dev); |
74 | extern int pm_qos_sysfs_add(struct device *dev); | ||
75 | extern void pm_qos_sysfs_remove(struct device *dev); | ||
74 | 76 | ||
75 | #else /* CONFIG_PM */ | 77 | #else /* CONFIG_PM */ |
76 | 78 | ||
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {} | |||
79 | static inline void rpm_sysfs_remove(struct device *dev) {} | 81 | static inline void rpm_sysfs_remove(struct device *dev) {} |
80 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } | 82 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } |
81 | static inline void wakeup_sysfs_remove(struct device *dev) {} | 83 | static inline void wakeup_sysfs_remove(struct device *dev) {} |
84 | static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } | ||
85 | static inline void pm_qos_sysfs_remove(struct device *dev) {} | ||
82 | 86 | ||
83 | #endif | 87 | #endif |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index c5d358837461..71855570922d 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/mutex.h> | 41 | #include <linux/mutex.h> |
42 | #include <linux/export.h> | 42 | #include <linux/export.h> |
43 | 43 | ||
44 | #include "power.h" | ||
44 | 45 | ||
45 | static DEFINE_MUTEX(dev_pm_qos_mtx); | 46 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
46 | 47 | ||
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
166 | struct dev_pm_qos_request *req, *tmp; | 167 | struct dev_pm_qos_request *req, *tmp; |
167 | struct pm_qos_constraints *c; | 168 | struct pm_qos_constraints *c; |
168 | 169 | ||
170 | /* | ||
171 | * If the device's PM QoS resume latency limit has been exposed to user | ||
172 | * space, it has to be hidden at this point. | ||
173 | */ | ||
174 | dev_pm_qos_hide_latency_limit(dev); | ||
175 | |||
169 | mutex_lock(&dev_pm_qos_mtx); | 176 | mutex_lock(&dev_pm_qos_mtx); |
170 | 177 | ||
171 | dev->power.power_state = PMSG_INVALID; | 178 | dev->power.power_state = PMSG_INVALID; |
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
445 | return error; | 452 | return error; |
446 | } | 453 | } |
447 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); | 454 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); |
455 | |||
456 | #ifdef CONFIG_PM_RUNTIME | ||
457 | static void __dev_pm_qos_drop_user_request(struct device *dev) | ||
458 | { | ||
459 | dev_pm_qos_remove_request(dev->power.pq_req); | ||
460 | dev->power.pq_req = NULL; | ||
461 | } | ||
462 | |||
463 | /** | ||
464 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | ||
465 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | ||
466 | * @value: Initial value of the latency limit. | ||
467 | */ | ||
468 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | ||
469 | { | ||
470 | struct dev_pm_qos_request *req; | ||
471 | int ret; | ||
472 | |||
473 | if (!device_is_registered(dev) || value < 0) | ||
474 | return -EINVAL; | ||
475 | |||
476 | if (dev->power.pq_req) | ||
477 | return -EEXIST; | ||
478 | |||
479 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
480 | if (!req) | ||
481 | return -ENOMEM; | ||
482 | |||
483 | ret = dev_pm_qos_add_request(dev, req, value); | ||
484 | if (ret < 0) | ||
485 | return ret; | ||
486 | |||
487 | dev->power.pq_req = req; | ||
488 | ret = pm_qos_sysfs_add(dev); | ||
489 | if (ret) | ||
490 | __dev_pm_qos_drop_user_request(dev); | ||
491 | |||
492 | return ret; | ||
493 | } | ||
494 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | ||
495 | |||
496 | /** | ||
497 | * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. | ||
498 | * @dev: Device whose PM QoS latency limit is to be hidden from user space. | ||
499 | */ | ||
500 | void dev_pm_qos_hide_latency_limit(struct device *dev) | ||
501 | { | ||
502 | if (dev->power.pq_req) { | ||
503 | pm_qos_sysfs_remove(dev); | ||
504 | __dev_pm_qos_drop_user_request(dev); | ||
505 | } | ||
506 | } | ||
507 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | ||
508 | #endif /* CONFIG_PM_RUNTIME */ | ||
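
Drivers opt in per device; the sh_mmcif and tmio_mmc patches in this merge do exactly this. A hedged sketch (the driver names and the 100 us initial value are illustrative):

    #include <linux/platform_device.h>
    #include <linux/pm_qos.h>

    static int my_probe(struct platform_device *pdev)
    {
            /* ... ordinary probe work ... */

            /* Create the pm_qos_resume_latency_us attribute for this device. */
            if (dev_pm_qos_expose_latency_limit(&pdev->dev, 100))
                    dev_warn(&pdev->dev, "resume latency limit not exposed\n");
            return 0;
    }

    static int my_remove(struct platform_device *pdev)
    {
            dev_pm_qos_hide_latency_limit(&pdev->dev); /* safe even if not exposed */
            return 0;
    }
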
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index adf41be0ea66..95c12f6cb5b9 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
7 | #include <linux/export.h> | 7 | #include <linux/export.h> |
8 | #include <linux/pm_qos.h> | ||
8 | #include <linux/pm_runtime.h> | 9 | #include <linux/pm_runtime.h> |
9 | #include <linux/atomic.h> | 10 | #include <linux/atomic.h> |
10 | #include <linux/jiffies.h> | 11 | #include <linux/jiffies.h> |
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, | |||
217 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | 218 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, |
218 | autosuspend_delay_ms_store); | 219 | autosuspend_delay_ms_store); |
219 | 220 | ||
221 | static ssize_t pm_qos_latency_show(struct device *dev, | ||
222 | struct device_attribute *attr, char *buf) | ||
223 | { | ||
224 | return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); | ||
225 | } | ||
226 | |||
227 | static ssize_t pm_qos_latency_store(struct device *dev, | ||
228 | struct device_attribute *attr, | ||
229 | const char *buf, size_t n) | ||
230 | { | ||
231 | s32 value; | ||
232 | int ret; | ||
233 | |||
234 | if (kstrtos32(buf, 0, &value)) | ||
235 | return -EINVAL; | ||
236 | |||
237 | if (value < 0) | ||
238 | return -EINVAL; | ||
239 | |||
240 | ret = dev_pm_qos_update_request(dev->power.pq_req, value); | ||
241 | return ret < 0 ? ret : n; | ||
242 | } | ||
243 | |||
244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, | ||
245 | pm_qos_latency_show, pm_qos_latency_store); | ||
220 | #endif /* CONFIG_PM_RUNTIME */ | 246 | #endif /* CONFIG_PM_RUNTIME */ |
221 | 247 | ||
222 | #ifdef CONFIG_PM_SLEEP | 248 | #ifdef CONFIG_PM_SLEEP |
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = { | |||
490 | .attrs = runtime_attrs, | 516 | .attrs = runtime_attrs, |
491 | }; | 517 | }; |
492 | 518 | ||
519 | static struct attribute *pm_qos_attrs[] = { | ||
520 | #ifdef CONFIG_PM_RUNTIME | ||
521 | &dev_attr_pm_qos_resume_latency_us.attr, | ||
522 | #endif /* CONFIG_PM_RUNTIME */ | ||
523 | NULL, | ||
524 | }; | ||
525 | static struct attribute_group pm_qos_attr_group = { | ||
526 | .name = power_group_name, | ||
527 | .attrs = pm_qos_attrs, | ||
528 | }; | ||
529 | |||
493 | int dpm_sysfs_add(struct device *dev) | 530 | int dpm_sysfs_add(struct device *dev) |
494 | { | 531 | { |
495 | int rc; | 532 | int rc; |
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev) | |||
530 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | 567 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
531 | } | 568 | } |
532 | 569 | ||
570 | int pm_qos_sysfs_add(struct device *dev) | ||
571 | { | ||
572 | return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); | ||
573 | } | ||
574 | |||
575 | void pm_qos_sysfs_remove(struct device *dev) | ||
576 | { | ||
577 | sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); | ||
578 | } | ||
579 | |||
533 | void rpm_sysfs_remove(struct device *dev) | 580 | void rpm_sysfs_remove(struct device *dev) |
534 | { | 581 | { |
535 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); | 582 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); |
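
From user space the new attribute behaves like any other power/ file; a hypothetical C snippet (the device path is a placeholder, not one named by this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *attr =
                "/sys/devices/platform/mydev/power/pm_qos_resume_latency_us";
            char buf[32];
            ssize_t n;
            int fd = open(attr, O_RDWR);

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    printf("current limit: %s", buf);  /* value ends in '\n' */
            }
            lseek(fd, 0, SEEK_SET);
            (void)write(fd, "100\n", 4);  /* request <= 100 us resume latency */
            close(fd);
            return 0;
    }
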
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index caf995fb774b..2a3e581b8dcd 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data); | |||
53 | static LIST_HEAD(wakeup_sources); | 53 | static LIST_HEAD(wakeup_sources); |
54 | 54 | ||
55 | /** | 55 | /** |
56 | * wakeup_source_prepare - Prepare a new wakeup source for initialization. | ||
57 | * @ws: Wakeup source to prepare. | ||
58 | * @name: Pointer to the name of the new wakeup source. | ||
59 | * | ||
60 | * Callers must ensure that the @name string won't be freed when @ws is still in | ||
61 | * use. | ||
62 | */ | ||
63 | void wakeup_source_prepare(struct wakeup_source *ws, const char *name) | ||
64 | { | ||
65 | if (ws) { | ||
66 | memset(ws, 0, sizeof(*ws)); | ||
67 | ws->name = name; | ||
68 | } | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(wakeup_source_prepare); | ||
71 | |||
72 | /** | ||
56 | * wakeup_source_create - Create a struct wakeup_source object. | 73 | * wakeup_source_create - Create a struct wakeup_source object. |
57 | * @name: Name of the new wakeup source. | 74 | * @name: Name of the new wakeup source. |
58 | */ | 75 | */ |
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name) | |||
60 | { | 77 | { |
61 | struct wakeup_source *ws; | 78 | struct wakeup_source *ws; |
62 | 79 | ||
63 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); | 80 | ws = kmalloc(sizeof(*ws), GFP_KERNEL); |
64 | if (!ws) | 81 | if (!ws) |
65 | return NULL; | 82 | return NULL; |
66 | 83 | ||
67 | spin_lock_init(&ws->lock); | 84 | wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); |
68 | if (name) | ||
69 | ws->name = kstrdup(name, GFP_KERNEL); | ||
70 | |||
71 | return ws; | 85 | return ws; |
72 | } | 86 | } |
73 | EXPORT_SYMBOL_GPL(wakeup_source_create); | 87 | EXPORT_SYMBOL_GPL(wakeup_source_create); |
74 | 88 | ||
75 | /** | 89 | /** |
90 | * wakeup_source_drop - Prepare a struct wakeup_source object for destruction. | ||
91 | * @ws: Wakeup source to prepare for destruction. | ||
92 | * | ||
93 | * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never | ||
94 | * be run in parallel with this function for the same wakeup source object. | ||
95 | */ | ||
96 | void wakeup_source_drop(struct wakeup_source *ws) | ||
97 | { | ||
98 | if (!ws) | ||
99 | return; | ||
100 | |||
101 | del_timer_sync(&ws->timer); | ||
102 | __pm_relax(ws); | ||
103 | } | ||
104 | EXPORT_SYMBOL_GPL(wakeup_source_drop); | ||
105 | |||
106 | /** | ||
76 | * wakeup_source_destroy - Destroy a struct wakeup_source object. | 107 | * wakeup_source_destroy - Destroy a struct wakeup_source object. |
77 | * @ws: Wakeup source to destroy. | 108 | * @ws: Wakeup source to destroy. |
109 | * | ||
110 | * Use only for wakeup source objects created with wakeup_source_create(). | ||
78 | */ | 111 | */ |
79 | void wakeup_source_destroy(struct wakeup_source *ws) | 112 | void wakeup_source_destroy(struct wakeup_source *ws) |
80 | { | 113 | { |
81 | if (!ws) | 114 | if (!ws) |
82 | return; | 115 | return; |
83 | 116 | ||
84 | spin_lock_irq(&ws->lock); | 117 | wakeup_source_drop(ws); |
85 | while (ws->active) { | ||
86 | spin_unlock_irq(&ws->lock); | ||
87 | |||
88 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
89 | |||
90 | spin_lock_irq(&ws->lock); | ||
91 | } | ||
92 | spin_unlock_irq(&ws->lock); | ||
93 | |||
94 | kfree(ws->name); | 118 | kfree(ws->name); |
95 | kfree(ws); | 119 | kfree(ws); |
96 | } | 120 | } |
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws) | |||
105 | if (WARN_ON(!ws)) | 129 | if (WARN_ON(!ws)) |
106 | return; | 130 | return; |
107 | 131 | ||
132 | spin_lock_init(&ws->lock); | ||
108 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); | 133 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); |
109 | ws->active = false; | 134 | ws->active = false; |
110 | 135 | ||
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register); | |||
152 | */ | 177 | */ |
153 | void wakeup_source_unregister(struct wakeup_source *ws) | 178 | void wakeup_source_unregister(struct wakeup_source *ws) |
154 | { | 179 | { |
155 | wakeup_source_remove(ws); | 180 | if (ws) { |
156 | wakeup_source_destroy(ws); | 181 | wakeup_source_remove(ws); |
182 | wakeup_source_destroy(ws); | ||
183 | } | ||
157 | } | 184 | } |
158 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); | 185 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); |
159 | 186 | ||
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws) | |||
349 | { | 376 | { |
350 | ws->active = true; | 377 | ws->active = true; |
351 | ws->active_count++; | 378 | ws->active_count++; |
352 | ws->timer_expires = jiffies; | ||
353 | ws->last_time = ktime_get(); | 379 | ws->last_time = ktime_get(); |
354 | 380 | ||
355 | /* Increment the counter of events in progress. */ | 381 | /* Increment the counter of events in progress. */ |
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws) | |||
370 | return; | 396 | return; |
371 | 397 | ||
372 | spin_lock_irqsave(&ws->lock, flags); | 398 | spin_lock_irqsave(&ws->lock, flags); |
399 | |||
373 | ws->event_count++; | 400 | ws->event_count++; |
374 | if (!ws->active) | 401 | if (!ws->active) |
375 | wakeup_source_activate(ws); | 402 | wakeup_source_activate(ws); |
403 | |||
404 | del_timer(&ws->timer); | ||
405 | ws->timer_expires = 0; | ||
406 | |||
376 | spin_unlock_irqrestore(&ws->lock, flags); | 407 | spin_unlock_irqrestore(&ws->lock, flags); |
377 | } | 408 | } |
378 | EXPORT_SYMBOL_GPL(__pm_stay_awake); | 409 | EXPORT_SYMBOL_GPL(__pm_stay_awake); |
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) | |||
438 | ws->max_time = duration; | 469 | ws->max_time = duration; |
439 | 470 | ||
440 | del_timer(&ws->timer); | 471 | del_timer(&ws->timer); |
472 | ws->timer_expires = 0; | ||
441 | 473 | ||
442 | /* | 474 | /* |
443 | * Increment the counter of registered wakeup events and decrement the | 475 | * Increment the counter of registered wakeup events and decrement the |
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax); | |||
492 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. | 524 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. |
493 | * @data: Address of the wakeup source object associated with the event source. | 525 | * @data: Address of the wakeup source object associated with the event source. |
494 | * | 526 | * |
495 | * Call __pm_relax() for the wakeup source whose address is stored in @data. | 527 | * Call wakeup_source_deactivate() for the wakeup source whose address is stored |
528 | * in @data if it is currently active and its timer has not been canceled and | ||
529 | * the expiration time of the timer is not in the future. | ||
496 | */ | 530 | */ |
497 | static void pm_wakeup_timer_fn(unsigned long data) | 531 | static void pm_wakeup_timer_fn(unsigned long data) |
498 | { | 532 | { |
499 | __pm_relax((struct wakeup_source *)data); | 533 | struct wakeup_source *ws = (struct wakeup_source *)data; |
534 | unsigned long flags; | ||
535 | |||
536 | spin_lock_irqsave(&ws->lock, flags); | ||
537 | |||
538 | if (ws->active && ws->timer_expires | ||
539 | && time_after_eq(jiffies, ws->timer_expires)) | ||
540 | wakeup_source_deactivate(ws); | ||
541 | |||
542 | spin_unlock_irqrestore(&ws->lock, flags); | ||
500 | } | 543 | } |
501 | 544 | ||
502 | /** | 545 | /** |
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | |||
534 | if (!expires) | 577 | if (!expires) |
535 | expires = 1; | 578 | expires = 1; |
536 | 579 | ||
537 | if (time_after(expires, ws->timer_expires)) { | 580 | if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { |
538 | mod_timer(&ws->timer, expires); | 581 | mod_timer(&ws->timer, expires); |
539 | ws->timer_expires = expires; | 582 | ws->timer_expires = expires; |
540 | } | 583 | } |
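
Taken together, the wakeup.c changes split creation and destruction into prepare/add and remove/drop pairs, so statically allocated wakeup sources no longer need wakeup_source_create(), and destruction no longer polls in a retry loop. A minimal sketch of the intended lifecycle for a static object (my_ws and the function names are illustrative, not part of this patch):

#include <linux/pm_wakeup.h>

static struct wakeup_source my_ws;

static void my_driver_setup(void)
{
	/* The name string must outlive my_ws; a string literal is safe. */
	wakeup_source_prepare(&my_ws, "my_driver");
	wakeup_source_add(&my_ws);
}

static void my_driver_teardown(void)
{
	wakeup_source_remove(&my_ws);
	/*
	 * Only safe once __pm_stay_awake()/__pm_wakeup_event() can no
	 * longer run on my_ws: this deletes the timer and deactivates
	 * the source instead of sleeping until it goes idle.
	 */
	wakeup_source_drop(&my_ws);
}

Dynamically allocated sources keep using wakeup_source_register()/wakeup_source_unregister(); the latter now tolerates a NULL pointer, as the hunk above shows.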
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index ca09bc421ddb..32fe9ef5cc5c 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/sh_timer.h> | 32 | #include <linux/sh_timer.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/pm_domain.h> | ||
35 | 36 | ||
36 | struct sh_cmt_priv { | 37 | struct sh_cmt_priv { |
37 | void __iomem *mapbase; | 38 | void __iomem *mapbase; |
@@ -689,6 +690,9 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) | |||
689 | struct sh_cmt_priv *p = platform_get_drvdata(pdev); | 690 | struct sh_cmt_priv *p = platform_get_drvdata(pdev); |
690 | int ret; | 691 | int ret; |
691 | 692 | ||
693 | if (!is_early_platform_device(pdev)) | ||
694 | pm_genpd_dev_always_on(&pdev->dev, true); | ||
695 | |||
692 | if (p) { | 696 | if (p) { |
693 | dev_info(&pdev->dev, "kept as earlytimer\n"); | 697 | dev_info(&pdev->dev, "kept as earlytimer\n"); |
694 | return 0; | 698 | return 0; |
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index db8d5955bad4..a2172f690418 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/sh_timer.h> | 31 | #include <linux/sh_timer.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/pm_domain.h> | ||
34 | 35 | ||
35 | struct sh_mtu2_priv { | 36 | struct sh_mtu2_priv { |
36 | void __iomem *mapbase; | 37 | void __iomem *mapbase; |
@@ -306,6 +307,9 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) | |||
306 | struct sh_mtu2_priv *p = platform_get_drvdata(pdev); | 307 | struct sh_mtu2_priv *p = platform_get_drvdata(pdev); |
307 | int ret; | 308 | int ret; |
308 | 309 | ||
310 | if (!is_early_platform_device(pdev)) | ||
311 | pm_genpd_dev_always_on(&pdev->dev, true); | ||
312 | |||
309 | if (p) { | 313 | if (p) { |
310 | dev_info(&pdev->dev, "kept as earlytimer\n"); | 314 | dev_info(&pdev->dev, "kept as earlytimer\n"); |
311 | return 0; | 315 | return 0; |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 079e96ad44e8..97f54b634be4 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/sh_timer.h> | 32 | #include <linux/sh_timer.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/pm_domain.h> | ||
35 | 36 | ||
36 | struct sh_tmu_priv { | 37 | struct sh_tmu_priv { |
37 | void __iomem *mapbase; | 38 | void __iomem *mapbase; |
@@ -410,6 +411,9 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) | |||
410 | struct sh_tmu_priv *p = platform_get_drvdata(pdev); | 411 | struct sh_tmu_priv *p = platform_get_drvdata(pdev); |
411 | int ret; | 412 | int ret; |
412 | 413 | ||
414 | if (!is_early_platform_device(pdev)) | ||
415 | pm_genpd_dev_always_on(&pdev->dev, true); | ||
416 | |||
413 | if (p) { | 417 | if (p) { |
414 | dev_info(&pdev->dev, "kept as earlytimer\n"); | 418 | dev_info(&pdev->dev, "kept as earlytimer\n"); |
415 | return 0; | 419 | return 0; |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index c189b82f5ece..70c31d43fff3 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -83,6 +83,7 @@ int update_devfreq(struct devfreq *devfreq) | |||
83 | { | 83 | { |
84 | unsigned long freq; | 84 | unsigned long freq; |
85 | int err = 0; | 85 | int err = 0; |
86 | u32 flags = 0; | ||
86 | 87 | ||
87 | if (!mutex_is_locked(&devfreq->lock)) { | 88 | if (!mutex_is_locked(&devfreq->lock)) { |
88 | WARN(true, "devfreq->lock must be locked by the caller.\n"); | 89 | WARN(true, "devfreq->lock must be locked by the caller.\n"); |
@@ -94,7 +95,24 @@ int update_devfreq(struct devfreq *devfreq) | |||
94 | if (err) | 95 | if (err) |
95 | return err; | 96 | return err; |
96 | 97 | ||
97 | err = devfreq->profile->target(devfreq->dev.parent, &freq); | 98 | /* |
99 | * Adjust the frequency with user freq and QoS. | ||
100 | * | ||
101 | * List from the highest priority: | ||
102 | * max_freq (typically set by thermal when the system is too hot) | ||
103 | * min_freq | ||
104 | */ | ||
105 | |||
106 | if (devfreq->min_freq && freq < devfreq->min_freq) { | ||
107 | freq = devfreq->min_freq; | ||
108 | flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */ | ||
109 | } | ||
110 | if (devfreq->max_freq && freq > devfreq->max_freq) { | ||
111 | freq = devfreq->max_freq; | ||
112 | flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */ | ||
113 | } | ||
114 | |||
115 | err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); | ||
98 | if (err) | 116 | if (err) |
99 | return err; | 117 | return err; |
100 | 118 | ||
@@ -501,12 +519,82 @@ static ssize_t show_central_polling(struct device *dev, | |||
501 | !to_devfreq(dev)->governor->no_central_polling); | 519 | !to_devfreq(dev)->governor->no_central_polling); |
502 | } | 520 | } |
503 | 521 | ||
522 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | ||
523 | const char *buf, size_t count) | ||
524 | { | ||
525 | struct devfreq *df = to_devfreq(dev); | ||
526 | unsigned long value; | ||
527 | int ret; | ||
528 | unsigned long max; | ||
529 | |||
530 | ret = sscanf(buf, "%lu", &value); | ||
531 | if (ret != 1) | ||
532 | goto out; | ||
533 | |||
534 | mutex_lock(&df->lock); | ||
535 | max = df->max_freq; | ||
536 | if (value && max && value > max) { | ||
537 | ret = -EINVAL; | ||
538 | goto unlock; | ||
539 | } | ||
540 | |||
541 | df->min_freq = value; | ||
542 | update_devfreq(df); | ||
543 | ret = count; | ||
544 | unlock: | ||
545 | mutex_unlock(&df->lock); | ||
546 | out: | ||
547 | return ret; | ||
548 | } | ||
549 | |||
550 | static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr, | ||
551 | char *buf) | ||
552 | { | ||
553 | return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq); | ||
554 | } | ||
555 | |||
556 | static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | ||
557 | const char *buf, size_t count) | ||
558 | { | ||
559 | struct devfreq *df = to_devfreq(dev); | ||
560 | unsigned long value; | ||
561 | int ret; | ||
562 | unsigned long min; | ||
563 | |||
564 | ret = sscanf(buf, "%lu", &value); | ||
565 | if (ret != 1) | ||
566 | goto out; | ||
567 | |||
568 | mutex_lock(&df->lock); | ||
569 | min = df->min_freq; | ||
570 | if (value && min && value < min) { | ||
571 | ret = -EINVAL; | ||
572 | goto unlock; | ||
573 | } | ||
574 | |||
575 | df->max_freq = value; | ||
576 | update_devfreq(df); | ||
577 | ret = count; | ||
578 | unlock: | ||
579 | mutex_unlock(&df->lock); | ||
580 | out: | ||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr, | ||
585 | char *buf) | ||
586 | { | ||
587 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); | ||
588 | } | ||
589 | |||
504 | static struct device_attribute devfreq_attrs[] = { | 590 | static struct device_attribute devfreq_attrs[] = { |
505 | __ATTR(governor, S_IRUGO, show_governor, NULL), | 591 | __ATTR(governor, S_IRUGO, show_governor, NULL), |
506 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), | 592 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), |
507 | __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), | 593 | __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), |
508 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, | 594 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, |
509 | store_polling_interval), | 595 | store_polling_interval), |
596 | __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), | ||
597 | __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), | ||
510 | { }, | 598 | { }, |
511 | }; | 599 | }; |
512 | 600 | ||
@@ -555,14 +643,30 @@ module_exit(devfreq_exit); | |||
555 | * freq value given to target callback. | 643 | * freq value given to target callback. |
556 | * @dev The devfreq user device. (parent of devfreq) | 644 | * @dev The devfreq user device. (parent of devfreq) |
557 | * @freq The frequency given to target function | 645 | * @freq The frequency given to target function |
646 | * @flags Flags handed from devfreq framework. | ||
558 | * | 647 | * |
559 | */ | 648 | */ |
560 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq) | 649 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, |
650 | u32 flags) | ||
561 | { | 651 | { |
562 | struct opp *opp = opp_find_freq_ceil(dev, freq); | 652 | struct opp *opp; |
563 | 653 | ||
564 | if (opp == ERR_PTR(-ENODEV)) | 654 | if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { |
655 | /* The freq is an upper bound. opp should be lower */ | ||
565 | opp = opp_find_freq_floor(dev, freq); | 656 | opp = opp_find_freq_floor(dev, freq); |
657 | |||
658 | /* If not available, use the closest opp */ | ||
659 | if (opp == ERR_PTR(-ENODEV)) | ||
660 | opp = opp_find_freq_ceil(dev, freq); | ||
661 | } else { | ||
662 | /* The freq is a lower bound. opp should be higher */ | ||
663 | opp = opp_find_freq_ceil(dev, freq); | ||
664 | |||
665 | /* If not available, use the closest opp */ | ||
666 | if (opp == ERR_PTR(-ENODEV)) | ||
667 | opp = opp_find_freq_floor(dev, freq); | ||
668 | } | ||
669 | |||
566 | return opp; | 670 | return opp; |
567 | } | 671 | } |
568 | 672 | ||
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c index 6460577d6701..1a361e99965a 100644 --- a/drivers/devfreq/exynos4_bus.c +++ b/drivers/devfreq/exynos4_bus.c | |||
@@ -619,15 +619,19 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp, | |||
619 | return err; | 619 | return err; |
620 | } | 620 | } |
621 | 621 | ||
622 | static int exynos4_bus_target(struct device *dev, unsigned long *_freq) | 622 | static int exynos4_bus_target(struct device *dev, unsigned long *_freq, |
623 | u32 flags) | ||
623 | { | 624 | { |
624 | int err = 0; | 625 | int err = 0; |
625 | struct platform_device *pdev = container_of(dev, struct platform_device, | 626 | struct platform_device *pdev = container_of(dev, struct platform_device, |
626 | dev); | 627 | dev); |
627 | struct busfreq_data *data = platform_get_drvdata(pdev); | 628 | struct busfreq_data *data = platform_get_drvdata(pdev); |
628 | struct opp *opp = devfreq_recommended_opp(dev, _freq); | 629 | struct opp *opp = devfreq_recommended_opp(dev, _freq, flags); |
629 | unsigned long old_freq = opp_get_freq(data->curr_opp); | ||
630 | unsigned long freq = opp_get_freq(opp); | 630 | unsigned long freq = opp_get_freq(opp); |
631 | unsigned long old_freq = opp_get_freq(data->curr_opp); | ||
632 | |||
633 | if (IS_ERR(opp)) | ||
634 | return PTR_ERR(opp); | ||
631 | 635 | ||
632 | if (old_freq == freq) | 636 | if (old_freq == freq) |
633 | return 0; | 637 | return 0; |
@@ -689,9 +693,7 @@ static int exynos4_get_busier_dmc(struct busfreq_data *data) | |||
689 | static int exynos4_bus_get_dev_status(struct device *dev, | 693 | static int exynos4_bus_get_dev_status(struct device *dev, |
690 | struct devfreq_dev_status *stat) | 694 | struct devfreq_dev_status *stat) |
691 | { | 695 | { |
692 | struct platform_device *pdev = container_of(dev, struct platform_device, | 696 | struct busfreq_data *data = dev_get_drvdata(dev); |
693 | dev); | ||
694 | struct busfreq_data *data = platform_get_drvdata(pdev); | ||
695 | int busier_dmc; | 697 | int busier_dmc; |
696 | int cycles_x2 = 2; /* 2 x cycles */ | 698 | int cycles_x2 = 2; /* 2 x cycles */ |
697 | void __iomem *addr; | 699 | void __iomem *addr; |
@@ -739,9 +741,7 @@ static int exynos4_bus_get_dev_status(struct device *dev, | |||
739 | 741 | ||
740 | static void exynos4_bus_exit(struct device *dev) | 742 | static void exynos4_bus_exit(struct device *dev) |
741 | { | 743 | { |
742 | struct platform_device *pdev = container_of(dev, struct platform_device, | 744 | struct busfreq_data *data = dev_get_drvdata(dev); |
743 | dev); | ||
744 | struct busfreq_data *data = platform_get_drvdata(pdev); | ||
745 | 745 | ||
746 | devfreq_unregister_opp_notifier(dev, data->devfreq); | 746 | devfreq_unregister_opp_notifier(dev, data->devfreq); |
747 | } | 747 | } |
@@ -1087,9 +1087,7 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) | |||
1087 | 1087 | ||
1088 | static int exynos4_busfreq_resume(struct device *dev) | 1088 | static int exynos4_busfreq_resume(struct device *dev) |
1089 | { | 1089 | { |
1090 | struct platform_device *pdev = container_of(dev, struct platform_device, | 1090 | struct busfreq_data *data = dev_get_drvdata(dev); |
1091 | dev); | ||
1092 | struct busfreq_data *data = platform_get_drvdata(pdev); | ||
1093 | 1091 | ||
1094 | busfreq_mon_reset(data); | 1092 | busfreq_mon_reset(data); |
1095 | return 0; | 1093 | return 0; |
@@ -1132,4 +1130,3 @@ module_exit(exynos4_busfreq_exit); | |||
1132 | MODULE_LICENSE("GPL"); | 1130 | MODULE_LICENSE("GPL"); |
1133 | MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework"); | 1131 | MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework"); |
1134 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | 1132 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); |
1135 | MODULE_ALIAS("exynos4-busfreq"); | ||
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index c0596b291761..574a06b1b1de 100644 --- a/drivers/devfreq/governor_performance.c +++ b/drivers/devfreq/governor_performance.c | |||
@@ -18,7 +18,10 @@ static int devfreq_performance_func(struct devfreq *df, | |||
18 | * target callback should be able to get floor value as | 18 | * target callback should be able to get floor value as |
19 | * said in devfreq.h | 19 | * said in devfreq.h |
20 | */ | 20 | */ |
21 | *freq = UINT_MAX; | 21 | if (!df->max_freq) |
22 | *freq = UINT_MAX; | ||
23 | else | ||
24 | *freq = df->max_freq; | ||
22 | return 0; | 25 | return 0; |
23 | } | 26 | } |
24 | 27 | ||
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index 2483a85a266f..d742d4a82d6a 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c | |||
@@ -18,7 +18,7 @@ static int devfreq_powersave_func(struct devfreq *df, | |||
18 | * target callback should be able to get ceiling value as | 18 | * target callback should be able to get ceiling value as |
19 | * said in devfreq.h | 19 | * said in devfreq.h |
20 | */ | 20 | */ |
21 | *freq = 0; | 21 | *freq = df->min_freq; |
22 | return 0; | 22 | return 0; |
23 | } | 23 | } |
24 | 24 | ||
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index efad8dcf9028..a2e3eae79011 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c | |||
@@ -25,6 +25,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
25 | unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; | 25 | unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; |
26 | unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; | 26 | unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; |
27 | struct devfreq_simple_ondemand_data *data = df->data; | 27 | struct devfreq_simple_ondemand_data *data = df->data; |
28 | unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX; | ||
28 | 29 | ||
29 | if (err) | 30 | if (err) |
30 | return err; | 31 | return err; |
@@ -41,7 +42,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
41 | 42 | ||
42 | /* Assume MAX if it is going to be divided by zero */ | 43 | /* Assume MAX if it is going to be divided by zero */ |
43 | if (stat.total_time == 0) { | 44 | if (stat.total_time == 0) { |
44 | *freq = UINT_MAX; | 45 | *freq = max; |
45 | return 0; | 46 | return 0; |
46 | } | 47 | } |
47 | 48 | ||
@@ -54,13 +55,13 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
54 | /* Set MAX if it's busy enough */ | 55 | /* Set MAX if it's busy enough */ |
55 | if (stat.busy_time * 100 > | 56 | if (stat.busy_time * 100 > |
56 | stat.total_time * dfso_upthreshold) { | 57 | stat.total_time * dfso_upthreshold) { |
57 | *freq = UINT_MAX; | 58 | *freq = max; |
58 | return 0; | 59 | return 0; |
59 | } | 60 | } |
60 | 61 | ||
61 | /* Set MAX if we do not know the initial frequency */ | 62 | /* Set MAX if we do not know the initial frequency */ |
62 | if (stat.current_frequency == 0) { | 63 | if (stat.current_frequency == 0) { |
63 | *freq = UINT_MAX; | 64 | *freq = max; |
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
66 | 67 | ||
@@ -79,6 +80,11 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
79 | b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); | 80 | b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); |
80 | *freq = (unsigned long) b; | 81 | *freq = (unsigned long) b; |
81 | 82 | ||
83 | if (df->min_freq && *freq < df->min_freq) | ||
84 | *freq = df->min_freq; | ||
85 | if (df->max_freq && *freq > df->max_freq) | ||
86 | *freq = df->max_freq; | ||
87 | |||
82 | return 0; | 88 | return 0; |
83 | } | 89 | } |
84 | 90 | ||
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 4f8b563da782..0681246fc89d 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c | |||
@@ -25,10 +25,19 @@ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) | |||
25 | { | 25 | { |
26 | struct userspace_data *data = df->data; | 26 | struct userspace_data *data = df->data; |
27 | 27 | ||
28 | if (!data->valid) | 28 | if (data->valid) { |
29 | unsigned long adjusted_freq = data->user_frequency; | ||
30 | |||
31 | if (df->max_freq && adjusted_freq > df->max_freq) | ||
32 | adjusted_freq = df->max_freq; | ||
33 | |||
34 | if (df->min_freq && adjusted_freq < df->min_freq) | ||
35 | adjusted_freq = df->min_freq; | ||
36 | |||
37 | *freq = adjusted_freq; | ||
38 | } else { | ||
29 | *freq = df->previous_freq; /* No user freq specified yet */ | 39 | *freq = df->previous_freq; /* No user freq specified yet */ |
30 | else | 40 | } |
31 | *freq = data->user_frequency; | ||
32 | return 0; | 41 | return 0; |
33 | } | 42 | } |
34 | 43 | ||
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 352d4797865b..75a485448796 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/mmc/sh_mmcif.h> | 56 | #include <linux/mmc/sh_mmcif.h> |
57 | #include <linux/pagemap.h> | 57 | #include <linux/pagemap.h> |
58 | #include <linux/platform_device.h> | 58 | #include <linux/platform_device.h> |
59 | #include <linux/pm_qos.h> | ||
59 | #include <linux/pm_runtime.h> | 60 | #include <linux/pm_runtime.h> |
60 | #include <linux/spinlock.h> | 61 | #include <linux/spinlock.h> |
61 | #include <linux/module.h> | 62 | #include <linux/module.h> |
@@ -1346,6 +1347,8 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) | |||
1346 | if (ret < 0) | 1347 | if (ret < 0) |
1347 | goto clean_up5; | 1348 | goto clean_up5; |
1348 | 1349 | ||
1350 | dev_pm_qos_expose_latency_limit(&pdev->dev, 100); | ||
1351 | |||
1349 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); | 1352 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); |
1350 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", | 1353 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", |
1351 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); | 1354 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); |
@@ -1376,6 +1379,8 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) | |||
1376 | host->dying = true; | 1379 | host->dying = true; |
1377 | pm_runtime_get_sync(&pdev->dev); | 1380 | pm_runtime_get_sync(&pdev->dev); |
1378 | 1381 | ||
1382 | dev_pm_qos_hide_latency_limit(&pdev->dev); | ||
1383 | |||
1379 | mmc_remove_host(host->mmc); | 1384 | mmc_remove_host(host->mmc); |
1380 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1385 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1381 | 1386 | ||
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 5f9ad74fbf80..e21988901c36 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/pagemap.h> | 40 | #include <linux/pagemap.h> |
41 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
42 | #include <linux/pm_qos.h> | ||
42 | #include <linux/pm_runtime.h> | 43 | #include <linux/pm_runtime.h> |
43 | #include <linux/scatterlist.h> | 44 | #include <linux/scatterlist.h> |
44 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
@@ -955,6 +956,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
955 | 956 | ||
956 | mmc_add_host(mmc); | 957 | mmc_add_host(mmc); |
957 | 958 | ||
959 | dev_pm_qos_expose_latency_limit(&pdev->dev, 100); | ||
960 | |||
958 | /* Unmask the IRQs we want to know about */ | 961 | /* Unmask the IRQs we want to know about */ |
959 | if (!_host->chan_rx) | 962 | if (!_host->chan_rx) |
960 | irq_mask |= TMIO_MASK_READOP; | 963 | irq_mask |= TMIO_MASK_READOP; |
@@ -993,6 +996,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) | |||
993 | || host->mmc->caps & MMC_CAP_NONREMOVABLE) | 996 | || host->mmc->caps & MMC_CAP_NONREMOVABLE) |
994 | pm_runtime_get_sync(&pdev->dev); | 997 | pm_runtime_get_sync(&pdev->dev); |
995 | 998 | ||
999 | dev_pm_qos_hide_latency_limit(&pdev->dev); | ||
1000 | |||
996 | mmc_remove_host(host->mmc); | 1001 | mmc_remove_host(host->mmc); |
997 | cancel_work_sync(&host->done); | 1002 | cancel_work_sync(&host->done); |
998 | cancel_delayed_work_sync(&host->delayed_reset_work); | 1003 | cancel_delayed_work_sync(&host->delayed_reset_work); |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index ce4fa0831860..9e14ae6cd49c 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -129,9 +129,9 @@ static void do_suspend(void) | |||
129 | printk(KERN_DEBUG "suspending xenstore...\n"); | 129 | printk(KERN_DEBUG "suspending xenstore...\n"); |
130 | xs_suspend(); | 130 | xs_suspend(); |
131 | 131 | ||
132 | err = dpm_suspend_noirq(PMSG_FREEZE); | 132 | err = dpm_suspend_end(PMSG_FREEZE); |
133 | if (err) { | 133 | if (err) { |
134 | printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); | 134 | printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); |
135 | goto out_resume; | 135 | goto out_resume; |
136 | } | 136 | } |
137 | 137 | ||
@@ -149,7 +149,7 @@ static void do_suspend(void) | |||
149 | 149 | ||
150 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); | 150 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); |
151 | 151 | ||
152 | dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE); | 152 | dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); |
153 | 153 | ||
154 | if (err) { | 154 | if (err) { |
155 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | 155 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e49e81bb80ef..0971e9217808 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -129,6 +129,8 @@ static int kjournald(void *arg) | |||
129 | setup_timer(&journal->j_commit_timer, commit_timeout, | 129 | setup_timer(&journal->j_commit_timer, commit_timeout, |
130 | (unsigned long)current); | 130 | (unsigned long)current); |
131 | 131 | ||
132 | set_freezable(); | ||
133 | |||
132 | /* Record that the journal thread is running */ | 134 | /* Record that the journal thread is running */ |
133 | journal->j_task = current; | 135 | journal->j_task = current; |
134 | wake_up(&journal->j_wait_done_commit); | 136 | wake_up(&journal->j_wait_done_commit); |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 5ff8940b8f02..839377e3d624 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -139,6 +139,8 @@ static int kjournald2(void *arg) | |||
139 | setup_timer(&journal->j_commit_timer, commit_timeout, | 139 | setup_timer(&journal->j_commit_timer, commit_timeout, |
140 | (unsigned long)current); | 140 | (unsigned long)current); |
141 | 141 | ||
142 | set_freezable(); | ||
143 | |||
142 | /* Record that the journal thread is running */ | 144 | /* Record that the journal thread is running */ |
143 | journal->j_task = current; | 145 | journal->j_task = current; |
144 | wake_up(&journal->j_wait_done_commit); | 146 | wake_up(&journal->j_wait_done_commit); |
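
set_freezable() is needed because kernel threads start with PF_NOFREEZE set; without the call, kjournald/kjournald2 would keep running while the rest of the system is being frozen for suspend or hibernation. The usual shape of a freezable kthread, as a generic sketch rather than the jbd code itself:

#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_thread_fn(void *arg)
{
	set_freezable();	/* opt in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* parks here while tasks are frozen */

		/* ... do one unit of work, then sleep interruptibly ... */
	}
	return 0;
}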
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 98ce8124b1cc..281c72a3b9d5 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -44,6 +44,14 @@ struct devfreq_dev_status { | |||
44 | void *private_data; | 44 | void *private_data; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* | ||
48 | * The resulting frequency should be at most this. (This bound is the | ||
49 | * least upper bound; thus, the resulting freq should be lower or the same.) | ||
50 | * If the flag is not set, the resulting frequency should be at least the | ||
51 | * bound (greatest lower bound). | ||
52 | */ | ||
53 | #define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1 | ||
54 | |||
47 | /** | 55 | /** |
48 | * struct devfreq_dev_profile - Devfreq's user device profile | 56 | * struct devfreq_dev_profile - Devfreq's user device profile |
49 | * @initial_freq The operating frequency when devfreq_add_device() is | 57 | * @initial_freq The operating frequency when devfreq_add_device() is |
@@ -54,6 +62,8 @@ struct devfreq_dev_status { | |||
54 | * higher than any operable frequency, set maximum. | 62 | * higher than any operable frequency, set maximum. |
55 | * Before returning, target function should set | 63 | * Before returning, target function should set |
56 | * freq at the current frequency. | 64 | * freq at the current frequency. |
65 | * The "flags" parameter's possible values are | ||
66 | * explained above with "DEVFREQ_FLAG_*" macros. | ||
57 | * @get_dev_status The device should provide the current performance | 67 | * @get_dev_status The device should provide the current performance |
58 | * status to devfreq, which is used by governors. | 68 | * status to devfreq, which is used by governors. |
59 | * @exit An optional callback that is called when devfreq | 69 | * @exit An optional callback that is called when devfreq |
@@ -66,7 +76,7 @@ struct devfreq_dev_profile { | |||
66 | unsigned long initial_freq; | 76 | unsigned long initial_freq; |
67 | unsigned int polling_ms; | 77 | unsigned int polling_ms; |
68 | 78 | ||
69 | int (*target)(struct device *dev, unsigned long *freq); | 79 | int (*target)(struct device *dev, unsigned long *freq, u32 flags); |
70 | int (*get_dev_status)(struct device *dev, | 80 | int (*get_dev_status)(struct device *dev, |
71 | struct devfreq_dev_status *stat); | 81 | struct devfreq_dev_status *stat); |
72 | void (*exit)(struct device *dev); | 82 | void (*exit)(struct device *dev); |
@@ -124,6 +134,8 @@ struct devfreq_governor { | |||
124 | * touch this. | 134 | * touch this. |
125 | * @being_removed a flag to mark that this object is being removed in | 135 | * @being_removed a flag to mark that this object is being removed in |
126 | * order to prevent trying to remove the object multiple times. | 136 | * order to prevent trying to remove the object multiple times. |
137 | * @min_freq Limit minimum frequency requested by user (0: none) | ||
138 | * @max_freq Limit maximum frequency requested by user (0: none) | ||
127 | * | 139 | * |
128 | * This structure stores the devfreq information for a given device. | 140 | * This structure stores the devfreq information for a given device. |
129 | * | 141 | * |
@@ -149,6 +161,9 @@ struct devfreq { | |||
149 | void *data; /* private data for governors */ | 161 | void *data; /* private data for governors */ |
150 | 162 | ||
151 | bool being_removed; | 163 | bool being_removed; |
164 | |||
165 | unsigned long min_freq; | ||
166 | unsigned long max_freq; | ||
152 | }; | 167 | }; |
153 | 168 | ||
154 | #if defined(CONFIG_PM_DEVFREQ) | 169 | #if defined(CONFIG_PM_DEVFREQ) |
@@ -160,7 +175,7 @@ extern int devfreq_remove_device(struct devfreq *devfreq); | |||
160 | 175 | ||
161 | /* Helper functions for devfreq user device driver with OPP. */ | 176 | /* Helper functions for devfreq user device driver with OPP. */ |
162 | extern struct opp *devfreq_recommended_opp(struct device *dev, | 177 | extern struct opp *devfreq_recommended_opp(struct device *dev, |
163 | unsigned long *freq); | 178 | unsigned long *freq, u32 flags); |
164 | extern int devfreq_register_opp_notifier(struct device *dev, | 179 | extern int devfreq_register_opp_notifier(struct device *dev, |
165 | struct devfreq *devfreq); | 180 | struct devfreq *devfreq); |
166 | extern int devfreq_unregister_opp_notifier(struct device *dev, | 181 | extern int devfreq_unregister_opp_notifier(struct device *dev, |
@@ -200,18 +215,18 @@ struct devfreq_simple_ondemand_data { | |||
200 | static struct devfreq *devfreq_add_device(struct device *dev, | 215 | static struct devfreq *devfreq_add_device(struct device *dev, |
201 | struct devfreq_dev_profile *profile, | 216 | struct devfreq_dev_profile *profile, |
202 | struct devfreq_governor *governor, | 217 | struct devfreq_governor *governor, |
203 | void *data); | 218 | void *data) |
204 | { | 219 | { |
205 | return NULL; | 220 | return NULL; |
206 | } | 221 | } |
207 | 222 | ||
208 | static int devfreq_remove_device(struct devfreq *devfreq); | 223 | static int devfreq_remove_device(struct devfreq *devfreq) |
209 | { | 224 | { |
210 | return 0; | 225 | return 0; |
211 | } | 226 | } |
212 | 227 | ||
213 | static struct opp *devfreq_recommended_opp(struct device *dev, | 228 | static struct opp *devfreq_recommended_opp(struct device *dev, |
214 | unsigned long *freq) | 229 | unsigned long *freq, u32 flags) |
215 | { | 230 | { |
216 | return -EINVAL; | 231 | return -EINVAL; |
217 | } | 232 | } |
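
For out-of-tree users, the target() callback grows a flags argument, and devfreq_recommended_opp() should be handed that value so DEVFREQ_FLAG_LEAST_UPPER_BOUND is honored when picking an OPP. A minimal sketch of an updated profile (the my_* names are hypothetical; .get_dev_status and .exit are omitted for brevity):

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Picks the floor or ceiling OPP depending on the LUB/GLB flag. */
	struct opp *opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* ... program clocks and voltage for opp_get_freq(opp) ... */
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,
	.target		= my_target,
};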
diff --git a/include/linux/pm.h b/include/linux/pm.h index e4982ac3fbbc..715305e05123 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -110,6 +110,10 @@ typedef struct pm_message { | |||
110 | * Subsystem-level @suspend() is executed for all devices after invoking | 110 | * Subsystem-level @suspend() is executed for all devices after invoking |
111 | * subsystem-level @prepare() for all of them. | 111 | * subsystem-level @prepare() for all of them. |
112 | * | 112 | * |
113 | * @suspend_late: Continue operations started by @suspend(). For a number of | ||
114 | * devices @suspend_late() may point to the same callback routine as the | ||
115 | * runtime suspend callback. | ||
116 | * | ||
113 | * @resume: Executed after waking the system up from a sleep state in which the | 117 | * @resume: Executed after waking the system up from a sleep state in which the |
114 | * contents of main memory were preserved. The exact action to perform | 118 | * contents of main memory were preserved. The exact action to perform |
115 | * depends on the device's subsystem, but generally the driver is expected | 119 | * depends on the device's subsystem, but generally the driver is expected |
@@ -122,6 +126,10 @@ typedef struct pm_message { | |||
122 | * Subsystem-level @resume() is executed for all devices after invoking | 126 | * Subsystem-level @resume() is executed for all devices after invoking |
123 | * subsystem-level @resume_noirq() for all of them. | 127 | * subsystem-level @resume_noirq() for all of them. |
124 | * | 128 | * |
129 | * @resume_early: Prepare to execute @resume(). For a number of devices | ||
130 | * @resume_early() may point to the same callback routine as the runtime | ||
131 | * resume callback. | ||
132 | * | ||
125 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | 133 | * @freeze: Hibernation-specific, executed before creating a hibernation image. |
126 | * Analogous to @suspend(), but it should not enable the device to signal | 134 | * Analogous to @suspend(), but it should not enable the device to signal |
127 | * wakeup events or change its power state. The majority of subsystems | 135 | * wakeup events or change its power state. The majority of subsystems |
@@ -131,6 +139,10 @@ typedef struct pm_message { | |||
131 | * Subsystem-level @freeze() is executed for all devices after invoking | 139 | * Subsystem-level @freeze() is executed for all devices after invoking |
132 | * subsystem-level @prepare() for all of them. | 140 | * subsystem-level @prepare() for all of them. |
133 | * | 141 | * |
142 | * @freeze_late: Continue operations started by @freeze(). Analogous to | ||
143 | * @suspend_late(), but it should not enable the device to signal wakeup | ||
144 | * events or change its power state. | ||
145 | * | ||
134 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | 146 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR |
135 | * if the creation of an image has failed. Also executed after a failing | 147 | * if the creation of an image has failed. Also executed after a failing |
136 | * attempt to restore the contents of main memory from such an image. | 148 | * attempt to restore the contents of main memory from such an image. |
@@ -140,15 +152,23 @@ typedef struct pm_message { | |||
140 | * subsystem-level @thaw_noirq() for all of them. It also may be executed | 152 | * subsystem-level @thaw_noirq() for all of them. It also may be executed |
141 | * directly after @freeze() in case of a transition error. | 153 | * directly after @freeze() in case of a transition error. |
142 | * | 154 | * |
155 | * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the | ||
156 | * preceding @freeze_late(). | ||
157 | * | ||
143 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | 158 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. |
144 | * Analogous to @suspend(), but it need not save the device's settings in | 159 | * Analogous to @suspend(), but it need not save the device's settings in |
145 | * memory. | 160 | * memory. |
146 | * Subsystem-level @poweroff() is executed for all devices after invoking | 161 | * Subsystem-level @poweroff() is executed for all devices after invoking |
147 | * subsystem-level @prepare() for all of them. | 162 | * subsystem-level @prepare() for all of them. |
148 | * | 163 | * |
164 | * @poweroff_late: Continue operations started by @poweroff(). Analogous to | ||
165 | * @suspend_late(), but it need not save the device's settings in memory. | ||
166 | * | ||
149 | * @restore: Hibernation-specific, executed after restoring the contents of main | 167 | * @restore: Hibernation-specific, executed after restoring the contents of main |
150 | * memory from a hibernation image, analogous to @resume(). | 168 | * memory from a hibernation image, analogous to @resume(). |
151 | * | 169 | * |
170 | * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). | ||
171 | * | ||
152 | * @suspend_noirq: Complete the actions started by @suspend(). Carry out any | 172 | * @suspend_noirq: Complete the actions started by @suspend(). Carry out any |
153 | * additional operations required for suspending the device that might be | 173 | * additional operations required for suspending the device that might be |
154 | * racing with its driver's interrupt handler, which is guaranteed not to | 174 | * racing with its driver's interrupt handler, which is guaranteed not to |
@@ -158,9 +178,10 @@ typedef struct pm_message { | |||
158 | * @suspend_noirq() has returned successfully. If the device can generate | 178 | * @suspend_noirq() has returned successfully. If the device can generate |
159 | * system wakeup signals and is enabled to wake up the system, it should be | 179 | * system wakeup signals and is enabled to wake up the system, it should be |
160 | * configured to do so at that time. However, depending on the platform | 180 | * configured to do so at that time. However, depending on the platform |
161 | * and device's subsystem, @suspend() may be allowed to put the device into | 181 | * and device's subsystem, @suspend() or @suspend_late() may be allowed to |
162 | * the low-power state and configure it to generate wakeup signals, in | 182 | * put the device into the low-power state and configure it to generate |
163 | * which case it generally is not necessary to define @suspend_noirq(). | 183 | * wakeup signals, in which case it generally is not necessary to define |
184 | * @suspend_noirq(). | ||
164 | * | 185 | * |
165 | * @resume_noirq: Prepare for the execution of @resume() by carrying out any | 186 | * @resume_noirq: Prepare for the execution of @resume() by carrying out any |
166 | * operations required for resuming the device that might be racing with | 187 | * operations required for resuming the device that might be racing with |
@@ -171,9 +192,9 @@ typedef struct pm_message { | |||
171 | * additional operations required for freezing the device that might be | 192 | * additional operations required for freezing the device that might be |
172 | * racing with its driver's interrupt handler, which is guaranteed not to | 193 | * racing with its driver's interrupt handler, which is guaranteed not to |
173 | * run while @freeze_noirq() is being executed. | 194 | * run while @freeze_noirq() is being executed. |
174 | * The power state of the device should not be changed by either @freeze() | 195 | * The power state of the device should not be changed by either @freeze(), |
175 | * or @freeze_noirq() and it should not be configured to signal system | 196 | * or @freeze_late(), or @freeze_noirq() and it should not be configured to |
176 | * wakeup by any of these callbacks. | 197 | * signal system wakeup by any of these callbacks. |
177 | * | 198 | * |
178 | * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any | 199 | * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any |
179 | * operations required for thawing the device that might be racing with its | 200 | * operations required for thawing the device that might be racing with its |
@@ -249,6 +270,12 @@ struct dev_pm_ops { | |||
249 | int (*thaw)(struct device *dev); | 270 | int (*thaw)(struct device *dev); |
250 | int (*poweroff)(struct device *dev); | 271 | int (*poweroff)(struct device *dev); |
251 | int (*restore)(struct device *dev); | 272 | int (*restore)(struct device *dev); |
273 | int (*suspend_late)(struct device *dev); | ||
274 | int (*resume_early)(struct device *dev); | ||
275 | int (*freeze_late)(struct device *dev); | ||
276 | int (*thaw_early)(struct device *dev); | ||
277 | int (*poweroff_late)(struct device *dev); | ||
278 | int (*restore_early)(struct device *dev); | ||
252 | int (*suspend_noirq)(struct device *dev); | 279 | int (*suspend_noirq)(struct device *dev); |
253 | int (*resume_noirq)(struct device *dev); | 280 | int (*resume_noirq)(struct device *dev); |
254 | int (*freeze_noirq)(struct device *dev); | 281 | int (*freeze_noirq)(struct device *dev); |
@@ -293,6 +320,15 @@ const struct dev_pm_ops name = { \ | |||
293 | /* | 320 | /* |
294 | * Use this for defining a set of PM operations to be used in all situations | 321 | * Use this for defining a set of PM operations to be used in all situations |
295 | * (system suspend, hibernation or runtime PM). | 322 | * (system suspend, hibernation or runtime PM). |
323 | * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should | ||
324 | * be different from the corresponding runtime PM callbacks, .runtime_suspend(), | ||
325 | * and .runtime_resume(), because .runtime_suspend() always works on an already | ||
326 | * quiescent device, while .suspend() should assume that the device may be doing | ||
327 | * something when it is called (it should ensure that the device will be | ||
328 | * quiescent after it has returned). Therefore it's better to point the "late" | ||
329 | * suspend and "early" resume callback pointers, .suspend_late() and | ||
330 | * .resume_early(), to the same routines as .runtime_suspend() and | ||
331 | * .runtime_resume(), respectively (and analogously for hibernation). | ||
296 | */ | 332 | */ |
297 | #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ | 333 | #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ |
298 | const struct dev_pm_ops name = { \ | 334 | const struct dev_pm_ops name = { \ |
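
Following that NOTE, a driver that already implements runtime PM can reuse its runtime callbacks for the late/early system sleep phases instead of duplicating them. A sketch, assuming the mydrv_* routines exist elsewhere in the driver:

static const struct dev_pm_ops mydrv_pm_ops = {
	.suspend	 = mydrv_suspend,	/* must cope with an active device */
	.resume		 = mydrv_resume,
	/*
	 * The late/early phases see an already quiescent device, so the
	 * runtime callbacks can be pointed at directly.
	 */
	.suspend_late	 = mydrv_runtime_suspend,
	.resume_early	 = mydrv_runtime_resume,
	.runtime_suspend = mydrv_runtime_suspend,
	.runtime_resume	 = mydrv_runtime_resume,
};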
@@ -510,6 +546,7 @@ struct dev_pm_info { | |||
510 | unsigned long accounting_timestamp; | 546 | unsigned long accounting_timestamp; |
511 | ktime_t suspend_time; | 547 | ktime_t suspend_time; |
512 | s64 max_time_suspended_ns; | 548 | s64 max_time_suspended_ns; |
549 | struct dev_pm_qos_request *pq_req; | ||
513 | #endif | 550 | #endif |
514 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ | 551 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
515 | struct pm_qos_constraints *constraints; | 552 | struct pm_qos_constraints *constraints; |
@@ -584,13 +621,13 @@ struct dev_pm_domain { | |||
584 | 621 | ||
585 | #ifdef CONFIG_PM_SLEEP | 622 | #ifdef CONFIG_PM_SLEEP |
586 | extern void device_pm_lock(void); | 623 | extern void device_pm_lock(void); |
587 | extern void dpm_resume_noirq(pm_message_t state); | 624 | extern void dpm_resume_start(pm_message_t state); |
588 | extern void dpm_resume_end(pm_message_t state); | 625 | extern void dpm_resume_end(pm_message_t state); |
589 | extern void dpm_resume(pm_message_t state); | 626 | extern void dpm_resume(pm_message_t state); |
590 | extern void dpm_complete(pm_message_t state); | 627 | extern void dpm_complete(pm_message_t state); |
591 | 628 | ||
592 | extern void device_pm_unlock(void); | 629 | extern void device_pm_unlock(void); |
593 | extern int dpm_suspend_noirq(pm_message_t state); | 630 | extern int dpm_suspend_end(pm_message_t state); |
594 | extern int dpm_suspend_start(pm_message_t state); | 631 | extern int dpm_suspend_start(pm_message_t state); |
595 | extern int dpm_suspend(pm_message_t state); | 632 | extern int dpm_suspend(pm_message_t state); |
596 | extern int dpm_prepare(pm_message_t state); | 633 | extern int dpm_prepare(pm_message_t state); |
@@ -605,17 +642,23 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
605 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | 642 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
606 | 643 | ||
607 | extern int pm_generic_prepare(struct device *dev); | 644 | extern int pm_generic_prepare(struct device *dev); |
645 | extern int pm_generic_suspend_late(struct device *dev); | ||
608 | extern int pm_generic_suspend_noirq(struct device *dev); | 646 | extern int pm_generic_suspend_noirq(struct device *dev); |
609 | extern int pm_generic_suspend(struct device *dev); | 647 | extern int pm_generic_suspend(struct device *dev); |
648 | extern int pm_generic_resume_early(struct device *dev); | ||
610 | extern int pm_generic_resume_noirq(struct device *dev); | 649 | extern int pm_generic_resume_noirq(struct device *dev); |
611 | extern int pm_generic_resume(struct device *dev); | 650 | extern int pm_generic_resume(struct device *dev); |
612 | extern int pm_generic_freeze_noirq(struct device *dev); | 651 | extern int pm_generic_freeze_noirq(struct device *dev); |
652 | extern int pm_generic_freeze_late(struct device *dev); | ||
613 | extern int pm_generic_freeze(struct device *dev); | 653 | extern int pm_generic_freeze(struct device *dev); |
614 | extern int pm_generic_thaw_noirq(struct device *dev); | 654 | extern int pm_generic_thaw_noirq(struct device *dev); |
655 | extern int pm_generic_thaw_early(struct device *dev); | ||
615 | extern int pm_generic_thaw(struct device *dev); | 656 | extern int pm_generic_thaw(struct device *dev); |
616 | extern int pm_generic_restore_noirq(struct device *dev); | 657 | extern int pm_generic_restore_noirq(struct device *dev); |
658 | extern int pm_generic_restore_early(struct device *dev); | ||
617 | extern int pm_generic_restore(struct device *dev); | 659 | extern int pm_generic_restore(struct device *dev); |
618 | extern int pm_generic_poweroff_noirq(struct device *dev); | 660 | extern int pm_generic_poweroff_noirq(struct device *dev); |
661 | extern int pm_generic_poweroff_late(struct device *dev); | ||
619 | extern int pm_generic_poweroff(struct device *dev); | 662 | extern int pm_generic_poweroff(struct device *dev); |
620 | extern void pm_generic_complete(struct device *dev); | 663 | extern void pm_generic_complete(struct device *dev); |
621 | 664 | ||
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a03a0ad998b8..1236d262b3e8 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/of.h> | ||
14 | 15 | ||
15 | enum gpd_status { | 16 | enum gpd_status { |
16 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 17 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
@@ -70,6 +71,7 @@ struct generic_pm_domain { | |||
70 | s64 break_even_ns; /* Power break even for the entire domain. */ | 71 | s64 break_even_ns; /* Power break even for the entire domain. */ |
71 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ | 72 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ |
72 | ktime_t power_off_time; | 73 | ktime_t power_off_time; |
74 | struct device_node *of_node; /* Node in device tree */ | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | 77 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) |
@@ -97,14 +99,15 @@ struct generic_pm_domain_data { | |||
97 | struct gpd_dev_ops ops; | 99 | struct gpd_dev_ops ops; |
98 | struct gpd_timing_data td; | 100 | struct gpd_timing_data td; |
99 | bool need_restore; | 101 | bool need_restore; |
102 | bool always_on; | ||
100 | }; | 103 | }; |
101 | 104 | ||
105 | #ifdef CONFIG_PM_GENERIC_DOMAINS | ||
102 | static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) | 106 | static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) |
103 | { | 107 | { |
104 | return container_of(pdd, struct generic_pm_domain_data, base); | 108 | return container_of(pdd, struct generic_pm_domain_data, base); |
105 | } | 109 | } |
106 | 110 | ||
107 | #ifdef CONFIG_PM_GENERIC_DOMAINS | ||
108 | static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) | 111 | static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) |
109 | { | 112 | { |
110 | return to_gpd_data(dev->power.subsys_data->domain_data); | 113 | return to_gpd_data(dev->power.subsys_data->domain_data); |
@@ -117,14 +120,25 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
117 | struct device *dev, | 120 | struct device *dev, |
118 | struct gpd_timing_data *td); | 121 | struct gpd_timing_data *td); |
119 | 122 | ||
123 | extern int __pm_genpd_of_add_device(struct device_node *genpd_node, | ||
124 | struct device *dev, | ||
125 | struct gpd_timing_data *td); | ||
126 | |||
120 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, | 127 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, |
121 | struct device *dev) | 128 | struct device *dev) |
122 | { | 129 | { |
123 | return __pm_genpd_add_device(genpd, dev, NULL); | 130 | return __pm_genpd_add_device(genpd, dev, NULL); |
124 | } | 131 | } |
125 | 132 | ||
133 | static inline int pm_genpd_of_add_device(struct device_node *genpd_node, | ||
134 | struct device *dev) | ||
135 | { | ||
136 | return __pm_genpd_of_add_device(genpd_node, dev, NULL); | ||
137 | } | ||
138 | |||
126 | extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 139 | extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
127 | struct device *dev); | 140 | struct device *dev); |
141 | extern void pm_genpd_dev_always_on(struct device *dev, bool val); | ||
128 | extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 142 | extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
129 | struct generic_pm_domain *new_subdomain); | 143 | struct generic_pm_domain *new_subdomain); |
130 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 144 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
@@ -143,6 +157,10 @@ extern bool default_stop_ok(struct device *dev); | |||
143 | extern struct dev_power_governor pm_domain_always_on_gov; | 157 | extern struct dev_power_governor pm_domain_always_on_gov; |
144 | #else | 158 | #else |
145 | 159 | ||
160 | static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) | ||
161 | { | ||
162 | return ERR_PTR(-ENOSYS); | ||
163 | } | ||
146 | static inline struct generic_pm_domain *dev_to_genpd(struct device *dev) | 164 | static inline struct generic_pm_domain *dev_to_genpd(struct device *dev) |
147 | { | 165 | { |
148 | return ERR_PTR(-ENOSYS); | 166 | return ERR_PTR(-ENOSYS); |
@@ -163,6 +181,7 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
163 | { | 181 | { |
164 | return -ENOSYS; | 182 | return -ENOSYS; |
165 | } | 183 | } |
184 | static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {} | ||
166 | static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 185 | static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
167 | struct generic_pm_domain *new_sd) | 186 | struct generic_pm_domain *new_sd) |
168 | { | 187 | { |
@@ -183,7 +202,8 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) | |||
183 | { | 202 | { |
184 | return -ENOSYS; | 203 | return -ENOSYS; |
185 | } | 204 | } |
186 | static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off) | 205 | static inline void pm_genpd_init(struct generic_pm_domain *genpd, |
206 | struct dev_power_governor *gov, bool is_off) | ||
187 | { | 207 | { |
188 | } | 208 | } |
189 | static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) | 209 | static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) |
@@ -194,6 +214,7 @@ static inline bool default_stop_ok(struct device *dev) | |||
194 | { | 214 | { |
195 | return false; | 215 | return false; |
196 | } | 216 | } |
217 | #define simple_qos_governor NULL | ||
197 | #define pm_domain_always_on_gov NULL | 218 | #define pm_domain_always_on_gov NULL |
198 | #endif | 219 | #endif |
199 | 220 | ||
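
The new OF entry point lets platform code attach a device to a generic PM domain looked up from the device tree. A hedged sketch; the "pm-domain" phandle property name is illustrative, not a binding defined by this patch:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_attach_to_domain(struct platform_device *pdev)
{
	struct device_node *pd;
	int ret = 0;

	pd = of_parse_phandle(pdev->dev.of_node, "pm-domain", 0);
	if (pd) {
		/* Matches the domain whose genpd->of_node is pd. */
		ret = pm_genpd_of_add_device(pd, &pdev->dev);
		of_node_put(pd);
	}
	return ret;
}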
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 4d99e4e6ef83..2e9191a712f3 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
@@ -9,12 +9,16 @@ | |||
9 | #include <linux/miscdevice.h> | 9 | #include <linux/miscdevice.h> |
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | 11 | ||
12 | #define PM_QOS_RESERVED 0 | 12 | enum { |
13 | #define PM_QOS_CPU_DMA_LATENCY 1 | 13 | PM_QOS_RESERVED = 0, |
14 | #define PM_QOS_NETWORK_LATENCY 2 | 14 | PM_QOS_CPU_DMA_LATENCY, |
15 | #define PM_QOS_NETWORK_THROUGHPUT 3 | 15 | PM_QOS_NETWORK_LATENCY, |
16 | PM_QOS_NETWORK_THROUGHPUT, | ||
17 | |||
18 | /* insert new class ID */ | ||
19 | PM_QOS_NUM_CLASSES, | ||
20 | }; | ||
16 | 21 | ||
17 | #define PM_QOS_NUM_CLASSES 4 | ||
18 | #define PM_QOS_DEFAULT_VALUE -1 | 22 | #define PM_QOS_DEFAULT_VALUE -1 |
19 | 23 | ||
20 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | 24 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
@@ -63,7 +67,6 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) | |||
63 | return req->dev != 0; | 67 | return req->dev != 0; |
64 | } | 68 | } |
65 | 69 | ||
66 | #ifdef CONFIG_PM | ||
67 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | 70 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
68 | enum pm_qos_req_action action, int value); | 71 | enum pm_qos_req_action action, int value); |
69 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, | 72 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, |
@@ -78,6 +81,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); | |||
78 | int pm_qos_request_active(struct pm_qos_request *req); | 81 | int pm_qos_request_active(struct pm_qos_request *req); |
79 | s32 pm_qos_read_value(struct pm_qos_constraints *c); | 82 | s32 pm_qos_read_value(struct pm_qos_constraints *c); |
80 | 83 | ||
84 | #ifdef CONFIG_PM | ||
81 | s32 __dev_pm_qos_read_value(struct device *dev); | 85 | s32 __dev_pm_qos_read_value(struct device *dev); |
82 | s32 dev_pm_qos_read_value(struct device *dev); | 86 | s32 dev_pm_qos_read_value(struct device *dev); |
83 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | 87 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
@@ -95,45 +99,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev); | |||
95 | int dev_pm_qos_add_ancestor_request(struct device *dev, | 99 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
96 | struct dev_pm_qos_request *req, s32 value); | 100 | struct dev_pm_qos_request *req, s32 value); |
97 | #else | 101 | #else |
98 | static inline int pm_qos_update_target(struct pm_qos_constraints *c, | ||
99 | struct plist_node *node, | ||
100 | enum pm_qos_req_action action, | ||
101 | int value) | ||
102 | { return 0; } | ||
103 | static inline void pm_qos_add_request(struct pm_qos_request *req, | ||
104 | int pm_qos_class, s32 value) | ||
105 | { return; } | ||
106 | static inline void pm_qos_update_request(struct pm_qos_request *req, | ||
107 | s32 new_value) | ||
108 | { return; } | ||
109 | static inline void pm_qos_remove_request(struct pm_qos_request *req) | ||
110 | { return; } | ||
111 | |||
112 | static inline int pm_qos_request(int pm_qos_class) | ||
113 | { | ||
114 | switch (pm_qos_class) { | ||
115 | case PM_QOS_CPU_DMA_LATENCY: | ||
116 | return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; | ||
117 | case PM_QOS_NETWORK_LATENCY: | ||
118 | return PM_QOS_NETWORK_LAT_DEFAULT_VALUE; | ||
119 | case PM_QOS_NETWORK_THROUGHPUT: | ||
120 | return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE; | ||
121 | default: | ||
122 | return PM_QOS_DEFAULT_VALUE; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static inline int pm_qos_add_notifier(int pm_qos_class, | ||
127 | struct notifier_block *notifier) | ||
128 | { return 0; } | ||
129 | static inline int pm_qos_remove_notifier(int pm_qos_class, | ||
130 | struct notifier_block *notifier) | ||
131 | { return 0; } | ||
132 | static inline int pm_qos_request_active(struct pm_qos_request *req) | ||
133 | { return 0; } | ||
134 | static inline s32 pm_qos_read_value(struct pm_qos_constraints *c) | ||
135 | { return 0; } | ||
136 | |||
137 | static inline s32 __dev_pm_qos_read_value(struct device *dev) | 102 | static inline s32 __dev_pm_qos_read_value(struct device *dev) |
138 | { return 0; } | 103 | { return 0; } |
139 | static inline s32 dev_pm_qos_read_value(struct device *dev) | 104 | static inline s32 dev_pm_qos_read_value(struct device *dev) |
@@ -172,4 +137,13 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
172 | { return 0; } | 137 | { return 0; } |
173 | #endif | 138 | #endif |
174 | 139 | ||
140 | #ifdef CONFIG_PM_RUNTIME | ||
141 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | ||
142 | void dev_pm_qos_hide_latency_limit(struct device *dev); | ||
143 | #else | ||
144 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | ||
145 | { return 0; } | ||
146 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | ||
147 | #endif | ||
148 | |||
175 | #endif | 149 | #endif |
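The new CONFIG_PM_RUNTIME block is the driver-facing side of exposing per-device resume-latency constraints: dev_pm_qos_expose_latency_limit() adds a PM QoS request on behalf of user space and surfaces it as the power/pm_qos_resume_latency_us sysfs attribute, while dev_pm_qos_hide_latency_limit() tears it down again. A sketch of an MMC host driver wiring this up (foo_* names hypothetical; 100 us is an arbitrary initial limit):

    #include <linux/platform_device.h>
    #include <linux/pm_qos.h>

    static int foo_mmc_probe(struct platform_device *pdev)
    {
            /* Expose an initial 100 us resume-latency limit that user
             * space can tighten or relax through sysfs. */
            dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
            return 0;
    }

    static int foo_mmc_remove(struct platform_device *pdev)
    {
            /* Drop the request and remove the sysfs attribute. */
            dev_pm_qos_hide_latency_limit(&pdev->dev);
            return 0;
    }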
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index a32da962d693..d9f05113e5fb 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -41,7 +41,7 @@ | |||
41 | * @active: Status of the wakeup source. | 41 | * @active: Status of the wakeup source. |
42 | */ | 42 | */ |
43 | struct wakeup_source { | 43 | struct wakeup_source { |
44 | char *name; | 44 | const char *name; |
45 | struct list_head entry; | 45 | struct list_head entry; |
46 | spinlock_t lock; | 46 | spinlock_t lock; |
47 | struct timer_list timer; | 47 | struct timer_list timer; |
@@ -73,7 +73,9 @@ static inline bool device_may_wakeup(struct device *dev) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | /* drivers/base/power/wakeup.c */ | 75 | /* drivers/base/power/wakeup.c */ |
76 | extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); | ||
76 | extern struct wakeup_source *wakeup_source_create(const char *name); | 77 | extern struct wakeup_source *wakeup_source_create(const char *name); |
78 | extern void wakeup_source_drop(struct wakeup_source *ws); | ||
77 | extern void wakeup_source_destroy(struct wakeup_source *ws); | 79 | extern void wakeup_source_destroy(struct wakeup_source *ws); |
78 | extern void wakeup_source_add(struct wakeup_source *ws); | 80 | extern void wakeup_source_add(struct wakeup_source *ws); |
79 | extern void wakeup_source_remove(struct wakeup_source *ws); | 81 | extern void wakeup_source_remove(struct wakeup_source *ws); |
@@ -103,11 +105,16 @@ static inline bool device_can_wakeup(struct device *dev) | |||
103 | return dev->power.can_wakeup; | 105 | return dev->power.can_wakeup; |
104 | } | 106 | } |
105 | 107 | ||
108 | static inline void wakeup_source_prepare(struct wakeup_source *ws, | ||
109 | const char *name) {} | ||
110 | |||
106 | static inline struct wakeup_source *wakeup_source_create(const char *name) | 111 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
107 | { | 112 | { |
108 | return NULL; | 113 | return NULL; |
109 | } | 114 | } |
110 | 115 | ||
116 | static inline void wakeup_source_drop(struct wakeup_source *ws) {} | ||
117 | |||
111 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} | 118 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} |
112 | 119 | ||
113 | static inline void wakeup_source_add(struct wakeup_source *ws) {} | 120 | static inline void wakeup_source_add(struct wakeup_source *ws) {} |
@@ -165,4 +172,17 @@ static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | |||
165 | 172 | ||
166 | #endif /* !CONFIG_PM_SLEEP */ | 173 | #endif /* !CONFIG_PM_SLEEP */ |
167 | 174 | ||
175 | static inline void wakeup_source_init(struct wakeup_source *ws, | ||
176 | const char *name) | ||
177 | { | ||
178 | wakeup_source_prepare(ws, name); | ||
179 | wakeup_source_add(ws); | ||
180 | } | ||
181 | |||
182 | static inline void wakeup_source_trash(struct wakeup_source *ws) | ||
183 | { | ||
184 | wakeup_source_remove(ws); | ||
185 | wakeup_source_drop(ws); | ||
186 | } | ||
187 | |||
168 | #endif /* _LINUX_PM_WAKEUP_H */ | 188 | #endif /* _LINUX_PM_WAKEUP_H */ |
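Splitting wakeup_source_create()/destroy() into a prepare/drop pair plus allocation is what makes the new wakeup_source_init()/wakeup_source_trash() inlines at the bottom usable on statically embedded objects, with no kmalloc() involved. A sketch (structure and names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    struct foo_chip {
            struct wakeup_source ws;        /* embedded, not allocated */
    };

    static void foo_setup(struct foo_chip *chip)
    {
            /* Set the name and register the source in one step. */
            wakeup_source_init(&chip->ws, "foo_irq");
    }

    static irqreturn_t foo_irq(int irq, void *data)
    {
            struct foo_chip *chip = data;

            /* Hold off suspend for up to 50 ms to process the event. */
            __pm_wakeup_event(&chip->ws, 50);
            return IRQ_HANDLED;
    }

    static void foo_teardown(struct foo_chip *chip)
    {
            /* Deregister, deactivate and delete the source's timer. */
            wakeup_source_trash(&chip->ws);
    }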
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 91784a4f8608..ac1c114c499d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -42,8 +42,10 @@ enum suspend_stat_step { | |||
42 | SUSPEND_FREEZE = 1, | 42 | SUSPEND_FREEZE = 1, |
43 | SUSPEND_PREPARE, | 43 | SUSPEND_PREPARE, |
44 | SUSPEND_SUSPEND, | 44 | SUSPEND_SUSPEND, |
45 | SUSPEND_SUSPEND_LATE, | ||
45 | SUSPEND_SUSPEND_NOIRQ, | 46 | SUSPEND_SUSPEND_NOIRQ, |
46 | SUSPEND_RESUME_NOIRQ, | 47 | SUSPEND_RESUME_NOIRQ, |
48 | SUSPEND_RESUME_EARLY, | ||
47 | SUSPEND_RESUME | 49 | SUSPEND_RESUME |
48 | }; | 50 | }; |
49 | 51 | ||
@@ -53,8 +55,10 @@ struct suspend_stats { | |||
53 | int failed_freeze; | 55 | int failed_freeze; |
54 | int failed_prepare; | 56 | int failed_prepare; |
55 | int failed_suspend; | 57 | int failed_suspend; |
58 | int failed_suspend_late; | ||
56 | int failed_suspend_noirq; | 59 | int failed_suspend_noirq; |
57 | int failed_resume; | 60 | int failed_resume; |
61 | int failed_resume_early; | ||
58 | int failed_resume_noirq; | 62 | int failed_resume_noirq; |
59 | #define REC_FAILED_NUM 2 | 63 | #define REC_FAILED_NUM 2 |
60 | int last_failed_dev; | 64 | int last_failed_dev; |
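The two new steps and counters give the "late" and "early" device suspend/resume phases introduced elsewhere in this series their own failure accounting. A sketch of the expected bookkeeping when the new late phase fails, using the dpm_save_failed_step() helper that has recorded failed steps since the suspend_stats work (paraphrased, not quoted from drivers/base/power/main.c):

    error = dpm_suspend_late(PMSG_SUSPEND);
    if (error) {
            suspend_stats.failed_suspend_late++;
            dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
    }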
diff --git a/kernel/exit.c b/kernel/exit.c index ce5f758f40bd..0ed15fed579f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -424,7 +424,7 @@ void daemonize(const char *name, ...) | |||
424 | */ | 424 | */ |
425 | exit_mm(current); | 425 | exit_mm(current); |
426 | /* | 426 | /* |
427 | * We don't want to have TIF_FREEZE set if the system-wide hibernation | 427 | * We don't want to get frozen, in case system-wide hibernation |
428 | * or suspend transition begins right now. | 428 | * or suspend transition begins right now. |
429 | */ | 429 | */ |
430 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); | 430 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); |
diff --git a/kernel/freezer.c b/kernel/freezer.c index 9815b8d1eed5..11f82a4d4eae 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -99,9 +99,9 @@ static void fake_signal_wake_up(struct task_struct *p) | |||
99 | * freeze_task - send a freeze request to given task | 99 | * freeze_task - send a freeze request to given task |
100 | * @p: task to send the request to | 100 | * @p: task to send the request to |
101 | * | 101 | * |
102 | * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE | 102 | * If @p is freezing, the freeze request is sent either by sending a fake |
103 | * flag and either sending a fake signal to it or waking it up, depending | 103 | * signal (if it's not a kernel thread) or waking it up (if it's a kernel |
104 | * on whether it has %PF_FREEZER_NOSIG set. | 104 | * thread). |
105 | * | 105 | * |
106 | * RETURNS: | 106 | * RETURNS: |
107 | * %false, if @p is not freezing or already frozen; %true, otherwise | 107 | * %false, if @p is not freezing or already frozen; %true, otherwise |
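The rewritten kerneldoc reflects the removal of TIF_FREEZE: a freeze request is now simply freezing(p) becoming true plus a wake-up, delivered as a fake signal to user tasks and a plain wake-up to kernel threads. The kernel-thread side of that contract is opt-in; a generic sketch of a freezable kthread (not taken from this pull):

    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static int foo_thread(void *unused)
    {
            set_freezable();        /* kthreads are PF_NOFREEZE by default */

            while (!kthread_should_stop()) {
                    /* Park here for the duration of suspend/hibernation. */
                    try_to_freeze();

                    /* ... do one unit of work ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }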
diff --git a/kernel/kexec.c b/kernel/kexec.c index 7b0886786701..a6a675cb9818 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1546,13 +1546,13 @@ int kernel_kexec(void) | |||
1546 | if (error) | 1546 | if (error) |
1547 | goto Resume_console; | 1547 | goto Resume_console; |
1548 | /* At this point, dpm_suspend_start() has been called, | 1548 | /* At this point, dpm_suspend_start() has been called, |
1549 | * but *not* dpm_suspend_noirq(). We *must* call | 1549 | * but *not* dpm_suspend_end(). We *must* call |
1550 | * dpm_suspend_noirq() now. Otherwise, drivers for | 1550 | * dpm_suspend_end() now. Otherwise, drivers for |
1551 | * some devices (e.g. interrupt controllers) become | 1551 | * some devices (e.g. interrupt controllers) become |
1552 | * desynchronized with the actual state of the | 1552 | * desynchronized with the actual state of the |
1553 | * hardware at resume time, and evil weirdness ensues. | 1553 | * hardware at resume time, and evil weirdness ensues. |
1554 | */ | 1554 | */ |
1555 | error = dpm_suspend_noirq(PMSG_FREEZE); | 1555 | error = dpm_suspend_end(PMSG_FREEZE); |
1556 | if (error) | 1556 | if (error) |
1557 | goto Resume_devices; | 1557 | goto Resume_devices; |
1558 | error = disable_nonboot_cpus(); | 1558 | error = disable_nonboot_cpus(); |
@@ -1579,7 +1579,7 @@ int kernel_kexec(void) | |||
1579 | local_irq_enable(); | 1579 | local_irq_enable(); |
1580 | Enable_cpus: | 1580 | Enable_cpus: |
1581 | enable_nonboot_cpus(); | 1581 | enable_nonboot_cpus(); |
1582 | dpm_resume_noirq(PMSG_RESTORE); | 1582 | dpm_resume_start(PMSG_RESTORE); |
1583 | Resume_devices: | 1583 | Resume_devices: |
1584 | dpm_resume_end(PMSG_RESTORE); | 1584 | dpm_resume_end(PMSG_RESTORE); |
1585 | Resume_console: | 1585 | Resume_console: |
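kernel_kexec() moves from the bare "noirq" phase to the new combined entry points so that the "late" and "early" device callbacks run on this path too. The composition is roughly the following (a sketch of the intent, not the exact drivers/base/power/main.c code):

    /* way down: "late" callbacks with IRQs still on, then "noirq" */
    int dpm_suspend_end(pm_message_t state)
    {
            int error = dpm_suspend_late(state);

            return error ? error : dpm_suspend_noirq(state);
    }

    /* way up: the mirror image -- "noirq" first, then "early" */
    void dpm_resume_start(pm_message_t state)
    {
            dpm_resume_noirq(state);
            dpm_resume_early(state);
    }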
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 07e0e28ffba7..66d808ec5252 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
@@ -1,7 +1,8 @@ | |||
1 | 1 | ||
2 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG | 2 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG |
3 | 3 | ||
4 | obj-$(CONFIG_PM) += main.o qos.o | 4 | obj-y += qos.o |
5 | obj-$(CONFIG_PM) += main.o | ||
5 | obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o | 6 | obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o |
6 | obj-$(CONFIG_FREEZER) += process.o | 7 | obj-$(CONFIG_FREEZER) += process.o |
7 | obj-$(CONFIG_SUSPEND) += suspend.o | 8 | obj-$(CONFIG_SUSPEND) += suspend.o |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 6d6d28870335..0a186cfde788 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -245,8 +245,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop, | |||
245 | * create_image - Create a hibernation image. | 245 | * create_image - Create a hibernation image. |
246 | * @platform_mode: Whether or not to use the platform driver. | 246 | * @platform_mode: Whether or not to use the platform driver. |
247 | * | 247 | * |
248 | * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image | 248 | * Execute device drivers' "late" and "noirq" freeze callbacks, create a |
249 | * and execute the drivers' .thaw_noirq() callbacks. | 249 | * hibernation image and run the drivers' "noirq" and "early" thaw callbacks. |
250 | * | 250 | * |
251 | * Control reappears in this routine after the subsequent restore. | 251 | * Control reappears in this routine after the subsequent restore. |
252 | */ | 252 | */ |
@@ -254,7 +254,7 @@ static int create_image(int platform_mode) | |||
254 | { | 254 | { |
255 | int error; | 255 | int error; |
256 | 256 | ||
257 | error = dpm_suspend_noirq(PMSG_FREEZE); | 257 | error = dpm_suspend_end(PMSG_FREEZE); |
258 | if (error) { | 258 | if (error) { |
259 | printk(KERN_ERR "PM: Some devices failed to power down, " | 259 | printk(KERN_ERR "PM: Some devices failed to power down, " |
260 | "aborting hibernation\n"); | 260 | "aborting hibernation\n"); |
@@ -306,7 +306,7 @@ static int create_image(int platform_mode) | |||
306 | Platform_finish: | 306 | Platform_finish: |
307 | platform_finish(platform_mode); | 307 | platform_finish(platform_mode); |
308 | 308 | ||
309 | dpm_resume_noirq(in_suspend ? | 309 | dpm_resume_start(in_suspend ? |
310 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 310 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
311 | 311 | ||
312 | return error; | 312 | return error; |
@@ -343,13 +343,13 @@ int hibernation_snapshot(int platform_mode) | |||
343 | * successful freezer test. | 343 | * successful freezer test. |
344 | */ | 344 | */ |
345 | freezer_test_done = true; | 345 | freezer_test_done = true; |
346 | goto Cleanup; | 346 | goto Thaw; |
347 | } | 347 | } |
348 | 348 | ||
349 | error = dpm_prepare(PMSG_FREEZE); | 349 | error = dpm_prepare(PMSG_FREEZE); |
350 | if (error) { | 350 | if (error) { |
351 | dpm_complete(PMSG_RECOVER); | 351 | dpm_complete(PMSG_RECOVER); |
352 | goto Cleanup; | 352 | goto Thaw; |
353 | } | 353 | } |
354 | 354 | ||
355 | suspend_console(); | 355 | suspend_console(); |
@@ -385,6 +385,8 @@ int hibernation_snapshot(int platform_mode) | |||
385 | platform_end(platform_mode); | 385 | platform_end(platform_mode); |
386 | return error; | 386 | return error; |
387 | 387 | ||
388 | Thaw: | ||
389 | thaw_kernel_threads(); | ||
388 | Cleanup: | 390 | Cleanup: |
389 | swsusp_free(); | 391 | swsusp_free(); |
390 | goto Close; | 392 | goto Close; |
@@ -394,16 +396,16 @@ int hibernation_snapshot(int platform_mode) | |||
394 | * resume_target_kernel - Restore system state from a hibernation image. | 396 | * resume_target_kernel - Restore system state from a hibernation image. |
395 | * @platform_mode: Whether or not to use the platform driver. | 397 | * @platform_mode: Whether or not to use the platform driver. |
396 | * | 398 | * |
397 | * Execute device drivers' .freeze_noirq() callbacks, restore the contents of | 399 | * Execute device drivers' "noirq" and "late" freeze callbacks, restore the |
398 | * highmem that have not been restored yet from the image and run the low-level | 400 | * contents of highmem that have not been restored yet from the image and run |
399 | * code that will restore the remaining contents of memory and switch to the | 401 | * the low-level code that will restore the remaining contents of memory and |
400 | * just restored target kernel. | 402 | * switch to the just restored target kernel. |
401 | */ | 403 | */ |
402 | static int resume_target_kernel(bool platform_mode) | 404 | static int resume_target_kernel(bool platform_mode) |
403 | { | 405 | { |
404 | int error; | 406 | int error; |
405 | 407 | ||
406 | error = dpm_suspend_noirq(PMSG_QUIESCE); | 408 | error = dpm_suspend_end(PMSG_QUIESCE); |
407 | if (error) { | 409 | if (error) { |
408 | printk(KERN_ERR "PM: Some devices failed to power down, " | 410 | printk(KERN_ERR "PM: Some devices failed to power down, " |
409 | "aborting resume\n"); | 411 | "aborting resume\n"); |
@@ -460,7 +462,7 @@ static int resume_target_kernel(bool platform_mode) | |||
460 | Cleanup: | 462 | Cleanup: |
461 | platform_restore_cleanup(platform_mode); | 463 | platform_restore_cleanup(platform_mode); |
462 | 464 | ||
463 | dpm_resume_noirq(PMSG_RECOVER); | 465 | dpm_resume_start(PMSG_RECOVER); |
464 | 466 | ||
465 | return error; | 467 | return error; |
466 | } | 468 | } |
@@ -518,7 +520,7 @@ int hibernation_platform_enter(void) | |||
518 | goto Resume_devices; | 520 | goto Resume_devices; |
519 | } | 521 | } |
520 | 522 | ||
521 | error = dpm_suspend_noirq(PMSG_HIBERNATE); | 523 | error = dpm_suspend_end(PMSG_HIBERNATE); |
522 | if (error) | 524 | if (error) |
523 | goto Resume_devices; | 525 | goto Resume_devices; |
524 | 526 | ||
@@ -549,7 +551,7 @@ int hibernation_platform_enter(void) | |||
549 | Platform_finish: | 551 | Platform_finish: |
550 | hibernation_ops->finish(); | 552 | hibernation_ops->finish(); |
551 | 553 | ||
552 | dpm_resume_noirq(PMSG_RESTORE); | 554 | dpm_resume_start(PMSG_RESTORE); |
553 | 555 | ||
554 | Resume_devices: | 556 | Resume_devices: |
555 | entering_platform_hibernation = false; | 557 | entering_platform_hibernation = false; |
@@ -616,7 +618,7 @@ int hibernate(void) | |||
616 | /* Allocate memory management structures */ | 618 | /* Allocate memory management structures */ |
617 | error = create_basic_memory_bitmaps(); | 619 | error = create_basic_memory_bitmaps(); |
618 | if (error) | 620 | if (error) |
619 | goto Exit; | 621 | goto Enable_umh; |
620 | 622 | ||
621 | printk(KERN_INFO "PM: Syncing filesystems ... "); | 623 | printk(KERN_INFO "PM: Syncing filesystems ... "); |
622 | sys_sync(); | 624 | sys_sync(); |
@@ -624,15 +626,11 @@ int hibernate(void) | |||
624 | 626 | ||
625 | error = freeze_processes(); | 627 | error = freeze_processes(); |
626 | if (error) | 628 | if (error) |
627 | goto Finish; | 629 | goto Free_bitmaps; |
628 | 630 | ||
629 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); | 631 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); |
630 | if (error) | 632 | if (error || freezer_test_done) |
631 | goto Thaw; | ||
632 | if (freezer_test_done) { | ||
633 | freezer_test_done = false; | ||
634 | goto Thaw; | 633 | goto Thaw; |
635 | } | ||
636 | 634 | ||
637 | if (in_suspend) { | 635 | if (in_suspend) { |
638 | unsigned int flags = 0; | 636 | unsigned int flags = 0; |
@@ -657,8 +655,13 @@ int hibernate(void) | |||
657 | 655 | ||
658 | Thaw: | 656 | Thaw: |
659 | thaw_processes(); | 657 | thaw_processes(); |
660 | Finish: | 658 | |
659 | /* Don't bother checking whether freezer_test_done is true */ | ||
660 | freezer_test_done = false; | ||
661 | |||
662 | Free_bitmaps: | ||
661 | free_basic_memory_bitmaps(); | 663 | free_basic_memory_bitmaps(); |
664 | Enable_umh: | ||
662 | usermodehelper_enable(); | 665 | usermodehelper_enable(); |
663 | Exit: | 666 | Exit: |
664 | pm_notifier_call_chain(PM_POST_HIBERNATION); | 667 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
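Two unwind bugs are fixed here: hibernation_snapshot() now thaws kernel threads itself on its early exits (the new Thaw label), and hibernate() re-enables usermode helpers when bitmap allocation fails instead of jumping past usermodehelper_enable(). The disable/enable pairing this keeps balanced looks roughly like:

    error = usermodehelper_disable();
    if (error)
            goto Exit;              /* nothing to undo yet */

    error = create_basic_memory_bitmaps();
    if (error)
            goto Enable_umh;        /* was: goto Exit, which leaked the
                                     * usermodehelper_disable() above */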
diff --git a/kernel/power/main.c b/kernel/power/main.c index 9824b41e5a18..1c12581f1c62 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -165,16 +165,20 @@ static int suspend_stats_show(struct seq_file *s, void *unused) | |||
165 | last_errno %= REC_FAILED_NUM; | 165 | last_errno %= REC_FAILED_NUM; |
166 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; | 166 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; |
167 | last_step %= REC_FAILED_NUM; | 167 | last_step %= REC_FAILED_NUM; |
168 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n" | 168 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n" |
169 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n", | 169 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n", |
170 | "success", suspend_stats.success, | 170 | "success", suspend_stats.success, |
171 | "fail", suspend_stats.fail, | 171 | "fail", suspend_stats.fail, |
172 | "failed_freeze", suspend_stats.failed_freeze, | 172 | "failed_freeze", suspend_stats.failed_freeze, |
173 | "failed_prepare", suspend_stats.failed_prepare, | 173 | "failed_prepare", suspend_stats.failed_prepare, |
174 | "failed_suspend", suspend_stats.failed_suspend, | 174 | "failed_suspend", suspend_stats.failed_suspend, |
175 | "failed_suspend_late", | ||
176 | suspend_stats.failed_suspend_late, | ||
175 | "failed_suspend_noirq", | 177 | "failed_suspend_noirq", |
176 | suspend_stats.failed_suspend_noirq, | 178 | suspend_stats.failed_suspend_noirq, |
177 | "failed_resume", suspend_stats.failed_resume, | 179 | "failed_resume", suspend_stats.failed_resume, |
180 | "failed_resume_early", | ||
181 | suspend_stats.failed_resume_early, | ||
178 | "failed_resume_noirq", | 182 | "failed_resume_noirq", |
179 | suspend_stats.failed_resume_noirq); | 183 | suspend_stats.failed_resume_noirq); |
180 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", | 184 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", |
@@ -287,16 +291,10 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
287 | 291 | ||
288 | #ifdef CONFIG_SUSPEND | 292 | #ifdef CONFIG_SUSPEND |
289 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { | 293 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { |
290 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) | 294 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { |
295 | error = pm_suspend(state); | ||
291 | break; | 296 | break; |
292 | } | 297 | } |
293 | if (state < PM_SUSPEND_MAX && *s) { | ||
294 | error = enter_state(state); | ||
295 | if (error) { | ||
296 | suspend_stats.fail++; | ||
297 | dpm_save_failed_errno(error); | ||
298 | } else | ||
299 | suspend_stats.success++; | ||
300 | } | 298 | } |
301 | #endif | 299 | #endif |
302 | 300 | ||
diff --git a/kernel/power/power.h b/kernel/power/power.h index 21724eee5206..98f3622d7407 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -177,13 +177,11 @@ extern const char *const pm_states[]; | |||
177 | 177 | ||
178 | extern bool valid_state(suspend_state_t state); | 178 | extern bool valid_state(suspend_state_t state); |
179 | extern int suspend_devices_and_enter(suspend_state_t state); | 179 | extern int suspend_devices_and_enter(suspend_state_t state); |
180 | extern int enter_state(suspend_state_t state); | ||
181 | #else /* !CONFIG_SUSPEND */ | 180 | #else /* !CONFIG_SUSPEND */ |
182 | static inline int suspend_devices_and_enter(suspend_state_t state) | 181 | static inline int suspend_devices_and_enter(suspend_state_t state) |
183 | { | 182 | { |
184 | return -ENOSYS; | 183 | return -ENOSYS; |
185 | } | 184 | } |
186 | static inline int enter_state(suspend_state_t state) { return -ENOSYS; } | ||
187 | static inline bool valid_state(suspend_state_t state) { return false; } | 185 | static inline bool valid_state(suspend_state_t state) { return false; } |
188 | #endif /* !CONFIG_SUSPEND */ | 186 | #endif /* !CONFIG_SUSPEND */ |
189 | 187 | ||
@@ -234,16 +232,14 @@ static inline int suspend_freeze_processes(void) | |||
234 | int error; | 232 | int error; |
235 | 233 | ||
236 | error = freeze_processes(); | 234 | error = freeze_processes(); |
237 | |||
238 | /* | 235 | /* |
239 | * freeze_processes() automatically thaws every task if freezing | 236 | * freeze_processes() automatically thaws every task if freezing |
240 | * fails. So we need not do anything extra upon error. | 237 | * fails. So we need not do anything extra upon error. |
241 | */ | 238 | */ |
242 | if (error) | 239 | if (error) |
243 | goto Finish; | 240 | return error; |
244 | 241 | ||
245 | error = freeze_kernel_threads(); | 242 | error = freeze_kernel_threads(); |
246 | |||
247 | /* | 243 | /* |
248 | * freeze_kernel_threads() thaws only kernel threads upon freezing | 244 | * freeze_kernel_threads() thaws only kernel threads upon freezing |
249 | * failure. So we have to thaw the userspace tasks ourselves. | 245 | * failure. So we have to thaw the userspace tasks ourselves. |
@@ -251,7 +247,6 @@ static inline int suspend_freeze_processes(void) | |||
251 | if (error) | 247 | if (error) |
252 | thaw_processes(); | 248 | thaw_processes(); |
253 | 249 | ||
254 | Finish: | ||
255 | return error; | 250 | return error; |
256 | } | 251 | } |
257 | 252 | ||
diff --git a/kernel/power/process.c b/kernel/power/process.c index 7e426459e60a..0d2aeb226108 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -53,11 +53,9 @@ static int try_to_freeze_tasks(bool user_only) | |||
53 | * It is "frozen enough". If the task does wake | 53 | * It is "frozen enough". If the task does wake |
54 | * up, it will immediately call try_to_freeze. | 54 | * up, it will immediately call try_to_freeze. |
55 | * | 55 | * |
56 | * Because freeze_task() goes through p's | 56 | * Because freeze_task() goes through p's scheduler lock, it's |
57 | * scheduler lock after setting TIF_FREEZE, it's | 57 | * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING |
58 | * guaranteed that either we see TASK_RUNNING or | 58 | * transition can't race with task state testing here. |
59 | * try_to_stop() after schedule() in ptrace/signal | ||
60 | * stop sees TIF_FREEZE. | ||
61 | */ | 59 | */ |
62 | if (!task_is_stopped_or_traced(p) && | 60 | if (!task_is_stopped_or_traced(p) && |
63 | !freezer_should_skip(p)) | 61 | !freezer_should_skip(p)) |
@@ -98,13 +96,15 @@ static int try_to_freeze_tasks(bool user_only) | |||
98 | elapsed_csecs / 100, elapsed_csecs % 100, | 96 | elapsed_csecs / 100, elapsed_csecs % 100, |
99 | todo - wq_busy, wq_busy); | 97 | todo - wq_busy, wq_busy); |
100 | 98 | ||
101 | read_lock(&tasklist_lock); | 99 | if (!wakeup) { |
102 | do_each_thread(g, p) { | 100 | read_lock(&tasklist_lock); |
103 | if (!wakeup && !freezer_should_skip(p) && | 101 | do_each_thread(g, p) { |
104 | p != current && freezing(p) && !frozen(p)) | 102 | if (p != current && !freezer_should_skip(p) |
105 | sched_show_task(p); | 103 | && freezing(p) && !frozen(p)) |
106 | } while_each_thread(g, p); | 104 | sched_show_task(p); |
107 | read_unlock(&tasklist_lock); | 105 | } while_each_thread(g, p); |
106 | read_unlock(&tasklist_lock); | ||
107 | } | ||
108 | } else { | 108 | } else { |
109 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, | 109 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, |
110 | elapsed_csecs % 100); | 110 | elapsed_csecs % 100); |
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 995e3bd3417b..d6d6dbd1ecc0 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
@@ -469,21 +469,18 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
469 | static int __init pm_qos_power_init(void) | 469 | static int __init pm_qos_power_init(void) |
470 | { | 470 | { |
471 | int ret = 0; | 471 | int ret = 0; |
472 | int i; | ||
472 | 473 | ||
473 | ret = register_pm_qos_misc(&cpu_dma_pm_qos); | 474 | BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES); |
474 | if (ret < 0) { | 475 | |
475 | printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n"); | 476 | for (i = 1; i < PM_QOS_NUM_CLASSES; i++) { |
476 | return ret; | 477 | ret = register_pm_qos_misc(pm_qos_array[i]); |
477 | } | 478 | if (ret < 0) { |
478 | ret = register_pm_qos_misc(&network_lat_pm_qos); | 479 | printk(KERN_ERR "pm_qos_param: %s setup failed\n", |
479 | if (ret < 0) { | 480 | pm_qos_array[i]->name); |
480 | printk(KERN_ERR "pm_qos_param: network_latency setup failed\n"); | 481 | return ret; |
481 | return ret; | 482 | } |
482 | } | 483 | } |
483 | ret = register_pm_qos_misc(&network_throughput_pm_qos); | ||
484 | if (ret < 0) | ||
485 | printk(KERN_ERR | ||
486 | "pm_qos_param: network_throughput setup failed\n"); | ||
487 | 484 | ||
488 | return ret; | 485 | return ret; |
489 | } | 486 | } |
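The loop replaces three open-coded registrations and starts at i = 1 because PM_QOS_RESERVED has no misc device behind it. The BUILD_BUG_ON() ties the table to the new class enum in pm_qos.h at compile time; the table it walks looks roughly like this (a sketch; the real array is defined earlier in kernel/power/qos.c):

    static struct pm_qos_object *pm_qos_array[] = {
            &null_pm_qos,                   /* PM_QOS_RESERVED */
            &cpu_dma_pm_qos,                /* PM_QOS_CPU_DMA_LATENCY */
            &network_lat_pm_qos,            /* PM_QOS_NETWORK_LATENCY */
            &network_throughput_pm_qos,     /* PM_QOS_NETWORK_THROUGHPUT */
    };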
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3a564ac85f36..0de28576807d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -711,9 +711,10 @@ static void mark_nosave_pages(struct memory_bitmap *bm) | |||
711 | list_for_each_entry(region, &nosave_regions, list) { | 711 | list_for_each_entry(region, &nosave_regions, list) { |
712 | unsigned long pfn; | 712 | unsigned long pfn; |
713 | 713 | ||
714 | pr_debug("PM: Marking nosave pages: %016lx - %016lx\n", | 714 | pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n", |
715 | region->start_pfn << PAGE_SHIFT, | 715 | (unsigned long long) region->start_pfn << PAGE_SHIFT, |
716 | region->end_pfn << PAGE_SHIFT); | 716 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) |
717 | - 1); | ||
717 | 718 | ||
718 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) | 719 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) |
719 | if (pfn_valid(pfn)) { | 720 | if (pfn_valid(pfn)) { |
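Two details carry this fix: widening to unsigned long long before the shift keeps pfn << PAGE_SHIFT from truncating above 4 GiB on 32-bit, and the "- 1" turns the exclusive end_pfn into the inclusive end address that the "[mem start-end]" convention prints. For instance, a nosave region spanning pfns 0x100-0x200 with 4 KiB pages now comes out as:

    PM: Marking nosave pages: [mem 0x00100000-0x001fffff]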
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4fd51beed879..88e5c967370d 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -37,8 +37,8 @@ const char *const pm_states[PM_SUSPEND_MAX] = { | |||
37 | static const struct platform_suspend_ops *suspend_ops; | 37 | static const struct platform_suspend_ops *suspend_ops; |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * suspend_set_ops - Set the global suspend method table. | 40 | * suspend_set_ops - Set the global suspend method table. |
41 | * @ops: Pointer to ops structure. | 41 | * @ops: Suspend operations to use. |
42 | */ | 42 | */ |
43 | void suspend_set_ops(const struct platform_suspend_ops *ops) | 43 | void suspend_set_ops(const struct platform_suspend_ops *ops) |
44 | { | 44 | { |
@@ -58,11 +58,11 @@ bool valid_state(suspend_state_t state) | |||
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
61 | * suspend_valid_only_mem - generic memory-only valid callback | 61 | * suspend_valid_only_mem - Generic memory-only valid callback. |
62 | * | 62 | * |
63 | * Platform drivers that implement mem suspend only and only need | 63 | * Platform drivers that implement mem suspend only and only need to check for |
64 | * to check for that in their .valid callback can use this instead | 64 | * that in their .valid() callback can use this instead of rolling their own |
65 | * of rolling their own .valid callback. | 65 | * .valid() callback. |
66 | */ | 66 | */ |
67 | int suspend_valid_only_mem(suspend_state_t state) | 67 | int suspend_valid_only_mem(suspend_state_t state) |
68 | { | 68 | { |
@@ -83,10 +83,11 @@ static int suspend_test(int level) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | /** | 85 | /** |
86 | * suspend_prepare - Do prep work before entering low-power state. | 86 | * suspend_prepare - Prepare for entering system sleep state. |
87 | * | 87 | * |
88 | * This is common code that is called for each state that we're entering. | 88 | * Common code run for every system sleep state that can be entered (except for |
89 | * Run suspend notifiers, allocate a console and stop all processes. | 89 | * hibernation). Run suspend notifiers, allocate the "suspend" console and |
90 | * freeze processes. | ||
90 | */ | 91 | */ |
91 | static int suspend_prepare(void) | 92 | static int suspend_prepare(void) |
92 | { | 93 | { |
@@ -131,9 +132,9 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | |||
131 | } | 132 | } |
132 | 133 | ||
133 | /** | 134 | /** |
134 | * suspend_enter - enter the desired system sleep state. | 135 | * suspend_enter - Make the system enter the given sleep state. |
135 | * @state: State to enter | 136 | * @state: System sleep state to enter. |
136 | * @wakeup: Returns information that suspend should not be entered again. | 137 | * @wakeup: Returns information that the sleep state should not be re-entered. |
137 | * | 138 | * |
138 | * This function should be called after devices have been suspended. | 139 | * This function should be called after devices have been suspended. |
139 | */ | 140 | */ |
@@ -147,7 +148,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
147 | goto Platform_finish; | 148 | goto Platform_finish; |
148 | } | 149 | } |
149 | 150 | ||
150 | error = dpm_suspend_noirq(PMSG_SUSPEND); | 151 | error = dpm_suspend_end(PMSG_SUSPEND); |
151 | if (error) { | 152 | if (error) { |
152 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 153 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
153 | goto Platform_finish; | 154 | goto Platform_finish; |
@@ -189,7 +190,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
189 | if (suspend_ops->wake) | 190 | if (suspend_ops->wake) |
190 | suspend_ops->wake(); | 191 | suspend_ops->wake(); |
191 | 192 | ||
192 | dpm_resume_noirq(PMSG_RESUME); | 193 | dpm_resume_start(PMSG_RESUME); |
193 | 194 | ||
194 | Platform_finish: | 195 | Platform_finish: |
195 | if (suspend_ops->finish) | 196 | if (suspend_ops->finish) |
@@ -199,9 +200,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
199 | } | 200 | } |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * suspend_devices_and_enter - suspend devices and enter the desired system | 203 | * suspend_devices_and_enter - Suspend devices and enter system sleep state. |
203 | * sleep state. | 204 | * @state: System sleep state to enter. |
204 | * @state: state to enter | ||
205 | */ | 205 | */ |
206 | int suspend_devices_and_enter(suspend_state_t state) | 206 | int suspend_devices_and_enter(suspend_state_t state) |
207 | { | 207 | { |
@@ -251,10 +251,10 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | /** | 253 | /** |
254 | * suspend_finish - Do final work before exiting suspend sequence. | 254 | * suspend_finish - Clean up before finishing the suspend sequence. |
255 | * | 255 | * |
256 | * Call platform code to clean up, restart processes, and free the | 256 | * Call platform code to clean up, restart processes, and free the console that |
257 | * console that we've allocated. This is not called for suspend-to-disk. | 257 | * we've allocated. This routine is not called for hibernation. |
258 | */ | 258 | */ |
259 | static void suspend_finish(void) | 259 | static void suspend_finish(void) |
260 | { | 260 | { |
@@ -265,16 +265,14 @@ static void suspend_finish(void) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | /** | 267 | /** |
268 | * enter_state - Do common work of entering low-power state. | 268 | * enter_state - Do common work needed to enter system sleep state. |
269 | * @state: pm_state structure for state we're entering. | 269 | * @state: System sleep state to enter. |
270 | * | 270 | * |
271 | * Make sure we're the only ones trying to enter a sleep state. Fail | 271 | * Make sure that no one else is trying to put the system into a sleep state. |
272 | * if someone has beat us to it, since we don't want anything weird to | 272 | * Fail if that's not the case. Otherwise, prepare for system suspend, make the |
273 | * happen when we wake up. | 273 | * system enter the given sleep state and clean up after wakeup. |
274 | * Then, do the setup for suspend, enter the state, and cleaup (after | ||
275 | * we've woken up). | ||
276 | */ | 274 | */ |
277 | int enter_state(suspend_state_t state) | 275 | static int enter_state(suspend_state_t state) |
278 | { | 276 | { |
279 | int error; | 277 | int error; |
280 | 278 | ||
@@ -310,24 +308,26 @@ int enter_state(suspend_state_t state) | |||
310 | } | 308 | } |
311 | 309 | ||
312 | /** | 310 | /** |
313 | * pm_suspend - Externally visible function for suspending system. | 311 | * pm_suspend - Externally visible function for suspending the system. |
314 | * @state: Enumerated value of state to enter. | 312 | * @state: System sleep state to enter. |
315 | * | 313 | * |
316 | * Determine whether or not value is within range, get state | 314 | * Check if the value of @state represents one of the supported states, |
317 | * structure, and enter (above). | 315 | * execute enter_state() and update system suspend statistics. |
318 | */ | 316 | */ |
319 | int pm_suspend(suspend_state_t state) | 317 | int pm_suspend(suspend_state_t state) |
320 | { | 318 | { |
321 | int ret; | 319 | int error; |
322 | if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) { | 320 | |
323 | ret = enter_state(state); | 321 | if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) |
324 | if (ret) { | 322 | return -EINVAL; |
325 | suspend_stats.fail++; | 323 | |
326 | dpm_save_failed_errno(ret); | 324 | error = enter_state(state); |
327 | } else | 325 | if (error) { |
328 | suspend_stats.success++; | 326 | suspend_stats.fail++; |
329 | return ret; | 327 | dpm_save_failed_errno(error); |
328 | } else { | ||
329 | suspend_stats.success++; | ||
330 | } | 330 | } |
331 | return -EINVAL; | 331 | return error; |
332 | } | 332 | } |
333 | EXPORT_SYMBOL(pm_suspend); | 333 | EXPORT_SYMBOL(pm_suspend); |
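pm_suspend() is now the single entry point that both validates the requested state and maintains suspend_stats, which is what let state_store() in kernel/power/main.c above shrink to a plain pm_suspend() call. Kernel-side usage stays a one-liner (hypothetical caller):

    #include <linux/suspend.h>

    static void foo_lid_close_handler(void)
    {
            int error = pm_suspend(PM_SUSPEND_MEM);

            if (error)
                    pr_warn("foo: suspend failed: %d\n", error);
            /* success/failure counters were updated by pm_suspend() */
    }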
diff --git a/kernel/power/user.c b/kernel/power/user.c index 3e100075b13c..33c4329205af 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -249,16 +249,10 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
249 | } | 249 | } |
250 | pm_restore_gfp_mask(); | 250 | pm_restore_gfp_mask(); |
251 | error = hibernation_snapshot(data->platform_support); | 251 | error = hibernation_snapshot(data->platform_support); |
252 | if (error) { | 252 | if (!error) { |
253 | thaw_kernel_threads(); | ||
254 | } else { | ||
255 | error = put_user(in_suspend, (int __user *)arg); | 253 | error = put_user(in_suspend, (int __user *)arg); |
256 | if (!error && !freezer_test_done) | 254 | data->ready = !freezer_test_done && !error; |
257 | data->ready = 1; | 255 | freezer_test_done = false; |
258 | if (freezer_test_done) { | ||
259 | freezer_test_done = false; | ||
260 | thaw_kernel_threads(); | ||
261 | } | ||
262 | } | 256 | } |
263 | break; | 257 | break; |
264 | 258 | ||
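The snapshot_ioctl() rewrite folds the thaw_kernel_threads() calls into hibernation_snapshot() (see kernel/power/hibernate.c above) and computes data->ready in one expression. The user-space contract is unchanged; a uswsusp-style caller still looks like this (a sketch, assuming the definitions in <linux/suspend_ioctls.h>):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/suspend_ioctls.h>

    int create_image(int snapshot_fd)
    {
            int in_suspend = 0;

            /* Freezes devices, snapshots memory, thaws devices. */
            if (ioctl(snapshot_fd, SNAPSHOT_CREATE_IMAGE, &in_suspend))
                    return -1;

            /* in_suspend != 0: image created, write it out to swap;
             * in_suspend == 0: this is the resume pass after restore. */
            return in_suspend;
    }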