Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/acpi/sleep.c                  |  16
 -rw-r--r--  drivers/amba/bus.c                    | 136
 -rw-r--r--  drivers/base/firmware_class.c         |   4
 -rw-r--r--  drivers/base/platform.c               | 115
 -rw-r--r--  drivers/base/power/generic_ops.c      |  91
 -rw-r--r--  drivers/base/power/main.c             | 375
 -rw-r--r--  drivers/base/power/runtime.c          |   9
 -rw-r--r--  drivers/bluetooth/btmrvl_main.c       |   2
 -rw-r--r--  drivers/dma/dmatest.c                 |  46
 -rw-r--r--  drivers/mfd/twl6030-irq.c             |   2
 -rw-r--r--  drivers/net/irda/stir4200.c           |   2
 -rw-r--r--  drivers/platform/x86/thinkpad_acpi.c  |  15
 -rw-r--r--  drivers/staging/rts_pstor/rtsx.c      |   2
 -rw-r--r--  drivers/usb/storage/usb.c             |  13

 14 files changed, 250 insertions, 578 deletions
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db2..0a7ed69546ba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
| @@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 476 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), | 476 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), |
| 477 | }, | 477 | }, |
| 478 | }, | 478 | }, |
| 479 | { | ||
| 480 | .callback = init_nvs_nosave, | ||
| 481 | .ident = "Asus K54C", | ||
| 482 | .matches = { | ||
| 483 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
| 484 | DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), | ||
| 485 | }, | ||
| 486 | }, | ||
| 487 | { | ||
| 488 | .callback = init_nvs_nosave, | ||
| 489 | .ident = "Asus K54HR", | ||
| 490 | .matches = { | ||
| 491 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
| 492 | DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), | ||
| 493 | }, | ||
| 494 | }, | ||
| 479 | {}, | 495 | {}, |
| 480 | }; | 496 | }; |
| 481 | #endif /* CONFIG_SUSPEND */ | 497 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e801131..0304b3fdff5a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
| @@ -109,31 +109,7 @@ static int amba_legacy_resume(struct device *dev) | |||
| 109 | return ret; | 109 | return ret; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | static int amba_pm_prepare(struct device *dev) | 112 | #endif /* CONFIG_PM_SLEEP */ |
| 113 | { | ||
| 114 | struct device_driver *drv = dev->driver; | ||
| 115 | int ret = 0; | ||
| 116 | |||
| 117 | if (drv && drv->pm && drv->pm->prepare) | ||
| 118 | ret = drv->pm->prepare(dev); | ||
| 119 | |||
| 120 | return ret; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void amba_pm_complete(struct device *dev) | ||
| 124 | { | ||
| 125 | struct device_driver *drv = dev->driver; | ||
| 126 | |||
| 127 | if (drv && drv->pm && drv->pm->complete) | ||
| 128 | drv->pm->complete(dev); | ||
| 129 | } | ||
| 130 | |||
| 131 | #else /* !CONFIG_PM_SLEEP */ | ||
| 132 | |||
| 133 | #define amba_pm_prepare NULL | ||
| 134 | #define amba_pm_complete NULL | ||
| 135 | |||
| 136 | #endif /* !CONFIG_PM_SLEEP */ | ||
| 137 | 113 | ||
| 138 | #ifdef CONFIG_SUSPEND | 114 | #ifdef CONFIG_SUSPEND |
| 139 | 115 | ||
| @@ -155,22 +131,6 @@ static int amba_pm_suspend(struct device *dev) | |||
| 155 | return ret; | 131 | return ret; |
| 156 | } | 132 | } |
| 157 | 133 | ||
| 158 | static int amba_pm_suspend_noirq(struct device *dev) | ||
| 159 | { | ||
| 160 | struct device_driver *drv = dev->driver; | ||
| 161 | int ret = 0; | ||
| 162 | |||
| 163 | if (!drv) | ||
| 164 | return 0; | ||
| 165 | |||
| 166 | if (drv->pm) { | ||
| 167 | if (drv->pm->suspend_noirq) | ||
| 168 | ret = drv->pm->suspend_noirq(dev); | ||
| 169 | } | ||
| 170 | |||
| 171 | return ret; | ||
| 172 | } | ||
| 173 | |||
| 174 | static int amba_pm_resume(struct device *dev) | 134 | static int amba_pm_resume(struct device *dev) |
| 175 | { | 135 | { |
| 176 | struct device_driver *drv = dev->driver; | 136 | struct device_driver *drv = dev->driver; |
| @@ -189,28 +149,10 @@ static int amba_pm_resume(struct device *dev) | |||
| 189 | return ret; | 149 | return ret; |
| 190 | } | 150 | } |
| 191 | 151 | ||
| 192 | static int amba_pm_resume_noirq(struct device *dev) | ||
| 193 | { | ||
| 194 | struct device_driver *drv = dev->driver; | ||
| 195 | int ret = 0; | ||
| 196 | |||
| 197 | if (!drv) | ||
| 198 | return 0; | ||
| 199 | |||
| 200 | if (drv->pm) { | ||
| 201 | if (drv->pm->resume_noirq) | ||
| 202 | ret = drv->pm->resume_noirq(dev); | ||
| 203 | } | ||
| 204 | |||
| 205 | return ret; | ||
| 206 | } | ||
| 207 | |||
| 208 | #else /* !CONFIG_SUSPEND */ | 152 | #else /* !CONFIG_SUSPEND */ |
| 209 | 153 | ||
| 210 | #define amba_pm_suspend NULL | 154 | #define amba_pm_suspend NULL |
| 211 | #define amba_pm_resume NULL | 155 | #define amba_pm_resume NULL |
| 212 | #define amba_pm_suspend_noirq NULL | ||
| 213 | #define amba_pm_resume_noirq NULL | ||
| 214 | 156 | ||
| 215 | #endif /* !CONFIG_SUSPEND */ | 157 | #endif /* !CONFIG_SUSPEND */ |
| 216 | 158 | ||
| @@ -234,22 +176,6 @@ static int amba_pm_freeze(struct device *dev) | |||
| 234 | return ret; | 176 | return ret; |
| 235 | } | 177 | } |
| 236 | 178 | ||
| 237 | static int amba_pm_freeze_noirq(struct device *dev) | ||
| 238 | { | ||
| 239 | struct device_driver *drv = dev->driver; | ||
| 240 | int ret = 0; | ||
| 241 | |||
| 242 | if (!drv) | ||
| 243 | return 0; | ||
| 244 | |||
| 245 | if (drv->pm) { | ||
| 246 | if (drv->pm->freeze_noirq) | ||
| 247 | ret = drv->pm->freeze_noirq(dev); | ||
| 248 | } | ||
| 249 | |||
| 250 | return ret; | ||
| 251 | } | ||
| 252 | |||
| 253 | static int amba_pm_thaw(struct device *dev) | 179 | static int amba_pm_thaw(struct device *dev) |
| 254 | { | 180 | { |
| 255 | struct device_driver *drv = dev->driver; | 181 | struct device_driver *drv = dev->driver; |
| @@ -268,22 +194,6 @@ static int amba_pm_thaw(struct device *dev) | |||
| 268 | return ret; | 194 | return ret; |
| 269 | } | 195 | } |
| 270 | 196 | ||
| 271 | static int amba_pm_thaw_noirq(struct device *dev) | ||
| 272 | { | ||
| 273 | struct device_driver *drv = dev->driver; | ||
| 274 | int ret = 0; | ||
| 275 | |||
| 276 | if (!drv) | ||
| 277 | return 0; | ||
| 278 | |||
| 279 | if (drv->pm) { | ||
| 280 | if (drv->pm->thaw_noirq) | ||
| 281 | ret = drv->pm->thaw_noirq(dev); | ||
| 282 | } | ||
| 283 | |||
| 284 | return ret; | ||
| 285 | } | ||
| 286 | |||
| 287 | static int amba_pm_poweroff(struct device *dev) | 197 | static int amba_pm_poweroff(struct device *dev) |
| 288 | { | 198 | { |
| 289 | struct device_driver *drv = dev->driver; | 199 | struct device_driver *drv = dev->driver; |
| @@ -302,22 +212,6 @@ static int amba_pm_poweroff(struct device *dev) | |||
| 302 | return ret; | 212 | return ret; |
| 303 | } | 213 | } |
| 304 | 214 | ||
| 305 | static int amba_pm_poweroff_noirq(struct device *dev) | ||
| 306 | { | ||
| 307 | struct device_driver *drv = dev->driver; | ||
| 308 | int ret = 0; | ||
| 309 | |||
| 310 | if (!drv) | ||
| 311 | return 0; | ||
| 312 | |||
| 313 | if (drv->pm) { | ||
| 314 | if (drv->pm->poweroff_noirq) | ||
| 315 | ret = drv->pm->poweroff_noirq(dev); | ||
| 316 | } | ||
| 317 | |||
| 318 | return ret; | ||
| 319 | } | ||
| 320 | |||
| 321 | static int amba_pm_restore(struct device *dev) | 215 | static int amba_pm_restore(struct device *dev) |
| 322 | { | 216 | { |
| 323 | struct device_driver *drv = dev->driver; | 217 | struct device_driver *drv = dev->driver; |
| @@ -336,32 +230,12 @@ static int amba_pm_restore(struct device *dev) | |||
| 336 | return ret; | 230 | return ret; |
| 337 | } | 231 | } |
| 338 | 232 | ||
| 339 | static int amba_pm_restore_noirq(struct device *dev) | ||
| 340 | { | ||
| 341 | struct device_driver *drv = dev->driver; | ||
| 342 | int ret = 0; | ||
| 343 | |||
| 344 | if (!drv) | ||
| 345 | return 0; | ||
| 346 | |||
| 347 | if (drv->pm) { | ||
| 348 | if (drv->pm->restore_noirq) | ||
| 349 | ret = drv->pm->restore_noirq(dev); | ||
| 350 | } | ||
| 351 | |||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | |||
| 355 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ | 233 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 356 | 234 | ||
| 357 | #define amba_pm_freeze NULL | 235 | #define amba_pm_freeze NULL |
| 358 | #define amba_pm_thaw NULL | 236 | #define amba_pm_thaw NULL |
| 359 | #define amba_pm_poweroff NULL | 237 | #define amba_pm_poweroff NULL |
| 360 | #define amba_pm_restore NULL | 238 | #define amba_pm_restore NULL |
| 361 | #define amba_pm_freeze_noirq NULL | ||
| 362 | #define amba_pm_thaw_noirq NULL | ||
| 363 | #define amba_pm_poweroff_noirq NULL | ||
| 364 | #define amba_pm_restore_noirq NULL | ||
| 365 | 239 | ||
| 366 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ | 240 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 367 | 241 | ||
| @@ -402,20 +276,12 @@ static int amba_pm_runtime_resume(struct device *dev) | |||
| 402 | #ifdef CONFIG_PM | 276 | #ifdef CONFIG_PM |
| 403 | 277 | ||
| 404 | static const struct dev_pm_ops amba_pm = { | 278 | static const struct dev_pm_ops amba_pm = { |
| 405 | .prepare = amba_pm_prepare, | ||
| 406 | .complete = amba_pm_complete, | ||
| 407 | .suspend = amba_pm_suspend, | 279 | .suspend = amba_pm_suspend, |
| 408 | .resume = amba_pm_resume, | 280 | .resume = amba_pm_resume, |
| 409 | .freeze = amba_pm_freeze, | 281 | .freeze = amba_pm_freeze, |
| 410 | .thaw = amba_pm_thaw, | 282 | .thaw = amba_pm_thaw, |
| 411 | .poweroff = amba_pm_poweroff, | 283 | .poweroff = amba_pm_poweroff, |
| 412 | .restore = amba_pm_restore, | 284 | .restore = amba_pm_restore, |
| 413 | .suspend_noirq = amba_pm_suspend_noirq, | ||
| 414 | .resume_noirq = amba_pm_resume_noirq, | ||
| 415 | .freeze_noirq = amba_pm_freeze_noirq, | ||
| 416 | .thaw_noirq = amba_pm_thaw_noirq, | ||
| 417 | .poweroff_noirq = amba_pm_poweroff_noirq, | ||
| 418 | .restore_noirq = amba_pm_restore_noirq, | ||
| 419 | SET_RUNTIME_PM_OPS( | 285 | SET_RUNTIME_PM_OPS( |
| 420 | amba_pm_runtime_suspend, | 286 | amba_pm_runtime_suspend, |
| 421 | amba_pm_runtime_resume, | 287 | amba_pm_runtime_resume, |
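Note: with the bus-level prepare/complete and _noirq wrappers removed above, the PM core is expected to fall back to the callbacks in dev->driver->pm for the phases the AMBA bus code no longer forwards. A minimal sketch of what that looks like from a driver's side; the "foo" driver and its callbacks are hypothetical, not part of this patch:

```c
#include <linux/amba/bus.h>
#include <linux/pm.h>

/* Hypothetical AMBA driver: the PM core now finds these callbacks via
 * dev->driver->pm, so the bus code no longer has to wrap them. */
static int foo_suspend_noirq(struct device *dev)
{
	/* quiesce the device; interrupts are already disabled here */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* restore device state before interrupts are re-enabled */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
};

static struct amba_driver foo_driver = {
	.drv = {
		.name = "foo",
		.pm   = &foo_pm_ops,
	},
	/* .probe, .remove and .id_table omitted for brevity */
};
```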
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4e7df5..d5585da14c8a 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
| @@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
| 534 | return 0; | 534 | return 0; |
| 535 | } | 535 | } |
| 536 | 536 | ||
| 537 | read_lock_usermodehelper(); | ||
| 538 | |||
| 537 | if (WARN_ON(usermodehelper_is_disabled())) { | 539 | if (WARN_ON(usermodehelper_is_disabled())) { |
| 538 | dev_err(device, "firmware: %s will not be loaded\n", name); | 540 | dev_err(device, "firmware: %s will not be loaded\n", name); |
| 539 | retval = -EBUSY; | 541 | retval = -EBUSY; |
| @@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
| 572 | fw_destroy_instance(fw_priv); | 574 | fw_destroy_instance(fw_priv); |
| 573 | 575 | ||
| 574 | out: | 576 | out: |
| 577 | read_unlock_usermodehelper(); | ||
| 578 | |||
| 575 | if (retval) { | 579 | if (retval) { |
| 576 | release_firmware(firmware); | 580 | release_firmware(firmware); |
| 577 | *firmware_p = NULL; | 581 | *firmware_p = NULL; |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895543e7..7d912d5675d8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
| @@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev) | |||
| 700 | return ret; | 700 | return ret; |
| 701 | } | 701 | } |
| 702 | 702 | ||
| 703 | int platform_pm_prepare(struct device *dev) | ||
| 704 | { | ||
| 705 | struct device_driver *drv = dev->driver; | ||
| 706 | int ret = 0; | ||
| 707 | |||
| 708 | if (drv && drv->pm && drv->pm->prepare) | ||
| 709 | ret = drv->pm->prepare(dev); | ||
| 710 | |||
| 711 | return ret; | ||
| 712 | } | ||
| 713 | |||
| 714 | void platform_pm_complete(struct device *dev) | ||
| 715 | { | ||
| 716 | struct device_driver *drv = dev->driver; | ||
| 717 | |||
| 718 | if (drv && drv->pm && drv->pm->complete) | ||
| 719 | drv->pm->complete(dev); | ||
| 720 | } | ||
| 721 | |||
| 722 | #endif /* CONFIG_PM_SLEEP */ | 703 | #endif /* CONFIG_PM_SLEEP */ |
| 723 | 704 | ||
| 724 | #ifdef CONFIG_SUSPEND | 705 | #ifdef CONFIG_SUSPEND |
| @@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev) | |||
| 741 | return ret; | 722 | return ret; |
| 742 | } | 723 | } |
| 743 | 724 | ||
| 744 | int platform_pm_suspend_noirq(struct device *dev) | ||
| 745 | { | ||
| 746 | struct device_driver *drv = dev->driver; | ||
| 747 | int ret = 0; | ||
| 748 | |||
| 749 | if (!drv) | ||
| 750 | return 0; | ||
| 751 | |||
| 752 | if (drv->pm) { | ||
| 753 | if (drv->pm->suspend_noirq) | ||
| 754 | ret = drv->pm->suspend_noirq(dev); | ||
| 755 | } | ||
| 756 | |||
| 757 | return ret; | ||
| 758 | } | ||
| 759 | |||
| 760 | int platform_pm_resume(struct device *dev) | 725 | int platform_pm_resume(struct device *dev) |
| 761 | { | 726 | { |
| 762 | struct device_driver *drv = dev->driver; | 727 | struct device_driver *drv = dev->driver; |
| @@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev) | |||
| 775 | return ret; | 740 | return ret; |
| 776 | } | 741 | } |
| 777 | 742 | ||
| 778 | int platform_pm_resume_noirq(struct device *dev) | ||
| 779 | { | ||
| 780 | struct device_driver *drv = dev->driver; | ||
| 781 | int ret = 0; | ||
| 782 | |||
| 783 | if (!drv) | ||
| 784 | return 0; | ||
| 785 | |||
| 786 | if (drv->pm) { | ||
| 787 | if (drv->pm->resume_noirq) | ||
| 788 | ret = drv->pm->resume_noirq(dev); | ||
| 789 | } | ||
| 790 | |||
| 791 | return ret; | ||
| 792 | } | ||
| 793 | |||
| 794 | #endif /* CONFIG_SUSPEND */ | 743 | #endif /* CONFIG_SUSPEND */ |
| 795 | 744 | ||
| 796 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 745 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| @@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev) | |||
| 813 | return ret; | 762 | return ret; |
| 814 | } | 763 | } |
| 815 | 764 | ||
| 816 | int platform_pm_freeze_noirq(struct device *dev) | ||
| 817 | { | ||
| 818 | struct device_driver *drv = dev->driver; | ||
| 819 | int ret = 0; | ||
| 820 | |||
| 821 | if (!drv) | ||
| 822 | return 0; | ||
| 823 | |||
| 824 | if (drv->pm) { | ||
| 825 | if (drv->pm->freeze_noirq) | ||
| 826 | ret = drv->pm->freeze_noirq(dev); | ||
| 827 | } | ||
| 828 | |||
| 829 | return ret; | ||
| 830 | } | ||
| 831 | |||
| 832 | int platform_pm_thaw(struct device *dev) | 765 | int platform_pm_thaw(struct device *dev) |
| 833 | { | 766 | { |
| 834 | struct device_driver *drv = dev->driver; | 767 | struct device_driver *drv = dev->driver; |
| @@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev) | |||
| 847 | return ret; | 780 | return ret; |
| 848 | } | 781 | } |
| 849 | 782 | ||
| 850 | int platform_pm_thaw_noirq(struct device *dev) | ||
| 851 | { | ||
| 852 | struct device_driver *drv = dev->driver; | ||
| 853 | int ret = 0; | ||
| 854 | |||
| 855 | if (!drv) | ||
| 856 | return 0; | ||
| 857 | |||
| 858 | if (drv->pm) { | ||
| 859 | if (drv->pm->thaw_noirq) | ||
| 860 | ret = drv->pm->thaw_noirq(dev); | ||
| 861 | } | ||
| 862 | |||
| 863 | return ret; | ||
| 864 | } | ||
| 865 | |||
| 866 | int platform_pm_poweroff(struct device *dev) | 783 | int platform_pm_poweroff(struct device *dev) |
| 867 | { | 784 | { |
| 868 | struct device_driver *drv = dev->driver; | 785 | struct device_driver *drv = dev->driver; |
| @@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev) | |||
| 881 | return ret; | 798 | return ret; |
| 882 | } | 799 | } |
| 883 | 800 | ||
| 884 | int platform_pm_poweroff_noirq(struct device *dev) | ||
| 885 | { | ||
| 886 | struct device_driver *drv = dev->driver; | ||
| 887 | int ret = 0; | ||
| 888 | |||
| 889 | if (!drv) | ||
| 890 | return 0; | ||
| 891 | |||
| 892 | if (drv->pm) { | ||
| 893 | if (drv->pm->poweroff_noirq) | ||
| 894 | ret = drv->pm->poweroff_noirq(dev); | ||
| 895 | } | ||
| 896 | |||
| 897 | return ret; | ||
| 898 | } | ||
| 899 | |||
| 900 | int platform_pm_restore(struct device *dev) | 801 | int platform_pm_restore(struct device *dev) |
| 901 | { | 802 | { |
| 902 | struct device_driver *drv = dev->driver; | 803 | struct device_driver *drv = dev->driver; |
| @@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev) | |||
| 915 | return ret; | 816 | return ret; |
| 916 | } | 817 | } |
| 917 | 818 | ||
| 918 | int platform_pm_restore_noirq(struct device *dev) | ||
| 919 | { | ||
| 920 | struct device_driver *drv = dev->driver; | ||
| 921 | int ret = 0; | ||
| 922 | |||
| 923 | if (!drv) | ||
| 924 | return 0; | ||
| 925 | |||
| 926 | if (drv->pm) { | ||
| 927 | if (drv->pm->restore_noirq) | ||
| 928 | ret = drv->pm->restore_noirq(dev); | ||
| 929 | } | ||
| 930 | |||
| 931 | return ret; | ||
| 932 | } | ||
| 933 | |||
| 934 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 819 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 935 | 820 | ||
| 936 | static const struct dev_pm_ops platform_dev_pm_ops = { | 821 | static const struct dev_pm_ops platform_dev_pm_ops = { |
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
| @@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev) | |||
| 97 | * @event: PM transition of the system under way. | 97 | * @event: PM transition of the system under way. |
| 98 | * @bool: Whether or not this is the "noirq" stage. | 98 | * @bool: Whether or not this is the "noirq" stage. |
| 99 | * | 99 | * |
| 100 | * If the device has not been suspended at run time, execute the | 100 | * Execute the PM callback corresponding to @event provided by the driver of |
| 101 | * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and | 101 | * @dev, if defined, and return its error code. Return 0 if the callback is |
| 102 | * return its error code. Otherwise, return zero. | 102 | * not present. |
| 103 | */ | 103 | */ |
| 104 | static int __pm_generic_call(struct device *dev, int event, bool noirq) | 104 | static int __pm_generic_call(struct device *dev, int event, bool noirq) |
| 105 | { | 105 | { |
| 106 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 106 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 107 | int (*callback)(struct device *); | 107 | int (*callback)(struct device *); |
| 108 | 108 | ||
| 109 | if (!pm || pm_runtime_suspended(dev)) | 109 | if (!pm) |
| 110 | return 0; | 110 | return 0; |
| 111 | 111 | ||
| 112 | switch (event) { | 112 | switch (event) { |
| @@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq) | |||
| 119 | case PM_EVENT_HIBERNATE: | 119 | case PM_EVENT_HIBERNATE: |
| 120 | callback = noirq ? pm->poweroff_noirq : pm->poweroff; | 120 | callback = noirq ? pm->poweroff_noirq : pm->poweroff; |
| 121 | break; | 121 | break; |
| 122 | case PM_EVENT_RESUME: | ||
| 123 | callback = noirq ? pm->resume_noirq : pm->resume; | ||
| 124 | break; | ||
| 122 | case PM_EVENT_THAW: | 125 | case PM_EVENT_THAW: |
| 123 | callback = noirq ? pm->thaw_noirq : pm->thaw; | 126 | callback = noirq ? pm->thaw_noirq : pm->thaw; |
| 124 | break; | 127 | break; |
| 128 | case PM_EVENT_RESTORE: | ||
| 129 | callback = noirq ? pm->restore_noirq : pm->restore; | ||
| 130 | break; | ||
| 125 | default: | 131 | default: |
| 126 | callback = NULL; | 132 | callback = NULL; |
| 127 | break; | 133 | break; |
| @@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev) | |||
| 211 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | 217 | EXPORT_SYMBOL_GPL(pm_generic_thaw); |
| 212 | 218 | ||
| 213 | /** | 219 | /** |
| 214 | * __pm_generic_resume - Generic resume/restore callback for subsystems. | ||
| 215 | * @dev: Device to handle. | ||
| 216 | * @event: PM transition of the system under way. | ||
| 217 | * @bool: Whether or not this is the "noirq" stage. | ||
| 218 | * | ||
| 219 | * Execute the resume/resotre callback provided by the @dev's driver, if | ||
| 220 | * defined. If it returns 0, change the device's runtime PM status to 'active'. | ||
| 221 | * Return the callback's error code. | ||
| 222 | */ | ||
| 223 | static int __pm_generic_resume(struct device *dev, int event, bool noirq) | ||
| 224 | { | ||
| 225 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 226 | int (*callback)(struct device *); | ||
| 227 | int ret; | ||
| 228 | |||
| 229 | if (!pm) | ||
| 230 | return 0; | ||
| 231 | |||
| 232 | switch (event) { | ||
| 233 | case PM_EVENT_RESUME: | ||
| 234 | callback = noirq ? pm->resume_noirq : pm->resume; | ||
| 235 | break; | ||
| 236 | case PM_EVENT_RESTORE: | ||
| 237 | callback = noirq ? pm->restore_noirq : pm->restore; | ||
| 238 | break; | ||
| 239 | default: | ||
| 240 | callback = NULL; | ||
| 241 | break; | ||
| 242 | } | ||
| 243 | |||
| 244 | if (!callback) | ||
| 245 | return 0; | ||
| 246 | |||
| 247 | ret = callback(dev); | ||
| 248 | if (!ret && !noirq && pm_runtime_enabled(dev)) { | ||
| 249 | pm_runtime_disable(dev); | ||
| 250 | pm_runtime_set_active(dev); | ||
| 251 | pm_runtime_enable(dev); | ||
| 252 | } | ||
| 253 | |||
| 254 | return ret; | ||
| 255 | } | ||
| 256 | |||
| 257 | /** | ||
| 258 | * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. | 220 | * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. |
| 259 | * @dev: Device to resume. | 221 | * @dev: Device to resume. |
| 260 | */ | 222 | */ |
| 261 | int pm_generic_resume_noirq(struct device *dev) | 223 | int pm_generic_resume_noirq(struct device *dev) |
| 262 | { | 224 | { |
| 263 | return __pm_generic_resume(dev, PM_EVENT_RESUME, true); | 225 | return __pm_generic_call(dev, PM_EVENT_RESUME, true); |
| 264 | } | 226 | } |
| 265 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); | 227 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); |
| 266 | 228 | ||
| @@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); | |||
| 270 | */ | 232 | */ |
| 271 | int pm_generic_resume(struct device *dev) | 233 | int pm_generic_resume(struct device *dev) |
| 272 | { | 234 | { |
| 273 | return __pm_generic_resume(dev, PM_EVENT_RESUME, false); | 235 | return __pm_generic_call(dev, PM_EVENT_RESUME, false); |
| 274 | } | 236 | } |
| 275 | EXPORT_SYMBOL_GPL(pm_generic_resume); | 237 | EXPORT_SYMBOL_GPL(pm_generic_resume); |
| 276 | 238 | ||
| @@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume); | |||
| 280 | */ | 242 | */ |
| 281 | int pm_generic_restore_noirq(struct device *dev) | 243 | int pm_generic_restore_noirq(struct device *dev) |
| 282 | { | 244 | { |
| 283 | return __pm_generic_resume(dev, PM_EVENT_RESTORE, true); | 245 | return __pm_generic_call(dev, PM_EVENT_RESTORE, true); |
| 284 | } | 246 | } |
| 285 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); | 247 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); |
| 286 | 248 | ||
| @@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); | |||
| 290 | */ | 252 | */ |
| 291 | int pm_generic_restore(struct device *dev) | 253 | int pm_generic_restore(struct device *dev) |
| 292 | { | 254 | { |
| 293 | return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); | 255 | return __pm_generic_call(dev, PM_EVENT_RESTORE, false); |
| 294 | } | 256 | } |
| 295 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 257 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
| 296 | 258 | ||
| @@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev) | |||
| 314 | pm_runtime_idle(dev); | 276 | pm_runtime_idle(dev); |
| 315 | } | 277 | } |
| 316 | #endif /* CONFIG_PM_SLEEP */ | 278 | #endif /* CONFIG_PM_SLEEP */ |
| 317 | |||
| 318 | struct dev_pm_ops generic_subsys_pm_ops = { | ||
| 319 | #ifdef CONFIG_PM_SLEEP | ||
| 320 | .prepare = pm_generic_prepare, | ||
| 321 | .suspend = pm_generic_suspend, | ||
| 322 | .suspend_noirq = pm_generic_suspend_noirq, | ||
| 323 | .resume = pm_generic_resume, | ||
| 324 | .resume_noirq = pm_generic_resume_noirq, | ||
| 325 | .freeze = pm_generic_freeze, | ||
| 326 | .freeze_noirq = pm_generic_freeze_noirq, | ||
| 327 | .thaw = pm_generic_thaw, | ||
| 328 | .thaw_noirq = pm_generic_thaw_noirq, | ||
| 329 | .poweroff = pm_generic_poweroff, | ||
| 330 | .poweroff_noirq = pm_generic_poweroff_noirq, | ||
| 331 | .restore = pm_generic_restore, | ||
| 332 | .restore_noirq = pm_generic_restore_noirq, | ||
| 333 | .complete = pm_generic_complete, | ||
| 334 | #endif | ||
| 335 | #ifdef CONFIG_PM_RUNTIME | ||
| 336 | .runtime_suspend = pm_generic_runtime_suspend, | ||
| 337 | .runtime_resume = pm_generic_runtime_resume, | ||
| 338 | .runtime_idle = pm_generic_runtime_idle, | ||
| 339 | #endif | ||
| 340 | }; | ||
| 341 | EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); | ||
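Note: generic_subsys_pm_ops is dropped entirely above; a subsystem that still wants the generic behaviour can name the pm_generic_* helpers it actually needs in its own dev_pm_ops. A rough sketch under that assumption, with a hypothetical foo_bus_type standing in for such a subsystem:

```c
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical replacement for a former generic_subsys_pm_ops user. */
static const struct dev_pm_ops foo_bus_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
			   pm_generic_runtime_resume,
			   pm_generic_runtime_idle)
};

static struct bus_type foo_bus_type = {
	.name = "foo",
	.pm   = &foo_bus_pm_ops,
};
```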
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
| @@ -32,6 +32,8 @@ | |||
| 32 | #include "../base.h" | 32 | #include "../base.h" |
| 33 | #include "power.h" | 33 | #include "power.h" |
| 34 | 34 | ||
| 35 | typedef int (*pm_callback_t)(struct device *); | ||
| 36 | |||
| 35 | /* | 37 | /* |
| 36 | * The entries in the dpm_list list are in a depth first order, simply | 38 | * The entries in the dpm_list list are in a depth first order, simply |
| 37 | * because children are guaranteed to be discovered after parents, and | 39 | * because children are guaranteed to be discovered after parents, and |
| @@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev) | |||
| 164 | ktime_t calltime = ktime_set(0, 0); | 166 | ktime_t calltime = ktime_set(0, 0); |
| 165 | 167 | ||
| 166 | if (initcall_debug) { | 168 | if (initcall_debug) { |
| 167 | pr_info("calling %s+ @ %i\n", | 169 | pr_info("calling %s+ @ %i, parent: %s\n", |
| 168 | dev_name(dev), task_pid_nr(current)); | 170 | dev_name(dev), task_pid_nr(current), |
| 171 | dev->parent ? dev_name(dev->parent) : "none"); | ||
| 169 | calltime = ktime_get(); | 172 | calltime = ktime_get(); |
| 170 | } | 173 | } |
| 171 | 174 | ||
| @@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async) | |||
| 211 | } | 214 | } |
| 212 | 215 | ||
| 213 | /** | 216 | /** |
| 214 | * pm_op - Execute the PM operation appropriate for given PM event. | 217 | * pm_op - Return the PM operation appropriate for given PM event. |
| 215 | * @dev: Device to handle. | ||
| 216 | * @ops: PM operations to choose from. | 218 | * @ops: PM operations to choose from. |
| 217 | * @state: PM transition of the system being carried out. | 219 | * @state: PM transition of the system being carried out. |
| 218 | */ | 220 | */ |
| 219 | static int pm_op(struct device *dev, | 221 | static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) |
| 220 | const struct dev_pm_ops *ops, | ||
| 221 | pm_message_t state) | ||
| 222 | { | 222 | { |
| 223 | int error = 0; | ||
| 224 | ktime_t calltime; | ||
| 225 | |||
| 226 | calltime = initcall_debug_start(dev); | ||
| 227 | |||
| 228 | switch (state.event) { | 223 | switch (state.event) { |
| 229 | #ifdef CONFIG_SUSPEND | 224 | #ifdef CONFIG_SUSPEND |
| 230 | case PM_EVENT_SUSPEND: | 225 | case PM_EVENT_SUSPEND: |
| 231 | if (ops->suspend) { | 226 | return ops->suspend; |
| 232 | error = ops->suspend(dev); | ||
| 233 | suspend_report_result(ops->suspend, error); | ||
| 234 | } | ||
| 235 | break; | ||
| 236 | case PM_EVENT_RESUME: | 227 | case PM_EVENT_RESUME: |
| 237 | if (ops->resume) { | 228 | return ops->resume; |
| 238 | error = ops->resume(dev); | ||
| 239 | suspend_report_result(ops->resume, error); | ||
| 240 | } | ||
| 241 | break; | ||
| 242 | #endif /* CONFIG_SUSPEND */ | 229 | #endif /* CONFIG_SUSPEND */ |
| 243 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 230 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 244 | case PM_EVENT_FREEZE: | 231 | case PM_EVENT_FREEZE: |
| 245 | case PM_EVENT_QUIESCE: | 232 | case PM_EVENT_QUIESCE: |
| 246 | if (ops->freeze) { | 233 | return ops->freeze; |
| 247 | error = ops->freeze(dev); | ||
| 248 | suspend_report_result(ops->freeze, error); | ||
| 249 | } | ||
| 250 | break; | ||
| 251 | case PM_EVENT_HIBERNATE: | 234 | case PM_EVENT_HIBERNATE: |
| 252 | if (ops->poweroff) { | 235 | return ops->poweroff; |
| 253 | error = ops->poweroff(dev); | ||
| 254 | suspend_report_result(ops->poweroff, error); | ||
| 255 | } | ||
| 256 | break; | ||
| 257 | case PM_EVENT_THAW: | 236 | case PM_EVENT_THAW: |
| 258 | case PM_EVENT_RECOVER: | 237 | case PM_EVENT_RECOVER: |
| 259 | if (ops->thaw) { | 238 | return ops->thaw; |
| 260 | error = ops->thaw(dev); | ||
| 261 | suspend_report_result(ops->thaw, error); | ||
| 262 | } | ||
| 263 | break; | 239 | break; |
| 264 | case PM_EVENT_RESTORE: | 240 | case PM_EVENT_RESTORE: |
| 265 | if (ops->restore) { | 241 | return ops->restore; |
| 266 | error = ops->restore(dev); | ||
| 267 | suspend_report_result(ops->restore, error); | ||
| 268 | } | ||
| 269 | break; | ||
| 270 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 242 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 271 | default: | ||
| 272 | error = -EINVAL; | ||
| 273 | } | 243 | } |
| 274 | 244 | ||
| 275 | initcall_debug_report(dev, calltime, error); | 245 | return NULL; |
| 276 | |||
| 277 | return error; | ||
| 278 | } | 246 | } |
| 279 | 247 | ||
| 280 | /** | 248 | /** |
| 281 | * pm_noirq_op - Execute the PM operation appropriate for given PM event. | 249 | * pm_noirq_op - Return the PM operation appropriate for given PM event. |
| 282 | * @dev: Device to handle. | ||
| 283 | * @ops: PM operations to choose from. | 250 | * @ops: PM operations to choose from. |
| 284 | * @state: PM transition of the system being carried out. | 251 | * @state: PM transition of the system being carried out. |
| 285 | * | 252 | * |
| 286 | * The driver of @dev will not receive interrupts while this function is being | 253 | * The driver of @dev will not receive interrupts while this function is being |
| 287 | * executed. | 254 | * executed. |
| 288 | */ | 255 | */ |
| 289 | static int pm_noirq_op(struct device *dev, | 256 | static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) |
| 290 | const struct dev_pm_ops *ops, | ||
| 291 | pm_message_t state) | ||
| 292 | { | 257 | { |
| 293 | int error = 0; | ||
| 294 | ktime_t calltime = ktime_set(0, 0), delta, rettime; | ||
| 295 | |||
| 296 | if (initcall_debug) { | ||
| 297 | pr_info("calling %s+ @ %i, parent: %s\n", | ||
| 298 | dev_name(dev), task_pid_nr(current), | ||
| 299 | dev->parent ? dev_name(dev->parent) : "none"); | ||
| 300 | calltime = ktime_get(); | ||
| 301 | } | ||
| 302 | |||
| 303 | switch (state.event) { | 258 | switch (state.event) { |
| 304 | #ifdef CONFIG_SUSPEND | 259 | #ifdef CONFIG_SUSPEND |
| 305 | case PM_EVENT_SUSPEND: | 260 | case PM_EVENT_SUSPEND: |
| 306 | if (ops->suspend_noirq) { | 261 | return ops->suspend_noirq; |
| 307 | error = ops->suspend_noirq(dev); | ||
| 308 | suspend_report_result(ops->suspend_noirq, error); | ||
| 309 | } | ||
| 310 | break; | ||
| 311 | case PM_EVENT_RESUME: | 262 | case PM_EVENT_RESUME: |
| 312 | if (ops->resume_noirq) { | 263 | return ops->resume_noirq; |
| 313 | error = ops->resume_noirq(dev); | ||
| 314 | suspend_report_result(ops->resume_noirq, error); | ||
| 315 | } | ||
| 316 | break; | ||
| 317 | #endif /* CONFIG_SUSPEND */ | 264 | #endif /* CONFIG_SUSPEND */ |
| 318 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 265 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 319 | case PM_EVENT_FREEZE: | 266 | case PM_EVENT_FREEZE: |
| 320 | case PM_EVENT_QUIESCE: | 267 | case PM_EVENT_QUIESCE: |
| 321 | if (ops->freeze_noirq) { | 268 | return ops->freeze_noirq; |
| 322 | error = ops->freeze_noirq(dev); | ||
| 323 | suspend_report_result(ops->freeze_noirq, error); | ||
| 324 | } | ||
| 325 | break; | ||
| 326 | case PM_EVENT_HIBERNATE: | 269 | case PM_EVENT_HIBERNATE: |
| 327 | if (ops->poweroff_noirq) { | 270 | return ops->poweroff_noirq; |
| 328 | error = ops->poweroff_noirq(dev); | ||
| 329 | suspend_report_result(ops->poweroff_noirq, error); | ||
| 330 | } | ||
| 331 | break; | ||
| 332 | case PM_EVENT_THAW: | 271 | case PM_EVENT_THAW: |
| 333 | case PM_EVENT_RECOVER: | 272 | case PM_EVENT_RECOVER: |
| 334 | if (ops->thaw_noirq) { | 273 | return ops->thaw_noirq; |
| 335 | error = ops->thaw_noirq(dev); | ||
| 336 | suspend_report_result(ops->thaw_noirq, error); | ||
| 337 | } | ||
| 338 | break; | ||
| 339 | case PM_EVENT_RESTORE: | 274 | case PM_EVENT_RESTORE: |
| 340 | if (ops->restore_noirq) { | 275 | return ops->restore_noirq; |
| 341 | error = ops->restore_noirq(dev); | ||
| 342 | suspend_report_result(ops->restore_noirq, error); | ||
| 343 | } | ||
| 344 | break; | ||
| 345 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 276 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 346 | default: | ||
| 347 | error = -EINVAL; | ||
| 348 | } | ||
| 349 | |||
| 350 | if (initcall_debug) { | ||
| 351 | rettime = ktime_get(); | ||
| 352 | delta = ktime_sub(rettime, calltime); | ||
| 353 | printk("initcall %s_i+ returned %d after %Ld usecs\n", | ||
| 354 | dev_name(dev), error, | ||
| 355 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 356 | } | 277 | } |
| 357 | 278 | ||
| 358 | return error; | 279 | return NULL; |
| 359 | } | 280 | } |
| 360 | 281 | ||
| 361 | static char *pm_verb(int event) | 282 | static char *pm_verb(int event) |
| @@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | |||
| 413 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | 334 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); |
| 414 | } | 335 | } |
| 415 | 336 | ||
| 337 | static int dpm_run_callback(pm_callback_t cb, struct device *dev, | ||
| 338 | pm_message_t state, char *info) | ||
| 339 | { | ||
| 340 | ktime_t calltime; | ||
| 341 | int error; | ||
| 342 | |||
| 343 | if (!cb) | ||
| 344 | return 0; | ||
| 345 | |||
| 346 | calltime = initcall_debug_start(dev); | ||
| 347 | |||
| 348 | pm_dev_dbg(dev, state, info); | ||
| 349 | error = cb(dev); | ||
| 350 | suspend_report_result(cb, error); | ||
| 351 | |||
| 352 | initcall_debug_report(dev, calltime, error); | ||
| 353 | |||
| 354 | return error; | ||
| 355 | } | ||
| 356 | |||
| 416 | /*------------------------- Resume routines -------------------------*/ | 357 | /*------------------------- Resume routines -------------------------*/ |
| 417 | 358 | ||
| 418 | /** | 359 | /** |
| @@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | |||
| 425 | */ | 366 | */ |
| 426 | static int device_resume_noirq(struct device *dev, pm_message_t state) | 367 | static int device_resume_noirq(struct device *dev, pm_message_t state) |
| 427 | { | 368 | { |
| 369 | pm_callback_t callback = NULL; | ||
| 370 | char *info = NULL; | ||
| 428 | int error = 0; | 371 | int error = 0; |
| 429 | 372 | ||
| 430 | TRACE_DEVICE(dev); | 373 | TRACE_DEVICE(dev); |
| 431 | TRACE_RESUME(0); | 374 | TRACE_RESUME(0); |
| 432 | 375 | ||
| 433 | if (dev->pm_domain) { | 376 | if (dev->pm_domain) { |
| 434 | pm_dev_dbg(dev, state, "EARLY power domain "); | 377 | info = "EARLY power domain "; |
| 435 | error = pm_noirq_op(dev, &dev->pm_domain->ops, state); | 378 | callback = pm_noirq_op(&dev->pm_domain->ops, state); |
| 436 | } else if (dev->type && dev->type->pm) { | 379 | } else if (dev->type && dev->type->pm) { |
| 437 | pm_dev_dbg(dev, state, "EARLY type "); | 380 | info = "EARLY type "; |
| 438 | error = pm_noirq_op(dev, dev->type->pm, state); | 381 | callback = pm_noirq_op(dev->type->pm, state); |
| 439 | } else if (dev->class && dev->class->pm) { | 382 | } else if (dev->class && dev->class->pm) { |
| 440 | pm_dev_dbg(dev, state, "EARLY class "); | 383 | info = "EARLY class "; |
| 441 | error = pm_noirq_op(dev, dev->class->pm, state); | 384 | callback = pm_noirq_op(dev->class->pm, state); |
| 442 | } else if (dev->bus && dev->bus->pm) { | 385 | } else if (dev->bus && dev->bus->pm) { |
| 443 | pm_dev_dbg(dev, state, "EARLY "); | 386 | info = "EARLY bus "; |
| 444 | error = pm_noirq_op(dev, dev->bus->pm, state); | 387 | callback = pm_noirq_op(dev->bus->pm, state); |
| 445 | } | 388 | } |
| 446 | 389 | ||
| 390 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 391 | info = "EARLY driver "; | ||
| 392 | callback = pm_noirq_op(dev->driver->pm, state); | ||
| 393 | } | ||
| 394 | |||
| 395 | error = dpm_run_callback(callback, dev, state, info); | ||
| 396 | |||
| 447 | TRACE_RESUME(error); | 397 | TRACE_RESUME(error); |
| 448 | return error; | 398 | return error; |
| 449 | } | 399 | } |
| @@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state) | |||
| 486 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 436 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
| 487 | 437 | ||
| 488 | /** | 438 | /** |
| 489 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
| 490 | * @dev: Device to resume. | ||
| 491 | * @cb: Resume callback to execute. | ||
| 492 | */ | ||
| 493 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
| 494 | { | ||
| 495 | int error; | ||
| 496 | ktime_t calltime; | ||
| 497 | |||
| 498 | calltime = initcall_debug_start(dev); | ||
| 499 | |||
| 500 | error = cb(dev); | ||
| 501 | suspend_report_result(cb, error); | ||
| 502 | |||
| 503 | initcall_debug_report(dev, calltime, error); | ||
| 504 | |||
| 505 | return error; | ||
| 506 | } | ||
| 507 | |||
| 508 | /** | ||
| 509 | * device_resume - Execute "resume" callbacks for given device. | 439 | * device_resume - Execute "resume" callbacks for given device. |
| 510 | * @dev: Device to handle. | 440 | * @dev: Device to handle. |
| 511 | * @state: PM transition of the system being carried out. | 441 | * @state: PM transition of the system being carried out. |
| @@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | |||
| 513 | */ | 443 | */ |
| 514 | static int device_resume(struct device *dev, pm_message_t state, bool async) | 444 | static int device_resume(struct device *dev, pm_message_t state, bool async) |
| 515 | { | 445 | { |
| 446 | pm_callback_t callback = NULL; | ||
| 447 | char *info = NULL; | ||
| 516 | int error = 0; | 448 | int error = 0; |
| 517 | bool put = false; | 449 | bool put = false; |
| 518 | 450 | ||
| @@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
| 535 | put = true; | 467 | put = true; |
| 536 | 468 | ||
| 537 | if (dev->pm_domain) { | 469 | if (dev->pm_domain) { |
| 538 | pm_dev_dbg(dev, state, "power domain "); | 470 | info = "power domain "; |
| 539 | error = pm_op(dev, &dev->pm_domain->ops, state); | 471 | callback = pm_op(&dev->pm_domain->ops, state); |
| 540 | goto End; | 472 | goto Driver; |
| 541 | } | 473 | } |
| 542 | 474 | ||
| 543 | if (dev->type && dev->type->pm) { | 475 | if (dev->type && dev->type->pm) { |
| 544 | pm_dev_dbg(dev, state, "type "); | 476 | info = "type "; |
| 545 | error = pm_op(dev, dev->type->pm, state); | 477 | callback = pm_op(dev->type->pm, state); |
| 546 | goto End; | 478 | goto Driver; |
| 547 | } | 479 | } |
| 548 | 480 | ||
| 549 | if (dev->class) { | 481 | if (dev->class) { |
| 550 | if (dev->class->pm) { | 482 | if (dev->class->pm) { |
| 551 | pm_dev_dbg(dev, state, "class "); | 483 | info = "class "; |
| 552 | error = pm_op(dev, dev->class->pm, state); | 484 | callback = pm_op(dev->class->pm, state); |
| 553 | goto End; | 485 | goto Driver; |
| 554 | } else if (dev->class->resume) { | 486 | } else if (dev->class->resume) { |
| 555 | pm_dev_dbg(dev, state, "legacy class "); | 487 | info = "legacy class "; |
| 556 | error = legacy_resume(dev, dev->class->resume); | 488 | callback = dev->class->resume; |
| 557 | goto End; | 489 | goto End; |
| 558 | } | 490 | } |
| 559 | } | 491 | } |
| 560 | 492 | ||
| 561 | if (dev->bus) { | 493 | if (dev->bus) { |
| 562 | if (dev->bus->pm) { | 494 | if (dev->bus->pm) { |
| 563 | pm_dev_dbg(dev, state, ""); | 495 | info = "bus "; |
| 564 | error = pm_op(dev, dev->bus->pm, state); | 496 | callback = pm_op(dev->bus->pm, state); |
| 565 | } else if (dev->bus->resume) { | 497 | } else if (dev->bus->resume) { |
| 566 | pm_dev_dbg(dev, state, "legacy "); | 498 | info = "legacy bus "; |
| 567 | error = legacy_resume(dev, dev->bus->resume); | 499 | callback = dev->bus->resume; |
| 500 | goto End; | ||
| 568 | } | 501 | } |
| 569 | } | 502 | } |
| 570 | 503 | ||
| 504 | Driver: | ||
| 505 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 506 | info = "driver "; | ||
| 507 | callback = pm_op(dev->driver->pm, state); | ||
| 508 | } | ||
| 509 | |||
| 571 | End: | 510 | End: |
| 511 | error = dpm_run_callback(callback, dev, state, info); | ||
| 572 | dev->power.is_suspended = false; | 512 | dev->power.is_suspended = false; |
| 573 | 513 | ||
| 574 | Unlock: | 514 | Unlock: |
| @@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state) | |||
| 660 | */ | 600 | */ |
| 661 | static void device_complete(struct device *dev, pm_message_t state) | 601 | static void device_complete(struct device *dev, pm_message_t state) |
| 662 | { | 602 | { |
| 603 | void (*callback)(struct device *) = NULL; | ||
| 604 | char *info = NULL; | ||
| 605 | |||
| 663 | device_lock(dev); | 606 | device_lock(dev); |
| 664 | 607 | ||
| 665 | if (dev->pm_domain) { | 608 | if (dev->pm_domain) { |
| 666 | pm_dev_dbg(dev, state, "completing power domain "); | 609 | info = "completing power domain "; |
| 667 | if (dev->pm_domain->ops.complete) | 610 | callback = dev->pm_domain->ops.complete; |
| 668 | dev->pm_domain->ops.complete(dev); | ||
| 669 | } else if (dev->type && dev->type->pm) { | 611 | } else if (dev->type && dev->type->pm) { |
| 670 | pm_dev_dbg(dev, state, "completing type "); | 612 | info = "completing type "; |
| 671 | if (dev->type->pm->complete) | 613 | callback = dev->type->pm->complete; |
| 672 | dev->type->pm->complete(dev); | ||
| 673 | } else if (dev->class && dev->class->pm) { | 614 | } else if (dev->class && dev->class->pm) { |
| 674 | pm_dev_dbg(dev, state, "completing class "); | 615 | info = "completing class "; |
| 675 | if (dev->class->pm->complete) | 616 | callback = dev->class->pm->complete; |
| 676 | dev->class->pm->complete(dev); | ||
| 677 | } else if (dev->bus && dev->bus->pm) { | 617 | } else if (dev->bus && dev->bus->pm) { |
| 678 | pm_dev_dbg(dev, state, "completing "); | 618 | info = "completing bus "; |
| 679 | if (dev->bus->pm->complete) | 619 | callback = dev->bus->pm->complete; |
| 680 | dev->bus->pm->complete(dev); | 620 | } |
| 621 | |||
| 622 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 623 | info = "completing driver "; | ||
| 624 | callback = dev->driver->pm->complete; | ||
| 625 | } | ||
| 626 | |||
| 627 | if (callback) { | ||
| 628 | pm_dev_dbg(dev, state, info); | ||
| 629 | callback(dev); | ||
| 681 | } | 630 | } |
| 682 | 631 | ||
| 683 | device_unlock(dev); | 632 | device_unlock(dev); |
| @@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state) | |||
| 763 | */ | 712 | */ |
| 764 | static int device_suspend_noirq(struct device *dev, pm_message_t state) | 713 | static int device_suspend_noirq(struct device *dev, pm_message_t state) |
| 765 | { | 714 | { |
| 766 | int error; | 715 | pm_callback_t callback = NULL; |
| 716 | char *info = NULL; | ||
| 767 | 717 | ||
| 768 | if (dev->pm_domain) { | 718 | if (dev->pm_domain) { |
| 769 | pm_dev_dbg(dev, state, "LATE power domain "); | 719 | info = "LATE power domain "; |
| 770 | error = pm_noirq_op(dev, &dev->pm_domain->ops, state); | 720 | callback = pm_noirq_op(&dev->pm_domain->ops, state); |
| 771 | if (error) | ||
| 772 | return error; | ||
| 773 | } else if (dev->type && dev->type->pm) { | 721 | } else if (dev->type && dev->type->pm) { |
| 774 | pm_dev_dbg(dev, state, "LATE type "); | 722 | info = "LATE type "; |
| 775 | error = pm_noirq_op(dev, dev->type->pm, state); | 723 | callback = pm_noirq_op(dev->type->pm, state); |
| 776 | if (error) | ||
| 777 | return error; | ||
| 778 | } else if (dev->class && dev->class->pm) { | 724 | } else if (dev->class && dev->class->pm) { |
| 779 | pm_dev_dbg(dev, state, "LATE class "); | 725 | info = "LATE class "; |
| 780 | error = pm_noirq_op(dev, dev->class->pm, state); | 726 | callback = pm_noirq_op(dev->class->pm, state); |
| 781 | if (error) | ||
| 782 | return error; | ||
| 783 | } else if (dev->bus && dev->bus->pm) { | 727 | } else if (dev->bus && dev->bus->pm) { |
| 784 | pm_dev_dbg(dev, state, "LATE "); | 728 | info = "LATE bus "; |
| 785 | error = pm_noirq_op(dev, dev->bus->pm, state); | 729 | callback = pm_noirq_op(dev->bus->pm, state); |
| 786 | if (error) | ||
| 787 | return error; | ||
| 788 | } | 730 | } |
| 789 | 731 | ||
| 790 | return 0; | 732 | if (!callback && dev->driver && dev->driver->pm) { |
| 733 | info = "LATE driver "; | ||
| 734 | callback = pm_noirq_op(dev->driver->pm, state); | ||
| 735 | } | ||
| 736 | |||
| 737 | return dpm_run_callback(callback, dev, state, info); | ||
| 791 | } | 738 | } |
| 792 | 739 | ||
| 793 | /** | 740 | /** |
| @@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state, | |||
| 864 | */ | 811 | */ |
| 865 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) | 812 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) |
| 866 | { | 813 | { |
| 814 | pm_callback_t callback = NULL; | ||
| 815 | char *info = NULL; | ||
| 867 | int error = 0; | 816 | int error = 0; |
| 868 | 817 | ||
| 869 | dpm_wait_for_children(dev, async); | 818 | dpm_wait_for_children(dev, async); |
| @@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 884 | device_lock(dev); | 833 | device_lock(dev); |
| 885 | 834 | ||
| 886 | if (dev->pm_domain) { | 835 | if (dev->pm_domain) { |
| 887 | pm_dev_dbg(dev, state, "power domain "); | 836 | info = "power domain "; |
| 888 | error = pm_op(dev, &dev->pm_domain->ops, state); | 837 | callback = pm_op(&dev->pm_domain->ops, state); |
| 889 | goto End; | 838 | goto Run; |
| 890 | } | 839 | } |
| 891 | 840 | ||
| 892 | if (dev->type && dev->type->pm) { | 841 | if (dev->type && dev->type->pm) { |
| 893 | pm_dev_dbg(dev, state, "type "); | 842 | info = "type "; |
| 894 | error = pm_op(dev, dev->type->pm, state); | 843 | callback = pm_op(dev->type->pm, state); |
| 895 | goto End; | 844 | goto Run; |
| 896 | } | 845 | } |
| 897 | 846 | ||
| 898 | if (dev->class) { | 847 | if (dev->class) { |
| 899 | if (dev->class->pm) { | 848 | if (dev->class->pm) { |
| 900 | pm_dev_dbg(dev, state, "class "); | 849 | info = "class "; |
| 901 | error = pm_op(dev, dev->class->pm, state); | 850 | callback = pm_op(dev->class->pm, state); |
| 902 | goto End; | 851 | goto Run; |
| 903 | } else if (dev->class->suspend) { | 852 | } else if (dev->class->suspend) { |
| 904 | pm_dev_dbg(dev, state, "legacy class "); | 853 | pm_dev_dbg(dev, state, "legacy class "); |
| 905 | error = legacy_suspend(dev, state, dev->class->suspend); | 854 | error = legacy_suspend(dev, state, dev->class->suspend); |
| @@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 909 | 858 | ||
| 910 | if (dev->bus) { | 859 | if (dev->bus) { |
| 911 | if (dev->bus->pm) { | 860 | if (dev->bus->pm) { |
| 912 | pm_dev_dbg(dev, state, ""); | 861 | info = "bus "; |
| 913 | error = pm_op(dev, dev->bus->pm, state); | 862 | callback = pm_op(dev->bus->pm, state); |
| 914 | } else if (dev->bus->suspend) { | 863 | } else if (dev->bus->suspend) { |
| 915 | pm_dev_dbg(dev, state, "legacy "); | 864 | pm_dev_dbg(dev, state, "legacy bus "); |
| 916 | error = legacy_suspend(dev, state, dev->bus->suspend); | 865 | error = legacy_suspend(dev, state, dev->bus->suspend); |
| 866 | goto End; | ||
| 917 | } | 867 | } |
| 918 | } | 868 | } |
| 919 | 869 | ||
| 870 | Run: | ||
| 871 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 872 | info = "driver "; | ||
| 873 | callback = pm_op(dev->driver->pm, state); | ||
| 874 | } | ||
| 875 | |||
| 876 | error = dpm_run_callback(callback, dev, state, info); | ||
| 877 | |||
| 920 | End: | 878 | End: |
| 921 | if (!error) { | 879 | if (!error) { |
| 922 | dev->power.is_suspended = true; | 880 | dev->power.is_suspended = true; |
| @@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state) | |||
| 1022 | */ | 980 | */ |
| 1023 | static int device_prepare(struct device *dev, pm_message_t state) | 981 | static int device_prepare(struct device *dev, pm_message_t state) |
| 1024 | { | 982 | { |
| 983 | int (*callback)(struct device *) = NULL; | ||
| 984 | char *info = NULL; | ||
| 1025 | int error = 0; | 985 | int error = 0; |
| 1026 | 986 | ||
| 1027 | device_lock(dev); | 987 | device_lock(dev); |
| @@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 1029 | dev->power.wakeup_path = device_may_wakeup(dev); | 989 | dev->power.wakeup_path = device_may_wakeup(dev); |
| 1030 | 990 | ||
| 1031 | if (dev->pm_domain) { | 991 | if (dev->pm_domain) { |
| 1032 | pm_dev_dbg(dev, state, "preparing power domain "); | 992 | info = "preparing power domain "; |
| 1033 | if (dev->pm_domain->ops.prepare) | 993 | callback = dev->pm_domain->ops.prepare; |
| 1034 | error = dev->pm_domain->ops.prepare(dev); | ||
| 1035 | suspend_report_result(dev->pm_domain->ops.prepare, error); | ||
| 1036 | if (error) | ||
| 1037 | goto End; | ||
| 1038 | } else if (dev->type && dev->type->pm) { | 994 | } else if (dev->type && dev->type->pm) { |
| 1039 | pm_dev_dbg(dev, state, "preparing type "); | 995 | info = "preparing type "; |
| 1040 | if (dev->type->pm->prepare) | 996 | callback = dev->type->pm->prepare; |
| 1041 | error = dev->type->pm->prepare(dev); | ||
| 1042 | suspend_report_result(dev->type->pm->prepare, error); | ||
| 1043 | if (error) | ||
| 1044 | goto End; | ||
| 1045 | } else if (dev->class && dev->class->pm) { | 997 | } else if (dev->class && dev->class->pm) { |
| 1046 | pm_dev_dbg(dev, state, "preparing class "); | 998 | info = "preparing class "; |
| 1047 | if (dev->class->pm->prepare) | 999 | callback = dev->class->pm->prepare; |
| 1048 | error = dev->class->pm->prepare(dev); | ||
| 1049 | suspend_report_result(dev->class->pm->prepare, error); | ||
| 1050 | if (error) | ||
| 1051 | goto End; | ||
| 1052 | } else if (dev->bus && dev->bus->pm) { | 1000 | } else if (dev->bus && dev->bus->pm) { |
| 1053 | pm_dev_dbg(dev, state, "preparing "); | 1001 | info = "preparing bus "; |
| 1054 | if (dev->bus->pm->prepare) | 1002 | callback = dev->bus->pm->prepare; |
| 1055 | error = dev->bus->pm->prepare(dev); | 1003 | } |
| 1056 | suspend_report_result(dev->bus->pm->prepare, error); | 1004 | |
| 1005 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 1006 | info = "preparing driver "; | ||
| 1007 | callback = dev->driver->pm->prepare; | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | if (callback) { | ||
| 1011 | error = callback(dev); | ||
| 1012 | suspend_report_result(callback, error); | ||
| 1057 | } | 1013 | } |
| 1058 | 1014 | ||
| 1059 | End: | ||
| 1060 | device_unlock(dev); | 1015 | device_unlock(dev); |
| 1061 | 1016 | ||
| 1062 | return error; | 1017 | return error; |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..c56efd756531 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
| @@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
| 250 | else | 250 | else |
| 251 | callback = NULL; | 251 | callback = NULL; |
| 252 | 252 | ||
| 253 | if (!callback && dev->driver && dev->driver->pm) | ||
| 254 | callback = dev->driver->pm->runtime_idle; | ||
| 255 | |||
| 253 | if (callback) | 256 | if (callback) |
| 254 | __rpm_callback(callback, dev); | 257 | __rpm_callback(callback, dev); |
| 255 | 258 | ||
| @@ -413,6 +416,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 413 | else | 416 | else |
| 414 | callback = NULL; | 417 | callback = NULL; |
| 415 | 418 | ||
| 419 | if (!callback && dev->driver && dev->driver->pm) | ||
| 420 | callback = dev->driver->pm->runtime_suspend; | ||
| 421 | |||
| 416 | retval = rpm_callback(callback, dev); | 422 | retval = rpm_callback(callback, dev); |
| 417 | if (retval) { | 423 | if (retval) { |
| 418 | __update_runtime_status(dev, RPM_ACTIVE); | 424 | __update_runtime_status(dev, RPM_ACTIVE); |
| @@ -633,6 +639,9 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 633 | else | 639 | else |
| 634 | callback = NULL; | 640 | callback = NULL; |
| 635 | 641 | ||
| 642 | if (!callback && dev->driver && dev->driver->pm) | ||
| 643 | callback = dev->driver->pm->runtime_resume; | ||
| 644 | |||
| 636 | retval = rpm_callback(callback, dev); | 645 | retval = rpm_callback(callback, dev); |
| 637 | if (retval) { | 646 | if (retval) { |
| 638 | __update_runtime_status(dev, RPM_SUSPENDED); | 647 | __update_runtime_status(dev, RPM_SUSPENDED); |
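Note: the three hunks above make rpm_idle(), rpm_suspend() and rpm_resume() fall back to the callbacks in dev->driver->pm when no power domain, device type, class or bus supplies one. From a driver's point of view that means something like the following is enough on a bus without runtime PM support of its own; the foo_* names are illustrative only:

```c
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* cut clocks / power to the device */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* power the device back up */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

/* In probe: pm_runtime_enable(dev); after that, pm_runtime_get_sync()
 * and pm_runtime_put() end up invoking the callbacks above directly. */
```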
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
| @@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data) | |||
| 475 | 475 | ||
| 476 | init_waitqueue_entry(&wait, current); | 476 | init_waitqueue_entry(&wait, current); |
| 477 | 477 | ||
| 478 | current->flags |= PF_NOFREEZE; | ||
| 479 | |||
| 480 | for (;;) { | 478 | for (;;) { |
| 481 | add_wait_queue(&thread->wait_q, &wait); | 479 | add_wait_queue(&thread->wait_q, &wait); |
| 482 | 480 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
| @@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
| 214 | return error_count; | 214 | return error_count; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void dmatest_callback(void *completion) | 217 | /* poor man's completion - we want to use wait_event_freezable() on it */ |
| 218 | struct dmatest_done { | ||
| 219 | bool done; | ||
| 220 | wait_queue_head_t *wait; | ||
| 221 | }; | ||
| 222 | |||
| 223 | static void dmatest_callback(void *arg) | ||
| 218 | { | 224 | { |
| 219 | complete(completion); | 225 | struct dmatest_done *done = arg; |
| 226 | |||
| 227 | done->done = true; | ||
| 228 | wake_up_all(done->wait); | ||
| 220 | } | 229 | } |
| 221 | 230 | ||
| 222 | /* | 231 | /* |
| @@ -235,7 +244,9 @@ static void dmatest_callback(void *completion) | |||
| 235 | */ | 244 | */ |
| 236 | static int dmatest_func(void *data) | 245 | static int dmatest_func(void *data) |
| 237 | { | 246 | { |
| 247 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); | ||
| 238 | struct dmatest_thread *thread = data; | 248 | struct dmatest_thread *thread = data; |
| 249 | struct dmatest_done done = { .wait = &done_wait }; | ||
| 239 | struct dma_chan *chan; | 250 | struct dma_chan *chan; |
| 240 | const char *thread_name; | 251 | const char *thread_name; |
| 241 | unsigned int src_off, dst_off, len; | 252 | unsigned int src_off, dst_off, len; |
| @@ -252,7 +263,7 @@ static int dmatest_func(void *data) | |||
| 252 | int i; | 263 | int i; |
| 253 | 264 | ||
| 254 | thread_name = current->comm; | 265 | thread_name = current->comm; |
| 255 | set_freezable_with_signal(); | 266 | set_freezable(); |
| 256 | 267 | ||
| 257 | ret = -ENOMEM; | 268 | ret = -ENOMEM; |
| 258 | 269 | ||
| @@ -306,9 +317,6 @@ static int dmatest_func(void *data) | |||
| 306 | struct dma_async_tx_descriptor *tx = NULL; | 317 | struct dma_async_tx_descriptor *tx = NULL; |
| 307 | dma_addr_t dma_srcs[src_cnt]; | 318 | dma_addr_t dma_srcs[src_cnt]; |
| 308 | dma_addr_t dma_dsts[dst_cnt]; | 319 | dma_addr_t dma_dsts[dst_cnt]; |
| 309 | struct completion cmp; | ||
| 310 | unsigned long start, tmo, end = 0 /* compiler... */; | ||
| 311 | bool reload = true; | ||
| 312 | u8 align = 0; | 320 | u8 align = 0; |
| 313 | 321 | ||
| 314 | total_tests++; | 322 | total_tests++; |
| @@ -391,9 +399,9 @@ static int dmatest_func(void *data) | |||
| 391 | continue; | 399 | continue; |
| 392 | } | 400 | } |
| 393 | 401 | ||
| 394 | init_completion(&cmp); | 402 | done.done = false; |
| 395 | tx->callback = dmatest_callback; | 403 | tx->callback = dmatest_callback; |
| 396 | tx->callback_param = &cmp; | 404 | tx->callback_param = &done; |
| 397 | cookie = tx->tx_submit(tx); | 405 | cookie = tx->tx_submit(tx); |
| 398 | 406 | ||
| 399 | if (dma_submit_error(cookie)) { | 407 | if (dma_submit_error(cookie)) { |
| @@ -407,20 +415,20 @@ static int dmatest_func(void *data) | |||
| 407 | } | 415 | } |
| 408 | dma_async_issue_pending(chan); | 416 | dma_async_issue_pending(chan); |
| 409 | 417 | ||
| 410 | do { | 418 | wait_event_freezable_timeout(done_wait, done.done, |
| 411 | start = jiffies; | 419 | msecs_to_jiffies(timeout)); |
| 412 | if (reload) | ||
| 413 | end = start + msecs_to_jiffies(timeout); | ||
| 414 | else if (end <= start) | ||
| 415 | end = start + 1; | ||
| 416 | tmo = wait_for_completion_interruptible_timeout(&cmp, | ||
| 417 | end - start); | ||
| 418 | reload = try_to_freeze(); | ||
| 419 | } while (tmo == -ERESTARTSYS); | ||
| 420 | 420 | ||
| 421 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 421 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
| 422 | 422 | ||
| 423 | if (tmo == 0) { | 423 | if (!done.done) { |
| 424 | /* | ||
| 425 | * We're leaving the timed out dma operation with | ||
| 426 | * dangling pointer to done_wait. To make this | ||
| 427 | * correct, we'll need to allocate wait_done for | ||
| 428 | * each test iteration and perform "who's gonna | ||
| 429 | * free it this time?" dancing. For now, just | ||
| 430 | * leave it dangling. | ||
| 431 | */ | ||
| 424 | pr_warning("%s: #%u: test timed out\n", | 432 | pr_warning("%s: #%u: test timed out\n", |
| 425 | thread_name, total_tests - 1); | 433 | thread_name, total_tests - 1); |
| 426 | failed_tests++; | 434 | failed_tests++; |
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 3eee45ffb096..c6b456ad7342 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c | |||
| @@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data) | |||
| 138 | static const unsigned max_i2c_errors = 100; | 138 | static const unsigned max_i2c_errors = 100; |
| 139 | int ret; | 139 | int ret; |
| 140 | 140 | ||
| 141 | current->flags |= PF_NOFREEZE; | ||
| 142 | |||
| 143 | while (!kthread_should_stop()) { | 141 | while (!kthread_should_stop()) { |
| 144 | int i; | 142 | int i; |
| 145 | union { | 143 | union { |
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 41c96b3d8152..e880c79d7bd8 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c | |||
| @@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg) | |||
| 750 | 750 | ||
| 751 | write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD); | 751 | write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD); |
| 752 | 752 | ||
| 753 | refrigerator(); | 753 | try_to_freeze(); |
| 754 | 754 | ||
| 755 | if (change_speed(stir, stir->speed)) | 755 | if (change_speed(stir, stir->speed)) |
| 756 | break; | 756 | break; |
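The stir4200 change swaps a direct refrigerator() call for try_to_freeze(), the supported entry point: it checks freezing(current) itself, enters the refrigerator only when a freeze is actually pending, and returns true if the task was frozen. A rough sketch of how a transmit kthread might use it; the hardware quiesce/re-init steps and all names are assumptions, not taken from the driver:

#include <linux/kernel.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_tx_thread(void *arg)
{
	set_freezable();

	while (!kthread_should_stop()) {
		if (unlikely(freezing(current))) {
			/* quiesce the hardware before going into the fridge:
			 * stop reception, flush FIFOs, power down, ... */

			/* enters the refrigerator only if a freeze is really
			 * pending; returns true if we were frozen */
			if (try_to_freeze()) {
				/* ... re-program speed/state after thaw ... */
			}
		}

		/* ... normal transmit work ... */
	}
	return 0;
}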
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 7b828680b21d..4b11fc91fa7d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data) | |||
| 2456 | u32 poll_mask, event_mask; | 2456 | u32 poll_mask, event_mask; |
| 2457 | unsigned int si, so; | 2457 | unsigned int si, so; |
| 2458 | unsigned long t; | 2458 | unsigned long t; |
| 2459 | unsigned int change_detector, must_reset; | 2459 | unsigned int change_detector; |
| 2460 | unsigned int poll_freq; | 2460 | unsigned int poll_freq; |
| 2461 | bool was_frozen; | ||
| 2461 | 2462 | ||
| 2462 | mutex_lock(&hotkey_thread_mutex); | 2463 | mutex_lock(&hotkey_thread_mutex); |
| 2463 | 2464 | ||
| @@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data) | |||
| 2488 | t = 100; /* should never happen... */ | 2489 | t = 100; /* should never happen... */ |
| 2489 | } | 2490 | } |
| 2490 | t = msleep_interruptible(t); | 2491 | t = msleep_interruptible(t); |
| 2491 | if (unlikely(kthread_should_stop())) | 2492 | if (unlikely(kthread_freezable_should_stop(&was_frozen))) |
| 2492 | break; | 2493 | break; |
| 2493 | must_reset = try_to_freeze(); | 2494 | |
| 2494 | if (t > 0 && !must_reset) | 2495 | if (t > 0 && !was_frozen) |
| 2495 | continue; | 2496 | continue; |
| 2496 | 2497 | ||
| 2497 | mutex_lock(&hotkey_thread_data_mutex); | 2498 | mutex_lock(&hotkey_thread_data_mutex); |
| 2498 | if (must_reset || hotkey_config_change != change_detector) { | 2499 | if (was_frozen || hotkey_config_change != change_detector) { |
| 2499 | /* forget old state on thaw or config change */ | 2500 | /* forget old state on thaw or config change */ |
| 2500 | si = so; | 2501 | si = so; |
| 2501 | t = 0; | 2502 | t = 0; |
| @@ -2528,10 +2529,6 @@ exit: | |||
| 2528 | static void hotkey_poll_stop_sync(void) | 2529 | static void hotkey_poll_stop_sync(void) |
| 2529 | { | 2530 | { |
| 2530 | if (tpacpi_hotkey_task) { | 2531 | if (tpacpi_hotkey_task) { |
| 2531 | if (frozen(tpacpi_hotkey_task) || | ||
| 2532 | freezing(tpacpi_hotkey_task)) | ||
| 2533 | thaw_process(tpacpi_hotkey_task); | ||
| 2534 | |||
| 2535 | kthread_stop(tpacpi_hotkey_task); | 2532 | kthread_stop(tpacpi_hotkey_task); |
| 2536 | tpacpi_hotkey_task = NULL; | 2533 | tpacpi_hotkey_task = NULL; |
| 2537 | mutex_lock(&hotkey_thread_mutex); | 2534 | mutex_lock(&hotkey_thread_mutex); |
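In the thinkpad_acpi hunks, the separate kthread_should_stop()/try_to_freeze() pair becomes one kthread_freezable_should_stop(&was_frozen) call: the helper freezes the thread when the freezer asks, reports through was_frozen whether that happened, and returns true once kthread_stop() has been called, which is also why hotkey_poll_stop_sync() no longer needs the manual thaw_process() step before stopping the task. A minimal polling-loop sketch using the helper (the poll period, state handling and names are hypothetical):

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_poll_thread(void *data)
{
	bool was_frozen;

	set_freezable();

	for (;;) {
		unsigned long t = msleep_interruptible(100);	/* poll period */

		/* Freezes here when requested; returns true once
		 * kthread_stop() has been called on this thread. */
		if (kthread_freezable_should_stop(&was_frozen))
			break;

		if (t > 0 && !was_frozen)
			continue;	/* woken early, nothing to do yet */

		if (was_frozen) {
			/* forget cached state: the world may have changed
			 * across the freeze (e.g. suspend/resume) */
		}

		/* ... poll the hardware, compare with cached state, emit events ... */
	}
	return 0;
}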
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 115635f95024..a7feb3e328a0 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c | |||
| @@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev) | |||
| 466 | struct rtsx_chip *chip = dev->chip; | 466 | struct rtsx_chip *chip = dev->chip; |
| 467 | struct Scsi_Host *host = rtsx_to_host(dev); | 467 | struct Scsi_Host *host = rtsx_to_host(dev); |
| 468 | 468 | ||
| 469 | current->flags |= PF_NOFREEZE; | ||
| 470 | |||
| 471 | for (;;) { | 469 | for (;;) { |
| 472 | if (wait_for_completion_interruptible(&dev->cmnd_ready)) | 470 | if (wait_for_completion_interruptible(&dev->cmnd_ready)) |
| 473 | break; | 471 | break; |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index c325e69415a1..aa84b3d77274 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us) | |||
| 831 | 831 | ||
| 832 | dev_dbg(dev, "device found\n"); | 832 | dev_dbg(dev, "device found\n"); |
| 833 | 833 | ||
| 834 | set_freezable_with_signal(); | 834 | set_freezable(); |
| 835 | |||
| 835 | /* | 836 | /* |
| 836 | * Wait for the timeout to expire or for a disconnect | 837 | * Wait for the timeout to expire or for a disconnect |
| 837 | * | 838 | * |
| @@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us) | |||
| 839 | * fail to freeze, but we can't be non-freezable either. Nor can | 840 | * fail to freeze, but we can't be non-freezable either. Nor can |
| 840 | * khubd freeze while waiting for scanning to complete as it may | 841 | * khubd freeze while waiting for scanning to complete as it may |
| 841 | * hold the device lock, causing a hang when suspending devices. | 842 | * hold the device lock, causing a hang when suspending devices. |
| 842 | * So we request a fake signal when freezing and use | 843 | * So instead of using wait_event_freezable(), explicitly test |
| 843 | * interruptible sleep to kick us out of our wait early when | 844 | * for (DONT_SCAN || freezing) in interruptible wait and proceed |
| 844 | * freezing happens. | 845 | * if any of DONT_SCAN, freezing or timeout has happened. |
| 845 | */ | 846 | */ |
| 846 | if (delay_use > 0) { | 847 | if (delay_use > 0) { |
| 847 | dev_dbg(dev, "waiting for device to settle " | 848 | dev_dbg(dev, "waiting for device to settle " |
| 848 | "before scanning\n"); | 849 | "before scanning\n"); |
| 849 | wait_event_interruptible_timeout(us->delay_wait, | 850 | wait_event_interruptible_timeout(us->delay_wait, |
| 850 | test_bit(US_FLIDX_DONT_SCAN, &us->dflags), | 851 | test_bit(US_FLIDX_DONT_SCAN, &us->dflags) || |
| 851 | delay_use * HZ); | 852 | freezing(current), delay_use * HZ); |
| 852 | } | 853 | } |
| 853 | 854 | ||
| 854 | /* If the device is still connected, perform the scanning */ | 855 | /* If the device is still connected, perform the scanning */ |
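The usb-storage scan thread keeps an open-coded wait rather than switching to wait_event_freezable(): as the updated comment explains, the thread must remain freezable yet must not actually freeze here, or khubd could hang waiting for the scan while holding the device lock. Adding freezing(current) to the wait condition simply cuts the settle delay short when a freeze is requested. A sketch of that pattern in isolation (the wait queue, flag bit and delay below are hypothetical stand-ins):

#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_delay_wait);
static unsigned long my_flags;
#define MY_FL_ABORT_SCAN 0

static void my_settle_delay(unsigned int delay_secs)
{
	/*
	 * Deliberately not wait_event_freezable(): we only *test* for a
	 * pending freeze and bail out of the delay early, instead of
	 * freezing while another task may be blocked on us holding a lock.
	 */
	wait_event_interruptible_timeout(my_delay_wait,
			test_bit(MY_FL_ABORT_SCAN, &my_flags) ||
			freezing(current),
			delay_secs * HZ);
}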
