author     Linus Torvalds <torvalds@linux-foundation.org>    2011-11-05 20:54:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-11-05 20:54:18 -0400
commit     8110efc64c4790cd1bf7e30f080e5ba3faa7cb85 (patch)
tree       708c2a1ef6bebce8eda9676c554bea366547678e
parent     06d8eb1b7d1eb34c76538dab791bc14136ebbc8f (diff)
parent     d6cc76856d353a3a9c43bead33210b9216dce332 (diff)
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / Freezer: Revert 27920651fe "PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too"
  PM / Freezer: Reimplement wait_event_freezekillable using freezer_do_not_count/freezer_count
  USB: Update last_busy time after autosuspend fails
  PM / Runtime: Automatically retry failed autosuspends
  PM / QoS: Remove redundant check
  PM / OPP: Fix build when CONFIG_PM_OPP is not set
  PM / Runtime: Fix runtime accounting calculation error
  PM / Sleep: Update freezer documentation
  PM / Sleep: Remove unused symbol 'suspend_cpu_hotplug'
  PM / Sleep: Fix race between CPU hotplug and freezer
  ACPI / PM: Add Sony VPCEB17FX to nonvs blacklist
-rw-r--r--  Documentation/power/freezing-of-tasks.txt   8
-rw-r--r--  Documentation/power/runtime_pm.txt         10
-rw-r--r--  drivers/acpi/sleep.c                        8
-rw-r--r--  drivers/base/power/runtime.c               23
-rw-r--r--  drivers/usb/core/driver.c                   5
-rw-r--r--  include/linux/cpu.h                         4
-rw-r--r--  include/linux/freezer.h                    11
-rw-r--r--  include/linux/opp.h                         4
-rw-r--r--  kernel/cpu.c                               74
-rw-r--r--  kernel/freezer.c                            2
-rw-r--r--  kernel/power/qos.c                          3
11 files changed, 125 insertions(+), 27 deletions(-)
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 38b57248fd61..316c2ba187f4 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -22,12 +22,12 @@ try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
 to it by calling the function called refrigerator() (defined in
-kernel/power/process.c), which sets the task's PF_FROZEN flag, changes its state
+kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
 handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c and include/linux/freezer.h). User space
-processes are generally frozen before kernel threads.
+defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
+User space processes are generally frozen before kernel threads.
 
 It is not recommended to call refrigerator() directly. Instead, it is
 recommended to use the try_to_freeze() function (defined in
@@ -95,7 +95,7 @@ after the memory for the image has been freed, we don't want tasks to allocate
 additional memory and we prevent them from doing that by freezing them earlier.
 [Of course, this also means that device drivers should not allocate substantial
 amounts of memory from their .suspend() callbacks before hibernation, but this
-is e separate issue.]
+is a separate issue.]
 
 3. The third reason is to prevent user space processes and some kernel threads
 from interfering with the suspending and resuming of devices. A user space
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 0e856088db7c..5336149f831b 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -789,6 +789,16 @@ will behave normally, not taking the autosuspend delay into account.
 Similarly, if the power.use_autosuspend field isn't set then the autosuspend
 helper functions will behave just like the non-autosuspend counterparts.
 
+Under some circumstances a driver or subsystem may want to prevent a device
+from autosuspending immediately, even though the usage counter is zero and the
+autosuspend delay time has expired. If the ->runtime_suspend() callback
+returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
+in the future (as it normally would be if the callback invoked
+pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
+autosuspend. The ->runtime_suspend() callback can't do this rescheduling
+itself because no suspend requests of any kind are accepted while the device is
+suspending (i.e., while the callback is running).
+
 The implementation is well suited for asynchronous use in interrupt contexts.
 However such use inevitably involves races, because the PM core can't
 synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
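
For illustration only, here is a minimal sketch of a driver ->runtime_suspend() callback that relies on the automatic retry described in the hunk above. The foo_* device, helpers and data are hypothetical; only pm_runtime_mark_last_busy() and the -EBUSY convention come from the runtime PM API.

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_chip *chip = dev_get_drvdata(dev);   /* hypothetical driver data */

        /* Hypothetical check: the hardware still has work in flight. */
        if (foo_chip_busy(chip)) {
                /*
                 * Refresh last_busy so that the next autosuspend expiration
                 * time lies in the future; with RPM_AUTO set, the PM core
                 * will then reschedule the autosuspend instead of giving up.
                 */
                pm_runtime_mark_last_busy(dev);
                return -EBUSY;
        }

        foo_chip_power_down(chip);      /* hypothetical power-off helper */
        return 0;
}
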
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0e46faef1d30..6d9a3ab58db2 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -398,6 +398,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	{
 	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VPCEB17FX",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
 	.ident = "Sony Vaio VGN-SR11M",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6bb3aafa85ed..124dbf60c9bf 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -29,13 +29,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
 void update_pm_runtime_accounting(struct device *dev)
 {
 	unsigned long now = jiffies;
-	int delta;
+	unsigned long delta;
 
 	delta = now - dev->power.accounting_timestamp;
 
-	if (delta < 0)
-		delta = 0;
-
 	dev->power.accounting_timestamp = now;
 
 	if (dev->power.disable_depth > 0)
@@ -296,6 +293,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
  * the callback was running then carry it out, otherwise send an idle
  * notification for its parent (if the suspend succeeded and both
  * ignore_children of parent->power and irq_safe of dev->power are not set).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -416,10 +416,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
 		dev->power.deferred_resume = false;
-		if (retval == -EAGAIN || retval == -EBUSY)
+		if (retval == -EAGAIN || retval == -EBUSY) {
 			dev->power.runtime_error = 0;
-		else
+
+			/*
+			 * If the callback routine failed an autosuspend, and
+			 * if the last_busy time has been updated so that there
+			 * is a new autosuspend expiration time, automatically
+			 * reschedule another autosuspend.
+			 */
+			if ((rpmflags & RPM_AUTO) &&
+			    pm_runtime_autosuspend_expiration(dev) != 0)
+				goto repeat;
+		} else {
 			pm_runtime_cancel_pending(dev);
+		}
 		wake_up_all(&dev->power.wait_queue);
 		goto out;
 	}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 3b029a0a4787..c2c0ae57e7ff 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1667,6 +1667,11 @@ int usb_runtime_suspend(struct device *dev)
 		return -EAGAIN;
 
 	status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+	/* Allow a retry if autosuspend failed temporarily */
+	if (status == -EAGAIN || status == -EBUSY)
+		usb_mark_last_busy(udev);
+
 	/* The PM core reacts badly unless the return code is 0,
 	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
 	 */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b1a635acf72a..6cb60fd2ea84 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -196,13 +196,9 @@ static inline void cpu_hotplug_driver_unlock(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
-extern int suspend_cpu_hotplug;
-
 extern int disable_nonboot_cpus(void);
 extern void enable_nonboot_cpus(void);
 #else /* !CONFIG_PM_SLEEP_SMP */
-#define suspend_cpu_hotplug	0
-
 static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
 #endif /* !CONFIG_PM_SLEEP_SMP */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a49b52934c55..a5386e3ee756 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -143,14 +143,9 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezekillable(wq, condition)                \
 ({                                                              \
 	int __retval;                                           \
-	do {                                                    \
-		__retval = wait_event_killable(wq,              \
-				(condition) || freezing(current)); \
-		if (__retval && !freezing(current))             \
-			break;                                  \
-		else if (!(condition))                          \
-			__retval = -ERESTARTSYS;                \
-	} while (try_to_freeze());                              \
+	freezer_do_not_count();                                 \
+	__retval = wait_event_killable(wq, (condition));        \
+	freezer_count();                                        \
 	__retval;                                               \
 })
 
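
As a usage note, here is a small sketch of how the reworked macro is typically called, e.g. by code waiting for a reply that must neither block the freezer nor ignore fatal signals. The wait queue, flag and foo_* names are invented for this example; wait_event_freezekillable(), freezer_do_not_count() and freezer_count() are the real interfaces touched by this hunk.

#include <linux/freezer.h>
#include <linux/wait.h>

/* Hypothetical state owned by some driver or filesystem. */
static DECLARE_WAIT_QUEUE_HEAD(foo_reply_wq);
static bool foo_reply_ready;

static int foo_wait_for_reply(void)
{
        /*
         * freezer_do_not_count()/freezer_count() bracket the killable wait,
         * so the freezer does not wait for this task while it sleeps, and a
         * fatal signal still ends the wait with -ERESTARTSYS.
         */
        return wait_event_freezekillable(foo_reply_wq, foo_reply_ready);
}
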
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 87a9208f8aec..ee94b33080c2 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -97,11 +97,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
 	return 0;
 }
 
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 {
 	return ERR_PTR(-EINVAL);
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
 int opp_init_cpufreq_table(struct device *dev,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 12b7458f23b1..aa39dd7a3846 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,6 +15,7 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 #include <linux/gfp.h>
+#include <linux/suspend.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
 	return 0;
 }
 core_initcall(alloc_frozen_cpus);
+
+/*
+ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+ * to continue until any currently running CPU hotplug operation gets
+ * completed.
+ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+ * CPU hotplug path and released only after it is complete. Thus, we
+ * (and hence the freezer) will block here until any currently running CPU
+ * hotplug operation gets completed.
+ */
+void cpu_hotplug_disable_before_freeze(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 1;
+	cpu_maps_update_done();
+}
+
+
+/*
+ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+ * disabled while beginning to freeze tasks).
+ */
+void cpu_hotplug_enable_after_thaw(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	cpu_maps_update_done();
+}
+
+/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+ * duration* of the execution of the callbacks.
+ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
+ *
+ * This synchronization is implemented by mutually excluding regular CPU
+ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
+ * Hibernate notifications.
+ */
+static int
+cpu_hotplug_pm_callback(struct notifier_block *nb,
+			unsigned long action, void *ptr)
+{
+	switch (action) {
+
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+		cpu_hotplug_disable_before_freeze();
+		break;
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+		cpu_hotplug_enable_after_thaw();
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+int cpu_hotplug_pm_sync_init(void)
+{
+	pm_notifier(cpu_hotplug_pm_callback, 0);
+	return 0;
+}
+core_initcall(cpu_hotplug_pm_sync_init);
+
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
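
Purely as an illustration of the pattern this hunk relies on, a hypothetical subsystem could register its own suspend/hibernate notifier in the same way to keep its state quiescent while tasks are being frozen. All foo_* names below are invented; pm_notifier(), the PM_* actions and the NOTIFY_* return codes are the real interfaces used above.

#include <linux/suspend.h>
#include <linux/notifier.h>

static int foo_pm_callback(struct notifier_block *nb,
                           unsigned long action, void *ptr)
{
        switch (action) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                foo_stop_background_work();     /* hypothetical */
                break;
        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                foo_restart_background_work();  /* hypothetical */
                break;
        default:
                return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}

static int __init foo_pm_sync_init(void)
{
        pm_notifier(foo_pm_callback, 0);
        return 0;
}
core_initcall(foo_pm_sync_init);
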
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 66a594e8ad2f..7b01de98bb6a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
 	unsigned long flags;
 
 	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 1);
+	signal_wake_up(p, 0);
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 }
 
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 1c1797dd1d1d..5167d996cd02 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -386,8 +386,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
 		filp->private_data = req;
 
-		if (filp->private_data)
-			return 0;
+		return 0;
 	}
 	return -EPERM;
 }