author	Rafael J. Wysocki <rjw@sisk.pl>	2011-12-25 17:42:20 -0500
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-12-25 17:42:20 -0500
commit	b7ba68c4a072c9aa8f04b8cf7838b6cd2f48d918 (patch)
tree	cd8f8029111fc52d06060691fb0325ba2e798e94
parent	8d274ab7d3d6f23e2bc0e433c8d53acbe60a9773 (diff)
parent	90363ddf0a1a4dccfbb8d0c10b8f488bc7fa69f8 (diff)
Merge branch 'pm-sleep' into pm-for-linus
* pm-sleep: (51 commits)
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex directly
  PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with [un]lock_system_sleep()
  PM / Sleep: Make [un]lock_system_sleep() generic
  PM / Sleep: Use the freezer_count() functions in [un]lock_system_sleep() APIs
  PM / Freezer: Remove the "userspace only" constraint from freezer[_do_not]_count()
  PM / Hibernate: Replace unintuitive 'if' condition in kernel/power/user.c with 'else'
  Freezer / sunrpc / NFS: don't allow TASK_KILLABLE sleeps to block the freezer
  PM / Sleep: Unify diagnostic messages from device suspend/resume
  ACPI / PM: Do not save/restore NVS on Asus K54C/K54HR
  PM / Hibernate: Remove deprecated hibernation test modes
  PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test path
  ...

Conflicts:
	kernel/kmod.c
-rw-r--r-- Documentation/feature-removal-schedule.txt | 11
-rw-r--r-- Documentation/power/devices.txt | 37
-rw-r--r-- Documentation/power/freezing-of-tasks.txt | 39
-rw-r--r-- Documentation/power/runtime_pm.txt | 130
-rw-r--r-- arch/alpha/include/asm/thread_info.h | 2
-rw-r--r-- arch/arm/include/asm/thread_info.h | 2
-rw-r--r-- arch/avr32/include/asm/thread_info.h | 2
-rw-r--r-- arch/blackfin/include/asm/thread_info.h | 2
-rw-r--r-- arch/cris/include/asm/thread_info.h | 2
-rw-r--r-- arch/frv/include/asm/thread_info.h | 2
-rw-r--r-- arch/h8300/include/asm/thread_info.h | 2
-rw-r--r-- arch/ia64/include/asm/thread_info.h | 2
-rw-r--r-- arch/m32r/include/asm/thread_info.h | 2
-rw-r--r-- arch/m68k/include/asm/thread_info.h | 1
-rw-r--r-- arch/microblaze/include/asm/thread_info.h | 2
-rw-r--r-- arch/mips/include/asm/thread_info.h | 2
-rw-r--r-- arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r-- arch/parisc/include/asm/thread_info.h | 2
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r-- arch/powerpc/kernel/vio.c | 1
-rw-r--r-- arch/s390/include/asm/thread_info.h | 2
-rw-r--r-- arch/sh/include/asm/thread_info.h | 2
-rw-r--r-- arch/sparc/include/asm/thread_info_32.h | 2
-rw-r--r-- arch/sparc/include/asm/thread_info_64.h | 2
-rw-r--r-- arch/um/include/asm/thread_info.h | 2
-rw-r--r-- arch/unicore32/include/asm/thread_info.h | 2
-rw-r--r-- arch/x86/include/asm/thread_info.h | 2
-rw-r--r-- arch/xtensa/include/asm/thread_info.h | 2
-rw-r--r-- drivers/acpi/sleep.c | 16
-rw-r--r-- drivers/amba/bus.c | 136
-rw-r--r-- drivers/base/firmware_class.c | 4
-rw-r--r-- drivers/base/platform.c | 115
-rw-r--r-- drivers/base/power/generic_ops.c | 91
-rw-r--r-- drivers/base/power/main.c | 375
-rw-r--r-- drivers/base/power/runtime.c | 9
-rw-r--r-- drivers/bluetooth/btmrvl_main.c | 2
-rw-r--r-- drivers/dma/dmatest.c | 46
-rw-r--r-- drivers/mfd/twl6030-irq.c | 2
-rw-r--r-- drivers/net/irda/stir4200.c | 2
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 15
-rw-r--r-- drivers/staging/rts_pstor/rtsx.c | 2
-rw-r--r-- drivers/usb/storage/usb.c | 13
-rw-r--r-- fs/btrfs/async-thread.c | 2
-rw-r--r-- fs/btrfs/disk-io.c | 8
-rw-r--r-- fs/ext4/super.c | 3
-rw-r--r-- fs/fs-writeback.c | 4
-rw-r--r-- fs/gfs2/log.c | 4
-rw-r--r-- fs/gfs2/quota.c | 4
-rw-r--r-- fs/jbd/journal.c | 2
-rw-r--r-- fs/jbd2/journal.c | 2
-rw-r--r-- fs/jfs/jfs_logmgr.c | 2
-rw-r--r-- fs/jfs/jfs_txnmgr.c | 4
-rw-r--r-- fs/nfs/inode.c | 3
-rw-r--r-- fs/nfs/nfs3proc.c | 3
-rw-r--r-- fs/nfs/nfs4proc.c | 5
-rw-r--r-- fs/nfs/proc.c | 3
-rw-r--r-- fs/nilfs2/segment.c | 2
-rw-r--r-- fs/xfs/xfs_buf.c | 2
-rw-r--r-- include/linux/freezer.h | 157
-rw-r--r-- include/linux/kmod.h | 2
-rw-r--r-- include/linux/kthread.h | 1
-rw-r--r-- include/linux/platform_device.h | 30
-rw-r--r-- include/linux/pm.h | 13
-rw-r--r-- include/linux/sched.h | 4
-rw-r--r-- include/linux/suspend.h | 35
-rw-r--r-- kernel/cgroup_freezer.c | 63
-rw-r--r-- kernel/exit.c | 3
-rw-r--r-- kernel/fork.c | 1
-rw-r--r-- kernel/freezer.c | 203
-rw-r--r-- kernel/kexec.c | 4
-rw-r--r-- kernel/kmod.c | 25
-rw-r--r-- kernel/kthread.c | 27
-rw-r--r-- kernel/power/hibernate.c | 92
-rw-r--r-- kernel/power/main.c | 10
-rw-r--r-- kernel/power/power.h | 2
-rw-r--r-- kernel/power/process.c | 77
-rw-r--r-- kernel/power/suspend.c | 12
-rw-r--r-- kernel/power/user.c | 120
-rw-r--r-- mm/backing-dev.c | 8
-rw-r--r-- mm/oom_kill.c | 2
-rw-r--r-- net/sunrpc/sched.c | 3
81 files changed, 770 insertions, 1268 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 3d849122b5b1..9f51fc439a81 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
 
 ---------------------------
 
-What: Deprecated snapshot ioctls
-When: 2.6.36
-
-Why: The ioctls in kernel/power/user.c were marked as deprecated long time
-     ago. Now they notify users about that so that they need to replace
-     their userspace. After some more time, remove them completely.
-
-Who: Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
 What: The ieee80211_regdom module parameter
 When: March 2010 / desktop catchup
 
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3139fb505dce..20af7def23c8 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
 pointed to by the ops member of struct dev_pm_domain, or by the pm member of
 struct bus_type, struct device_type and struct class. They are mostly of
 interest to the people writing infrastructure for platforms and buses, like PCI
-or USB, or device type and device class drivers.
+or USB, or device type and device class drivers. They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on. Not many people
@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
 unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
 been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
 
-All phases use PM domain, bus, type, or class callbacks (that is, methods
-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
-These callbacks are regarded by the PM core as mutually exclusive. Moreover,
-PM domain callbacks always take precedence over bus, type and class callbacks,
-while type callbacks take precedence over bus and class callbacks, and class
-callbacks take precedence over bus callbacks. To be precise, the following
-rules are used to determine which callback to execute in the given phase:
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm). These callbacks are regarded by the PM core as mutually
+exclusive. Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks. To be precise, the following rules are used to determine
+which callback to execute in the given phase:
 
-    1. If dev->pm_domain is present, the PM core will attempt to execute the
-       callback included in dev->pm_domain->ops. If that callback is not
-       present, no action will be carried out for the given device.
+    1. If dev->pm_domain is present, the PM core will choose the callback
+       included in dev->pm_domain->ops for execution
 
     2. Otherwise, if both dev->type and dev->type->pm are present, the callback
-       included in dev->type->pm will be executed.
+       included in dev->type->pm will be chosen for execution.
 
     3. Otherwise, if both dev->class and dev->class->pm are present, the
-       callback included in dev->class->pm will be executed.
+       callback included in dev->class->pm will be chosen for execution.
 
     4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
-       included in dev->bus->pm will be executed.
+       included in dev->bus->pm will be chosen for execution.
 
 This allows PM domains and device types to override callbacks provided by bus
 types or device classes if necessary.
 
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
+
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
 
 
 Entering System Suspend
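
The selection rules in the devices.txt hunk above reduce to a precedence walk
with a driver fallback. The sketch below is illustrative only, with hypothetical
helper names (choose_subsys_ops, pick_suspend_callback); the kernel's actual
implementation after this merge is pm_op() and friends in
drivers/base/power/main.c, which this series changed to return callback
pointers.

#include <linux/device.h>
#include <linux/pm.h>

typedef int (*pm_callback_t)(struct device *);

/* Rules 1-4 above: pick the subsystem whose ops drive this device. */
static const struct dev_pm_ops *choose_subsys_ops(struct device *dev)
{
	if (dev->pm_domain)
		return &dev->pm_domain->ops;	/* 1. PM domain */
	if (dev->type && dev->type->pm)
		return dev->type->pm;		/* 2. device type */
	if (dev->class && dev->class->pm)
		return dev->class->pm;		/* 3. device class */
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;		/* 4. bus type */
	return NULL;
}

/* If the chosen subsystem lacks the method, fall back to the driver's own. */
static pm_callback_t pick_suspend_callback(struct device *dev)
{
	const struct dev_pm_ops *ops = choose_subsys_ops(dev);

	if (ops && ops->suspend)
		return ops->suspend;
	if (dev->driver && dev->driver->pm)
		return dev->driver->pm->suspend;
	return NULL;
}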
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba187f4..6ccb68f68da6 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default. However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged). From this point it is regarded as freezable
+directly is not allowed). From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
 A driver must have all firmwares it may need in RAM before suspend() is called.
 If keeping them is not practical, for example due to their size, they must be
 requested early enough using the suspend notifier API described in notifiers.txt.
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
+it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
+that could lead to freezing failures, because if the suspend/hibernate code
+successfully acquired the 'pm_mutex' lock, and hence that other entity failed
+to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
+state. As a consequence, the freezer would not be able to freeze that task,
+leading to freezing failure.
+
+However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
+since they ask the freezer to skip freezing this task, since it is anyway
+"frozen enough" as it is blocked on 'pm_mutex', which will be released
+only after the entire suspend/hibernation sequence is complete.
+So, to summarize, use [un]lock_system_sleep() instead of directly using
+mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
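
The freezing-of-tasks.txt changes above boil down to two usage patterns. A
minimal sketch of both, assuming a hypothetical my_thread_fn and
my_update_config; set_freezable(), try_to_freeze(), lock_system_sleep() and
unlock_system_sleep() are the interfaces the document names.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

static int my_thread_fn(void *data)	/* hypothetical kernel thread */
{
	set_freezable();	/* clear PF_NOFREEZE; thread is now freezable */

	while (!kthread_should_stop()) {
		/* Enters __refrigerator() if TIF_FREEZE is set for us. */
		try_to_freeze();
		/* ... one unit of work, with no locks held across the call ... */
	}
	return 0;
}

static void my_update_config(void)	/* hypothetical */
{
	/*
	 * Unlike mutex_lock(&pm_mutex), this asks the freezer to skip us
	 * while we are blocked here, so freezing cannot fail on our account.
	 */
	lock_system_sleep();
	/* ... touch state that must not race with suspend/hibernation ... */
	unlock_system_sleep();
}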
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index c2ae8bf77d46..4abe83e1045a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -57,6 +57,10 @@ the following:
 
     4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
 
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
+
 The PM core always checks which callback to use in the order given above, so the
 priority order of callbacks from high to low is: PM domain, device type, class
 and bus type. Moreover, the high-priority one will always take precedence over
@@ -64,86 +68,88 @@ a low-priority one. The PM domain, bus type, device type and class callbacks
 are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
-enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
-->runtime_idle() callbacks may be invoked in atomic context with interrupts
-disabled for a given device. This implies that the callback routines in
-question must not block or sleep, but it also means that the synchronous helper
-functions listed at the end of Section 4 may be used for that device within an
-interrupt handler or generally in an atomic context.
+enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled. This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
 
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
 PM core's point of view it is not necessary to implement a ->runtime_suspend()
 callback in a device driver as long as the subsystem-level suspend callback
 knows what to do to handle the device).
 
- * Once the subsystem-level suspend callback has completed successfully
-   for given device, the PM core regards the device as suspended, which need
-   not mean that the device has been put into a low power state. It is
-   supposed to mean, however, that the device will not process data and will
-   not communicate with the CPU(s) and RAM until the subsystem-level resume
-   callback is executed for it. The runtime PM status of a device after
-   successful execution of the subsystem-level suspend callback is 'suspended'.
-
- * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
-   the device's runtime PM status is 'active', which means that the device
-   _must_ be fully operational afterwards.
-
- * If the subsystem-level suspend callback returns an error code different
-   from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
-   refuse to run the helper functions described in Section 4 for the device,
-   until the status of it is directly set either to 'active', or to 'suspended'
-   (the PM core provides special helper functions for this purpose).
-
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+ * Once the subsystem-level suspend callback (or the driver suspend callback,
+   if invoked directly) has completed successfully for the given device, the PM
+   core regards the device as suspended, which need not mean that it has been
+   put into a low power state. It is supposed to mean, however, that the
+   device will not process data and will not communicate with the CPU(s) and
+   RAM until the appropriate resume callback is executed for it. The runtime
+   PM status of a device after successful execution of the suspend callback is
+   'suspended'.
+
+ * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+   status remains 'active', which means that the device _must_ be fully
+   operational afterwards.
+
+ * If the suspend callback returns an error code different from -EBUSY and
+   -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+   the helper functions described in Section 4 for the device until its status
+   is directly set to either'active', or 'suspended' (the PM core provides
+   special helper functions for this purpose).
+
+In particular, if the driver requires remote wakeup capability (i.e. hardware
 mechanism allowing the device to request a change of its power state, such as
 PCI PME) for proper functioning and device_run_wake() returns 'false' for the
 device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device. Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
-
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
-
- * Once the subsystem-level resume callback has completed successfully, the PM
-   core regards the device as fully operational, which means that the device
-   _must_ be able to complete I/O operations as needed. The runtime PM status
-   of the device is then 'active'.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device. Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
+
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
+
+ * Once the subsystem-level resume callback (or the driver resume callback, if
+   invoked directly) has completed successfully, the PM core regards the device
+   as fully operational, which means that the device _must_ be able to complete
+   I/O operations as needed. The runtime PM status of the device is then
+   'active'.
 
- * If the subsystem-level resume callback returns an error code, the PM core
-   regards this as a fatal error and will refuse to run the helper functions
-   described in Section 4 for the device, until its status is directly set
-   either to 'active' or to 'suspended' (the PM core provides special helper
-   functions for this purpose).
+ * If the resume callback returns an error code, the PM core regards this as a
+   fatal error and will refuse to run the helper functions described in Section
+   4 for the device, until its status is directly set to either 'active', or
+   'suspended' (by means of special helper functions provided by the PM core
+   for this purpose).
 
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
 
  * If any of these counters is decreased using a helper function provided by
   the PM core and it turns out to be equal to zero, the other counter is
   checked. If that counter also is equal to zero, the PM core executes the
-   subsystem-level idle callback with the device as an argument.
+   idle callback with the device as its argument.
 
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case. The value returned by this callback is ignored by the PM
 core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
 
 (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
     ->runtime_suspend() in parallel with ->runtime_resume() or with another
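
The runtime_pm.txt text above fixes the return-value conventions for the three
runtime PM callbacks. Below is a driver-side sketch under those conventions;
the foo_* helpers are hypothetical stubs, while device_run_wake(),
pm_schedule_suspend() and SET_RUNTIME_PM_OPS() are interfaces provided by
<linux/pm_runtime.h> and <linux/pm.h>.

#include <linux/pm_runtime.h>

/* Hypothetical hardware helpers, stubbed for illustration. */
static bool foo_needs_remote_wakeup(struct device *dev) { return false; }
static void foo_enter_low_power(struct device *dev) { }
static void foo_leave_low_power(struct device *dev) { }

static int foo_runtime_suspend(struct device *dev)
{
	/* Needs remote wakeup but cannot get it: refuse, stay 'active'. */
	if (foo_needs_remote_wakeup(dev) && !device_run_wake(dev))
		return -EBUSY;

	foo_enter_low_power(dev);
	return 0;			/* status becomes 'suspended' */
}

static int foo_runtime_resume(struct device *dev)
{
	foo_leave_low_power(dev);
	return 0;			/* status becomes 'active' */
}

static int foo_runtime_idle(struct device *dev)
{
	/*
	 * Usage count and active-child count are both zero: queue up an
	 * asynchronous suspend request. The PM core ignores our return value.
	 */
	pm_schedule_suspend(dev, 0);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};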
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db022342..28335bd40e40 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06e..0f30c3a78fc1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG 16
 #define TIF_USING_IWMMXT 17
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_RESTORE_SIGMASK 20
 #define TIF_SECCOMP 21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03dcb0b6..e5deda4691db 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
-#define TIF_FREEZE 29
 #define TIF_DEBUG 30 /* debugging enabled */
 #define TIF_USERSPACE 31 /* true if FS sets userspace */
 
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a121..53ad10005ae3 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
 	TIF_NEED_RESCHED */
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 6 /* is freezing for suspend */
 #define TIF_IRQ_SYNC 7 /* sync pipeline stage */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c54557..29b92884d793 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73dc119..92d83ea99ae5 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784bfdee..9c126e0c09aa 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84e7bcc..e054bcc4273c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@ struct thread_info {
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
 #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
-#define TIF_FREEZE 20 /* is freezing for suspend */
 #define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba44068..bf8fa3c06f4e 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 790988967ba7..294df1592de5 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_DELAYED_TRACE 14 /* single step a syscall */
 #define TIF_SYSCALL_TRACE 15 /* syscall trace active */
 #define TIF_MEMDIE 16 /* is terminating due to OOM killer */
-#define TIF_FREEZE 17 /* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
 
 #endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2ac21b3..1a8ab6a5c03f 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 6 /* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
 #define TIF_SECCOMP 10 /* secure computing */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_IRET (1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6639e7..0d85d8e440c5 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_FIXADE 20 /* Fix address errors in software */
 #define TIF_LOGADE 21 /* Log address errors to syslog */
 #define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_FIXADE (1<<TIF_FIXADE)
 #define _TIF_LOGADE (1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c213002d4c..28cf52100baa 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
 #define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE +(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de727e90b..6d9c7c7973d0 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@ struct thread_info {
 #define TIF_32BIT 4 /* 32 bit binary */
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
-#define TIF_FREEZE 7 /* is freezing for suspend */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9 /* single stepping? */
 #define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT (1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231ec1f0..964714940961 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR 12 /* Force successful syscall return */
 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
 #define _TIF_NOERROR (1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f65af61996bd..8b086299ba25 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
 	.match = vio_bus_match,
 	.probe = vio_bus_probe,
 	.remove = vio_bus_remove,
-	.pm = GENERIC_SUBSYS_PM_OPS,
 };
 
 /**
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a23183423b14..a73038155e0d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP 20 /* This task is single stepped */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
 
 #define _TIF_SYSCALL (1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT (1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d5089de1e..20ee40af16e9 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa5753233410..5cc5888ad5a3 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
 					 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE		10	/* is terminating due to OOM killer */
-#define TIF_FREEZE		11	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
 					 _TIF_SIGPENDING | \
 					 _TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be1a533..01d057fe6a3f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 12 is available */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	14
-#define TIF_FREEZE		15	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				 _TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad33fab..200c4ab1240c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	6
 #define TIF_RESTORE_SIGMASK	7
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e04861..89f7557583b8 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_MEMDIE		18
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a1fe5c127b52..32125af20d32 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -90,7 +90,6 @@ struct thread_info {
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_DEBUG		21	/* uses debug registers */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
-#define TIF_FREEZE		23	/* is freezing for suspend */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
@@ -112,7 +111,6 @@ struct thread_info {
 #define _TIF_FORK		(1 << TIF_FORK)
 #define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8accb0b0c..6abbedd09d85 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE		17	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
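
Note on the thread_info hunks above: every architecture drops TIF_FREEZE because freeze state now lives entirely in the freezer core rather than in per-task flag bits. The pattern a freezable kernel thread follows after this change, as a minimal illustrative sketch (the thread body is hypothetical, not taken from any driver in this merge):

#include <linux/freezer.h>
#include <linux/kthread.h>

static int example_thread(void *data)
{
	set_freezable();	/* kthreads are non-freezable unless they opt in */

	while (!kthread_should_stop()) {
		/*
		 * Enter the refrigerator if a freeze is in progress;
		 * no per-arch TIF_FREEZE bit is consulted anymore.
		 */
		try_to_freeze();

		/* ... one unit of work ... */
	}
	return 0;
}
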
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db2..0a7ed69546ba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54C",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54HR",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e801131..0304b3fdff5a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -109,31 +109,7 @@ static int amba_legacy_resume(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare		NULL
-#define amba_pm_complete	NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
@@ -155,22 +131,6 @@ static int amba_pm_suspend(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -189,28 +149,10 @@ static int amba_pm_resume(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_SUSPEND */
 
 #define amba_pm_suspend		NULL
 #define amba_pm_resume		NULL
-#define amba_pm_suspend_noirq	NULL
-#define amba_pm_resume_noirq	NULL
 
 #endif /* !CONFIG_SUSPEND */
 
@@ -234,22 +176,6 @@ static int amba_pm_freeze(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -268,22 +194,6 @@ static int amba_pm_thaw(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -302,22 +212,6 @@ static int amba_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -336,32 +230,12 @@ static int amba_pm_restore(struct device *dev)
 	return ret;
 }
 
-static int amba_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze		NULL
 #define amba_pm_thaw		NULL
 #define amba_pm_poweroff	NULL
 #define amba_pm_restore		NULL
-#define amba_pm_freeze_noirq	NULL
-#define amba_pm_thaw_noirq	NULL
-#define amba_pm_poweroff_noirq	NULL
-#define amba_pm_restore_noirq	NULL
 
 #endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
@@ -402,20 +276,12 @@ static int amba_pm_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM
 
 static const struct dev_pm_ops amba_pm = {
-	.prepare	= amba_pm_prepare,
-	.complete	= amba_pm_complete,
 	.suspend	= amba_pm_suspend,
 	.resume		= amba_pm_resume,
 	.freeze		= amba_pm_freeze,
 	.thaw		= amba_pm_thaw,
 	.poweroff	= amba_pm_poweroff,
 	.restore	= amba_pm_restore,
-	.suspend_noirq	= amba_pm_suspend_noirq,
-	.resume_noirq	= amba_pm_resume_noirq,
-	.freeze_noirq	= amba_pm_freeze_noirq,
-	.thaw_noirq	= amba_pm_thaw_noirq,
-	.poweroff_noirq	= amba_pm_poweroff_noirq,
-	.restore_noirq	= amba_pm_restore_noirq,
 	SET_RUNTIME_PM_OPS(
 		amba_pm_runtime_suspend,
 		amba_pm_runtime_resume,
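
With the forward-only callbacks gone, an AMBA driver's dev_pm_ops are located and invoked by the PM core itself (see the drivers/base/power/main.c changes below), so the bus keeps only the callbacks that add real behavior. A hedged sketch of a driver relying on this, with all names hypothetical:

static int foo_suspend_noirq(struct device *dev)
{
	/* hardware-specific late-suspend work would go here */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* still honored: the PM core now falls back to driver ops
	 * when the bus provides no _noirq callbacks of its own */
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};

static struct amba_driver foo_driver = {
	.drv = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
	/* .probe, .remove and .id_table omitted from this sketch */
};
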
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4e7df5..d5585da14c8a 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 		return 0;
 	}
 
+	read_lock_usermodehelper();
+
 	if (WARN_ON(usermodehelper_is_disabled())) {
 		dev_err(device, "firmware: %s will not be loaded\n", name);
 		retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 	fw_destroy_instance(fw_priv);
 
 out:
+	read_unlock_usermodehelper();
+
 	if (retval) {
 		release_firmware(firmware);
 		*firmware_p = NULL;
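
read_lock_usermodehelper()/read_unlock_usermodehelper() come from the kernel/kmod.c part of this series; holding the read lock keeps usermodehelper_disable() from racing with the usermodehelper_is_disabled() check. The usage pattern, sketched with a hypothetical caller:

static int example_request(struct device *dev)
{
	int retval = 0;

	read_lock_usermodehelper();	/* hold off usermodehelper_disable() */

	if (usermodehelper_is_disabled()) {
		/* suspend/hibernation in progress - fail fast, don't block it */
		retval = -EBUSY;
		goto out;
	}

	/* ... kick off the firmware request / usermode helper ... */

out:
	read_unlock_usermodehelper();
	return retval;
}
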
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895543e7..7d912d5675d8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int platform_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-int platform_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-int platform_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-int platform_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-int platform_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
  * @event: PM transition of the system under way.
  * @bool: Whether or not this is the "noirq" stage.
  *
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code.  Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code.  Return 0 if the callback is
+ * not present.
  */
 static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
 
-	if (!pm || pm_runtime_suspended(dev))
+	if (!pm)
 		return 0;
 
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
 	case PM_EVENT_HIBERNATE:
 		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
+	case PM_EVENT_RESUME:
+		callback = noirq ? pm->resume_noirq : pm->resume;
+		break;
 	case PM_EVENT_THAW:
 		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
+	case PM_EVENT_RESTORE:
+		callback = noirq ? pm->restore_noirq : pm->restore;
+		break;
 	default:
 		callback = NULL;
 		break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
 /**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined.  If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-	int ret;
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
-
-	if (!callback)
-		return 0;
-
-	ret = callback(dev);
-	if (!ret && !noirq && pm_runtime_enabled(dev)) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
-}
-
-/**
  * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
 	pm_runtime_idle(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.prepare = pm_generic_prepare,
-	.suspend = pm_generic_suspend,
-	.suspend_noirq = pm_generic_suspend_noirq,
-	.resume = pm_generic_resume,
-	.resume_noirq = pm_generic_resume_noirq,
-	.freeze = pm_generic_freeze,
-	.freeze_noirq = pm_generic_freeze_noirq,
-	.thaw = pm_generic_thaw,
-	.thaw_noirq = pm_generic_thaw_noirq,
-	.poweroff = pm_generic_poweroff,
-	.poweroff_noirq = pm_generic_poweroff_noirq,
-	.restore = pm_generic_restore,
-	.restore_noirq = pm_generic_restore_noirq,
-	.complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
-	.runtime_suspend = pm_generic_runtime_suspend,
-	.runtime_resume = pm_generic_runtime_resume,
-	.runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
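
With generic_subsys_pm_ops gone (it had no remaining users), a subsystem that wants purely generic sleep behavior composes it from the still-exported pm_generic_* helpers. A minimal sketch, assuming a hypothetical subsystem:

static const struct dev_pm_ops foo_subsys_pm_ops = {
	/* open-coded equivalent of the deleted generic_subsys_pm_ops */
	.suspend	= pm_generic_suspend,
	.resume		= pm_generic_resume,
	.freeze		= pm_generic_freeze,
	.thaw		= pm_generic_thaw,
	.poweroff	= pm_generic_poweroff,
	.restore	= pm_generic_restore,
};
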
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
 #include "../base.h"
 #include "power.h"
 
+typedef int (*pm_callback_t)(struct device *);
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
 	ktime_t calltime = ktime_set(0, 0);
 
 	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i\n",
-			dev_name(dev), task_pid_nr(current));
+		pr_info("calling  %s+ @ %i, parent: %s\n",
+			dev_name(dev), task_pid_nr(current),
+			dev->parent ? dev_name(dev->parent) : "none");
 		calltime = ktime_get();
 	}
 
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
 }
 
 /**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  */
-static int pm_op(struct device *dev,
-		 const struct dev_pm_ops *ops,
-		 pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend) {
-			error = ops->suspend(dev);
-			suspend_report_result(ops->suspend, error);
-		}
-		break;
+		return ops->suspend;
 	case PM_EVENT_RESUME:
-		if (ops->resume) {
-			error = ops->resume(dev);
-			suspend_report_result(ops->resume, error);
-		}
-		break;
+		return ops->resume;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze) {
-			error = ops->freeze(dev);
-			suspend_report_result(ops->freeze, error);
-		}
-		break;
+		return ops->freeze;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff) {
-			error = ops->poweroff(dev);
-			suspend_report_result(ops->poweroff, error);
-		}
-		break;
+		return ops->poweroff;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw) {
-			error = ops->thaw(dev);
-			suspend_report_result(ops->thaw, error);
-		}
+		return ops->thaw;
 		break;
 	case PM_EVENT_RESTORE:
-		if (ops->restore) {
-			error = ops->restore(dev);
-			suspend_report_result(ops->restore, error);
-		}
-		break;
+		return ops->restore;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
+	return NULL;
 }
 
 /**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int pm_noirq_op(struct device *dev,
-			const struct dev_pm_ops *ops,
-			pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
-	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i, parent: %s\n",
-			dev_name(dev), task_pid_nr(current),
-			dev->parent ? dev_name(dev->parent) : "none");
-		calltime = ktime_get();
-	}
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend_noirq) {
-			error = ops->suspend_noirq(dev);
-			suspend_report_result(ops->suspend_noirq, error);
-		}
-		break;
+		return ops->suspend_noirq;
 	case PM_EVENT_RESUME:
-		if (ops->resume_noirq) {
-			error = ops->resume_noirq(dev);
-			suspend_report_result(ops->resume_noirq, error);
-		}
-		break;
+		return ops->resume_noirq;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze_noirq) {
-			error = ops->freeze_noirq(dev);
-			suspend_report_result(ops->freeze_noirq, error);
-		}
-		break;
+		return ops->freeze_noirq;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff_noirq) {
-			error = ops->poweroff_noirq(dev);
-			suspend_report_result(ops->poweroff_noirq, error);
-		}
-		break;
+		return ops->poweroff_noirq;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw_noirq) {
-			error = ops->thaw_noirq(dev);
-			suspend_report_result(ops->thaw_noirq, error);
-		}
-		break;
+		return ops->thaw_noirq;
 	case PM_EVENT_RESTORE:
-		if (ops->restore_noirq) {
-			error = ops->restore_noirq(dev);
-			suspend_report_result(ops->restore_noirq, error);
-		}
-		break;
+		return ops->restore_noirq;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
-	}
-
-	if (initcall_debug) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		printk("initcall %s_i+ returned %d after %Ld usecs\n",
-			dev_name(dev), error,
-			(unsigned long long)ktime_to_ns(delta) >> 10);
 	}
 
-	return error;
+	return NULL;
 }
 
 static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 }
 
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+			    pm_message_t state, char *info)
+{
+	ktime_t calltime;
+	int error;
+
+	if (!cb)
+		return 0;
+
+	calltime = initcall_debug_start(dev);
+
+	pm_dev_dbg(dev, state, info);
+	error = cb(dev);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, error);
+
+	return error;
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
  */
 static int device_resume_noirq(struct device *dev, pm_message_t state)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "EARLY power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+		info = "EARLY power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "EARLY type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
+		info = "EARLY type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "EARLY class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
+		info = "EARLY class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "EARLY ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
+		info = "EARLY bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "EARLY driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
 	TRACE_RESUME(error);
 	return error;
 }
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
 /**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
-	int error;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
-	error = cb(dev);
-	suspend_report_result(cb, error);
-
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
-}
-
-/**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
  */
 static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 	bool put = false;
 
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	put = true;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Driver;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Driver;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Driver;
 		} else if (dev->class->resume) {
-			pm_dev_dbg(dev, state, "legacy class ");
-			error = legacy_resume(dev, dev->class->resume);
+			info = "legacy class ";
+			callback = dev->class->resume;
 			goto End;
 		}
 	}
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->resume) {
-			pm_dev_dbg(dev, state, "legacy ");
-			error = legacy_resume(dev, dev->bus->resume);
+			info = "legacy bus ";
+			callback = dev->bus->resume;
+			goto End;
 		}
 	}
 
+ Driver:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
  End:
+	error = dpm_run_callback(callback, dev, state, info);
 	dev->power.is_suspended = false;
 
  Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
  */
 static void device_complete(struct device *dev, pm_message_t state)
 {
+	void (*callback)(struct device *) = NULL;
+	char *info = NULL;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "completing power domain ");
-		if (dev->pm_domain->ops.complete)
-			dev->pm_domain->ops.complete(dev);
+		info = "completing power domain ";
+		callback = dev->pm_domain->ops.complete;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "completing type ");
-		if (dev->type->pm->complete)
-			dev->type->pm->complete(dev);
+		info = "completing type ";
+		callback = dev->type->pm->complete;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "completing class ");
-		if (dev->class->pm->complete)
-			dev->class->pm->complete(dev);
+		info = "completing class ";
+		callback = dev->class->pm->complete;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "completing ");
-		if (dev->bus->pm->complete)
-			dev->bus->pm->complete(dev);
+		info = "completing bus ";
+		callback = dev->bus->pm->complete;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "completing driver ";
+		callback = dev->driver->pm->complete;
+	}
+
+	if (callback) {
+		pm_dev_dbg(dev, state, info);
+		callback(dev);
 	}
 
 	device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  */
 static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
-	int error;
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
-		if (error)
-			return error;
+		info = "LATE power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "LATE type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
-		if (error)
-			return error;
+		info = "LATE type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "LATE class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
-		if (error)
-			return error;
+		info = "LATE class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "LATE ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
-		if (error)
-			return error;
+		info = "LATE bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
-	return 0;
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "LATE driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	return dpm_run_callback(callback, dev, state, info);
 }
 
 /**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
  */
 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Run;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Run;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Run;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->suspend) {
-			pm_dev_dbg(dev, state, "legacy ");
+			pm_dev_dbg(dev, state, "legacy bus ");
 			error = legacy_suspend(dev, state, dev->bus->suspend);
+			goto End;
 		}
 	}
 
+ Run:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
  End:
 	if (!error) {
 		dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
  */
 static int device_prepare(struct device *dev, pm_message_t state)
 {
+	int (*callback)(struct device *) = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	dev->power.wakeup_path = device_may_wakeup(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		if (dev->pm_domain->ops.prepare)
-			error = dev->pm_domain->ops.prepare(dev);
-		suspend_report_result(dev->pm_domain->ops.prepare, error);
-		if (error)
-			goto End;
+		info = "preparing power domain ";
+		callback = dev->pm_domain->ops.prepare;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "preparing type ");
-		if (dev->type->pm->prepare)
-			error = dev->type->pm->prepare(dev);
-		suspend_report_result(dev->type->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing type ";
+		callback = dev->type->pm->prepare;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "preparing class ");
-		if (dev->class->pm->prepare)
-			error = dev->class->pm->prepare(dev);
-		suspend_report_result(dev->class->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing class ";
+		callback = dev->class->pm->prepare;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "preparing ");
-		if (dev->bus->pm->prepare)
-			error = dev->bus->pm->prepare(dev);
-		suspend_report_result(dev->bus->pm->prepare, error);
+		info = "preparing bus ";
+		callback = dev->bus->pm->prepare;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "preparing driver ";
+		callback = dev->driver->pm->prepare;
+	}
+
+	if (callback) {
+		error = callback(dev);
+		suspend_report_result(callback, error);
 	}
 
- End:
 	device_unlock(dev);
 
 	return error;
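
The shape of the main.c rewrite: pm_op()/pm_noirq_op() only select a callback, dpm_run_callback() executes it with the debug timing and suspend_report_result() handling folded into one place, and every stage gains the same fallback to the driver's own dev_pm_ops when no subsystem supplies one. Reduced to a sketch (pm_op() and dpm_run_callback() are static to main.c; this only illustrates the control flow):

typedef int (*pm_callback_t)(struct device *);

static int example_stage(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;

	if (dev->pm_domain)				/* highest priority */
		callback = pm_op(&dev->pm_domain->ops, state);
	else if (dev->type && dev->type->pm)
		callback = pm_op(dev->type->pm, state);
	else if (dev->class && dev->class->pm)
		callback = pm_op(dev->class->pm, state);
	else if (dev->bus && dev->bus->pm)
		callback = pm_op(dev->bus->pm, state);

	if (!callback && dev->driver && dev->driver->pm)	/* new fallback */
		callback = pm_op(dev->driver->pm, state);

	/* runs the callback (if any), timing and error reporting included */
	return dpm_run_callback(callback, dev, state, "example ");
}
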
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..c56efd756531 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_idle;
+
 	if (callback)
 		__rpm_callback(callback, dev);
 
@@ -413,6 +416,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_suspend;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
@@ -633,6 +639,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_resume;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
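
The same driver-callback fallback lands in all three runtime-PM paths, so a driver whose bus or class defines no runtime callbacks can now provide them directly. Hypothetical driver sketch:

static int foo_runtime_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* picked up by rpm_suspend()/rpm_resume() when the subsystem
	 * supplies no runtime callbacks of its own */
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
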
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
 	int i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
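
The dmatest change is the canonical "poor man's completion": wait_for_completion*() is not freezable, so the driver pairs a bool with an on-stack waitqueue and sleeps in wait_event_freezable_timeout() instead. The same idiom in isolation (all names illustrative):

#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_done {
	bool			done;
	wait_queue_head_t	*wait;
};

/* completion callback, run by whatever finishes the operation */
static void example_callback(void *arg)
{
	struct example_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static int example_wait(unsigned int timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct example_done done = { .wait = &done_wait };

	/* ... hand &done to whatever will invoke example_callback() ... */

	/* freezable: the task may enter the refrigerator while waiting */
	wait_event_freezable_timeout(done_wait, done.done,
				     msecs_to_jiffies(timeout_ms));

	return done.done ? 0 : -ETIMEDOUT;
}
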
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb096..c6b456ad7342 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 41c96b3d8152..e880c79d7bd8 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
 		write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-		refrigerator();
+		try_to_freeze();
 
 		if (change_speed(stir, stir->speed))
 			break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b828680b21d..4b11fc91fa7d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
 			t = 100;	/* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;
@@ -2528,10 +2529,6 @@ exit:
2528static void hotkey_poll_stop_sync(void) 2529static void hotkey_poll_stop_sync(void)
2529{ 2530{
2530 if (tpacpi_hotkey_task) { 2531 if (tpacpi_hotkey_task) {
2531 if (frozen(tpacpi_hotkey_task) ||
2532 freezing(tpacpi_hotkey_task))
2533 thaw_process(tpacpi_hotkey_task);
2534
2535 kthread_stop(tpacpi_hotkey_task); 2532 kthread_stop(tpacpi_hotkey_task);
2536 tpacpi_hotkey_task = NULL; 2533 tpacpi_hotkey_task = NULL;
2537 mutex_lock(&hotkey_thread_mutex); 2534 mutex_lock(&hotkey_thread_mutex);
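
Note: the thinkpad_acpi conversion shows the intended replacement for kthread_should_stop() plus try_to_freeze() pairs in freezable kthreads. kthread_freezable_should_stop() enters the refrigerator itself and reports through was_frozen whether a freeze happened, which the hotkey poller uses to reset stale state after thaw. A sketch of the resulting loop shape, with illustrative names (my_poll_thread, my_reset_state, my_poll_once, poll_period_ms):

static int my_poll_thread(void *data)
{
	bool was_frozen;

	while (true) {
		msleep_interruptible(poll_period_ms);
		if (kthread_freezable_should_stop(&was_frozen))
			break;			/* thaws before returning */
		if (was_frozen)
			my_reset_state();	/* forget state across a thaw */
		my_poll_once();
	}
	return 0;
}
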
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 115635f95024..a7feb3e328a0 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a1..aa84b3d77274 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
 
 	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
 	/*
 	 * Wait for the timeout to expire or for a disconnect
 	 *
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
 	 * fail to freeze, but we can't be non-freezable either. Nor can
 	 * khubd freeze while waiting for scanning to complete as it may
 	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
	 */
 	if (delay_use > 0) {
 		dev_dbg(dev, "waiting for device to settle "
 				"before scanning\n");
 		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
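
Note: the comment in that hunk captures a subtle constraint: this thread must be freezable, yet it must not actually freeze inside the settle wait while khubd may be blocked on it, so a freeze request only terminates the wait early. A generic sketch of the pattern, with illustrative names (my_wq, my_cond, my_timeout):

	/*
	 * Freezable thread that must not freeze while others wait on it:
	 * let freezing(current) end the sleep, then freeze later at a
	 * point where nothing is blocked on us.
	 */
	wait_event_interruptible_timeout(my_wq,
			my_cond() || freezing(current), my_timeout);
	/* ... finish the work others are waiting for ... */
	try_to_freeze();	/* now it is safe to park in the refrigerator */
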
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0b394580d860..0cc20b35c1c4 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -334,7 +334,7 @@ again:
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f44b3928dc2d..f99a099a7747 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@ sleep:
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    !btrfs_transaction_blocked(root->fs_info))
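
Note: because try_to_freeze() now returns whether the task was actually frozen, both btrfs threads can collapse the old three-way structure into a single test: if we just came back from the refrigerator, loop and re-check the world; otherwise go to sleep normally. The resulting loop shape, sketched (do_periodic_work is illustrative):

	for (;;) {
		do_periodic_work();

		if (!try_to_freeze()) {		/* true means we just thawed */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
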
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3e1329e2f826..d0666c8d15f2 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2883,8 +2883,7 @@ cont_thread:
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 517f211a3bd4..30f78bb16afb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -937,7 +937,7 @@ int bdi_writeback_thread(void *data)
 
 	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
 		/*
 		 * Remove own delayed wake-up timer, since we are already awake
 		 * and we'll take care of the preriodic write-back.
@@ -967,8 +967,6 @@ int bdi_writeback_thread(void *data)
 			 */
 			schedule();
 		}
-
-		try_to_freeze();
 	}
 
 	/* Flush any work that raced with us exiting */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 598646434362..8154d42e4647 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
 			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 7e528dc14f85..d49669e92652 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index fea8dd661d2b..a96cff0c5f1d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -166,7 +166,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald\n");
 		spin_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0fa0123151d3..c0a5f9f1b127 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -173,7 +173,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald2\n");
 		write_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		write_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cc5f811ed383..2eb952c41a69 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index af9606057dde..bb8b661bcc50 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 50a15fa8cf98..bf3a57bbbfcf 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,6 +38,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/freezer.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d4bc9ed91748..91943953a370 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -17,6 +17,7 @@
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
 #include <linux/nfs_mount.h>
+#include <linux/freezer.h>
 
 #include "iostat.h"
 #include "internal.h"
@@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d9f4d78c3413..dcda0ba7af60 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -55,6 +55,7 @@
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
+#include <linux/freezer.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f48125da198a..0c672588fe5a 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -41,6 +41,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
+#include <linux/freezer.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
@@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
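
Note: all four NFS hunks share one idea from the "don't allow TASK_KILLABLE sleeps to block the freezer" patch: retry delays now go through freezable_schedule_timeout_killable(), which brackets the sleep with freezer_do_not_count()/freezer_count() so a client waiting on an unresponsive server no longer makes freeze_processes() fail. The common retry shape, sketched (RPC setup elided):

	do {
		res = rpc_call_sync(clnt, msg, flags);
		if (res != -EJUKEBOX)
			break;
		/* killable sleep that the freezer does not wait for */
		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
		res = -ERESTARTSYS;
	} while (!fatal_signal_pending(current));
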
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bb24ab6c282f..0e72ad6f22aa 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock(&sci->sc_state_lock);
-			refrigerator();
+			try_to_freeze();
 			spin_lock(&sci->sc_state_lock);
 		} else {
 			DEFINE_WAIT(wait);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index cf0ac056815f..018829936d6d 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1703,7 +1703,7 @@ xfsbufd(
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 		}
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee756..7bcfe73d999b 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
 
 /*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
  */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete. However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
 */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
@@ -118,21 +99,27 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
 */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()					\
+({								\
+	freezer_do_not_count();					\
+	schedule();						\
+	freezer_count();					\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)		\
+({								\
+	freezer_do_not_count();					\
+	schedule_timeout_killable(timeout);			\
+	freezer_count();					\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +139,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)			\
 ({								\
	int __retval;						\
-	do {							\
+	for (;;) {						\
 		__retval = wait_event_interruptible(wq,		\
 				(condition) || freezing(current)); \
-		if (__retval && !freezing(current))		\
+		if (__retval || (condition))			\
 			break;					\
-		else if (!(condition))				\
-			__retval = -ERESTARTSYS;		\
-	} while (try_to_freeze());				\
+		try_to_freeze();				\
+	}							\
	__retval;						\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)	\
 ({								\
	long __retval = timeout;				\
-	do {							\
+	for (;;) {						\
 		__retval = wait_event_interruptible_timeout(wq,	\
 				(condition) || freezing(current), \
 				__retval);			\
-	} while (try_to_freeze());				\
+		if (__retval <= 0 || (condition))		\
+			break;					\
+		try_to_freeze();				\
+	}							\
	__retval;						\
 })
+
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)		\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)			\
	wait_event_interruptible(wq, condition)
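
Note: the reworked wait_event_freezable() changes caller-visible semantics: a freeze request wakes the sleeper with a fake signal, the loop freezes it via try_to_freeze(), and then re-evaluates the condition, so a plain freeze/thaw cycle no longer surfaces as -ERESTARTSYS. A hypothetical consumer, with all my_* names illustrative rather than taken from this merge:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

struct my_ctx {
	wait_queue_head_t wq;
	bool work_pending;
};

static int my_service_thread(void *data)
{
	struct my_ctx *ctx = data;

	set_freezable();
	while (!kthread_should_stop()) {
		/* sleeps interruptibly; freezes transparently on suspend */
		if (wait_event_freezable(ctx->wq,
					 ctx->work_pending ||
					 kthread_should_stop()))
			continue;	/* -ERESTARTSYS: a real signal */
		if (ctx->work_pending) {
			ctx->work_pending = false;
			/* ... process the queued work ... */
		}
	}
	return 0;
}
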
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f65390734..722f477c4ef7 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b3a266..0714b24c0e45 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 2a23f7d1a825..b5267c951161 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -264,62 +264,34 @@ static inline char *early_platform_driver_setup_func(void) \
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare	NULL
-#define platform_pm_complete	NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend		NULL
 #define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
 #define platform_pm_poweroff		NULL
 #define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
#else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
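
Note: after this cleanup the forward-only prepare/complete and *_noirq stubs are gone from the platform bus type; per the "Run the driver callback directly if the subsystem one is not there" change in this merge, the PM core falls back to the driver's own callbacks for those phases. An illustrative expansion of what the macro now contributes to the bus type's dev_pm_ops (the struct name here is a sketch, not a real symbol):

static const struct dev_pm_ops platform_pm_ops_sketch = {
	.suspend = platform_pm_suspend,
	.resume = platform_pm_resume,
	.freeze = platform_pm_freeze,
	.thaw = platform_pm_thaw,
	.poweroff = platform_pm_poweroff,
	.restore = platform_pm_restore,
};
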
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3f3ed83a9aa5..21e04dd72a84 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS	NULL
-#endif
-
 /**
  * PM_EVENT_ messages
  *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9b9bc5..d81cce933869 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 			((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-			 (task->flags & PF_FREEZING) == 0)
+			 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a692432f8a..95040cc33107 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*
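
Note: the relocated lock_system_sleep() is now freezer-aware: the caller is marked PF_FREEZER_SKIP while it blocks on pm_mutex, so a task stuck behind a suspend in progress cannot deadlock the freezer, and freezer_count() on unlock freezes it at a safe point if a freeze was requested meanwhile. A usage sketch for code that must exclude system sleep (my_memory_hotadd is illustrative):

	lock_system_sleep();	/* won't deadlock the freezer while blocked */
	my_memory_hotadd();
	unlock_system_sleep();	/* may enter the refrigerator right here */
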
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 213c0351dad8..fcb93fca782d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
 * write_lock css_set_lock (cgroup iterator start)
 *  task->alloc_lock
 * read_lock css_set_lock (cgroup iterator start)
- *  task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *  task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
 *   sighand->siglock
 */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
 
@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
 
@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (is_task_frozen_enough(task))
 			continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
-out:
+
 	spin_unlock_irq(&freezer->lock);
 
 	return retval;
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f873..95a4141d07e7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
diff --git a/kernel/fork.c b/kernel/fork.c
index da4a6a10d088..827808613847 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c534397..9815b8d1eed5 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state. Can be
+ * called under any context. The freezers are responsible for ensuring the
+ * target tasks see the updated state.
 */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning. The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- *	PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
+ *
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
 */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
-
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug(" clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
+	/*
+	 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb. Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
+		wake_up_process(p);
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
 
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
 *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
 */
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
 {
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock. This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
 }
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc0829286..090ee10d9604 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
-		mutex_lock(&pm_mutex);
+		lock_system_sleep();
 		pm_prepare_console();
 		error = freeze_processes();
 		if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
 		thaw_processes();
 Restore_console:
 		pm_restore_console();
-		mutex_unlock(&pm_mutex);
+		unlock_system_sleep();
 	}
 #endif
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2142687094d3..a0a88543934e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -34,6 +34,9 @@
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/resource.h> 36#include <linux/resource.h>
37#include <linux/notifier.h>
38#include <linux/suspend.h>
39#include <linux/rwsem.h>
37#include <asm/uaccess.h> 40#include <asm/uaccess.h>
38 41
39#include <trace/events/module.h> 42#include <trace/events/module.h>
@@ -48,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
48static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; 51static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
49static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; 52static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
50static DEFINE_SPINLOCK(umh_sysctl_lock); 53static DEFINE_SPINLOCK(umh_sysctl_lock);
54static DECLARE_RWSEM(umhelper_sem);
51 55
52#ifdef CONFIG_MODULES 56#ifdef CONFIG_MODULES
53 57
@@ -273,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
  * (used for preventing user land processes from being created after the user
  * land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
  */
 static int usermodehelper_disabled = 1;
 
@@ -291,6 +296,18 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
  */
 #define RUNNING_HELPERS_TIMEOUT (5 * HZ)
 
+void read_lock_usermodehelper(void)
+{
+        down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+        up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
 /**
  * usermodehelper_disable - prevent new helpers from being started
  */
@@ -298,8 +315,10 @@ int usermodehelper_disable(void)
 {
         long retval;
 
+        down_write(&umhelper_sem);
         usermodehelper_disabled = 1;
-        smp_mb();
+        up_write(&umhelper_sem);
+
         /*
          * From now on call_usermodehelper_exec() won't start any new
          * helpers, so it is sufficient if running_helpers turns out to
@@ -312,7 +331,9 @@ int usermodehelper_disable(void)
         if (retval)
                 return 0;
 
+        down_write(&umhelper_sem);
         usermodehelper_disabled = 0;
+        up_write(&umhelper_sem);
         return -EAGAIN;
 }
 
@@ -321,7 +342,9 @@ int usermodehelper_disable(void)
  */
 void usermodehelper_enable(void)
 {
+        down_write(&umhelper_sem);
         usermodehelper_disabled = 0;
+        up_write(&umhelper_sem);
 }
 
 /**
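
The rwsem closes the race the branch log calls out: usermodehelper_is_disabled() could be sampled just before the flag flipped, letting a helper start after user space was frozen and deadlocking the freezer. Readers are expected to bracket both the check and the helper invocation, roughly as the firmware loader does after this series (a sketch; the caller function is hypothetical and error handling is trimmed):

    #include <linux/kmod.h>

    /* Hypothetical caller that must not race with usermodehelper_disable(). */
    static int example_start_helper(char *path, char **argv, char **envp)
    {
            int ret;

            read_lock_usermodehelper();
            if (usermodehelper_is_disabled()) {
                    read_unlock_usermodehelper();
                    return -EBUSY;        /* suspend/hibernate in progress */
            }
            /* usermodehelper_disabled cannot change while we hold the rwsem. */
            ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
            read_unlock_usermodehelper();

            return ret;
    }
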
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a92639..3d3de633702e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@ int kthread_should_stop(void)
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+        bool frozen = false;
+
+        might_sleep();
+
+        if (unlikely(freezing(current)))
+                frozen = __refrigerator(true);
+
+        if (was_frozen)
+                *was_frozen = frozen;
+
+        return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
  *
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
         set_cpus_allowed_ptr(tsk, cpu_all_mask);
         set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+        current->flags |= PF_NOFREEZE;
 
         for (;;) {
                 set_current_state(TASK_INTERRUPTIBLE);
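
kthread_freezable_should_stop() folds the freeze point into the stop check, so a freezable kthread cannot deadlock against kthread_stop() while sitting in the refrigerator. A typical main loop after this series looks roughly like this (a sketch; the thread function is hypothetical):

    #include <linux/freezer.h>
    #include <linux/kthread.h>

    /* Hypothetical freezable worker built on the new helper. */
    static int example_flusher_thread(void *data)
    {
            set_freezable();

            while (!kthread_freezable_should_stop(NULL)) {
                    /* ... flush some work ... */
                    schedule_timeout_interruptible(10 * HZ);
            }
            return 0;
    }
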
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a6b0503574ee..6d6d28870335 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
 enum {
         HIBERNATION_INVALID,
         HIBERNATION_PLATFORM,
-        HIBERNATION_TEST,
-        HIBERNATION_TESTPROC,
         HIBERNATION_SHUTDOWN,
         HIBERNATION_REBOOT,
         /* keep last */
@@ -55,7 +53,7 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static bool freezer_test_done;
+bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
                 WARN_ON(1);
                 return;
         }
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
         hibernation_ops = ops;
         if (ops)
                 hibernation_mode = HIBERNATION_PLATFORM;
         else if (hibernation_mode == HIBERNATION_PLATFORM)
                 hibernation_mode = HIBERNATION_SHUTDOWN;
 
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 }
 
 static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
         mdelay(5000);
 }
 
-static int hibernation_testmode(int mode)
-{
-        if (hibernation_mode == mode) {
-                hibernation_debug_sleep();
-                return 1;
-        }
-        return 0;
-}
-
 static int hibernation_test(int level)
 {
         if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
         return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
                 goto Platform_finish;
 
         error = disable_nonboot_cpus();
-        if (error || hibernation_test(TEST_CPUS)
-            || hibernation_testmode(HIBERNATION_TEST))
+        if (error || hibernation_test(TEST_CPUS))
                 goto Enable_cpus;
 
         local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
  */
 int hibernation_snapshot(int platform_mode)
 {
-        pm_message_t msg = PMSG_RECOVER;
+        pm_message_t msg;
         int error;
 
         error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
         if (error)
                 goto Cleanup;
 
-        if (hibernation_test(TEST_FREEZER) ||
-                hibernation_testmode(HIBERNATION_TESTPROC)) {
+        if (hibernation_test(TEST_FREEZER)) {
 
                 /*
                  * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)
 
         error = dpm_prepare(PMSG_FREEZE);
         if (error) {
-                dpm_complete(msg);
+                dpm_complete(PMSG_RECOVER);
                 goto Cleanup;
         }
 
         suspend_console();
         pm_restrict_gfp_mask();
+
         error = dpm_suspend(PMSG_FREEZE);
-        if (error)
-                goto Recover_platform;
 
-        if (hibernation_test(TEST_DEVICES))
-                goto Recover_platform;
+        if (error || hibernation_test(TEST_DEVICES))
+                platform_recover(platform_mode);
+        else
+                error = create_image(platform_mode);
 
-        error = create_image(platform_mode);
         /*
-         * Control returns here (1) after the image has been created or the
+         * In the case that we call create_image() above, the control
+         * returns here (1) after the image has been created or the
          * image creation has failed and (2) after a successful restore.
          */
 
- Resume_devices:
         /* We may need to release the preallocated image pages here. */
         if (error || !in_suspend)
                 swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
         platform_end(platform_mode);
         return error;
 
- Recover_platform:
-        platform_recover(platform_mode);
-        goto Resume_devices;
-
  Cleanup:
         swsusp_free();
         goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
 static void power_down(void)
 {
         switch (hibernation_mode) {
-        case HIBERNATION_TEST:
-        case HIBERNATION_TESTPROC:
-                break;
         case HIBERNATION_REBOOT:
                 kernel_restart(NULL);
                 break;
@@ -611,17 +590,6 @@ static void power_down(void)
         while(1);
 }
 
-static int prepare_processes(void)
-{
-        int error = 0;
-
-        if (freeze_processes()) {
-                error = -EBUSY;
-                thaw_processes();
-        }
-        return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -629,7 +597,7 @@ int hibernate(void)
 {
         int error;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
         /* The snapshot device should not be opened while we're running */
         if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
                 error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
         sys_sync();
         printk("done.\n");
 
-        error = prepare_processes();
+        error = freeze_processes();
         if (error)
                 goto Finish;
 
@@ -697,7 +665,7 @@ int hibernate(void)
         pm_restore_console();
         atomic_inc(&snapshot_device_available);
  Unlock:
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
         return error;
 }
 
@@ -811,11 +779,13 @@ static int software_resume(void)
                 goto close_finish;
 
         error = create_basic_memory_bitmaps();
-        if (error)
+        if (error) {
+                usermodehelper_enable();
                 goto close_finish;
+        }
 
         pr_debug("PM: Preparing processes for restore.\n");
-        error = prepare_processes();
+        error = freeze_processes();
         if (error) {
                 swsusp_close(FMODE_READ);
                 goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
         [HIBERNATION_PLATFORM]  = "platform",
         [HIBERNATION_SHUTDOWN]  = "shutdown",
         [HIBERNATION_REBOOT]    = "reboot",
-        [HIBERNATION_TEST]      = "test",
-        [HIBERNATION_TESTPROC]  = "testproc",
 };
 
 /*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
  * Hibernation can be handled in several ways. There are a few different ways
  * to put the system into the sleep state: using the platform driver (e.g. ACPI
  * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
  *
  * The sysfs file /sys/power/disk provides an interface for selecting the
  * hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 5 modes that can be supported:
+ * to be printed. There are 3 modes that can be supported:
  *
  * 'platform'
  * 'shutdown'
  * 'reboot'
- * 'test'
- * 'testproc'
  *
  * If a platform hibernation driver is in use, 'platform' will be supported
  * and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
                 switch (i) {
                 case HIBERNATION_SHUTDOWN:
                 case HIBERNATION_REBOOT:
-                case HIBERNATION_TEST:
-                case HIBERNATION_TESTPROC:
                         break;
                 case HIBERNATION_PLATFORM:
                         if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
         p = memchr(buf, '\n', n);
         len = p ? p - buf : n;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
         for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
                 if (len == strlen(hibernation_modes[i])
                     && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
         switch (mode) {
         case HIBERNATION_SHUTDOWN:
         case HIBERNATION_REBOOT:
-        case HIBERNATION_TEST:
-        case HIBERNATION_TESTPROC:
                 hibernation_mode = mode;
                 break;
         case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
         if (!error)
                 pr_debug("PM: Hibernation mode set to '%s'\n",
                          hibernation_modes[mode]);
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
         return error ? error : n;
 }
 
@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
         if (maj != MAJOR(res) || min != MINOR(res))
                 goto out;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
         swsusp_resume_device = res;
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
         printk(KERN_INFO "PM: Starting manual resume from disk\n");
         noresume = 0;
         software_resume();
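
Two strands of the series meet in this file: prepare_processes() disappears because freeze_processes() now thaws everything itself on failure (its new kerneldoc above says so explicitly), and the 'test'/'testproc' modes give way to the /sys/power/pm_test levels. The simplified caller contract, sketched rather than quoted from any one hunk:

    /* Before: each caller had to unwind a partial freeze itself. */
    error = prepare_processes();        /* freeze_processes() + thaw on failure */

    /* After: a failed freeze leaves nothing frozen behind. */
    error = freeze_processes();
    if (error)
            goto Finish;                /* no thaw_processes() needed here */
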
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f0903c32..9824b41e5a18 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2003 Patrick Mochel
  * Copyright (c) 2003 Open Source Development Lab
- * 
+ *
  * This file is released under the GPLv2
  *
  */
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
         p = memchr(buf, '\n', n);
         len = p ? p - buf : n;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
 
         level = TEST_FIRST;
         for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
                         break;
                 }
 
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 
         return error ? error : n;
 }
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
  *      'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
  *      'disk' (Suspend-to-Disk).
  *
- * store() accepts one of those strings, translates it into the 
+ * store() accepts one of those strings, translates it into the
  * proper enumerated value, and initiates a suspend transition.
  */
 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
         /* First, check if we are requested to hibernate */
         if (len == 4 && !strncmp(buf, "disk", len)) {
                 error = hibernate();
-                goto Exit; 
+                goto Exit;
         }
 
 #ifdef CONFIG_SUSPEND
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 23a2db1ec442..0c4defe6d3b8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 #define SPARE_PAGES     ((1024 * 1024) >> PAGE_SHIFT)
 
 /* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5531bc..77274c9ba2f1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
  */
 #define TIMEOUT (20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-        if ((p == current) ||
-            (p->flags & PF_NOFREEZE) ||
-            (p->exit_state != 0))
-                return 0;
-        return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
         struct task_struct *g, *p;
         unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
 
         end_time = jiffies + TIMEOUT;
 
-        if (!sig_only)
+        if (!user_only)
                 freeze_workqueues_begin();
 
         while (true) {
                 todo = 0;
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        if (frozen(p) || !freezable(p))
-                                continue;
-
-                        if (!freeze_task(p, sig_only))
+                        if (p == current || !freeze_task(p))
                                 continue;
 
                         /*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
 
-                if (!sig_only) {
+                if (!user_only) {
                         wq_busy = freeze_workqueues_busy();
                         todo += wq_busy;
                 }
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
         elapsed_csecs = elapsed_csecs64;
 
         if (todo) {
-                /* This does not unfreeze processes that are already frozen
-                 * (we have slightly ugly calling convention in that respect,
-                 * and caller must call thaw_processes() if something fails),
-                 * but it cleans up leftover PF_FREEZE requests.
-                 */
                 printk("\n");
                 printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
                        "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
                        elapsed_csecs / 100, elapsed_csecs % 100,
                        todo - wq_busy, wq_busy);
 
-                thaw_workqueues();
-
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        task_lock(p);
-                        if (!wakeup && freezing(p) && !freezer_should_skip(p))
+                        if (!wakeup && !freezer_should_skip(p) &&
+                            p != current && freezing(p) && !frozen(p))
                                 sched_show_task(p);
-                        cancel_freezing(p);
-                        task_unlock(p);
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
         } else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
         int error;
 
+        if (!pm_freezing)
+                atomic_inc(&system_freezing_cnt);
+
         printk("Freezing user space processes ... ");
+        pm_freezing = true;
         error = try_to_freeze_tasks(true);
         if (!error) {
                 printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
         printk("\n");
         BUG_ON(in_atomic());
 
+        if (error)
+                thaw_processes();
         return error;
 }
 
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
         int error;
 
         printk("Freezing remaining freezable tasks ... ");
+        pm_nosig_freezing = true;
         error = try_to_freeze_tasks(false);
         if (!error)
                 printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
         printk("\n");
         BUG_ON(in_atomic());
 
+        if (error)
+                thaw_processes();
         return error;
 }
 
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
 {
         struct task_struct *g, *p;
 
-        read_lock(&tasklist_lock);
-        do_each_thread(g, p) {
-                if (!freezable(p))
-                        continue;
+        if (pm_freezing)
+                atomic_dec(&system_freezing_cnt);
+        pm_freezing = false;
+        pm_nosig_freezing = false;
 
-                if (nosig_only && should_send_signal(p))
-                        continue;
+        oom_killer_enable();
+
+        printk("Restarting tasks ... ");
 
-                if (cgroup_freezing_or_frozen(p))
-                        continue;
+        thaw_workqueues();
 
-                thaw_process(p);
+        read_lock(&tasklist_lock);
+        do_each_thread(g, p) {
+                __thaw_task(p);
         } while_each_thread(g, p);
         read_unlock(&tasklist_lock);
-}
 
-void thaw_processes(void)
-{
-        oom_killer_enable();
-
-        printk("Restarting tasks ... ");
-        thaw_workqueues();
-        thaw_tasks(true);
-        thaw_tasks(false);
         schedule();
         printk("done.\n");
 }
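
try_to_freeze_tasks() now defers per-task policy to the rewritten core freezer: freeze_task() decides whether a task needs a fake signal or just the flag, while pm_freezing, pm_nosig_freezing and system_freezing_cnt let freezing() stay cheap when no freeze is in progress. The fast path looks essentially like this in the reworked <linux/freezer.h> (a sketch paraphrasing the series):

    extern atomic_t system_freezing_cnt;   /* nr of freezing conditions in effect */

    /* Is there a request to freeze this task?  Cheap when nothing is freezing. */
    static inline bool freezing(struct task_struct *p)
    {
            if (likely(!atomic_read(&system_freezing_cnt)))
                    return false;
            return freezing_slow_path(p);  /* consults pm_freezing and friends */
    }
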
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4953dc054c53..4fd51beed879 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
  */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
         suspend_ops = ops;
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);
 
@@ -106,13 +106,11 @@ static int suspend_prepare(void)
                 goto Finish;
 
         error = suspend_freeze_processes();
-        if (error) {
-                suspend_stats.failed_freeze++;
-                dpm_save_failed_step(SUSPEND_FREEZE);
-        } else
+        if (!error)
                 return 0;
 
-        suspend_thaw_processes();
+        suspend_stats.failed_freeze++;
+        dpm_save_failed_step(SUSPEND_FREEZE);
         usermodehelper_enable();
  Finish:
         pm_notifier_call_chain(PM_POST_SUSPEND);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6d8f535c2b88..78bdb4404aab 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -30,28 +30,6 @@
 
 #include "power.h"
 
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future.  They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE  _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS          _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE   1
-#define PMOPS_ENTER     2
-#define PMOPS_FINISH    3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones.  They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT        _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE         _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP             _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE          _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
 
 #define SNAPSHOT_MINOR  231
 
@@ -71,7 +49,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
         struct snapshot_data *data;
         int error;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
 
         if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
                 error = -EBUSY;
@@ -123,7 +101,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
         data->platform_support = 0;
 
  Unlock:
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 
         return error;
 }
@@ -132,7 +110,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 {
         struct snapshot_data *data;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
 
         swsusp_free();
         free_basic_memory_bitmaps();
@@ -146,7 +124,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
                         PM_POST_HIBERNATION : PM_POST_RESTORE);
         atomic_inc(&snapshot_device_available);
 
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 
         return 0;
 }
@@ -158,7 +136,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
         ssize_t res;
         loff_t pg_offp = *offp & ~PAGE_MASK;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
 
         data = filp->private_data;
         if (!data->ready) {
@@ -179,7 +157,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
         *offp += res;
 
  Unlock:
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 
         return res;
 }
@@ -191,7 +169,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
         ssize_t res;
         loff_t pg_offp = *offp & ~PAGE_MASK;
 
-        mutex_lock(&pm_mutex);
+        lock_system_sleep();
 
         data = filp->private_data;
 
@@ -208,20 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
         if (res > 0)
                 *offp += res;
 unlock:
-        mutex_unlock(&pm_mutex);
+        unlock_system_sleep();
 
         return res;
 }
 
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
-        if (printk_ratelimit())
-                printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
-                                "be removed soon, update your suspend-to-disk "
-                                "utilities\n",
-                                __builtin_return_address(0), cmd);
-}
-
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                            unsigned long arg)
 {
@@ -257,11 +226,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                         break;
 
                 error = freeze_processes();
-                if (error) {
-                        thaw_processes();
+                if (error)
                         usermodehelper_enable();
-                }
-                if (!error)
+                else
                         data->frozen = 1;
                 break;
 
@@ -274,8 +241,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 data->frozen = 0;
                 break;
 
-        case SNAPSHOT_ATOMIC_SNAPSHOT:
-                snapshot_deprecated_ioctl(cmd);
         case SNAPSHOT_CREATE_IMAGE:
                 if (data->mode != O_RDONLY || !data->frozen || data->ready) {
                         error = -EPERM;
@@ -283,10 +248,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 }
                 pm_restore_gfp_mask();
                 error = hibernation_snapshot(data->platform_support);
-                if (!error)
+                if (!error) {
                         error = put_user(in_suspend, (int __user *)arg);
-                        if (!error)
+                        if (!error && !freezer_test_done)
                                 data->ready = 1;
+                        if (freezer_test_done) {
+                                freezer_test_done = false;
+                                thaw_processes();
+                        }
+                }
                 break;
 
         case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +275,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 data->ready = 0;
                 break;
 
-        case SNAPSHOT_SET_IMAGE_SIZE:
-                snapshot_deprecated_ioctl(cmd);
         case SNAPSHOT_PREF_IMAGE_SIZE:
                 image_size = arg;
                 break;
@@ -321,16 +289,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 error = put_user(size, (loff_t __user *)arg);
                 break;
 
-        case SNAPSHOT_AVAIL_SWAP:
-                snapshot_deprecated_ioctl(cmd);
         case SNAPSHOT_AVAIL_SWAP_SIZE:
                 size = count_swap_pages(data->swap, 1);
                 size <<= PAGE_SHIFT;
                 error = put_user(size, (loff_t __user *)arg);
                 break;
 
-        case SNAPSHOT_GET_SWAP_PAGE:
-                snapshot_deprecated_ioctl(cmd);
         case SNAPSHOT_ALLOC_SWAP_PAGE:
                 if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
                         error = -ENODEV;
@@ -353,27 +317,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 free_all_swap_pages(data->swap);
                 break;
 
-        case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
-                snapshot_deprecated_ioctl(cmd);
-                if (!swsusp_swap_in_use()) {
-                        /*
-                         * User space encodes device types as two-byte values,
-                         * so we need to recode them
-                         */
-                        if (old_decode_dev(arg)) {
-                                data->swap = swap_type_of(old_decode_dev(arg),
-                                                          0, NULL);
-                                if (data->swap < 0)
-                                        error = -ENODEV;
-                        } else {
-                                data->swap = -1;
-                                error = -EINVAL;
-                        }
-                } else {
-                        error = -EPERM;
-                }
-                break;
-
         case SNAPSHOT_S2RAM:
                 if (!data->frozen) {
                         error = -EPERM;
@@ -396,33 +339,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 error = hibernation_platform_enter();
                 break;
 
-        case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
-                snapshot_deprecated_ioctl(cmd);
-                error = -EINVAL;
-
-                switch (arg) {
-
-                case PMOPS_PREPARE:
-                        data->platform_support = 1;
-                        error = 0;
-                        break;
-
-                case PMOPS_ENTER:
-                        if (data->platform_support)
-                                error = hibernation_platform_enter();
-                        break;
-
-                case PMOPS_FINISH:
-                        if (data->platform_support)
-                                error = 0;
-                        break;
-
-                default:
-                        printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
-                }
-                break;
-
         case SNAPSHOT_SET_SWAP_AREA:
                 if (swsusp_swap_in_use()) {
                         error = -EPERM;
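
With the deprecated ioctls gone, a suspend-to-disk utility drives /dev/snapshot with the current names only. A minimal userspace sketch of the image-creation handshake (error reporting and the actual write-out of the image to swap are omitted; the SNAPSHOT_* constants come from <linux/suspend_ioctls.h>):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/suspend_ioctls.h>

    int main(void)
    {
            int in_suspend = 0;
            int fd = open("/dev/snapshot", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, SNAPSHOT_FREEZE))                 /* freeze user space */
                    goto out;
            if (ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend))
                    goto thaw;
            if (in_suspend) {
                    /* ... read(fd, ...) the image and write it to swap ... */
            }
    thaw:
            ioctl(fd, SNAPSHOT_UNFREEZE);
    out:
            close(fd);
            return 0;
    }
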
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 71034f41a2ba..7ba8feae11b8 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 
         /*
          * Finally, kill the kernel thread. We don't need to be RCU
-         * safe anymore, since the bdi is gone from visibility. Force
-         * unfreeze of the thread before calling kthread_stop(), otherwise
-         * it would never exet if it is currently stuck in the refrigerator.
+         * safe anymore, since the bdi is gone from visibility.
          */
-        if (bdi->wb.task) {
-                thaw_process(bdi->wb.task);
+        if (bdi->wb.task)
                 kthread_stop(bdi->wb.task);
-        }
 }
 
 /*
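
The explicit thaw became unnecessary because the rewritten refrigerator lets a frozen kthread leave on its own once kthread_stop() has been called for it. Simplified, the core loop from this series behaves like the sketch below (the real __refrigerator() also manages PF_FROZEN under freezer_lock):

    /* Simplified shape of __refrigerator() after this series. */
    bool __refrigerator(bool check_kthr_stop)
    {
            bool was_frozen = false;

            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (!freezing(current) ||
                        (check_kthr_stop && kthread_should_stop()))
                            break;
                    was_frozen = true;
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
            return was_frozen;
    }
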
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 069b64e521fc..eeb27e27dce3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
          */
         if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
                 if (unlikely(frozen(p)))
-                        thaw_process(p);
+                        __thaw_task(p);
                 return ERR_PTR(-1UL);
         }
         if (!p->mm)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 00a1a2acd587..3341d8962786 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <linux/sunrpc/clnt.h>
 
@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
 {
         if (fatal_signal_pending(current))
                 return -ERESTARTSYS;
-        schedule();
+        freezable_schedule();
         return 0;
 }
 
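
freezable_schedule() lets a TASK_KILLABLE RPC wait park for the freezer instead of blocking it, which is what the "don't allow TASK_KILLABLE sleeps to block the freezer" patch in this branch is after. The helper is introduced in include/linux/freezer.h essentially as follows (a sketch of the series' definition):

    /*
     * Like schedule(), but tells the freezer not to count us while we
     * sleep, so a task blocked in an RPC wait cannot wedge suspend.
     */
    #define freezable_schedule()                \
    ({                                          \
            freezer_do_not_count();             \
            schedule();                         \
            freezer_count();                    \
    })
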