-rw-r--r--  Documentation/feature-removal-schedule.txt    |   11
-rw-r--r--  Documentation/power/devices.txt               |   37
-rw-r--r--  Documentation/power/freezing-of-tasks.txt     |   39
-rw-r--r--  Documentation/power/runtime_pm.txt            |  130
-rw-r--r--  arch/alpha/include/asm/thread_info.h          |    2
-rw-r--r--  arch/arm/include/asm/thread_info.h            |    2
-rw-r--r--  arch/arm/mach-s3c64xx/Kconfig                 |    1
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c         |    2
-rw-r--r--  arch/arm/mach-s3c64xx/pm.c                    |  176
-rw-r--r--  arch/arm/mach-shmobile/include/mach/common.h  |    4
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h  |    6
-rw-r--r--  arch/arm/mach-shmobile/intc-sh7372.c          |   50
-rw-r--r--  arch/arm/mach-shmobile/pm-sh7372.c            |  196
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c         |    6
-rw-r--r--  arch/arm/mach-shmobile/sleep-sh7372.S         |   21
-rw-r--r--  arch/arm/plat-samsung/include/plat/pm.h       |    6
-rw-r--r--  arch/avr32/include/asm/thread_info.h          |    2
-rw-r--r--  arch/blackfin/include/asm/thread_info.h       |    2
-rw-r--r--  arch/cris/include/asm/thread_info.h           |    2
-rw-r--r--  arch/frv/include/asm/thread_info.h            |    2
-rw-r--r--  arch/h8300/include/asm/thread_info.h          |    2
-rw-r--r--  arch/ia64/include/asm/thread_info.h           |    2
-rw-r--r--  arch/m32r/include/asm/thread_info.h           |    2
-rw-r--r--  arch/m68k/include/asm/thread_info.h           |    1
-rw-r--r--  arch/microblaze/include/asm/thread_info.h     |    2
-rw-r--r--  arch/mips/include/asm/thread_info.h           |    2
-rw-r--r--  arch/mn10300/include/asm/thread_info.h        |    2
-rw-r--r--  arch/parisc/include/asm/thread_info.h         |    2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h        |    2
-rw-r--r--  arch/powerpc/kernel/vio.c                     |    1
-rw-r--r--  arch/s390/include/asm/thread_info.h           |    2
-rw-r--r--  arch/sh/include/asm/thread_info.h             |    2
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h       |    2
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h       |    2
-rw-r--r--  arch/um/include/asm/thread_info.h             |    2
-rw-r--r--  arch/unicore32/include/asm/thread_info.h      |    2
-rw-r--r--  arch/x86/include/asm/thread_info.h            |    2
-rw-r--r--  arch/xtensa/include/asm/thread_info.h         |    2
-rw-r--r--  drivers/acpi/sleep.c                          |   16
-rw-r--r--  drivers/amba/bus.c                            |  136
-rw-r--r--  drivers/base/firmware_class.c                 |    4
-rw-r--r--  drivers/base/platform.c                       |  115
-rw-r--r--  drivers/base/power/Makefile                   |    2
-rw-r--r--  drivers/base/power/domain.c                   |  539
-rw-r--r--  drivers/base/power/domain_governor.c          |  156
-rw-r--r--  drivers/base/power/generic_ops.c              |   91
-rw-r--r--  drivers/base/power/main.c                     |  375
-rw-r--r--  drivers/base/power/qos.c                      |   49
-rw-r--r--  drivers/base/power/runtime.c                  |  157
-rw-r--r--  drivers/bluetooth/btmrvl_main.c               |    2
-rw-r--r--  drivers/devfreq/Kconfig                       |   13
-rw-r--r--  drivers/devfreq/Makefile                      |    3
-rw-r--r--  drivers/devfreq/devfreq.c                     |   15
-rw-r--r--  drivers/devfreq/exynos4_bus.c                 | 1135
-rw-r--r--  drivers/dma/dmatest.c                         |   46
-rw-r--r--  drivers/input/touchscreen/st1232.c            |   13
-rw-r--r--  drivers/mfd/twl6030-irq.c                     |    2
-rw-r--r--  drivers/net/irda/stir4200.c                   |    2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c          |   15
-rw-r--r--  drivers/sh/intc/core.c                        |    8
-rw-r--r--  drivers/sh/intc/internals.h                   |    1
-rw-r--r--  drivers/staging/rts_pstor/rtsx.c              |    2
-rw-r--r--  drivers/usb/storage/usb.c                     |   13
-rw-r--r--  fs/btrfs/async-thread.c                       |    2
-rw-r--r--  fs/btrfs/disk-io.c                            |    8
-rw-r--r--  fs/ext4/super.c                               |    3
-rw-r--r--  fs/fs-writeback.c                             |    4
-rw-r--r--  fs/gfs2/log.c                                 |    4
-rw-r--r--  fs/gfs2/quota.c                               |    4
-rw-r--r--  fs/jbd/journal.c                              |    2
-rw-r--r--  fs/jbd2/journal.c                             |    2
-rw-r--r--  fs/jfs/jfs_logmgr.c                           |    2
-rw-r--r--  fs/jfs/jfs_txnmgr.c                           |    4
-rw-r--r--  fs/nfs/inode.c                                |    3
-rw-r--r--  fs/nfs/nfs3proc.c                             |    3
-rw-r--r--  fs/nfs/nfs4proc.c                             |    5
-rw-r--r--  fs/nfs/proc.c                                 |    3
-rw-r--r--  fs/nilfs2/segment.c                           |    2
-rw-r--r--  fs/xfs/xfs_buf.c                              |    2
-rw-r--r--  include/linux/freezer.h                       |  159
-rw-r--r--  include/linux/kmod.h                          |    2
-rw-r--r--  include/linux/kthread.h                       |    1
-rw-r--r--  include/linux/platform_device.h               |   30
-rw-r--r--  include/linux/pm.h                            |   15
-rw-r--r--  include/linux/pm_domain.h                     |  103
-rw-r--r--  include/linux/pm_qos.h                        |    8
-rw-r--r--  include/linux/pm_runtime.h                    |    5
-rw-r--r--  include/linux/sched.h                         |    4
-rw-r--r--  include/linux/sh_intc.h                       |    1
-rw-r--r--  include/linux/suspend.h                       |   35
-rw-r--r--  kernel/cgroup_freezer.c                       |   63
-rw-r--r--  kernel/cpu.c                                  |    4
-rw-r--r--  kernel/exit.c                                 |    3
-rw-r--r--  kernel/fork.c                                 |    1
-rw-r--r--  kernel/freezer.c                              |  203
-rw-r--r--  kernel/kexec.c                                |    4
-rw-r--r--  kernel/kmod.c                                 |   27
-rw-r--r--  kernel/kthread.c                              |   27
-rw-r--r--  kernel/power/hibernate.c                      |   92
-rw-r--r--  kernel/power/main.c                           |   10
-rw-r--r--  kernel/power/power.h                          |    2
-rw-r--r--  kernel/power/process.c                        |   77
-rw-r--r--  kernel/power/suspend.c                        |   12
-rw-r--r--  kernel/power/user.c                           |  184
-rw-r--r--  mm/backing-dev.c                              |    8
-rw-r--r--  mm/oom_kill.c                                 |    2
-rw-r--r--  net/sunrpc/sched.c                            |    3
107 files changed, 3249 insertions(+), 1530 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 33f7327d0451..a1e7f3eec98f 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
 
 ---------------------------
 
-What:	Deprecated snapshot ioctls
-When:	2.6.36
-
-Why:	The ioctls in kernel/power/user.c were marked as deprecated long time
-	ago. Now they notify users about that so that they need to replace
-	their userspace. After some more time, remove them completely.
-
-Who:	Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
 What:	The ieee80211_regdom module parameter
 When:	March 2010 / desktop catchup
 
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3139fb505dce..20af7def23c8 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
 pointed to by the ops member of struct dev_pm_domain, or by the pm member of
 struct bus_type, struct device_type and struct class. They are mostly of
 interest to the people writing infrastructure for platforms and buses, like PCI
-or USB, or device type and device class drivers.
+or USB, or device type and device class drivers. They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on. Not many people
@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
 unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
 been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
 
-All phases use PM domain, bus, type, or class callbacks (that is, methods
-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
-These callbacks are regarded by the PM core as mutually exclusive. Moreover,
-PM domain callbacks always take precedence over bus, type and class callbacks,
-while type callbacks take precedence over bus and class callbacks, and class
-callbacks take precedence over bus callbacks. To be precise, the following
-rules are used to determine which callback to execute in the given phase:
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm). These callbacks are regarded by the PM core as mutually
+exclusive. Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks. To be precise, the following rules are used to determine
+which callback to execute in the given phase:
 
-    1. If dev->pm_domain is present, the PM core will attempt to execute the
-       callback included in dev->pm_domain->ops. If that callback is not
-       present, no action will be carried out for the given device.
+    1. If dev->pm_domain is present, the PM core will choose the callback
+       included in dev->pm_domain->ops for execution
 
     2. Otherwise, if both dev->type and dev->type->pm are present, the callback
-       included in dev->type->pm will be executed.
+       included in dev->type->pm will be chosen for execution.
 
     3. Otherwise, if both dev->class and dev->class->pm are present, the
-       callback included in dev->class->pm will be executed.
+       callback included in dev->class->pm will be chosen for execution.
 
     4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
-       included in dev->bus->pm will be executed.
+       included in dev->bus->pm will be chosen for execution.
 
 This allows PM domains and device types to override callbacks provided by bus
 types or device classes if necessary.
 
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
+
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
 
 
 Entering System Suspend
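
The selection rules above can be condensed into a short sketch (illustrative
only, not the PM core's actual implementation; it also glosses over the
per-callback fallback to dev->driver->pm described in the last added
paragraph):

	#include <linux/device.h>
	#include <linux/pm.h>

	static const struct dev_pm_ops *pm_ops_for(struct device *dev)
	{
		if (dev->pm_domain)
			return &dev->pm_domain->ops;	/* rule 1 */
		if (dev->type && dev->type->pm)
			return dev->type->pm;		/* rule 2 */
		if (dev->class && dev->class->pm)
			return dev->class->pm;		/* rule 3 */
		if (dev->bus && dev->bus->pm)
			return dev->bus->pm;		/* rule 4 */
		/* no subsystem callbacks: the driver's own dev_pm_ops */
		return dev->driver ? dev->driver->pm : NULL;
	}
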
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba187f4..6ccb68f68da6 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default. However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged). From this point it is regarded as freezable
+directly is not allowed). From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
 A driver must have all firmwares it may need in RAM before suspend() is called.
 If keeping them is not practical, for example due to their size, they must be
 requested early enough using the suspend notifier API described in notifiers.txt.
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
+it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
+that could lead to freezing failures, because if the suspend/hibernate code
+successfully acquired the 'pm_mutex' lock, and hence that other entity failed
+to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
+state. As a consequence, the freezer would not be able to freeze that task,
+leading to freezing failure.
+
+However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
+since they ask the freezer to skip freezing this task, since it is anyway
+"frozen enough" as it is blocked on 'pm_mutex', which will be released
+only after the entire suspend/hibernation sequence is complete.
+So, to summarize, use [un]lock_system_sleep() instead of directly using
+mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
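
For reference, the usage pattern this document prescribes for freezable kernel
threads looks roughly like the following (a minimal sketch; my_thread_fn and
the work placeholder are hypothetical):

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int my_thread_fn(void *data)
	{
		set_freezable();	/* clear PF_NOFREEZE for this thread */

		while (!kthread_should_stop()) {
			/* enter __refrigerator() if TIF_FREEZE is set */
			try_to_freeze();

			/* ... do one unit of work ... */
		}
		return 0;
	}
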
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index c2ae8bf77d46..4abe83e1045a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -57,6 +57,10 @@ the following:
 
   4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
 
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
+
 The PM core always checks which callback to use in the order given above, so the
 priority order of callbacks from high to low is: PM domain, device type, class
 and bus type. Moreover, the high-priority one will always take precedence over
@@ -64,86 +68,88 @@ a low-priority one. The PM domain, bus type, device type and class callbacks
 are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
-enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
-->runtime_idle() callbacks may be invoked in atomic context with interrupts
-disabled for a given device. This implies that the callback routines in
-question must not block or sleep, but it also means that the synchronous helper
-functions listed at the end of Section 4 may be used for that device within an
-interrupt handler or generally in an atomic context.
+enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled. This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
 
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
 PM core's point of view it is not necessary to implement a ->runtime_suspend()
 callback in a device driver as long as the subsystem-level suspend callback
 knows what to do to handle the device).
 
-  * Once the subsystem-level suspend callback has completed successfully
-    for given device, the PM core regards the device as suspended, which need
-    not mean that the device has been put into a low power state. It is
-    supposed to mean, however, that the device will not process data and will
-    not communicate with the CPU(s) and RAM until the subsystem-level resume
-    callback is executed for it. The runtime PM status of a device after
-    successful execution of the subsystem-level suspend callback is 'suspended'.
-
-  * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
-    the device's runtime PM status is 'active', which means that the device
-    _must_ be fully operational afterwards.
-
-  * If the subsystem-level suspend callback returns an error code different
-    from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
-    refuse to run the helper functions described in Section 4 for the device,
-    until the status of it is directly set either to 'active', or to 'suspended'
-    (the PM core provides special helper functions for this purpose).
-
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+  * Once the subsystem-level suspend callback (or the driver suspend callback,
+    if invoked directly) has completed successfully for the given device, the PM
+    core regards the device as suspended, which need not mean that it has been
+    put into a low power state. It is supposed to mean, however, that the
+    device will not process data and will not communicate with the CPU(s) and
+    RAM until the appropriate resume callback is executed for it. The runtime
+    PM status of a device after successful execution of the suspend callback is
+    'suspended'.
+
+  * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+    status remains 'active', which means that the device _must_ be fully
+    operational afterwards.
+
+  * If the suspend callback returns an error code different from -EBUSY and
+    -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+    the helper functions described in Section 4 for the device until its status
+    is directly set to either 'active', or 'suspended' (the PM core provides
+    special helper functions for this purpose).
+
+In particular, if the driver requires remote wakeup capability (i.e. hardware
 mechanism allowing the device to request a change of its power state, such as
 PCI PME) for proper functioning and device_run_wake() returns 'false' for the
 device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device. Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
-
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
-
-  * Once the subsystem-level resume callback has completed successfully, the PM
-    core regards the device as fully operational, which means that the device
-    _must_ be able to complete I/O operations as needed. The runtime PM status
-    of the device is then 'active'.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device. Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
+
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
+
+  * Once the subsystem-level resume callback (or the driver resume callback, if
+    invoked directly) has completed successfully, the PM core regards the device
+    as fully operational, which means that the device _must_ be able to complete
+    I/O operations as needed. The runtime PM status of the device is then
+    'active'.
 
-  * If the subsystem-level resume callback returns an error code, the PM core
-    regards this as a fatal error and will refuse to run the helper functions
-    described in Section 4 for the device, until its status is directly set
-    either to 'active' or to 'suspended' (the PM core provides special helper
-    functions for this purpose).
+  * If the resume callback returns an error code, the PM core regards this as a
+    fatal error and will refuse to run the helper functions described in Section
+    4 for the device, until its status is directly set to either 'active', or
+    'suspended' (by means of special helper functions provided by the PM core
+    for this purpose).
 
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
 
   * If any of these counters is decreased using a helper function provided by
    the PM core and it turns out to be equal to zero, the other counter is
    checked. If that counter also is equal to zero, the PM core executes the
-    subsystem-level idle callback with the device as an argument.
+    idle callback with the device as its argument.
 
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case. The value returned by this callback is ignored by the PM
 core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
 
 (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
     ->runtime_suspend() in parallel with ->runtime_resume() or with another
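
A driver-side sketch of the remote-wakeup rule spelled out above (the foo_*
names and struct are hypothetical; only device_run_wake() and the -EBUSY
convention come from the document):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	struct foo {
		bool needs_wakeup;
		/* ... */
	};

	static void foo_save_registers(struct foo *foo) { /* ... */ }
	static void foo_enable_remote_wakeup(struct foo *foo) { /* ... */ }

	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		/* remote wakeup needed but unavailable: refuse, stay 'active' */
		if (foo->needs_wakeup && !device_run_wake(dev))
			return -EBUSY;

		foo_save_registers(foo);
		foo_enable_remote_wakeup(foo);
		return 0;	/* the PM core will mark the device 'suspended' */
	}
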
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db022342..28335bd40e40 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS		12	/* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	14	/* restore signal mask in do_signal */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06e..0f30c3a78fc1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 5552e048c2be..381586c7b1b2 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -8,6 +8,7 @@ config PLAT_S3C64XX
 	bool
 	depends on ARCH_S3C64XX
 	select SAMSUNG_WAKEMASK
+	select PM_GENERIC_DOMAINS
 	default y
 	help
 	  Base platform code for any Samsung S3C64XX device
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index f1c848aa4a1e..fb786b6a2eae 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -706,7 +706,7 @@ static void __init crag6410_machine_init(void)
 
 	regulator_has_full_constraints();
 
-	s3c_pm_init();
+	s3c64xx_pm_init();
 }
 
 MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index b375cd5c47cb..7d3e81b9dd06 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -17,10 +17,12 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/pm_domain.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
 
+#include <plat/devs.h>
 #include <plat/pm.h>
 #include <plat/wakeup-mask.h>
 
@@ -31,6 +33,148 @@
 #include <mach/regs-gpio-memport.h>
 #include <mach/regs-modem.h>
 
+struct s3c64xx_pm_domain {
+	char *const name;
+	u32 ena;
+	u32 pwr_stat;
+	struct generic_pm_domain pd;
+};
+
+static int s3c64xx_pd_off(struct generic_pm_domain *domain)
+{
+	struct s3c64xx_pm_domain *pd;
+	u32 val;
+
+	pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+	val = __raw_readl(S3C64XX_NORMAL_CFG);
+	val &= ~(pd->ena);
+	__raw_writel(val, S3C64XX_NORMAL_CFG);
+
+	return 0;
+}
+
+static int s3c64xx_pd_on(struct generic_pm_domain *domain)
+{
+	struct s3c64xx_pm_domain *pd;
+	u32 val;
+	long retry = 1000000L;
+
+	pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+	val = __raw_readl(S3C64XX_NORMAL_CFG);
+	val |= pd->ena;
+	__raw_writel(val, S3C64XX_NORMAL_CFG);
+
+	/* Not all domains provide power status readback */
+	if (pd->pwr_stat) {
+		do {
+			cpu_relax();
+			if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat)
+				break;
+		} while (retry--);
+
+		if (!retry) {
+			pr_err("Failed to start domain %s\n", pd->name);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static struct s3c64xx_pm_domain s3c64xx_pm_irom = {
+	.name = "IROM",
+	.ena = S3C64XX_NORMALCFG_IROM_ON,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_etm = {
+	.name = "ETM",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_ETM,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_s = {
+	.name = "S",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_S_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_S,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_f = {
+	.name = "F",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_F_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_F,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_p = {
+	.name = "P",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_P_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_P,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_i = {
+	.name = "I",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_I_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_I,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_g = {
+	.name = "G",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_G_ON,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_v = {
+	.name = "V",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_V_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_V,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = {
+	&s3c64xx_pm_irom,
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = {
+	&s3c64xx_pm_etm,
+	&s3c64xx_pm_g,
+	&s3c64xx_pm_v,
+	&s3c64xx_pm_i,
+	&s3c64xx_pm_p,
+	&s3c64xx_pm_s,
+	&s3c64xx_pm_f,
+};
+
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
 void s3c_pm_debug_smdkled(u32 set, u32 clear)
 {
@@ -89,6 +233,8 @@ static struct sleep_save misc_save[] = {
 
 	SAVE_ITEM(S3C64XX_SDMA_SEL),
 	SAVE_ITEM(S3C64XX_MODEM_MIFPCON),
+
+	SAVE_ITEM(S3C64XX_NORMAL_CFG),
 };
 
 void s3c_pm_configure_extint(void)
@@ -179,7 +325,26 @@ static void s3c64xx_pm_prepare(void)
 	__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
 }
 
-static int s3c64xx_pm_init(void)
+int __init s3c64xx_pm_init(void)
+{
+	int i;
+
+	s3c_pm_init();
+
+	for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++)
+		pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd,
+			      &pm_domain_always_on_gov, false);
+
+	for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
+		pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
+
+	if (dev_get_platdata(&s3c_device_fb.dev))
+		pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+
+	return 0;
+}
+
+static __init int s3c64xx_pm_initcall(void)
 {
 	pm_cpu_prep = s3c64xx_pm_prepare;
 	pm_cpu_sleep = s3c64xx_cpu_suspend;
@@ -198,5 +363,12 @@ static int s3c64xx_pm_init(void)
 
 	return 0;
 }
+arch_initcall(s3c64xx_pm_initcall);
+
+static __init int s3c64xx_pm_late_initcall(void)
+{
+	pm_genpd_poweroff_unused();
 
-arch_initcall(s3c64xx_pm_init);
+	return 0;
+}
+late_initcall(s3c64xx_pm_late_initcall);
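
The pattern this file now follows is the standard generic PM domain one;
stripped to its bones it looks like the sketch below (hypothetical my_* names,
mirroring the calls the patch uses):

	#include <linux/pm_domain.h>
	#include <linux/platform_device.h>

	extern struct platform_device my_pdev;	/* a device in the domain */

	static int my_pd_power_off(struct generic_pm_domain *domain)
	{
		/* gate power to the block here */
		return 0;
	}

	static int my_pd_power_on(struct generic_pm_domain *domain)
	{
		/* ungate power and wait for the block to come up */
		return 0;
	}

	static struct generic_pm_domain my_pd = {
		.power_off = my_pd_power_off,
		.power_on = my_pd_power_on,
	};

	static int __init my_pd_init(void)
	{
		/* default governor, domain initially powered on */
		pm_genpd_init(&my_pd, NULL, false);
		return pm_genpd_add_device(&my_pd, &my_pdev.dev);
	}
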
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 4bf82c156771..be78a2c73db4 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -34,8 +34,8 @@ extern void sh7372_add_standard_devices(void);
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_resume_core_standby_a3sm(void);
-extern int sh7372_do_idle_a3sm(unsigned long unused);
+extern void sh7372_resume_core_standby_sysc(void);
+extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
 
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 84532f9629b2..8254ab86f6cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -480,11 +480,10 @@ struct platform_device;
 struct sh7372_pm_domain {
 	struct generic_pm_domain genpd;
 	struct dev_power_governor *gov;
-	void (*suspend)(void);
+	int (*suspend)(void);
 	void (*resume)(void);
 	unsigned int bit_shift;
 	bool no_debug;
-	bool stay_on;
 };
 
 static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
@@ -499,6 +498,7 @@ extern struct sh7372_pm_domain sh7372_d4;
 extern struct sh7372_pm_domain sh7372_a4r;
 extern struct sh7372_pm_domain sh7372_a3rv;
 extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a4s;
 extern struct sh7372_pm_domain sh7372_a3sp;
 extern struct sh7372_pm_domain sh7372_a3sg;
 
@@ -515,5 +515,7 @@ extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
 
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
+extern void sh7372_intca_suspend(void);
+extern void sh7372_intca_resume(void);
 
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 2d8856df80e2..89afcaba99a1 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -535,6 +535,7 @@ static struct resource intcs_resources[] __initdata = {
 static struct intc_desc intcs_desc __initdata = {
 	.name = "sh7372-intcs",
 	.force_enable = ENABLED_INTCS,
+	.skip_syscore_suspend = true,
 	.resource = intcs_resources,
 	.num_resources = ARRAY_SIZE(intcs_resources),
 	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
@@ -611,3 +612,52 @@ void sh7372_intcs_resume(void)
 	for (k = 0x80; k <= 0x9c; k += 4)
 		__raw_writeb(ffd5[k], intcs_ffd5 + k);
 }
+
+static unsigned short e694[0x200];
+static unsigned short e695[0x200];
+
+void sh7372_intca_suspend(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x38; k += 4)
+		e694[k] = __raw_readw(0xe6940000 + k);
+
+	for (k = 0x80; k <= 0xb4; k += 4)
+		e694[k] = __raw_readb(0xe6940000 + k);
+
+	for (k = 0x180; k <= 0x1b4; k += 4)
+		e694[k] = __raw_readb(0xe6940000 + k);
+
+	for (k = 0x00; k <= 0x50; k += 4)
+		e695[k] = __raw_readw(0xe6950000 + k);
+
+	for (k = 0x80; k <= 0xa8; k += 4)
+		e695[k] = __raw_readb(0xe6950000 + k);
+
+	for (k = 0x180; k <= 0x1a8; k += 4)
+		e695[k] = __raw_readb(0xe6950000 + k);
+}
+
+void sh7372_intca_resume(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x38; k += 4)
+		__raw_writew(e694[k], 0xe6940000 + k);
+
+	for (k = 0x80; k <= 0xb4; k += 4)
+		__raw_writeb(e694[k], 0xe6940000 + k);
+
+	for (k = 0x180; k <= 0x1b4; k += 4)
+		__raw_writeb(e694[k], 0xe6940000 + k);
+
+	for (k = 0x00; k <= 0x50; k += 4)
+		__raw_writew(e695[k], 0xe6950000 + k);
+
+	for (k = 0x80; k <= 0xa8; k += 4)
+		__raw_writeb(e695[k], 0xe6950000 + k);
+
+	for (k = 0x180; k <= 0x1a8; k += 4)
+		__raw_writeb(e695[k], 0xe6950000 + k);
+}
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 34bbcbfb1706..77b8fc12fc2f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -82,11 +82,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
82 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); 82 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
83 unsigned int mask = 1 << sh7372_pd->bit_shift; 83 unsigned int mask = 1 << sh7372_pd->bit_shift;
84 84
85 if (sh7372_pd->suspend) 85 if (sh7372_pd->suspend) {
86 sh7372_pd->suspend(); 86 int ret = sh7372_pd->suspend();
87 87
88 if (sh7372_pd->stay_on) 88 if (ret)
89 return 0; 89 return ret;
90 }
90 91
91 if (__raw_readl(PSTR) & mask) { 92 if (__raw_readl(PSTR) & mask) {
92 unsigned int retry_count; 93 unsigned int retry_count;
@@ -101,8 +102,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
101 } 102 }
102 103
103 if (!sh7372_pd->no_debug) 104 if (!sh7372_pd->no_debug)
104 pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", 105 pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
105 mask, __raw_readl(PSTR)); 106 genpd->name, mask, __raw_readl(PSTR));
106 107
107 return 0; 108 return 0;
108} 109}
@@ -113,9 +114,6 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
113 unsigned int retry_count; 114 unsigned int retry_count;
114 int ret = 0; 115 int ret = 0;
115 116
116 if (sh7372_pd->stay_on)
117 goto out;
118
119 if (__raw_readl(PSTR) & mask) 117 if (__raw_readl(PSTR) & mask)
120 goto out; 118 goto out;
121 119
@@ -133,8 +131,8 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
133 ret = -EIO; 131 ret = -EIO;
134 132
135 if (!sh7372_pd->no_debug) 133 if (!sh7372_pd->no_debug)
136 pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", 134 pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
137 mask, __raw_readl(PSTR)); 135 sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
138 136
139 out: 137 out:
140 if (ret == 0 && sh7372_pd->resume && do_resume) 138 if (ret == 0 && sh7372_pd->resume && do_resume)
@@ -148,35 +146,60 @@ static int pd_power_up(struct generic_pm_domain *genpd)
148 return __pd_power_up(to_sh7372_pd(genpd), true); 146 return __pd_power_up(to_sh7372_pd(genpd), true);
149} 147}
150 148
151static void sh7372_a4r_suspend(void) 149static int sh7372_a4r_suspend(void)
152{ 150{
153 sh7372_intcs_suspend(); 151 sh7372_intcs_suspend();
154 __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */ 152 __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
153 return 0;
155} 154}
156 155
157static bool pd_active_wakeup(struct device *dev) 156static bool pd_active_wakeup(struct device *dev)
158{ 157{
159 return true; 158 bool (*active_wakeup)(struct device *dev);
159
160 active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
161 return active_wakeup ? active_wakeup(dev) : true;
160} 162}
161 163
162static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain) 164static int sh7372_stop_dev(struct device *dev)
163{ 165{
164 return false; 166 int (*stop)(struct device *dev);
167
168 stop = dev_gpd_data(dev)->ops.stop;
169 if (stop) {
170 int ret = stop(dev);
171 if (ret)
172 return ret;
173 }
174 return pm_clk_suspend(dev);
165} 175}
166 176
167struct dev_power_governor sh7372_always_on_gov = { 177static int sh7372_start_dev(struct device *dev)
168 .power_down_ok = sh7372_power_down_forbidden, 178{
169}; 179 int (*start)(struct device *dev);
180 int ret;
181
182 ret = pm_clk_resume(dev);
183 if (ret)
184 return ret;
185
186 start = dev_gpd_data(dev)->ops.start;
187 if (start)
188 ret = start(dev);
189
190 return ret;
191}
170 192
171void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) 193void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
172{ 194{
173 struct generic_pm_domain *genpd = &sh7372_pd->genpd; 195 struct generic_pm_domain *genpd = &sh7372_pd->genpd;
196 struct dev_power_governor *gov = sh7372_pd->gov;
174 197
175 pm_genpd_init(genpd, sh7372_pd->gov, false); 198 pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
176 genpd->stop_device = pm_clk_suspend; 199 genpd->dev_ops.stop = sh7372_stop_dev;
177 genpd->start_device = pm_clk_resume; 200 genpd->dev_ops.start = sh7372_start_dev;
201 genpd->dev_ops.active_wakeup = pd_active_wakeup;
178 genpd->dev_irq_safe = true; 202 genpd->dev_irq_safe = true;
179 genpd->active_wakeup = pd_active_wakeup;
180 genpd->power_off = pd_power_down; 203 genpd->power_off = pd_power_down;
181 genpd->power_on = pd_power_up; 204 genpd->power_on = pd_power_up;
182 __pd_power_up(sh7372_pd, false); 205 __pd_power_up(sh7372_pd, false);
@@ -199,48 +222,73 @@ void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
199} 222}
200 223
201struct sh7372_pm_domain sh7372_a4lc = { 224struct sh7372_pm_domain sh7372_a4lc = {
225 .genpd.name = "A4LC",
202 .bit_shift = 1, 226 .bit_shift = 1,
203}; 227};
204 228
205struct sh7372_pm_domain sh7372_a4mp = { 229struct sh7372_pm_domain sh7372_a4mp = {
230 .genpd.name = "A4MP",
206 .bit_shift = 2, 231 .bit_shift = 2,
207}; 232};
208 233
209struct sh7372_pm_domain sh7372_d4 = { 234struct sh7372_pm_domain sh7372_d4 = {
235 .genpd.name = "D4",
210 .bit_shift = 3, 236 .bit_shift = 3,
211}; 237};
212 238
213struct sh7372_pm_domain sh7372_a4r = { 239struct sh7372_pm_domain sh7372_a4r = {
240 .genpd.name = "A4R",
214 .bit_shift = 5, 241 .bit_shift = 5,
215 .gov = &sh7372_always_on_gov,
216 .suspend = sh7372_a4r_suspend, 242 .suspend = sh7372_a4r_suspend,
217 .resume = sh7372_intcs_resume, 243 .resume = sh7372_intcs_resume,
218 .stay_on = true,
219}; 244};
220 245
221struct sh7372_pm_domain sh7372_a3rv = { 246struct sh7372_pm_domain sh7372_a3rv = {
247 .genpd.name = "A3RV",
222 .bit_shift = 6, 248 .bit_shift = 6,
223}; 249};
224 250
225struct sh7372_pm_domain sh7372_a3ri = { 251struct sh7372_pm_domain sh7372_a3ri = {
252 .genpd.name = "A3RI",
226 .bit_shift = 8, 253 .bit_shift = 8,
227}; 254};
228 255
229struct sh7372_pm_domain sh7372_a3sp = { 256static int sh7372_a4s_suspend(void)
230 .bit_shift = 11, 257{
231 .gov = &sh7372_always_on_gov, 258 /*
259 * The A4S domain contains the CPU core and therefore it should
260 * only be turned off if the CPU is in use.
261 */
262 return -EBUSY;
263}
264
265struct sh7372_pm_domain sh7372_a4s = {
266 .genpd.name = "A4S",
267 .bit_shift = 10,
268 .gov = &pm_domain_always_on_gov,
232 .no_debug = true, 269 .no_debug = true,
270 .suspend = sh7372_a4s_suspend,
233}; 271};
234 272
235static void sh7372_a3sp_init(void) 273static int sh7372_a3sp_suspend(void)
236{ 274{
237 /* serial consoles make use of SCIF hardware located in A3SP, 275 /*
276 * Serial consoles make use of SCIF hardware located in A3SP,
238 * keep such power domain on if "no_console_suspend" is set. 277 * keep such power domain on if "no_console_suspend" is set.
239 */ 278 */
240 sh7372_a3sp.stay_on = !console_suspend_enabled; 279 return console_suspend_enabled ? -EBUSY : 0;
241} 280}
242 281
282struct sh7372_pm_domain sh7372_a3sp = {
283 .genpd.name = "A3SP",
284 .bit_shift = 11,
285 .gov = &pm_domain_always_on_gov,
286 .no_debug = true,
287 .suspend = sh7372_a3sp_suspend,
288};
289
243struct sh7372_pm_domain sh7372_a3sg = { 290struct sh7372_pm_domain sh7372_a3sg = {
291 .genpd.name = "A3SG",
244 .bit_shift = 13, 292 .bit_shift = 13,
245}; 293};
246 294
@@ -257,11 +305,16 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
257 return 0; 305 return 0;
258} 306}
259 307
260static void sh7372_enter_core_standby(void) 308static void sh7372_set_reset_vector(unsigned long address)
261{ 309{
262 /* set reset vector, translate 4k */ 310 /* set reset vector, translate 4k */
263 __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); 311 __raw_writel(address, SBAR);
264 __raw_writel(0, APARMBAREA); 312 __raw_writel(0, APARMBAREA);
313}
314
315static void sh7372_enter_core_standby(void)
316{
317 sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
265 318
266 /* enter sleep mode with SYSTBCR to 0x10 */ 319 /* enter sleep mode with SYSTBCR to 0x10 */
267 __raw_writel(0x10, SYSTBCR); 320 __raw_writel(0x10, SYSTBCR);
@@ -274,27 +327,22 @@ static void sh7372_enter_core_standby(void)
274#endif 327#endif
275 328
276#ifdef CONFIG_SUSPEND 329#ifdef CONFIG_SUSPEND
277static void sh7372_enter_a3sm_common(int pllc0_on) 330static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
278{ 331{
279 /* set reset vector, translate 4k */
280 __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
281 __raw_writel(0, APARMBAREA);
282
283 if (pllc0_on) 332 if (pllc0_on)
284 __raw_writel(0, PLLC01STPCR); 333 __raw_writel(0, PLLC01STPCR);
285 else 334 else
286 __raw_writel(1 << 28, PLLC01STPCR); 335 __raw_writel(1 << 28, PLLC01STPCR);
287 336
288 __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
289 __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */ 337 __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
290 cpu_suspend(0, sh7372_do_idle_a3sm); 338 cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
291 __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */ 339 __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
292 340
293 /* disable reset vector translation */ 341 /* disable reset vector translation */
294 __raw_writel(0, SBAR); 342 __raw_writel(0, SBAR);
295} 343}
296 344
297static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p) 345static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
298{ 346{
299 unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4; 347 unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
300 unsigned long msk, msk2; 348 unsigned long msk, msk2;
@@ -382,7 +430,7 @@ static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
382 *irqcr2p = irqcr2; 430 *irqcr2p = irqcr2;
383} 431}
384 432
385static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2) 433static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
386{ 434{
387 u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high; 435 u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
388 unsigned long tmp; 436 unsigned long tmp;
@@ -415,6 +463,22 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
415 __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3); 463 __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
416 __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4); 464 __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
417} 465}
466
467static void sh7372_enter_a3sm_common(int pllc0_on)
468{
469 sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
470 sh7372_enter_sysc(pllc0_on, 1 << 12);
471}
472
473static void sh7372_enter_a4s_common(int pllc0_on)
474{
475 sh7372_intca_suspend();
476 memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
477 sh7372_set_reset_vector(SMFRAM);
478 sh7372_enter_sysc(pllc0_on, 1 << 10);
479 sh7372_intca_resume();
480}
481
418#endif 482#endif
419 483
420#ifdef CONFIG_CPU_IDLE 484#ifdef CONFIG_CPU_IDLE
@@ -448,14 +512,20 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
448 unsigned long msk, msk2; 512 unsigned long msk, msk2;
449 513
450 /* check active clocks to determine potential wakeup sources */ 514 /* check active clocks to determine potential wakeup sources */
451 if (sh7372_a3sm_valid(&msk, &msk2)) { 515 if (sh7372_sysc_valid(&msk, &msk2)) {
452
453 /* convert INTC mask and sense to SYSC mask and sense */ 516 /* convert INTC mask and sense to SYSC mask and sense */
454 sh7372_setup_a3sm(msk, msk2); 517 sh7372_setup_sysc(msk, msk2);
455 518
456 /* enter A3SM sleep with PLLC0 off */ 519 if (!console_suspend_enabled &&
457 pr_debug("entering A3SM\n"); 520 sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
458 sh7372_enter_a3sm_common(0); 521 /* enter A4S sleep with PLLC0 off */
522 pr_debug("entering A4S\n");
523 sh7372_enter_a4s_common(0);
524 } else {
525 /* enter A3SM sleep with PLLC0 off */
526 pr_debug("entering A3SM\n");
527 sh7372_enter_a3sm_common(0);
528 }
459 } else { 529 } else {
460 /* default to Core Standby that supports all wakeup sources */ 530 /* default to Core Standby that supports all wakeup sources */
461 pr_debug("entering Core Standby\n"); 531 pr_debug("entering Core Standby\n");
@@ -464,9 +534,37 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
464 return 0; 534 return 0;
465} 535}
466 536
537/**
538 * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
539 * @notifier: Unused.
540 * @pm_event: Event being handled.
541 * @unused: Unused.
542 */
543static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
544 unsigned long pm_event, void *unused)
545{
546 switch (pm_event) {
547 case PM_SUSPEND_PREPARE:
548 /*
549 * This is necessary, because the A4R domain has to be "on"
550 * when suspend_device_irqs() and resume_device_irqs() are
551 * executed during system suspend and resume, respectively, so
552 * that those functions don't crash while accessing the INTCS.
553 */
554 pm_genpd_poweron(&sh7372_a4r.genpd);
555 break;
556 case PM_POST_SUSPEND:
557 pm_genpd_poweroff_unused();
558 break;
559 }
560
561 return NOTIFY_DONE;
562}
563
467static void sh7372_suspend_init(void) 564static void sh7372_suspend_init(void)
468{ 565{
469 shmobile_suspend_ops.enter = sh7372_enter_suspend; 566 shmobile_suspend_ops.enter = sh7372_enter_suspend;
567 pm_notifier(sh7372_pm_notifier_fn, 0);
470} 568}
471#else 569#else
472static void sh7372_suspend_init(void) {} 570static void sh7372_suspend_init(void) {}
@@ -482,8 +580,6 @@ void __init sh7372_pm_init(void)
482 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */ 580 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
483 __raw_writel(0, PDNSEL); 581 __raw_writel(0, PDNSEL);
484 582
485 sh7372_a3sp_init();
486
487 sh7372_suspend_init(); 583 sh7372_suspend_init();
488 sh7372_cpuidle_init(); 584 sh7372_cpuidle_init();
489} 585}
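
Note on the hunks above: the suspend path now picks the SYSC power-down mode at run time. A4S (SPDCR bit 10) is requested only when console suspend is disabled and genpd has already powered the A4S domain off; otherwise the code falls back to A3SM (SPDCR bit 12). The new PM notifier keeps the A4R domain on across suspend_device_irqs()/resume_device_irqs() so INTCS accesses cannot fault, and powers unused domains back off after resume. A condensed sketch of the selection (not the literal code; bit values taken from the sh7372_enter_sysc() calls above):

	/* condensed sketch of the mode selection in sh7372_enter_suspend() */
	static void sh7372_pick_sysc_mode_sketch(void)
	{
		if (!console_suspend_enabled &&
		    sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF)
			sh7372_enter_a4s_common(0);	/* SPDCR bit 10, PLLC0 off */
		else
			sh7372_enter_a3sm_common(0);	/* SPDCR bit 12, PLLC0 off */
	}
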
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2380389e6ac5..c197f9d29d04 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -994,12 +994,16 @@ void __init sh7372_add_standard_devices(void)
994 sh7372_init_pm_domain(&sh7372_a4r); 994 sh7372_init_pm_domain(&sh7372_a4r);
995 sh7372_init_pm_domain(&sh7372_a3rv); 995 sh7372_init_pm_domain(&sh7372_a3rv);
996 sh7372_init_pm_domain(&sh7372_a3ri); 996 sh7372_init_pm_domain(&sh7372_a3ri);
997 sh7372_init_pm_domain(&sh7372_a3sg); 997 sh7372_init_pm_domain(&sh7372_a4s);
998 sh7372_init_pm_domain(&sh7372_a3sp); 998 sh7372_init_pm_domain(&sh7372_a3sp);
999 sh7372_init_pm_domain(&sh7372_a3sg);
999 1000
1000 sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv); 1001 sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
1001 sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc); 1002 sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
1002 1003
1004 sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
1005 sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
1006
1003 platform_add_devices(sh7372_early_devices, 1007 platform_add_devices(sh7372_early_devices,
1004 ARRAY_SIZE(sh7372_early_devices)); 1008 ARRAY_SIZE(sh7372_early_devices));
1005 1009
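
With A4S registered as a proper PM domain and made the parent of A3SG and A3SP, genpd can power A4S off automatically once both children are off, which is exactly what the A4S check in the suspend path of pm-sh7372.c relies on. The hierarchy resulting from the calls above:

	/*
	 * sketch of the sh7372 genpd hierarchy after this change:
	 *
	 *   a4s --+-- a3sg
	 *         +-- a3sp
	 *   a4r ----- a4lc ----- a3rv
	 */
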
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index f3ab3c5810ea..1d564674451d 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -37,13 +37,18 @@
37#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) 37#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
38 .align 12 38 .align 12
39 .text 39 .text
40 .global sh7372_resume_core_standby_a3sm 40 .global sh7372_resume_core_standby_sysc
41sh7372_resume_core_standby_a3sm: 41sh7372_resume_core_standby_sysc:
42 ldr pc, 1f 42 ldr pc, 1f
431: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET 431: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
44 44
45 .global sh7372_do_idle_a3sm 45#define SPDCR 0xe6180008
46sh7372_do_idle_a3sm: 46
47 /* A3SM & A4S power down */
48 .global sh7372_do_idle_sysc
49sh7372_do_idle_sysc:
50 mov r8, r0 /* sleep mode passed in r0 */
51
47 /* 52 /*
48 * Clear the SCTLR.C bit to prevent further data cache 53 * Clear the SCTLR.C bit to prevent further data cache
49 * allocation. Clearing SCTLR.C would make all the data accesses 54 * allocation. Clearing SCTLR.C would make all the data accesses
@@ -80,13 +85,9 @@ sh7372_do_idle_a3sm:
80 dsb 85 dsb
81 dmb 86 dmb
82 87
83#define SPDCR 0xe6180008 88 /* SYSC power down */
84#define A3SM (1 << 12)
85
86 /* A3SM power down */
87 ldr r0, =SPDCR 89 ldr r0, =SPDCR
88 ldr r1, =A3SM 90 str r8, [r0]
89 str r1, [r0]
901: 911:
91 b 1b 92 b 1b
92 93
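
The low-level entry point is generalized the same way: sh7372_do_idle_a3sm hard-coded the A3SM bit, while sh7372_do_idle_sysc receives the SPDCR value in r0 and writes it verbatim, so one assembly path serves both modes. An assumed C-side view of the new contract (the prototype and caller below are illustrative, not header excerpts):

	/* sketch: the SPDCR power-down bit is the first argument, which the
	 * ARM calling convention places in r0 */
	extern void sh7372_do_idle_sysc(unsigned long spdcr_bits);

	static void example_enter_a3sm(void)	/* hypothetical caller */
	{
		sh7372_do_idle_sysc(1 << 12);	/* A3SM; 1 << 10 would select A4S */
	}
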
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 78014e53eb3c..61fc53740fbd 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -22,6 +22,7 @@ struct device;
22#ifdef CONFIG_PM 22#ifdef CONFIG_PM
23 23
24extern __init int s3c_pm_init(void); 24extern __init int s3c_pm_init(void);
25extern __init int s3c64xx_pm_init(void);
25 26
26#else 27#else
27 28
@@ -29,6 +30,11 @@ static inline int s3c_pm_init(void)
29{ 30{
30 return 0; 31 return 0;
31} 32}
33
34static inline int s3c64xx_pm_init(void)
35{
36 return 0;
37}
32#endif 38#endif
33 39
34/* configuration for the IRQ mask over sleep */ 40/* configuration for the IRQ mask over sleep */
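
The header change mirrors the existing s3c_pm_init() pattern: a real declaration under CONFIG_PM and an inline stub returning 0 otherwise, so S3C64xx machine code can call it unconditionally. For instance (hypothetical caller):

	static int __init crag6410_pm_setup(void)	/* hypothetical */
	{
		return s3c64xx_pm_init();	/* inline stub returns 0 when CONFIG_PM=n */
	}
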
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03dcb0b6..e5deda4691db 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
85#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */ 85#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
86#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */ 86#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
87#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */ 87#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
88#define TIF_FREEZE 29
89#define TIF_DEBUG 30 /* debugging enabled */ 88#define TIF_DEBUG 30 /* debugging enabled */
90#define TIF_USERSPACE 31 /* true if FS sets userspace */ 89#define TIF_USERSPACE 31 /* true if FS sets userspace */
91 90
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
98#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 97#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
99#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP) 98#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 99#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
101#define _TIF_FREEZE (1 << TIF_FREEZE)
102 100
103/* Note: The masks below must never span more than 16 bits! */ 101/* Note: The masks below must never span more than 16 bits! */
104 102
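
This hunk, and the long run of thread_info.h hunks that follows, is the per-architecture half of removing TIF_FREEZE: the freezer no longer signals tasks through a per-thread flag, so every architecture simply drops the bit and its mask. The replacement model (see include/linux/freezer.h in the diffstat) gates on a global counter first and only then takes a slow path that inspects per-task state; a sketch under that assumption:

	/* sketch of the post-TIF_FREEZE check, per the reworked freezer */
	extern atomic_t system_freezing_cnt;
	extern bool freezing_slow_path(struct task_struct *p);

	static inline bool freezing_sketch(struct task_struct *p)
	{
		/* fast path: no freezing in progress anywhere */
		if (likely(!atomic_read(&system_freezing_cnt)))
			return false;
		return freezing_slow_path(p);	/* honours PF_NOFREEZE and friends */
	}
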
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a121..53ad10005ae3 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
100 TIF_NEED_RESCHED */ 100 TIF_NEED_RESCHED */
101#define TIF_MEMDIE 4 /* is terminating due to OOM killer */ 101#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
102#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 102#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
103#define TIF_FREEZE 6 /* is freezing for suspend */
104#define TIF_IRQ_SYNC 7 /* sync pipeline stage */ 103#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
105#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 104#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
106#define TIF_SINGLESTEP 9 105#define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
111#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 110#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
112#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 111#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
113#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 112#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
114#define _TIF_FREEZE (1<<TIF_FREEZE)
115#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC) 113#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
116#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 114#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
117#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 115#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c54557..29b92884d793 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@ struct thread_info {
86#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ 86#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
87#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 87#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
88#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 88#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
89#define TIF_FREEZE 18 /* is freezing for suspend */
90 89
91#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 90#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
92#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 91#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
94#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 93#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
95#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 94#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
96#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 95#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
97#define _TIF_FREEZE (1<<TIF_FREEZE)
98 96
99#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 97#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
100#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 98#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73dc119..92d83ea99ae5 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
111#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 111#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
112#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 112#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
113#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 113#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
114#define TIF_FREEZE 18 /* freezing for suspend */
115 114
116#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 115#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
117#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 116#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
120#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 119#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
121#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 120#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
122#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 121#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
123#define _TIF_FREEZE (1 << TIF_FREEZE)
124 122
125#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 123#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
126#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 124#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784bfdee..9c126e0c09aa 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
90#define TIF_MEMDIE 4 /* is terminating due to OOM killer */ 90#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
91#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 91#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
92#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */ 92#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
93#define TIF_FREEZE 16 /* is freezing for suspend */
94 93
95/* as above, but as bit values */ 94/* as above, but as bit values */
96#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 95#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
99#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 98#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
100#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 99#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
102#define _TIF_FREEZE (1<<TIF_FREEZE)
103 101
104#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 102#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
105 103
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84e7bcc..e054bcc4273c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@ struct thread_info {
113#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 113#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
114#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 114#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
115#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ 115#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
116#define TIF_FREEZE 20 /* is freezing for suspend */
117#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */ 116#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
118 117
119#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 118#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
126#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 125#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
127#define _TIF_MCA_INIT (1 << TIF_MCA_INIT) 126#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
128#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) 127#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
129#define _TIF_FREEZE (1 << TIF_FREEZE)
130#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE) 128#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
131 129
132/* "work to do on user-return" bits */ 130/* "work to do on user-return" bits */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba44068..bf8fa3c06f4e 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
138#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 138#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
139#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 139#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
140#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 140#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
141#define TIF_FREEZE 19 /* is freezing for suspend */
142 141
143#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
144#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
149#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 148#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
150#define _TIF_USEDFPU (1<<TIF_USEDFPU) 149#define _TIF_USEDFPU (1<<TIF_USEDFPU)
151#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 150#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
152#define _TIF_FREEZE (1<<TIF_FREEZE)
153 151
154#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 152#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
155#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 153#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 29fa6da4f17c..e8665e6f9464 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -76,7 +76,6 @@ static inline struct thread_info *current_thread_info(void)
76#define TIF_DELAYED_TRACE 14 /* single step a syscall */ 76#define TIF_DELAYED_TRACE 14 /* single step a syscall */
77#define TIF_SYSCALL_TRACE 15 /* syscall trace active */ 77#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
78#define TIF_MEMDIE 16 /* is terminating due to OOM killer */ 78#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
79#define TIF_FREEZE 17 /* thread is freezing for suspend */
80#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */ 79#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
81 80
82#endif /* _ASM_M68K_THREAD_INFO_H */ 81#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2ac21b3..1a8ab6a5c03f 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
125#define TIF_MEMDIE 6 /* is terminating due to OOM killer */ 125#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
126#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 126#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
127#define TIF_SECCOMP 10 /* secure computing */ 127#define TIF_SECCOMP 10 /* secure computing */
128#define TIF_FREEZE 14 /* Freezing for suspend */
129 128
130/* true if poll_idle() is polling TIF_NEED_RESCHED */ 129/* true if poll_idle() is polling TIF_NEED_RESCHED */
131#define TIF_POLLING_NRFLAG 16 130#define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
137#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 136#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
138#define _TIF_IRET (1 << TIF_IRET) 137#define _TIF_IRET (1 << TIF_IRET)
139#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 138#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
140#define _TIF_FREEZE (1 << TIF_FREEZE)
141#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 139#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
142#define _TIF_SECCOMP (1 << TIF_SECCOMP) 140#define _TIF_SECCOMP (1 << TIF_SECCOMP)
143 141
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6639e7..0d85d8e440c5 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
117#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 117#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
118#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 118#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
119#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 119#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
120#define TIF_FREEZE 19
121#define TIF_FIXADE 20 /* Fix address errors in software */ 120#define TIF_FIXADE 20 /* Fix address errors in software */
122#define TIF_LOGADE 21 /* Log address errors to syslog */ 121#define TIF_LOGADE 21 /* Log address errors to syslog */
123#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */ 122#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
141#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 140#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
142#define _TIF_USEDFPU (1<<TIF_USEDFPU) 141#define _TIF_USEDFPU (1<<TIF_USEDFPU)
143#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 142#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
144#define _TIF_FREEZE (1<<TIF_FREEZE)
145#define _TIF_FIXADE (1<<TIF_FIXADE) 143#define _TIF_FIXADE (1<<TIF_FIXADE)
146#define _TIF_LOGADE (1<<TIF_LOGADE) 144#define _TIF_LOGADE (1<<TIF_LOGADE)
147#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS) 145#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c213002d4c..28cf52100baa 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
165#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 165#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
166#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 166#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
167#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 167#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
168#define TIF_FREEZE 18 /* freezing for suspend */
169 168
170#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE) 169#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
171#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME) 170#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
174#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP) 173#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
175#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK) 174#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
176#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG) 175#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
177#define _TIF_FREEZE +(1 << TIF_FREEZE)
178 176
179#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 177#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
180#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 178#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de727e90b..6d9c7c7973d0 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@ struct thread_info {
58#define TIF_32BIT 4 /* 32 bit binary */ 58#define TIF_32BIT 4 /* 32 bit binary */
59#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 59#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ 60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
61#define TIF_FREEZE 7 /* is freezing for suspend */
62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 61#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
63#define TIF_SINGLESTEP 9 /* single stepping? */ 62#define TIF_SINGLESTEP 9 /* single stepping? */
64#define TIF_BLOCKSTEP 10 /* branch stepping? */ 63#define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
69#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 68#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
70#define _TIF_32BIT (1 << TIF_32BIT) 69#define _TIF_32BIT (1 << TIF_32BIT)
71#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 70#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
72#define _TIF_FREEZE (1 << TIF_FREEZE)
73#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 71#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
74#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 72#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
75#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 73#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231ec1f0..964714940961 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
109#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 109#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
110#define TIF_NOERROR 12 /* Force successful syscall return */ 110#define TIF_NOERROR 12 /* Force successful syscall return */
111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
112#define TIF_FREEZE 14 /* Freezing for suspend */
113#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 112#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
114#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */ 113#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
115 114
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
127#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 126#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
128#define _TIF_NOERROR (1<<TIF_NOERROR) 127#define _TIF_NOERROR (1<<TIF_NOERROR)
129#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 128#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
130#define _TIF_FREEZE (1<<TIF_FREEZE)
131#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 129#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
132#define _TIF_RUNLATCH (1<<TIF_RUNLATCH) 130#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
133#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 131#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f65af61996bd..8b086299ba25 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
1406 .match = vio_bus_match, 1406 .match = vio_bus_match,
1407 .probe = vio_bus_probe, 1407 .probe = vio_bus_probe,
1408 .remove = vio_bus_remove, 1408 .remove = vio_bus_remove,
1409 .pm = GENERIC_SUBSYS_PM_OPS,
1410}; 1409};
1411 1410
1412/** 1411/**
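
Dropping .pm here is not a behavior change: elsewhere in this series the PM core learns to run a driver's own dev_pm_ops callback whenever the domain/type/class/bus level supplies none, which is all GENERIC_SUBSYS_PM_OPS ever did. Roughly (helper name assumed; see drivers/base/power/main.c in the diffstat):

	/* sketch of the core-level fallback that makes the macro redundant */
	if (!callback && dev->driver && dev->driver->pm)
		callback = pm_op(dev->driver->pm, state);
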
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a23183423b14..a73038155e0d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
102#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 102#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
103#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 103#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
104#define TIF_SINGLE_STEP 20 /* This task is single stepped */ 104#define TIF_SINGLE_STEP 20 /* This task is single stepped */
105#define TIF_FREEZE 21 /* thread is freezing for suspend */
106 105
107#define _TIF_SYSCALL (1<<TIF_SYSCALL) 106#define _TIF_SYSCALL (1<<TIF_SYSCALL)
108#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 107#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
119#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 118#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
120#define _TIF_31BIT (1<<TIF_31BIT) 119#define _TIF_31BIT (1<<TIF_31BIT)
121#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 120#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
122#define _TIF_FREEZE (1<<TIF_FREEZE)
123 121
124#ifdef CONFIG_64BIT 122#ifdef CONFIG_64BIT
125#define is_32bit_task() (test_thread_flag(TIF_31BIT)) 123#define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d5089de1e..20ee40af16e9 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
122#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ 122#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
123#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 123#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
124#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 124#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
125#define TIF_FREEZE 19 /* Freezing for suspend */
126 125
127#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 126#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
128#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 127#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
133#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 132#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
134#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 133#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
135#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 134#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
136#define _TIF_FREEZE (1 << TIF_FREEZE)
137 135
138/* 136/*
139 * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we 137 * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa5753233410..5cc5888ad5a3 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
133#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling 133#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
134 * TIF_NEED_RESCHED */ 134 * TIF_NEED_RESCHED */
135#define TIF_MEMDIE 10 /* is terminating due to OOM killer */ 135#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
136#define TIF_FREEZE 11 /* is freezing for suspend */
137 136
138/* as above, but as bit values */ 137/* as above, but as bit values */
139#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 138#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
147#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ 146#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
148 _TIF_SIGPENDING | \ 147 _TIF_SIGPENDING | \
149 _TIF_RESTORE_SIGMASK) 148 _TIF_RESTORE_SIGMASK)
150#define _TIF_FREEZE (1<<TIF_FREEZE)
151 149
152#endif /* __KERNEL__ */ 150#endif /* __KERNEL__ */
153 151
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be1a533..01d057fe6a3f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
225/* flag bit 12 is available */ 225/* flag bit 12 is available */
226#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ 226#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
227#define TIF_POLLING_NRFLAG 14 227#define TIF_POLLING_NRFLAG 14
228#define TIF_FREEZE 15 /* is freezing for suspend */
229 228
230#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 229#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
231#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 230#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
237#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 236#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
238#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 237#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
239#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 238#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
240#define _TIF_FREEZE (1<<TIF_FREEZE)
241 239
242#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ 240#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
243 _TIF_DO_NOTIFY_RESUME_MASK | \ 241 _TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad33fab..200c4ab1240c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
71#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 71#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
72#define TIF_SYSCALL_AUDIT 6 72#define TIF_SYSCALL_AUDIT 6
73#define TIF_RESTORE_SIGMASK 7 73#define TIF_RESTORE_SIGMASK 7
74#define TIF_FREEZE 16 /* is freezing for suspend */
75 74
76#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 75#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
77#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 76#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
80#define _TIF_MEMDIE (1 << TIF_MEMDIE) 79#define _TIF_MEMDIE (1 << TIF_MEMDIE)
81#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 80#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
82#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 81#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
83#define _TIF_FREEZE (1 << TIF_FREEZE)
84 82
85#endif 83#endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e04861..89f7557583b8 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
135#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ 135#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
136#define TIF_SYSCALL_TRACE 8 136#define TIF_SYSCALL_TRACE 8
137#define TIF_MEMDIE 18 137#define TIF_MEMDIE 18
138#define TIF_FREEZE 19
139#define TIF_RESTORE_SIGMASK 20 138#define TIF_RESTORE_SIGMASK 20
140 139
141#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 140#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
142#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 141#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
143#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 142#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
144#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 143#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
145#define _TIF_FREEZE (1 << TIF_FREEZE)
146#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 144#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
147 145
148/* 146/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 185b719ec61a..74047159d0ab 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -91,7 +91,6 @@ struct thread_info {
91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
92#define TIF_DEBUG 21 /* uses debug registers */ 92#define TIF_DEBUG 21 /* uses debug registers */
93#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 93#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
94#define TIF_FREEZE 23 /* is freezing for suspend */
95#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ 94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
96#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
97#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
@@ -113,7 +112,6 @@ struct thread_info {
113#define _TIF_FORK (1 << TIF_FORK) 112#define _TIF_FORK (1 << TIF_FORK)
114#define _TIF_DEBUG (1 << TIF_DEBUG) 113#define _TIF_DEBUG (1 << TIF_DEBUG)
115#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) 114#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
116#define _TIF_FREEZE (1 << TIF_FREEZE)
117#define _TIF_FORCED_TF (1 << TIF_FORCED_TF) 115#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
118#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 116#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
119#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 117#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8accb0b0c..6abbedd09d85 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
132#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 132#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
133#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ 133#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
134#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 134#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
135#define TIF_FREEZE 17 /* is freezing for suspend */
136 135
137#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 136#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
138#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 137#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
141#define _TIF_IRET (1<<TIF_IRET) 140#define _TIF_IRET (1<<TIF_IRET)
142#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 141#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
143#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 142#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
144#define _TIF_FREEZE (1<<TIF_FREEZE)
145 143
146#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 144#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
147#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 145#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db2..0a7ed69546ba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
476 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), 476 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
477 }, 477 },
478 }, 478 },
479 {
480 .callback = init_nvs_nosave,
481 .ident = "Asus K54C",
482 .matches = {
483 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
484 DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
485 },
486 },
487 {
488 .callback = init_nvs_nosave,
489 .ident = "Asus K54HR",
490 .matches = {
491 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
492 DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
493 },
494 },
479 {}, 495 {},
480}; 496};
481#endif /* CONFIG_SUSPEND */ 497#endif /* CONFIG_SUSPEND */
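
Two more ASUS machines join the ACPI sleep DMI table; init_nvs_nosave makes the kernel skip saving and restoring the ACPI NVS region, which these laptops need in order to resume reliably. Further quirks follow the same shape (the model below is a placeholder, not a real entry):

	{
		.callback = init_nvs_nosave,
		.ident = "Asus K54XX",		/* hypothetical model */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "K54XX"),
		},
	},
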
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 936c98cb2475..54eaf96ab217 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -113,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
113 return ret; 113 return ret;
114} 114}
115 115
116static int amba_pm_prepare(struct device *dev) 116#endif /* CONFIG_PM_SLEEP */
117{
118 struct device_driver *drv = dev->driver;
119 int ret = 0;
120
121 if (drv && drv->pm && drv->pm->prepare)
122 ret = drv->pm->prepare(dev);
123
124 return ret;
125}
126
127static void amba_pm_complete(struct device *dev)
128{
129 struct device_driver *drv = dev->driver;
130
131 if (drv && drv->pm && drv->pm->complete)
132 drv->pm->complete(dev);
133}
134
135#else /* !CONFIG_PM_SLEEP */
136
137#define amba_pm_prepare NULL
138#define amba_pm_complete NULL
139
140#endif /* !CONFIG_PM_SLEEP */
141 117
142#ifdef CONFIG_SUSPEND 118#ifdef CONFIG_SUSPEND
143 119
@@ -159,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
159 return ret; 135 return ret;
160} 136}
161 137
162static int amba_pm_suspend_noirq(struct device *dev)
163{
164 struct device_driver *drv = dev->driver;
165 int ret = 0;
166
167 if (!drv)
168 return 0;
169
170 if (drv->pm) {
171 if (drv->pm->suspend_noirq)
172 ret = drv->pm->suspend_noirq(dev);
173 }
174
175 return ret;
176}
177
178static int amba_pm_resume(struct device *dev) 138static int amba_pm_resume(struct device *dev)
179{ 139{
180 struct device_driver *drv = dev->driver; 140 struct device_driver *drv = dev->driver;
@@ -193,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
193 return ret; 153 return ret;
194} 154}
195 155
196static int amba_pm_resume_noirq(struct device *dev)
197{
198 struct device_driver *drv = dev->driver;
199 int ret = 0;
200
201 if (!drv)
202 return 0;
203
204 if (drv->pm) {
205 if (drv->pm->resume_noirq)
206 ret = drv->pm->resume_noirq(dev);
207 }
208
209 return ret;
210}
211
212#else /* !CONFIG_SUSPEND */ 156#else /* !CONFIG_SUSPEND */
213 157
214#define amba_pm_suspend NULL 158#define amba_pm_suspend NULL
215#define amba_pm_resume NULL 159#define amba_pm_resume NULL
216#define amba_pm_suspend_noirq NULL
217#define amba_pm_resume_noirq NULL
218 160
219#endif /* !CONFIG_SUSPEND */ 161#endif /* !CONFIG_SUSPEND */
220 162
@@ -238,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
238 return ret; 180 return ret;
239} 181}
240 182
241static int amba_pm_freeze_noirq(struct device *dev)
242{
243 struct device_driver *drv = dev->driver;
244 int ret = 0;
245
246 if (!drv)
247 return 0;
248
249 if (drv->pm) {
250 if (drv->pm->freeze_noirq)
251 ret = drv->pm->freeze_noirq(dev);
252 }
253
254 return ret;
255}
256
257static int amba_pm_thaw(struct device *dev) 183static int amba_pm_thaw(struct device *dev)
258{ 184{
259 struct device_driver *drv = dev->driver; 185 struct device_driver *drv = dev->driver;
@@ -272,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
272 return ret; 198 return ret;
273} 199}
274 200
275static int amba_pm_thaw_noirq(struct device *dev)
276{
277 struct device_driver *drv = dev->driver;
278 int ret = 0;
279
280 if (!drv)
281 return 0;
282
283 if (drv->pm) {
284 if (drv->pm->thaw_noirq)
285 ret = drv->pm->thaw_noirq(dev);
286 }
287
288 return ret;
289}
290
291static int amba_pm_poweroff(struct device *dev) 201static int amba_pm_poweroff(struct device *dev)
292{ 202{
293 struct device_driver *drv = dev->driver; 203 struct device_driver *drv = dev->driver;
@@ -306,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
306 return ret; 216 return ret;
307} 217}
308 218
309static int amba_pm_poweroff_noirq(struct device *dev)
310{
311 struct device_driver *drv = dev->driver;
312 int ret = 0;
313
314 if (!drv)
315 return 0;
316
317 if (drv->pm) {
318 if (drv->pm->poweroff_noirq)
319 ret = drv->pm->poweroff_noirq(dev);
320 }
321
322 return ret;
323}
324
325static int amba_pm_restore(struct device *dev) 219static int amba_pm_restore(struct device *dev)
326{ 220{
327 struct device_driver *drv = dev->driver; 221 struct device_driver *drv = dev->driver;
@@ -340,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
340 return ret; 234 return ret;
341} 235}
342 236
343static int amba_pm_restore_noirq(struct device *dev)
344{
345 struct device_driver *drv = dev->driver;
346 int ret = 0;
347
348 if (!drv)
349 return 0;
350
351 if (drv->pm) {
352 if (drv->pm->restore_noirq)
353 ret = drv->pm->restore_noirq(dev);
354 }
355
356 return ret;
357}
358
359#else /* !CONFIG_HIBERNATE_CALLBACKS */ 237#else /* !CONFIG_HIBERNATE_CALLBACKS */
360 238
361#define amba_pm_freeze NULL 239#define amba_pm_freeze NULL
362#define amba_pm_thaw NULL 240#define amba_pm_thaw NULL
363#define amba_pm_poweroff NULL 241#define amba_pm_poweroff NULL
364#define amba_pm_restore NULL 242#define amba_pm_restore NULL
365#define amba_pm_freeze_noirq NULL
366#define amba_pm_thaw_noirq NULL
367#define amba_pm_poweroff_noirq NULL
368#define amba_pm_restore_noirq NULL
369 243
370#endif /* !CONFIG_HIBERNATE_CALLBACKS */ 244#endif /* !CONFIG_HIBERNATE_CALLBACKS */
371 245
@@ -406,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
406#ifdef CONFIG_PM 280#ifdef CONFIG_PM
407 281
408static const struct dev_pm_ops amba_pm = { 282static const struct dev_pm_ops amba_pm = {
409 .prepare = amba_pm_prepare,
410 .complete = amba_pm_complete,
411 .suspend = amba_pm_suspend, 283 .suspend = amba_pm_suspend,
412 .resume = amba_pm_resume, 284 .resume = amba_pm_resume,
413 .freeze = amba_pm_freeze, 285 .freeze = amba_pm_freeze,
414 .thaw = amba_pm_thaw, 286 .thaw = amba_pm_thaw,
415 .poweroff = amba_pm_poweroff, 287 .poweroff = amba_pm_poweroff,
416 .restore = amba_pm_restore, 288 .restore = amba_pm_restore,
417 .suspend_noirq = amba_pm_suspend_noirq,
418 .resume_noirq = amba_pm_resume_noirq,
419 .freeze_noirq = amba_pm_freeze_noirq,
420 .thaw_noirq = amba_pm_thaw_noirq,
421 .poweroff_noirq = amba_pm_poweroff_noirq,
422 .restore_noirq = amba_pm_restore_noirq,
423 SET_RUNTIME_PM_OPS( 289 SET_RUNTIME_PM_OPS(
424 amba_pm_runtime_suspend, 290 amba_pm_runtime_suspend,
425 amba_pm_runtime_resume, 291 amba_pm_runtime_resume,
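
Every wrapper deleted from the AMBA bus was pure forwarding: check dev->driver, check drv->pm, call the matching callback. With the PM core now falling back to the driver's dev_pm_ops itself, a NULL slot in amba_pm behaves identically, so prepare/complete and all six *_noirq hooks can go. Each removed function reduced to this shape (condensed from the hunks above):

	/* condensed sketch of one removed forwarder */
	static int amba_pm_suspend_noirq(struct device *dev)
	{
		struct device_driver *drv = dev->driver;

		return (drv && drv->pm && drv->pm->suspend_noirq) ?
			drv->pm->suspend_noirq(dev) : 0;
	}
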
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3719c94be19c..26ab358dac62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
534 return 0; 534 return 0;
535 } 535 }
536 536
537 read_lock_usermodehelper();
538
537 if (WARN_ON(usermodehelper_is_disabled())) { 539 if (WARN_ON(usermodehelper_is_disabled())) {
538 dev_err(device, "firmware: %s will not be loaded\n", name); 540 dev_err(device, "firmware: %s will not be loaded\n", name);
539 retval = -EBUSY; 541 retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
572 fw_destroy_instance(fw_priv); 574 fw_destroy_instance(fw_priv);
573 575
574out: 576out:
577 read_unlock_usermodehelper();
578
575 if (retval) { 579 if (retval) {
576 release_firmware(firmware); 580 release_firmware(firmware);
577 *firmware_p = NULL; 581 *firmware_p = NULL;
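
_request_firmware() now holds the usermodehelper read lock across the whole load, so the "usermodehelper disabled" state checked by the WARN_ON cannot flip underneath it while tasks are being frozen for suspend. The pairing introduced above, condensed (lock names from include/linux/kmod.h in the diffstat):

	static int load_with_umh_lock(void)	/* condensed sketch */
	{
		int retval = 0;

		read_lock_usermodehelper();
		if (usermodehelper_is_disabled()) {
			retval = -EBUSY;	/* refuse rather than race with suspend */
			goto out;
		}
		/* ... perform the actual firmware load under the lock ... */
	out:
		read_unlock_usermodehelper();
		return retval;
	}
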
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a7c06374062e..f0c605e99ade 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
700 return ret; 700 return ret;
701} 701}
702 702
703int platform_pm_prepare(struct device *dev)
704{
705 struct device_driver *drv = dev->driver;
706 int ret = 0;
707
708 if (drv && drv->pm && drv->pm->prepare)
709 ret = drv->pm->prepare(dev);
710
711 return ret;
712}
713
714void platform_pm_complete(struct device *dev)
715{
716 struct device_driver *drv = dev->driver;
717
718 if (drv && drv->pm && drv->pm->complete)
719 drv->pm->complete(dev);
720}
721
722#endif /* CONFIG_PM_SLEEP */ 703#endif /* CONFIG_PM_SLEEP */
723 704
724#ifdef CONFIG_SUSPEND 705#ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
741 return ret; 722 return ret;
742} 723}
743 724
744int platform_pm_suspend_noirq(struct device *dev)
745{
746 struct device_driver *drv = dev->driver;
747 int ret = 0;
748
749 if (!drv)
750 return 0;
751
752 if (drv->pm) {
753 if (drv->pm->suspend_noirq)
754 ret = drv->pm->suspend_noirq(dev);
755 }
756
757 return ret;
758}
759
760int platform_pm_resume(struct device *dev) 725int platform_pm_resume(struct device *dev)
761{ 726{
762 struct device_driver *drv = dev->driver; 727 struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
775 return ret; 740 return ret;
776} 741}
777 742
778int platform_pm_resume_noirq(struct device *dev)
779{
780 struct device_driver *drv = dev->driver;
781 int ret = 0;
782
783 if (!drv)
784 return 0;
785
786 if (drv->pm) {
787 if (drv->pm->resume_noirq)
788 ret = drv->pm->resume_noirq(dev);
789 }
790
791 return ret;
792}
793
794#endif /* CONFIG_SUSPEND */ 743#endif /* CONFIG_SUSPEND */
795 744
796#ifdef CONFIG_HIBERNATE_CALLBACKS 745#ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
813 return ret; 762 return ret;
814} 763}
815 764
816int platform_pm_freeze_noirq(struct device *dev)
817{
818 struct device_driver *drv = dev->driver;
819 int ret = 0;
820
821 if (!drv)
822 return 0;
823
824 if (drv->pm) {
825 if (drv->pm->freeze_noirq)
826 ret = drv->pm->freeze_noirq(dev);
827 }
828
829 return ret;
830}
831
832int platform_pm_thaw(struct device *dev) 765int platform_pm_thaw(struct device *dev)
833{ 766{
834 struct device_driver *drv = dev->driver; 767 struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
847 return ret; 780 return ret;
848} 781}
849 782
850int platform_pm_thaw_noirq(struct device *dev)
851{
852 struct device_driver *drv = dev->driver;
853 int ret = 0;
854
855 if (!drv)
856 return 0;
857
858 if (drv->pm) {
859 if (drv->pm->thaw_noirq)
860 ret = drv->pm->thaw_noirq(dev);
861 }
862
863 return ret;
864}
865
866int platform_pm_poweroff(struct device *dev) 783int platform_pm_poweroff(struct device *dev)
867{ 784{
868 struct device_driver *drv = dev->driver; 785 struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
881 return ret; 798 return ret;
882} 799}
883 800
884int platform_pm_poweroff_noirq(struct device *dev)
885{
886 struct device_driver *drv = dev->driver;
887 int ret = 0;
888
889 if (!drv)
890 return 0;
891
892 if (drv->pm) {
893 if (drv->pm->poweroff_noirq)
894 ret = drv->pm->poweroff_noirq(dev);
895 }
896
897 return ret;
898}
899
900int platform_pm_restore(struct device *dev) 801int platform_pm_restore(struct device *dev)
901{ 802{
902 struct device_driver *drv = dev->driver; 803 struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
915 return ret; 816 return ret;
916} 817}
917 818
918int platform_pm_restore_noirq(struct device *dev)
919{
920 struct device_driver *drv = dev->driver;
921 int ret = 0;
922
923 if (!drv)
924 return 0;
925
926 if (drv->pm) {
927 if (drv->pm->restore_noirq)
928 ret = drv->pm->restore_noirq(dev);
929 }
930
931 return ret;
932}
933
934#endif /* CONFIG_HIBERNATE_CALLBACKS */ 819#endif /* CONFIG_HIBERNATE_CALLBACKS */
935 820
936static const struct dev_pm_ops platform_dev_pm_ops = { 821static const struct dev_pm_ops platform_dev_pm_ops = {
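
The platform bus gets the same treatment as the AMBA bus above: prepare/complete and the *_noirq forwarders disappear, and a driver's own noirq callbacks are reached through the PM core's fallback instead. A platform driver therefore just declares them (names below are hypothetical):

	static int foo_suspend_noirq(struct device *dev) { return 0; }
	static int foo_resume_noirq(struct device *dev) { return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend_noirq	= foo_suspend_noirq,	/* invoked by the PM core */
		.resume_noirq	= foo_resume_noirq,
	};
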
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd17900..2e58ebb1f6c0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
3obj-$(CONFIG_PM_RUNTIME) += runtime.o 3obj-$(CONFIG_PM_RUNTIME) += runtime.o
4obj-$(CONFIG_PM_TRACE_RTC) += trace.o 4obj-$(CONFIG_PM_TRACE_RTC) += trace.o
5obj-$(CONFIG_PM_OPP) += opp.o 5obj-$(CONFIG_PM_OPP) += opp.o
6obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o 6obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
7obj-$(CONFIG_HAVE_CLK) += clock_ops.o 7obj-$(CONFIG_HAVE_CLK) += clock_ops.o
8 8
9ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 9ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5a..92e6a9048065 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/export.h>
19
20#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
21({ \
22 type (*__routine)(struct device *__d); \
23 type __ret = (type)0; \
24 \
25 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \
27 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \
33 __ret; \
34})
35
36#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
37({ \
38 ktime_t __start = ktime_get(); \
39 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
40 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
41 struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
42 if (__elapsed > __gpd_data->td.field) { \
43 __gpd_data->td.field = __elapsed; \
44 dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
45 __elapsed); \
46 } \
47 __retval; \
48})
18 49
19static LIST_HEAD(gpd_list); 50static LIST_HEAD(gpd_list);
20static DEFINE_MUTEX(gpd_list_lock); 51static DEFINE_MUTEX(gpd_list_lock);
21 52
22#ifdef CONFIG_PM 53#ifdef CONFIG_PM
23 54
24static struct generic_pm_domain *dev_to_genpd(struct device *dev) 55struct generic_pm_domain *dev_to_genpd(struct device *dev)
25{ 56{
26 if (IS_ERR_OR_NULL(dev->pm_domain)) 57 if (IS_ERR_OR_NULL(dev->pm_domain))
27 return ERR_PTR(-EINVAL); 58 return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
29 return pd_to_genpd(dev->pm_domain); 60 return pd_to_genpd(dev->pm_domain);
30} 61}
31 62
63static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
64{
65 return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
66 stop_latency_ns, "stop");
67}
68
69static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
70{
71 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
72 start_latency_ns, "start");
73}
74
75static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
76{
77 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
78 save_state_latency_ns, "state save");
79}
80
81static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
82{
83 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
84 restore_state_latency_ns,
85 "state restore");
86}
87
32static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 88static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33{ 89{
34 bool ret = false; 90 bool ret = false;
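
GENPD_DEV_CALLBACK centralizes how genpd reaches device operations: it tries the domain-wide hook in genpd->dev_ops first and falls back to the per-device hook in dev_gpd_data(dev)->ops, while the timed variant also records the worst observed latency in the device's gpd_timing_data for use by the governor. Typical domain-wide wiring would look like this (function names hypothetical; pm_clk_* are the existing clock_ops helpers):

	static int foo_stop_dev(struct device *dev)
	{
		return pm_clk_suspend(dev);	/* gate the device clock */
	}

	static int foo_start_dev(struct device *dev)
	{
		return pm_clk_resume(dev);	/* ungate the device clock */
	}

	static void foo_domain_setup(struct generic_pm_domain *genpd)
	{
		genpd->dev_ops.stop = foo_stop_dev;
		genpd->dev_ops.start = foo_start_dev;
	}
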
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
145 } 201 }
146 202
147 if (genpd->power_on) { 203 if (genpd->power_on) {
204 ktime_t time_start = ktime_get();
205 s64 elapsed_ns;
206
148 ret = genpd->power_on(genpd); 207 ret = genpd->power_on(genpd);
149 if (ret) 208 if (ret)
150 goto err; 209 goto err;
210
211 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
212 if (elapsed_ns > genpd->power_on_latency_ns) {
213 genpd->power_on_latency_ns = elapsed_ns;
214 if (genpd->name)
215 pr_warning("%s: Power-on latency exceeded, "
216 "new value %lld ns\n", genpd->name,
217 elapsed_ns);
218 }
151 } 219 }
152 220
153 genpd_set_active(genpd); 221 genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
190{ 258{
191 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 259 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
192 struct device *dev = pdd->dev; 260 struct device *dev = pdd->dev;
193 struct device_driver *drv = dev->driver;
194 int ret = 0; 261 int ret = 0;
195 262
196 if (gpd_data->need_restore) 263 if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
198 265
199 mutex_unlock(&genpd->lock); 266 mutex_unlock(&genpd->lock);
200 267
201 if (drv && drv->pm && drv->pm->runtime_suspend) { 268 genpd_start_dev(genpd, dev);
202 if (genpd->start_device) 269 ret = genpd_save_dev(genpd, dev);
203 genpd->start_device(dev); 270 genpd_stop_dev(genpd, dev);
204
205 ret = drv->pm->runtime_suspend(dev);
206
207 if (genpd->stop_device)
208 genpd->stop_device(dev);
209 }
210 271
211 mutex_lock(&genpd->lock); 272 mutex_lock(&genpd->lock);
212 273
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
227{ 288{
228 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 289 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
229 struct device *dev = pdd->dev; 290 struct device *dev = pdd->dev;
230 struct device_driver *drv = dev->driver;
231 291
232 if (!gpd_data->need_restore) 292 if (!gpd_data->need_restore)
233 return; 293 return;
234 294
235 mutex_unlock(&genpd->lock); 295 mutex_unlock(&genpd->lock);
236 296
237 if (drv && drv->pm && drv->pm->runtime_resume) { 297 genpd_start_dev(genpd, dev);
238 if (genpd->start_device) 298 genpd_restore_dev(genpd, dev);
239 genpd->start_device(dev); 299 genpd_stop_dev(genpd, dev);
240
241 drv->pm->runtime_resume(dev);
242
243 if (genpd->stop_device)
244 genpd->stop_device(dev);
245 }
246 300
247 mutex_lock(&genpd->lock); 301 mutex_lock(&genpd->lock);
248 302
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
354 } 408 }
355 409
356 if (genpd->power_off) { 410 if (genpd->power_off) {
411 ktime_t time_start;
412 s64 elapsed_ns;
413
357 if (atomic_read(&genpd->sd_count) > 0) { 414 if (atomic_read(&genpd->sd_count) > 0) {
358 ret = -EBUSY; 415 ret = -EBUSY;
359 goto out; 416 goto out;
360 } 417 }
361 418
419 time_start = ktime_get();
420
362 /* 421 /*
363 * If sd_count > 0 at this point, one of the subdomains hasn't 422 * If sd_count > 0 at this point, one of the subdomains hasn't
364 * managed to call pm_genpd_poweron() for the master yet after 423 * managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
372 genpd_set_active(genpd); 431 genpd_set_active(genpd);
373 goto out; 432 goto out;
374 } 433 }
434
435 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
436 if (elapsed_ns > genpd->power_off_latency_ns) {
437 genpd->power_off_latency_ns = elapsed_ns;
438 if (genpd->name)
439 pr_warning("%s: Power-off latency exceeded, "
440 "new value %lld ns\n", genpd->name,
441 elapsed_ns);
442 }
375 } 443 }
376 444
377 genpd->status = GPD_STATE_POWER_OFF; 445 genpd->status = GPD_STATE_POWER_OFF;
446 genpd->power_off_time = ktime_get();
447
448 /* Update PM QoS information for devices in the domain. */
449 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
450 struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
451
452 pm_runtime_update_max_time_suspended(pdd->dev,
453 td->start_latency_ns +
454 td->restore_state_latency_ns +
455 genpd->power_on_latency_ns);
456 }
378 457
379 list_for_each_entry(link, &genpd->slave_links, slave_node) { 458 list_for_each_entry(link, &genpd->slave_links, slave_node) {
380 genpd_sd_counter_dec(link->master); 459 genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
413static int pm_genpd_runtime_suspend(struct device *dev) 492static int pm_genpd_runtime_suspend(struct device *dev)
414{ 493{
415 struct generic_pm_domain *genpd; 494 struct generic_pm_domain *genpd;
495 bool (*stop_ok)(struct device *__dev);
496 int ret;
416 497
417 dev_dbg(dev, "%s()\n", __func__); 498 dev_dbg(dev, "%s()\n", __func__);
418 499
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
422 503
423 might_sleep_if(!genpd->dev_irq_safe); 504 might_sleep_if(!genpd->dev_irq_safe);
424 505
425 if (genpd->stop_device) { 506 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
426 int ret = genpd->stop_device(dev); 507 if (stop_ok && !stop_ok(dev))
427 if (ret) 508 return -EBUSY;
428 return ret; 509
429 } 510 ret = genpd_stop_dev(genpd, dev);
511 if (ret)
512 return ret;
513
514 pm_runtime_update_max_time_suspended(dev,
515 dev_gpd_data(dev)->td.start_latency_ns);
430 516
431 /* 517 /*
432 * If power.irq_safe is set, this routine will be run with interrupts 518 * If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
502 mutex_unlock(&genpd->lock); 588 mutex_unlock(&genpd->lock);
503 589
504 out: 590 out:
505 if (genpd->start_device) 591 genpd_start_dev(genpd, dev);
506 genpd->start_device(dev);
507 592
508 return 0; 593 return 0;
509} 594}
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
534 619
535#ifdef CONFIG_PM_SLEEP 620#ifdef CONFIG_PM_SLEEP
536 621
622static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
623 struct device *dev)
624{
625 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
626}
627
628static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
629{
630 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
631}
632
633static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
634{
635 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
636}
637
638static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
639{
640 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
641}
642
643static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
644{
645 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
646}
647
648static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
649{
650 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
651}
652
653static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
654{
655 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
656}
657
658static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
659{
660 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
661}
662
663static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
664{
665 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
666}
667
537/** 668/**
538 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 669 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
539 * @genpd: PM domain to power off, if possible. 670 * @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
590 if (!device_can_wakeup(dev)) 721 if (!device_can_wakeup(dev))
591 return false; 722 return false;
592 723
593 active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); 724 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
594 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; 725 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
595} 726}
596 727
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
646 /* 777 /*
647 * The PM domain must be in the GPD_STATE_ACTIVE state at this point, 778 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
648 * so pm_genpd_poweron() will return immediately, but if the device 779 * so pm_genpd_poweron() will return immediately, but if the device
649 * is suspended (e.g. it's been stopped by .stop_device()), we need 780 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
650 * to make it operational. 781 * to make it operational.
651 */ 782 */
652 pm_runtime_resume(dev); 783 pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
685 if (IS_ERR(genpd)) 816 if (IS_ERR(genpd))
686 return -EINVAL; 817 return -EINVAL;
687 818
688 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); 819 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
689} 820}
690 821
691/** 822/**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
710 if (genpd->suspend_power_off) 841 if (genpd->suspend_power_off)
711 return 0; 842 return 0;
712 843
713 ret = pm_generic_suspend_noirq(dev); 844 ret = genpd_suspend_late(genpd, dev);
714 if (ret) 845 if (ret)
715 return ret; 846 return ret;
716 847
717 if (dev->power.wakeup_path 848 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
718 && genpd->active_wakeup && genpd->active_wakeup(dev))
719 return 0; 849 return 0;
720 850
721 if (genpd->stop_device) 851 genpd_stop_dev(genpd, dev);
722 genpd->stop_device(dev);
723 852
724 /* 853 /*
725 * Since all of the "noirq" callbacks are executed sequentially, it is 854 * Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
761 */ 890 */
762 pm_genpd_poweron(genpd); 891 pm_genpd_poweron(genpd);
763 genpd->suspended_count--; 892 genpd->suspended_count--;
764 if (genpd->start_device) 893 genpd_start_dev(genpd, dev);
765 genpd->start_device(dev);
766 894
767 return pm_generic_resume_noirq(dev); 895 return genpd_resume_early(genpd, dev);
768} 896}
769 897
770/** 898/**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
785 if (IS_ERR(genpd)) 913 if (IS_ERR(genpd))
786 return -EINVAL; 914 return -EINVAL;
787 915
788 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); 916 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
789} 917}
790 918
791/** 919/**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
806 if (IS_ERR(genpd)) 934 if (IS_ERR(genpd))
807 return -EINVAL; 935 return -EINVAL;
808 936
809 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); 937 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
810} 938}
811 939
812/** 940/**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
832 if (genpd->suspend_power_off) 960 if (genpd->suspend_power_off)
833 return 0; 961 return 0;
834 962
835 ret = pm_generic_freeze_noirq(dev); 963 ret = genpd_freeze_late(genpd, dev);
836 if (ret) 964 if (ret)
837 return ret; 965 return ret;
838 966
839 if (genpd->stop_device) 967 genpd_stop_dev(genpd, dev);
840 genpd->stop_device(dev);
841 968
842 return 0; 969 return 0;
843} 970}
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
864 if (genpd->suspend_power_off) 991 if (genpd->suspend_power_off)
865 return 0; 992 return 0;
866 993
867 if (genpd->start_device) 994 genpd_start_dev(genpd, dev);
868 genpd->start_device(dev);
869 995
870 return pm_generic_thaw_noirq(dev); 996 return genpd_thaw_early(genpd, dev);
871} 997}
872 998
873/** 999/**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
888 if (IS_ERR(genpd)) 1014 if (IS_ERR(genpd))
889 return -EINVAL; 1015 return -EINVAL;
890 1016
891 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); 1017 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
892}
893
894/**
895 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
896 * @dev: Device to suspend.
897 *
898 * Power off a device under the assumption that its pm_domain field points to
899 * the domain member of an object of type struct generic_pm_domain representing
900 * a PM domain consisting of I/O devices.
901 */
902static int pm_genpd_dev_poweroff(struct device *dev)
903{
904 struct generic_pm_domain *genpd;
905
906 dev_dbg(dev, "%s()\n", __func__);
907
908 genpd = dev_to_genpd(dev);
909 if (IS_ERR(genpd))
910 return -EINVAL;
911
912 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
913}
914
915/**
916 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
917 * @dev: Device to suspend.
918 *
919 * Carry out a late powering off of a device under the assumption that its
920 * pm_domain field points to the domain member of an object of type
921 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
922 */
923static int pm_genpd_dev_poweroff_noirq(struct device *dev)
924{
925 struct generic_pm_domain *genpd;
926 int ret;
927
928 dev_dbg(dev, "%s()\n", __func__);
929
930 genpd = dev_to_genpd(dev);
931 if (IS_ERR(genpd))
932 return -EINVAL;
933
934 if (genpd->suspend_power_off)
935 return 0;
936
937 ret = pm_generic_poweroff_noirq(dev);
938 if (ret)
939 return ret;
940
941 if (dev->power.wakeup_path
942 && genpd->active_wakeup && genpd->active_wakeup(dev))
943 return 0;
944
945 if (genpd->stop_device)
946 genpd->stop_device(dev);
947
948 /*
949 * Since all of the "noirq" callbacks are executed sequentially, it is
950 * guaranteed that this function will never run twice in parallel for
951 * the same PM domain, so it is not necessary to use locking here.
952 */
953 genpd->suspended_count++;
954 pm_genpd_sync_poweroff(genpd);
955
956 return 0;
957} 1018}
958 1019
959/** 1020/**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
993 1054
994 pm_genpd_poweron(genpd); 1055 pm_genpd_poweron(genpd);
995 genpd->suspended_count--; 1056 genpd->suspended_count--;
996 if (genpd->start_device) 1057 genpd_start_dev(genpd, dev);
997 genpd->start_device(dev);
998
999 return pm_generic_restore_noirq(dev);
1000}
1001
1002/**
1003 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
1004 * @dev: Device to resume.
1005 *
1006 * Restore a device under the assumption that its pm_domain field points to the
1007 * domain member of an object of type struct generic_pm_domain representing
1008 * a power domain consisting of I/O devices.
1009 */
1010static int pm_genpd_restore(struct device *dev)
1011{
1012 struct generic_pm_domain *genpd;
1013
1014 dev_dbg(dev, "%s()\n", __func__);
1015
1016 genpd = dev_to_genpd(dev);
1017 if (IS_ERR(genpd))
1018 return -EINVAL;
1019 1058
1020 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); 1059 return genpd_resume_early(genpd, dev);
1021} 1060}
1022 1061
1023/** 1062/**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
1067#define pm_genpd_freeze_noirq NULL 1106#define pm_genpd_freeze_noirq NULL
1068#define pm_genpd_thaw_noirq NULL 1107#define pm_genpd_thaw_noirq NULL
1069#define pm_genpd_thaw NULL 1108#define pm_genpd_thaw NULL
1070#define pm_genpd_dev_poweroff_noirq NULL
1071#define pm_genpd_dev_poweroff NULL
1072#define pm_genpd_restore_noirq NULL 1109#define pm_genpd_restore_noirq NULL
1073#define pm_genpd_restore NULL
1074#define pm_genpd_complete NULL 1110#define pm_genpd_complete NULL
1075 1111
1076#endif /* CONFIG_PM_SLEEP */ 1112#endif /* CONFIG_PM_SLEEP */
1077 1113
1078/** 1114/**
1079 * pm_genpd_add_device - Add a device to an I/O PM domain. 1115 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1080 * @genpd: PM domain to add the device to. 1116 * @genpd: PM domain to add the device to.
1081 * @dev: Device to be added. 1117 * @dev: Device to be added.
1118 * @td: Set of PM QoS timing parameters to attach to the device.
1082 */ 1119 */
1083int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1120int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1121 struct gpd_timing_data *td)
1084{ 1122{
1085 struct generic_pm_domain_data *gpd_data; 1123 struct generic_pm_domain_data *gpd_data;
1086 struct pm_domain_data *pdd; 1124 struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1123 gpd_data->base.dev = dev; 1161 gpd_data->base.dev = dev;
1124 gpd_data->need_restore = false; 1162 gpd_data->need_restore = false;
1125 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1163 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1164 if (td)
1165 gpd_data->td = *td;
1126 1166
1127 out: 1167 out:
1128 genpd_release_lock(genpd); 1168 genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1280} 1320}
1281 1321
1282/** 1322/**
1323 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1324 * @dev: Device to add the callbacks to.
1325 * @ops: Set of callbacks to add.
1326 * @td: Timing data to add to the device along with the callbacks (optional).
1327 */
1328int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1329 struct gpd_timing_data *td)
1330{
1331 struct pm_domain_data *pdd;
1332 int ret = 0;
1333
1334 if (!(dev && dev->power.subsys_data && ops))
1335 return -EINVAL;
1336
1337 pm_runtime_disable(dev);
1338 device_pm_lock();
1339
1340 pdd = dev->power.subsys_data->domain_data;
1341 if (pdd) {
1342 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1343
1344 gpd_data->ops = *ops;
1345 if (td)
1346 gpd_data->td = *td;
1347 } else {
1348 ret = -EINVAL;
1349 }
1350
1351 device_pm_unlock();
1352 pm_runtime_enable(dev);
1353
1354 return ret;
1355}
1356EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1357
1358/**
1359 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1360 * @dev: Device to remove the callbacks from.
1361 * @clear_td: If set, clear the device's timing data too.
1362 */
1363int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1364{
1365 struct pm_domain_data *pdd;
1366 int ret = 0;
1367
1368 if (!(dev && dev->power.subsys_data))
1369 return -EINVAL;
1370
1371 pm_runtime_disable(dev);
1372 device_pm_lock();
1373
1374 pdd = dev->power.subsys_data->domain_data;
1375 if (pdd) {
1376 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1377
1378 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1379 if (clear_td)
1380 gpd_data->td = (struct gpd_timing_data){ 0 };
1381 } else {
1382 ret = -EINVAL;
1383 }
1384
1385 device_pm_unlock();
1386 pm_runtime_enable(dev);
1387
1388 return ret;
1389}
1390EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1391
1392/* Default device callbacks for generic PM domains. */
1393
1394/**
1395 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1396 * @dev: Device to handle.
1397 */
1398static int pm_genpd_default_save_state(struct device *dev)
1399{
1400 int (*cb)(struct device *__dev);
1401 struct device_driver *drv = dev->driver;
1402
1403 cb = dev_gpd_data(dev)->ops.save_state;
1404 if (cb)
1405 return cb(dev);
1406
1407 if (drv && drv->pm && drv->pm->runtime_suspend)
1408 return drv->pm->runtime_suspend(dev);
1409
1410 return 0;
1411}
1412
1413/**
1414 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1415 * @dev: Device to handle.
1416 */
1417static int pm_genpd_default_restore_state(struct device *dev)
1418{
1419 int (*cb)(struct device *__dev);
1420 struct device_driver *drv = dev->driver;
1421
1422 cb = dev_gpd_data(dev)->ops.restore_state;
1423 if (cb)
1424 return cb(dev);
1425
1426 if (drv && drv->pm && drv->pm->runtime_resume)
1427 return drv->pm->runtime_resume(dev);
1428
1429 return 0;
1430}
1431
1432/**
1433 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1434 * @dev: Device to handle.
1435 */
1436static int pm_genpd_default_suspend(struct device *dev)
1437{
1438 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1439
1440 return cb ? cb(dev) : pm_generic_suspend(dev);
1441}
1442
1443/**
1444 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1445 * @dev: Device to handle.
1446 */
1447static int pm_genpd_default_suspend_late(struct device *dev)
1448{
1449 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1450
1451 return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
1452}
1453
1454/**
1455 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1456 * @dev: Device to handle.
1457 */
1458static int pm_genpd_default_resume_early(struct device *dev)
1459{
1460 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1461
1462 return cb ? cb(dev) : pm_generic_resume_noirq(dev);
1463}
1464
1465/**
1466 * pm_genpd_default_resume - Default "device resume" for PM domains.
1467 * @dev: Device to handle.
1468 */
1469static int pm_genpd_default_resume(struct device *dev)
1470{
1471 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1472
1473 return cb ? cb(dev) : pm_generic_resume(dev);
1474}
1475
1476/**
1477 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1478 * @dev: Device to handle.
1479 */
1480static int pm_genpd_default_freeze(struct device *dev)
1481{
1482 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1483
1484 return cb ? cb(dev) : pm_generic_freeze(dev);
1485}
1486
1487/**
1488 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1489 * @dev: Device to handle.
1490 */
1491static int pm_genpd_default_freeze_late(struct device *dev)
1492{
1493 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1494
1495 return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
1496}
1497
1498/**
1499 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1500 * @dev: Device to handle.
1501 */
1502static int pm_genpd_default_thaw_early(struct device *dev)
1503{
1504 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1505
1506 return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
1507}
1508
1509/**
1510 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
1511 * @dev: Device to handle.
1512 */
1513static int pm_genpd_default_thaw(struct device *dev)
1514{
1515 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1516
1517 return cb ? cb(dev) : pm_generic_thaw(dev);
1518}
1519
1520/**
1283 * pm_genpd_init - Initialize a generic I/O PM domain object. 1521 * pm_genpd_init - Initialize a generic I/O PM domain object.
1284 * @genpd: PM domain object to initialize. 1522 * @genpd: PM domain object to initialize.
1285 * @gov: PM domain governor to associate with the domain (may be NULL). 1523 * @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1305 genpd->resume_count = 0; 1543 genpd->resume_count = 0;
1306 genpd->device_count = 0; 1544 genpd->device_count = 0;
1307 genpd->suspended_count = 0; 1545 genpd->suspended_count = 0;
1546 genpd->max_off_time_ns = -1;
1308 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; 1547 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1309 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; 1548 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1310 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; 1549 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1317 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; 1556 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1318 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; 1557 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1319 genpd->domain.ops.thaw = pm_genpd_thaw; 1558 genpd->domain.ops.thaw = pm_genpd_thaw;
1320 genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; 1559 genpd->domain.ops.poweroff = pm_genpd_suspend;
1321 genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; 1560 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1322 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; 1561 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1323 genpd->domain.ops.restore = pm_genpd_restore; 1562 genpd->domain.ops.restore = pm_genpd_resume;
1324 genpd->domain.ops.complete = pm_genpd_complete; 1563 genpd->domain.ops.complete = pm_genpd_complete;
1564 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1565 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1566 genpd->dev_ops.suspend = pm_genpd_default_suspend;
1567 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
1568 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
1569 genpd->dev_ops.resume = pm_genpd_default_resume;
1570 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1571 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1572 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1573 genpd->dev_ops.thaw = pm_genpd_default_thaw;
1325 mutex_lock(&gpd_list_lock); 1574 mutex_lock(&gpd_list_lock);
1326 list_add(&genpd->gpd_list_node, &gpd_list); 1575 list_add(&genpd->gpd_list_node, &gpd_list);
1327 mutex_unlock(&gpd_list_lock); 1576 mutex_unlock(&gpd_list_lock);
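
Taken together, the domain.c changes let platform code attach both timing data and per-device callbacks when wiring a device into a domain. A minimal sketch of the intended use, with hypothetical foo_* names and latency values (error handling omitted for brevity):

#include <linux/pm_domain.h>

/* Device-specific state save; pm_genpd_default_save_state() runs this
 * instead of falling back to the driver's runtime_suspend callback. */
static int foo_save_state(struct device *dev)
{
	return 0;
}

static struct gpd_dev_ops foo_dev_ops = {
	.save_state = foo_save_state,
};

static struct gpd_timing_data foo_td = {
	.stop_latency_ns	  = 20000,	/* all values illustrative */
	.start_latency_ns	  = 30000,
	.save_state_latency_ns	  = 50000,
	.restore_state_latency_ns = 60000,
	.break_even_ns		  = 200000,
};

static struct generic_pm_domain foo_pd = {
	.name = "FOO-PD",
	/* .power_on and .power_off supplied by the platform */
};

static void foo_pd_setup(struct device *dev)
{
	pm_genpd_init(&foo_pd, &simple_qos_governor, false);

	/* Register the device together with its timing data... */
	__pm_genpd_add_device(&foo_pd, dev, &foo_td);

	/* ...and override the default save-state callback for it. */
	pm_genpd_add_callbacks(dev, &foo_dev_ops, NULL);
}
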
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000000..51527ee92d10
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
1/*
2 * drivers/base/power/domain_governor.c - Governors for device PM domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/pm_domain.h>
12#include <linux/pm_qos.h>
13#include <linux/hrtimer.h>
14
15/**
16 * default_stop_ok - Default PM domain governor routine for stopping devices.
17 * @dev: Device to check.
18 */
19bool default_stop_ok(struct device *dev)
20{
21 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
22
23 dev_dbg(dev, "%s()\n", __func__);
24
25 if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
26 return true;
27
28 return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
29 && td->break_even_ns < dev->power.max_time_suspended_ns;
30}
31
32/**
33 * default_power_down_ok - Default generic PM domain power off governor routine.
34 * @pd: PM domain to check.
35 *
36 * This routine must be executed under the PM domain's lock.
37 */
38static bool default_power_down_ok(struct dev_pm_domain *pd)
39{
40 struct generic_pm_domain *genpd = pd_to_genpd(pd);
41 struct gpd_link *link;
42 struct pm_domain_data *pdd;
43 s64 min_dev_off_time_ns;
44 s64 off_on_time_ns;
45 ktime_t time_now = ktime_get();
46
47 off_on_time_ns = genpd->power_off_latency_ns +
48 genpd->power_on_latency_ns;
49 /*
50 * It doesn't make sense to remove power from the domain if saving
51 * the state of all devices in it and the power off/power on operations
52 * take too much time.
53 *
54 * All devices in this domain have been stopped already at this point.
55 */
56 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
57 if (pdd->dev->driver)
58 off_on_time_ns +=
59 to_gpd_data(pdd)->td.save_state_latency_ns;
60 }
61
62 /*
63 * Check if subdomains can be off for enough time.
64 *
65 * All subdomains have been powered off already at this point.
66 */
67 list_for_each_entry(link, &genpd->master_links, master_node) {
68 struct generic_pm_domain *sd = link->slave;
69 s64 sd_max_off_ns = sd->max_off_time_ns;
70
71 if (sd_max_off_ns < 0)
72 continue;
73
74 sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
75 sd->power_off_time));
76 /*
77 * Check if the subdomain is allowed to be off long enough for
78 * the current domain to turn off and on (that's how much time
79 * it will have to wait in the worst case).
80 */
81 if (sd_max_off_ns <= off_on_time_ns)
82 return false;
83 }
84
85 /*
86 * Check if the devices in the domain can be off for enough time.
87 */
88 min_dev_off_time_ns = -1;
89 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
90 struct gpd_timing_data *td;
91 struct device *dev = pdd->dev;
92 s64 dev_off_time_ns;
93
94 if (!dev->driver || dev->power.max_time_suspended_ns < 0)
95 continue;
96
97 td = &to_gpd_data(pdd)->td;
98 dev_off_time_ns = dev->power.max_time_suspended_ns -
99 (td->start_latency_ns + td->restore_state_latency_ns +
100 ktime_to_ns(ktime_sub(time_now,
101 dev->power.suspend_time)));
102 if (dev_off_time_ns <= off_on_time_ns)
103 return false;
104
105 if (min_dev_off_time_ns > dev_off_time_ns
106 || min_dev_off_time_ns < 0)
107 min_dev_off_time_ns = dev_off_time_ns;
108 }
109
110 if (min_dev_off_time_ns < 0) {
111 /*
112 * There are no latency constraints, so the domain can spend
113 * arbitrary time in the "off" state.
114 */
115 genpd->max_off_time_ns = -1;
116 return true;
117 }
118
119 /*
120 * The difference between the computed minimum delta and the time needed
121 * to turn the domain on is the maximum theoretical time this domain can
122 * spend in the "off" state.
123 */
124 min_dev_off_time_ns -= genpd->power_on_latency_ns;
125
126 /*
127 * If the difference between the computed minimum delta and the time
128 * needed to turn the domain off and back on is smaller than the
129 * domain's power break even time, removing power from the domain is not
130 * worth it.
131 */
132 if (genpd->break_even_ns >
133 min_dev_off_time_ns - genpd->power_off_latency_ns)
134 return false;
135
136 genpd->max_off_time_ns = min_dev_off_time_ns;
137 return true;
138}
139
140struct dev_power_governor simple_qos_governor = {
141 .stop_ok = default_stop_ok,
142 .power_down_ok = default_power_down_ok,
143};
144
145static bool always_on_power_down_ok(struct dev_pm_domain *domain)
146{
147 return false;
148}
149
150/**
151 * pm_genpd_gov_always_on - A governor implementing an always-on policy
152 */
153struct dev_power_governor pm_domain_always_on_gov = {
154 .power_down_ok = always_on_power_down_ok,
155 .stop_ok = default_stop_ok,
156};
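
To make the stop_ok arithmetic concrete, consider hypothetical per-device timing data (nanosecond values chosen purely for illustration):

static struct gpd_timing_data example_td = {
	.stop_latency_ns  = 20000,	/* 20 us to stop the device */
	.start_latency_ns = 30000,	/* 30 us to restart it */
	.break_even_ns	  = 100000,	/* stopping pays off after 100 us */
};

/*
 * default_stop_ok() permits stopping this device because
 *
 *	stop_latency_ns + start_latency_ns < break_even_ns
 *	20000 + 30000 = 50000 < 100000
 *
 * provided the QoS-derived max_time_suspended_ns also exceeds
 * break_even_ns, i.e. the device is expected to stay idle long enough
 * for the stop/start overhead to be worth it.
 */
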
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
97 * @event: PM transition of the system under way. 97 * @event: PM transition of the system under way.
98 * @bool: Whether or not this is the "noirq" stage. 98 * @bool: Whether or not this is the "noirq" stage.
99 * 99 *
100 * If the device has not been suspended at run time, execute the 100 * Execute the PM callback corresponding to @event provided by the driver of
101 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and 101 * @dev, if defined, and return its error code. Return 0 if the callback is
102 * return its error code. Otherwise, return zero. 102 * not present.
103 */ 103 */
104static int __pm_generic_call(struct device *dev, int event, bool noirq) 104static int __pm_generic_call(struct device *dev, int event, bool noirq)
105{ 105{
106 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 106 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
107 int (*callback)(struct device *); 107 int (*callback)(struct device *);
108 108
109 if (!pm || pm_runtime_suspended(dev)) 109 if (!pm)
110 return 0; 110 return 0;
111 111
112 switch (event) { 112 switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
119 case PM_EVENT_HIBERNATE: 119 case PM_EVENT_HIBERNATE:
120 callback = noirq ? pm->poweroff_noirq : pm->poweroff; 120 callback = noirq ? pm->poweroff_noirq : pm->poweroff;
121 break; 121 break;
122 case PM_EVENT_RESUME:
123 callback = noirq ? pm->resume_noirq : pm->resume;
124 break;
122 case PM_EVENT_THAW: 125 case PM_EVENT_THAW:
123 callback = noirq ? pm->thaw_noirq : pm->thaw; 126 callback = noirq ? pm->thaw_noirq : pm->thaw;
124 break; 127 break;
128 case PM_EVENT_RESTORE:
129 callback = noirq ? pm->restore_noirq : pm->restore;
130 break;
125 default: 131 default:
126 callback = NULL; 132 callback = NULL;
127 break; 133 break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
211EXPORT_SYMBOL_GPL(pm_generic_thaw); 217EXPORT_SYMBOL_GPL(pm_generic_thaw);
212 218
213/** 219/**
214 * __pm_generic_resume - Generic resume/restore callback for subsystems.
215 * @dev: Device to handle.
216 * @event: PM transition of the system under way.
217 * @bool: Whether or not this is the "noirq" stage.
218 *
219 * Execute the resume/restore callback provided by the @dev's driver, if
220 * defined. If it returns 0, change the device's runtime PM status to 'active'.
221 * Return the callback's error code.
222 */
223static int __pm_generic_resume(struct device *dev, int event, bool noirq)
224{
225 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
226 int (*callback)(struct device *);
227 int ret;
228
229 if (!pm)
230 return 0;
231
232 switch (event) {
233 case PM_EVENT_RESUME:
234 callback = noirq ? pm->resume_noirq : pm->resume;
235 break;
236 case PM_EVENT_RESTORE:
237 callback = noirq ? pm->restore_noirq : pm->restore;
238 break;
239 default:
240 callback = NULL;
241 break;
242 }
243
244 if (!callback)
245 return 0;
246
247 ret = callback(dev);
248 if (!ret && !noirq && pm_runtime_enabled(dev)) {
249 pm_runtime_disable(dev);
250 pm_runtime_set_active(dev);
251 pm_runtime_enable(dev);
252 }
253
254 return ret;
255}
256
257/**
258 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. 220 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
259 * @dev: Device to resume. 221 * @dev: Device to resume.
260 */ 222 */
261int pm_generic_resume_noirq(struct device *dev) 223int pm_generic_resume_noirq(struct device *dev)
262{ 224{
263 return __pm_generic_resume(dev, PM_EVENT_RESUME, true); 225 return __pm_generic_call(dev, PM_EVENT_RESUME, true);
264} 226}
265EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); 227EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
266 228
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
270 */ 232 */
271int pm_generic_resume(struct device *dev) 233int pm_generic_resume(struct device *dev)
272{ 234{
273 return __pm_generic_resume(dev, PM_EVENT_RESUME, false); 235 return __pm_generic_call(dev, PM_EVENT_RESUME, false);
274} 236}
275EXPORT_SYMBOL_GPL(pm_generic_resume); 237EXPORT_SYMBOL_GPL(pm_generic_resume);
276 238
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
280 */ 242 */
281int pm_generic_restore_noirq(struct device *dev) 243int pm_generic_restore_noirq(struct device *dev)
282{ 244{
283 return __pm_generic_resume(dev, PM_EVENT_RESTORE, true); 245 return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
284} 246}
285EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); 247EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
286 248
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
290 */ 252 */
291int pm_generic_restore(struct device *dev) 253int pm_generic_restore(struct device *dev)
292{ 254{
293 return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); 255 return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
294} 256}
295EXPORT_SYMBOL_GPL(pm_generic_restore); 257EXPORT_SYMBOL_GPL(pm_generic_restore);
296 258
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
314 pm_runtime_idle(dev); 276 pm_runtime_idle(dev);
315} 277}
316#endif /* CONFIG_PM_SLEEP */ 278#endif /* CONFIG_PM_SLEEP */
317
318struct dev_pm_ops generic_subsys_pm_ops = {
319#ifdef CONFIG_PM_SLEEP
320 .prepare = pm_generic_prepare,
321 .suspend = pm_generic_suspend,
322 .suspend_noirq = pm_generic_suspend_noirq,
323 .resume = pm_generic_resume,
324 .resume_noirq = pm_generic_resume_noirq,
325 .freeze = pm_generic_freeze,
326 .freeze_noirq = pm_generic_freeze_noirq,
327 .thaw = pm_generic_thaw,
328 .thaw_noirq = pm_generic_thaw_noirq,
329 .poweroff = pm_generic_poweroff,
330 .poweroff_noirq = pm_generic_poweroff_noirq,
331 .restore = pm_generic_restore,
332 .restore_noirq = pm_generic_restore_noirq,
333 .complete = pm_generic_complete,
334#endif
335#ifdef CONFIG_PM_RUNTIME
336 .runtime_suspend = pm_generic_runtime_suspend,
337 .runtime_resume = pm_generic_runtime_resume,
338 .runtime_idle = pm_generic_runtime_idle,
339#endif
340};
341EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
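
With generic_subsys_pm_ops gone, a subsystem that only forwarded to the pm_generic_* helpers can leave its dev_pm_ops out entirely and rely on the PM core's new driver fallback (see the main.c changes below). A driver-side sketch with hypothetical foo_* callbacks:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* now invoked directly by the core */
	},
};
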
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
32#include "../base.h" 32#include "../base.h"
33#include "power.h" 33#include "power.h"
34 34
35typedef int (*pm_callback_t)(struct device *);
36
35/* 37/*
36 * The entries in the dpm_list list are in a depth first order, simply 38 * The entries in the dpm_list list are in a depth first order, simply
37 * because children are guaranteed to be discovered after parents, and 39 * because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
164 ktime_t calltime = ktime_set(0, 0); 166 ktime_t calltime = ktime_set(0, 0);
165 167
166 if (initcall_debug) { 168 if (initcall_debug) {
167 pr_info("calling %s+ @ %i\n", 169 pr_info("calling %s+ @ %i, parent: %s\n",
168 dev_name(dev), task_pid_nr(current)); 170 dev_name(dev), task_pid_nr(current),
171 dev->parent ? dev_name(dev->parent) : "none");
169 calltime = ktime_get(); 172 calltime = ktime_get();
170 } 173 }
171 174
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
211} 214}
212 215
213/** 216/**
214 * pm_op - Execute the PM operation appropriate for given PM event. 217 * pm_op - Return the PM operation appropriate for given PM event.
215 * @dev: Device to handle.
216 * @ops: PM operations to choose from. 218 * @ops: PM operations to choose from.
217 * @state: PM transition of the system being carried out. 219 * @state: PM transition of the system being carried out.
218 */ 220 */
219static int pm_op(struct device *dev, 221static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
220 const struct dev_pm_ops *ops,
221 pm_message_t state)
222{ 222{
223 int error = 0;
224 ktime_t calltime;
225
226 calltime = initcall_debug_start(dev);
227
228 switch (state.event) { 223 switch (state.event) {
229#ifdef CONFIG_SUSPEND 224#ifdef CONFIG_SUSPEND
230 case PM_EVENT_SUSPEND: 225 case PM_EVENT_SUSPEND:
231 if (ops->suspend) { 226 return ops->suspend;
232 error = ops->suspend(dev);
233 suspend_report_result(ops->suspend, error);
234 }
235 break;
236 case PM_EVENT_RESUME: 227 case PM_EVENT_RESUME:
237 if (ops->resume) { 228 return ops->resume;
238 error = ops->resume(dev);
239 suspend_report_result(ops->resume, error);
240 }
241 break;
242#endif /* CONFIG_SUSPEND */ 229#endif /* CONFIG_SUSPEND */
243#ifdef CONFIG_HIBERNATE_CALLBACKS 230#ifdef CONFIG_HIBERNATE_CALLBACKS
244 case PM_EVENT_FREEZE: 231 case PM_EVENT_FREEZE:
245 case PM_EVENT_QUIESCE: 232 case PM_EVENT_QUIESCE:
246 if (ops->freeze) { 233 return ops->freeze;
247 error = ops->freeze(dev);
248 suspend_report_result(ops->freeze, error);
249 }
250 break;
251 case PM_EVENT_HIBERNATE: 234 case PM_EVENT_HIBERNATE:
252 if (ops->poweroff) { 235 return ops->poweroff;
253 error = ops->poweroff(dev);
254 suspend_report_result(ops->poweroff, error);
255 }
256 break;
257 case PM_EVENT_THAW: 236 case PM_EVENT_THAW:
258 case PM_EVENT_RECOVER: 237 case PM_EVENT_RECOVER:
259 if (ops->thaw) { 238 return ops->thaw;
260 error = ops->thaw(dev);
261 suspend_report_result(ops->thaw, error);
262 }
263 break; 239 break;
264 case PM_EVENT_RESTORE: 240 case PM_EVENT_RESTORE:
265 if (ops->restore) { 241 return ops->restore;
266 error = ops->restore(dev);
267 suspend_report_result(ops->restore, error);
268 }
269 break;
270#endif /* CONFIG_HIBERNATE_CALLBACKS */ 242#endif /* CONFIG_HIBERNATE_CALLBACKS */
271 default:
272 error = -EINVAL;
273 } 243 }
274 244
275 initcall_debug_report(dev, calltime, error); 245 return NULL;
276
277 return error;
278} 246}
279 247
280/** 248/**
281 * pm_noirq_op - Execute the PM operation appropriate for given PM event. 249 * pm_noirq_op - Return the PM operation appropriate for given PM event.
282 * @dev: Device to handle.
283 * @ops: PM operations to choose from. 250 * @ops: PM operations to choose from.
284 * @state: PM transition of the system being carried out. 251 * @state: PM transition of the system being carried out.
285 * 252 *
286 * The driver of @dev will not receive interrupts while this function is being 253 * The driver of @dev will not receive interrupts while this function is being
287 * executed. 254 * executed.
288 */ 255 */
289static int pm_noirq_op(struct device *dev, 256static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
290 const struct dev_pm_ops *ops,
291 pm_message_t state)
292{ 257{
293 int error = 0;
294 ktime_t calltime = ktime_set(0, 0), delta, rettime;
295
296 if (initcall_debug) {
297 pr_info("calling %s+ @ %i, parent: %s\n",
298 dev_name(dev), task_pid_nr(current),
299 dev->parent ? dev_name(dev->parent) : "none");
300 calltime = ktime_get();
301 }
302
303 switch (state.event) { 258 switch (state.event) {
304#ifdef CONFIG_SUSPEND 259#ifdef CONFIG_SUSPEND
305 case PM_EVENT_SUSPEND: 260 case PM_EVENT_SUSPEND:
306 if (ops->suspend_noirq) { 261 return ops->suspend_noirq;
307 error = ops->suspend_noirq(dev);
308 suspend_report_result(ops->suspend_noirq, error);
309 }
310 break;
311 case PM_EVENT_RESUME: 262 case PM_EVENT_RESUME:
312 if (ops->resume_noirq) { 263 return ops->resume_noirq;
313 error = ops->resume_noirq(dev);
314 suspend_report_result(ops->resume_noirq, error);
315 }
316 break;
317#endif /* CONFIG_SUSPEND */ 264#endif /* CONFIG_SUSPEND */
318#ifdef CONFIG_HIBERNATE_CALLBACKS 265#ifdef CONFIG_HIBERNATE_CALLBACKS
319 case PM_EVENT_FREEZE: 266 case PM_EVENT_FREEZE:
320 case PM_EVENT_QUIESCE: 267 case PM_EVENT_QUIESCE:
321 if (ops->freeze_noirq) { 268 return ops->freeze_noirq;
322 error = ops->freeze_noirq(dev);
323 suspend_report_result(ops->freeze_noirq, error);
324 }
325 break;
326 case PM_EVENT_HIBERNATE: 269 case PM_EVENT_HIBERNATE:
327 if (ops->poweroff_noirq) { 270 return ops->poweroff_noirq;
328 error = ops->poweroff_noirq(dev);
329 suspend_report_result(ops->poweroff_noirq, error);
330 }
331 break;
332 case PM_EVENT_THAW: 271 case PM_EVENT_THAW:
333 case PM_EVENT_RECOVER: 272 case PM_EVENT_RECOVER:
334 if (ops->thaw_noirq) { 273 return ops->thaw_noirq;
335 error = ops->thaw_noirq(dev);
336 suspend_report_result(ops->thaw_noirq, error);
337 }
338 break;
339 case PM_EVENT_RESTORE: 274 case PM_EVENT_RESTORE:
340 if (ops->restore_noirq) { 275 return ops->restore_noirq;
341 error = ops->restore_noirq(dev);
342 suspend_report_result(ops->restore_noirq, error);
343 }
344 break;
345#endif /* CONFIG_HIBERNATE_CALLBACKS */ 276#endif /* CONFIG_HIBERNATE_CALLBACKS */
346 default:
347 error = -EINVAL;
348 }
349
350 if (initcall_debug) {
351 rettime = ktime_get();
352 delta = ktime_sub(rettime, calltime);
353 printk("initcall %s_i+ returned %d after %Ld usecs\n",
354 dev_name(dev), error,
355 (unsigned long long)ktime_to_ns(delta) >> 10);
356 } 277 }
357 278
358 return error; 279 return NULL;
359} 280}
360 281
361static char *pm_verb(int event) 282static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
413 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 334 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
414} 335}
415 336
337static int dpm_run_callback(pm_callback_t cb, struct device *dev,
338 pm_message_t state, char *info)
339{
340 ktime_t calltime;
341 int error;
342
343 if (!cb)
344 return 0;
345
346 calltime = initcall_debug_start(dev);
347
348 pm_dev_dbg(dev, state, info);
349 error = cb(dev);
350 suspend_report_result(cb, error);
351
352 initcall_debug_report(dev, calltime, error);
353
354 return error;
355}
356
416/*------------------------- Resume routines -------------------------*/ 357/*------------------------- Resume routines -------------------------*/
417 358
418/** 359/**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
425 */ 366 */
426static int device_resume_noirq(struct device *dev, pm_message_t state) 367static int device_resume_noirq(struct device *dev, pm_message_t state)
427{ 368{
369 pm_callback_t callback = NULL;
370 char *info = NULL;
428 int error = 0; 371 int error = 0;
429 372
430 TRACE_DEVICE(dev); 373 TRACE_DEVICE(dev);
431 TRACE_RESUME(0); 374 TRACE_RESUME(0);
432 375
433 if (dev->pm_domain) { 376 if (dev->pm_domain) {
434 pm_dev_dbg(dev, state, "EARLY power domain "); 377 info = "EARLY power domain ";
435 error = pm_noirq_op(dev, &dev->pm_domain->ops, state); 378 callback = pm_noirq_op(&dev->pm_domain->ops, state);
436 } else if (dev->type && dev->type->pm) { 379 } else if (dev->type && dev->type->pm) {
437 pm_dev_dbg(dev, state, "EARLY type "); 380 info = "EARLY type ";
438 error = pm_noirq_op(dev, dev->type->pm, state); 381 callback = pm_noirq_op(dev->type->pm, state);
439 } else if (dev->class && dev->class->pm) { 382 } else if (dev->class && dev->class->pm) {
440 pm_dev_dbg(dev, state, "EARLY class "); 383 info = "EARLY class ";
441 error = pm_noirq_op(dev, dev->class->pm, state); 384 callback = pm_noirq_op(dev->class->pm, state);
442 } else if (dev->bus && dev->bus->pm) { 385 } else if (dev->bus && dev->bus->pm) {
443 pm_dev_dbg(dev, state, "EARLY "); 386 info = "EARLY bus ";
444 error = pm_noirq_op(dev, dev->bus->pm, state); 387 callback = pm_noirq_op(dev->bus->pm, state);
445 } 388 }
446 389
390 if (!callback && dev->driver && dev->driver->pm) {
391 info = "EARLY driver ";
392 callback = pm_noirq_op(dev->driver->pm, state);
393 }
394
395 error = dpm_run_callback(callback, dev, state, info);
396
447 TRACE_RESUME(error); 397 TRACE_RESUME(error);
448 return error; 398 return error;
449} 399}
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
486EXPORT_SYMBOL_GPL(dpm_resume_noirq); 436EXPORT_SYMBOL_GPL(dpm_resume_noirq);
487 437
488/** 438/**
489 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
490 * @dev: Device to resume.
491 * @cb: Resume callback to execute.
492 */
493static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
494{
495 int error;
496 ktime_t calltime;
497
498 calltime = initcall_debug_start(dev);
499
500 error = cb(dev);
501 suspend_report_result(cb, error);
502
503 initcall_debug_report(dev, calltime, error);
504
505 return error;
506}
507
508/**
509 * device_resume - Execute "resume" callbacks for given device. 439 * device_resume - Execute "resume" callbacks for given device.
510 * @dev: Device to handle. 440 * @dev: Device to handle.
511 * @state: PM transition of the system being carried out. 441 * @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
513 */ 443 */
514static int device_resume(struct device *dev, pm_message_t state, bool async) 444static int device_resume(struct device *dev, pm_message_t state, bool async)
515{ 445{
446 pm_callback_t callback = NULL;
447 char *info = NULL;
516 int error = 0; 448 int error = 0;
517 bool put = false; 449 bool put = false;
518 450
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
535 put = true; 467 put = true;
536 468
537 if (dev->pm_domain) { 469 if (dev->pm_domain) {
538 pm_dev_dbg(dev, state, "power domain "); 470 info = "power domain ";
539 error = pm_op(dev, &dev->pm_domain->ops, state); 471 callback = pm_op(&dev->pm_domain->ops, state);
540 goto End; 472 goto Driver;
541 } 473 }
542 474
543 if (dev->type && dev->type->pm) { 475 if (dev->type && dev->type->pm) {
544 pm_dev_dbg(dev, state, "type "); 476 info = "type ";
545 error = pm_op(dev, dev->type->pm, state); 477 callback = pm_op(dev->type->pm, state);
546 goto End; 478 goto Driver;
547 } 479 }
548 480
549 if (dev->class) { 481 if (dev->class) {
550 if (dev->class->pm) { 482 if (dev->class->pm) {
551 pm_dev_dbg(dev, state, "class "); 483 info = "class ";
552 error = pm_op(dev, dev->class->pm, state); 484 callback = pm_op(dev->class->pm, state);
553 goto End; 485 goto Driver;
554 } else if (dev->class->resume) { 486 } else if (dev->class->resume) {
555 pm_dev_dbg(dev, state, "legacy class "); 487 info = "legacy class ";
556 error = legacy_resume(dev, dev->class->resume); 488 callback = dev->class->resume;
557 goto End; 489 goto End;
558 } 490 }
559 } 491 }
560 492
561 if (dev->bus) { 493 if (dev->bus) {
562 if (dev->bus->pm) { 494 if (dev->bus->pm) {
563 pm_dev_dbg(dev, state, ""); 495 info = "bus ";
564 error = pm_op(dev, dev->bus->pm, state); 496 callback = pm_op(dev->bus->pm, state);
565 } else if (dev->bus->resume) { 497 } else if (dev->bus->resume) {
566 pm_dev_dbg(dev, state, "legacy "); 498 info = "legacy bus ";
567 error = legacy_resume(dev, dev->bus->resume); 499 callback = dev->bus->resume;
500 goto End;
568 } 501 }
569 } 502 }
570 503
504 Driver:
505 if (!callback && dev->driver && dev->driver->pm) {
506 info = "driver ";
507 callback = pm_op(dev->driver->pm, state);
508 }
509
571 End: 510 End:
511 error = dpm_run_callback(callback, dev, state, info);
572 dev->power.is_suspended = false; 512 dev->power.is_suspended = false;
573 513
574 Unlock: 514 Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
660 */ 600 */
661static void device_complete(struct device *dev, pm_message_t state) 601static void device_complete(struct device *dev, pm_message_t state)
662{ 602{
603 void (*callback)(struct device *) = NULL;
604 char *info = NULL;
605
663 device_lock(dev); 606 device_lock(dev);
664 607
665 if (dev->pm_domain) { 608 if (dev->pm_domain) {
666 pm_dev_dbg(dev, state, "completing power domain "); 609 info = "completing power domain ";
667 if (dev->pm_domain->ops.complete) 610 callback = dev->pm_domain->ops.complete;
668 dev->pm_domain->ops.complete(dev);
669 } else if (dev->type && dev->type->pm) { 611 } else if (dev->type && dev->type->pm) {
670 pm_dev_dbg(dev, state, "completing type "); 612 info = "completing type ";
671 if (dev->type->pm->complete) 613 callback = dev->type->pm->complete;
672 dev->type->pm->complete(dev);
673 } else if (dev->class && dev->class->pm) { 614 } else if (dev->class && dev->class->pm) {
674 pm_dev_dbg(dev, state, "completing class "); 615 info = "completing class ";
675 if (dev->class->pm->complete) 616 callback = dev->class->pm->complete;
676 dev->class->pm->complete(dev);
677 } else if (dev->bus && dev->bus->pm) { 617 } else if (dev->bus && dev->bus->pm) {
678 pm_dev_dbg(dev, state, "completing "); 618 info = "completing bus ";
679 if (dev->bus->pm->complete) 619 callback = dev->bus->pm->complete;
680 dev->bus->pm->complete(dev); 620 }
621
622 if (!callback && dev->driver && dev->driver->pm) {
623 info = "completing driver ";
624 callback = dev->driver->pm->complete;
625 }
626
627 if (callback) {
628 pm_dev_dbg(dev, state, info);
629 callback(dev);
681 } 630 }
682 631
683 device_unlock(dev); 632 device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
763 */ 712 */
764static int device_suspend_noirq(struct device *dev, pm_message_t state) 713static int device_suspend_noirq(struct device *dev, pm_message_t state)
765{ 714{
766 int error; 715 pm_callback_t callback = NULL;
716 char *info = NULL;
767 717
768 if (dev->pm_domain) { 718 if (dev->pm_domain) {
769 pm_dev_dbg(dev, state, "LATE power domain "); 719 info = "LATE power domain ";
770 error = pm_noirq_op(dev, &dev->pm_domain->ops, state); 720 callback = pm_noirq_op(&dev->pm_domain->ops, state);
771 if (error)
772 return error;
773 } else if (dev->type && dev->type->pm) { 721 } else if (dev->type && dev->type->pm) {
774 pm_dev_dbg(dev, state, "LATE type "); 722 info = "LATE type ";
775 error = pm_noirq_op(dev, dev->type->pm, state); 723 callback = pm_noirq_op(dev->type->pm, state);
776 if (error)
777 return error;
778 } else if (dev->class && dev->class->pm) { 724 } else if (dev->class && dev->class->pm) {
779 pm_dev_dbg(dev, state, "LATE class "); 725 info = "LATE class ";
780 error = pm_noirq_op(dev, dev->class->pm, state); 726 callback = pm_noirq_op(dev->class->pm, state);
781 if (error)
782 return error;
783 } else if (dev->bus && dev->bus->pm) { 727 } else if (dev->bus && dev->bus->pm) {
784 pm_dev_dbg(dev, state, "LATE "); 728 info = "LATE bus ";
785 error = pm_noirq_op(dev, dev->bus->pm, state); 729 callback = pm_noirq_op(dev->bus->pm, state);
786 if (error)
787 return error;
788 } 730 }
789 731
790 return 0; 732 if (!callback && dev->driver && dev->driver->pm) {
733 info = "LATE driver ";
734 callback = pm_noirq_op(dev->driver->pm, state);
735 }
736
737 return dpm_run_callback(callback, dev, state, info);
791} 738}
792 739
793/** 740/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
864 */ 811 */
865static int __device_suspend(struct device *dev, pm_message_t state, bool async) 812static int __device_suspend(struct device *dev, pm_message_t state, bool async)
866{ 813{
814 pm_callback_t callback = NULL;
815 char *info = NULL;
867 int error = 0; 816 int error = 0;
868 817
869 dpm_wait_for_children(dev, async); 818 dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
884 device_lock(dev); 833 device_lock(dev);
885 834
886 if (dev->pm_domain) { 835 if (dev->pm_domain) {
887 pm_dev_dbg(dev, state, "power domain "); 836 info = "power domain ";
888 error = pm_op(dev, &dev->pm_domain->ops, state); 837 callback = pm_op(&dev->pm_domain->ops, state);
889 goto End; 838 goto Run;
890 } 839 }
891 840
892 if (dev->type && dev->type->pm) { 841 if (dev->type && dev->type->pm) {
893 pm_dev_dbg(dev, state, "type "); 842 info = "type ";
894 error = pm_op(dev, dev->type->pm, state); 843 callback = pm_op(dev->type->pm, state);
895 goto End; 844 goto Run;
896 } 845 }
897 846
898 if (dev->class) { 847 if (dev->class) {
899 if (dev->class->pm) { 848 if (dev->class->pm) {
900 pm_dev_dbg(dev, state, "class "); 849 info = "class ";
901 error = pm_op(dev, dev->class->pm, state); 850 callback = pm_op(dev->class->pm, state);
902 goto End; 851 goto Run;
903 } else if (dev->class->suspend) { 852 } else if (dev->class->suspend) {
904 pm_dev_dbg(dev, state, "legacy class "); 853 pm_dev_dbg(dev, state, "legacy class ");
905 error = legacy_suspend(dev, state, dev->class->suspend); 854 error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
909 858
910 if (dev->bus) { 859 if (dev->bus) {
911 if (dev->bus->pm) { 860 if (dev->bus->pm) {
912 pm_dev_dbg(dev, state, ""); 861 info = "bus ";
913 error = pm_op(dev, dev->bus->pm, state); 862 callback = pm_op(dev->bus->pm, state);
914 } else if (dev->bus->suspend) { 863 } else if (dev->bus->suspend) {
915 pm_dev_dbg(dev, state, "legacy "); 864 pm_dev_dbg(dev, state, "legacy bus ");
916 error = legacy_suspend(dev, state, dev->bus->suspend); 865 error = legacy_suspend(dev, state, dev->bus->suspend);
866 goto End;
917 } 867 }
918 } 868 }
919 869
870 Run:
871 if (!callback && dev->driver && dev->driver->pm) {
872 info = "driver ";
873 callback = pm_op(dev->driver->pm, state);
874 }
875
876 error = dpm_run_callback(callback, dev, state, info);
877
920 End: 878 End:
921 if (!error) { 879 if (!error) {
922 dev->power.is_suspended = true; 880 dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
1022 */ 980 */
1023static int device_prepare(struct device *dev, pm_message_t state) 981static int device_prepare(struct device *dev, pm_message_t state)
1024{ 982{
983 int (*callback)(struct device *) = NULL;
984 char *info = NULL;
1025 int error = 0; 985 int error = 0;
1026 986
1027 device_lock(dev); 987 device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
1029 dev->power.wakeup_path = device_may_wakeup(dev); 989 dev->power.wakeup_path = device_may_wakeup(dev);
1030 990
1031 if (dev->pm_domain) { 991 if (dev->pm_domain) {
1032 pm_dev_dbg(dev, state, "preparing power domain "); 992 info = "preparing power domain ";
1033 if (dev->pm_domain->ops.prepare) 993 callback = dev->pm_domain->ops.prepare;
1034 error = dev->pm_domain->ops.prepare(dev);
1035 suspend_report_result(dev->pm_domain->ops.prepare, error);
1036 if (error)
1037 goto End;
1038 } else if (dev->type && dev->type->pm) { 994 } else if (dev->type && dev->type->pm) {
1039 pm_dev_dbg(dev, state, "preparing type "); 995 info = "preparing type ";
1040 if (dev->type->pm->prepare) 996 callback = dev->type->pm->prepare;
1041 error = dev->type->pm->prepare(dev);
1042 suspend_report_result(dev->type->pm->prepare, error);
1043 if (error)
1044 goto End;
1045 } else if (dev->class && dev->class->pm) { 997 } else if (dev->class && dev->class->pm) {
1046 pm_dev_dbg(dev, state, "preparing class "); 998 info = "preparing class ";
1047 if (dev->class->pm->prepare) 999 callback = dev->class->pm->prepare;
1048 error = dev->class->pm->prepare(dev);
1049 suspend_report_result(dev->class->pm->prepare, error);
1050 if (error)
1051 goto End;
1052 } else if (dev->bus && dev->bus->pm) { 1000 } else if (dev->bus && dev->bus->pm) {
1053 pm_dev_dbg(dev, state, "preparing "); 1001 info = "preparing bus ";
1054 if (dev->bus->pm->prepare) 1002 callback = dev->bus->pm->prepare;
1055 error = dev->bus->pm->prepare(dev); 1003 }
1056 suspend_report_result(dev->bus->pm->prepare, error); 1004
1005 if (!callback && dev->driver && dev->driver->pm) {
1006 info = "preparing driver ";
1007 callback = dev->driver->pm->prepare;
1008 }
1009
1010 if (callback) {
1011 error = callback(dev);
1012 suspend_report_result(callback, error);
1057 } 1013 }
1058 1014
1059 End:
1060 device_unlock(dev); 1015 device_unlock(dev);
1061 1016
1062 return error; 1017 return error;
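
The net effect of the main.c rework is a single callback-selection order shared by every phase. Condensed into one routine (a sketch, not the literal kernel code: the real routines inline these steps, use pm_noirq_op() in the noirq phases, and still handle the legacy class/bus callbacks separately):

static pm_callback_t pick_callback(struct device *dev, pm_message_t state)
{
	pm_callback_t cb = NULL;

	if (dev->pm_domain)			/* 1. PM domain */
		cb = pm_op(&dev->pm_domain->ops, state);
	else if (dev->type && dev->type->pm)	/* 2. device type */
		cb = pm_op(dev->type->pm, state);
	else if (dev->class && dev->class->pm)	/* 3. device class */
		cb = pm_op(dev->class->pm, state);
	else if (dev->bus && dev->bus->pm)	/* 4. bus type */
		cb = pm_op(dev->bus->pm, state);

	if (!cb && dev->driver && dev->driver->pm)	/* 5. driver fallback */
		cb = pm_op(dev->driver->pm, state);

	return cb;	/* dpm_run_callback() tolerates cb == NULL */
}
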
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c50fc41..c5d358837461 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
47static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 47static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
48 48
49/** 49/**
50 * dev_pm_qos_read_value - Get PM QoS constraint for a given device. 50 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
51 * @dev: Device to get the PM QoS constraint value for.
52 *
53 * This routine must be called with dev->power.lock held.
54 */
55s32 __dev_pm_qos_read_value(struct device *dev)
56{
57 struct pm_qos_constraints *c = dev->power.constraints;
58
59 return c ? pm_qos_read_value(c) : 0;
60}
61
62/**
63 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
51 * @dev: Device to get the PM QoS constraint value for. 64 * @dev: Device to get the PM QoS constraint value for.
52 */ 65 */
53s32 dev_pm_qos_read_value(struct device *dev) 66s32 dev_pm_qos_read_value(struct device *dev)
54{ 67{
55 struct pm_qos_constraints *c;
56 unsigned long flags; 68 unsigned long flags;
57 s32 ret = 0; 69 s32 ret;
58 70
59 spin_lock_irqsave(&dev->power.lock, flags); 71 spin_lock_irqsave(&dev->power.lock, flags);
60 72 ret = __dev_pm_qos_read_value(dev);
61 c = dev->power.constraints;
62 if (c)
63 ret = pm_qos_read_value(c);
64
65 spin_unlock_irqrestore(&dev->power.lock, flags); 73 spin_unlock_irqrestore(&dev->power.lock, flags);
66 74
67 return ret; 75 return ret;
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
 	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value)
+{
+	struct device *ancestor = dev->parent;
+	int error = -ENODEV;
+
+	while (ancestor && !ancestor->power.ignore_children)
+		ancestor = ancestor->parent;
+
+	if (ancestor)
+		error = dev_pm_qos_add_request(ancestor, req, value);
+
+	if (error)
+		req->dev = NULL;
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
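The new helper attaches the request not to the device itself but to the closest ancestor whose power.ignore_children flag is set, i.e. the one that stays active while its children are runtime-suspended; on failure it clears req->dev, so callers can use that field to tell whether the request is live. A condensed sketch of the intended usage pattern (the st1232 hunk later in this diff does the same; names here are hypothetical):

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request foo_req;

    /* Ask the nearest power-managing ancestor for a 100 us latency bound
     * while the device is actively in use. */
    static void foo_activity_start(struct device *dev)
    {
    	if (!foo_req.dev)
    		dev_pm_qos_add_ancestor_request(dev, &foo_req, 100);
    }

    static void foo_activity_stop(void)
    {
    	if (foo_req.dev)
    		dev_pm_qos_remove_request(&foo_req);
    	foo_req.dev = NULL;
    }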
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..541f821d4ea6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_idle;
+
 	if (callback)
 		__rpm_callback(callback, dev);
 
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 	return retval != -EACCES ? retval : -EIO;
 }
 
+struct rpm_qos_data {
+	ktime_t time_now;
+	s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+	struct rpm_qos_data *qos = data;
+	unsigned long flags;
+	s64 delta_ns;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (dev->power.max_time_suspended_ns < 0)
+		goto out;
+
+	delta_ns = dev->power.max_time_suspended_ns -
+		ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+	if (delta_ns <= 0) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+		qos->constraint_ns = delta_ns;
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
 /**
  * rpm_suspend - Carry out runtime suspend of given device.
  * @dev: Device to suspend.
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
 	struct device *parent = NULL;
+	struct rpm_qos_data qos;
 	int retval;
 
 	trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
+	qos.constraint_ns = __dev_pm_qos_read_value(dev);
+	if (qos.constraint_ns < 0) {
+		/* Negative constraint means "never suspend". */
+		retval = -EPERM;
+		goto out;
+	}
+	qos.constraint_ns *= NSEC_PER_USEC;
+	qos.time_now = ktime_get();
+
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
+	if (!dev->power.ignore_children) {
+		if (dev->power.irq_safe)
+			spin_unlock(&dev->power.lock);
+		else
+			spin_unlock_irq(&dev->power.lock);
+
+		retval = device_for_each_child(dev, &qos,
+					       rpm_update_qos_constraint);
+
+		if (dev->power.irq_safe)
+			spin_lock(&dev->power.lock);
+		else
+			spin_lock_irq(&dev->power.lock);
+
+		if (retval)
+			goto fail;
+	}
+
+	dev->power.suspend_time = qos.time_now;
+	dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_suspend;
+
 	retval = rpm_callback(callback, dev);
-	if (retval) {
-		__update_runtime_status(dev, RPM_ACTIVE);
-		dev->power.deferred_resume = false;
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			dev->power.runtime_error = 0;
+	if (retval)
+		goto fail;
 
-			/*
-			 * If the callback routine failed an autosuspend, and
-			 * if the last_busy time has been updated so that there
-			 * is a new autosuspend expiration time, automatically
-			 * reschedule another autosuspend.
-			 */
-			if ((rpmflags & RPM_AUTO) &&
-			    pm_runtime_autosuspend_expiration(dev) != 0)
-				goto repeat;
-		} else {
-			pm_runtime_cancel_pending(dev);
-		}
-		wake_up_all(&dev->power.wait_queue);
-		goto out;
-	}
  no_callback:
 	__update_runtime_status(dev, RPM_SUSPENDED);
 	pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	trace_rpm_return_int(dev, _THIS_IP_, retval);
 
 	return retval;
+
+ fail:
+	__update_runtime_status(dev, RPM_ACTIVE);
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+	dev->power.deferred_resume = false;
+	if (retval == -EAGAIN || retval == -EBUSY) {
+		dev->power.runtime_error = 0;
+
+		/*
+		 * If the callback routine failed an autosuspend, and
+		 * if the last_busy time has been updated so that there
+		 * is a new autosuspend expiration time, automatically
+		 * reschedule another autosuspend.
+		 */
+		if ((rpmflags & RPM_AUTO) &&
+		    pm_runtime_autosuspend_expiration(dev) != 0)
+			goto repeat;
+	} else {
+		pm_runtime_cancel_pending(dev);
+	}
+	wake_up_all(&dev->power.wait_queue);
+	goto out;
 }
 
 /**
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_resume;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
 			(unsigned long)dev);
 
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+
 	init_waitqueue_head(&dev->power.wait_queue);
 }
 
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
 	if (dev->power.irq_safe && dev->parent)
 		pm_runtime_put_sync(dev->parent);
 }
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it.  The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+		if (dev->power.max_time_suspended_ns > delta_ns)
+			dev->power.max_time_suspended_ns -= delta_ns;
+		else
+			dev->power.max_time_suspended_ns = 0;
+	}
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
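Two things change here for drivers: rpm_idle(), rpm_suspend() and rpm_resume() now fall back to the callbacks in dev->driver->pm when the PM domain, type, class and bus all leave them unset, and rpm_suspend() refuses to suspend (or records a tighter max_time_suspended_ns) based on the PM QoS latency constraint read through __dev_pm_qos_read_value(). A driver therefore only has to publish its runtime PM callbacks; a minimal hypothetical sketch:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical callbacks: with the fallback above they are used even
     * on a bus that defines no runtime PM operations of its own. */
    static int foo_runtime_suspend(struct device *dev)
    {
    	/* gate clocks, put the hardware into a low-power state */
    	return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
    	/* ungate clocks, restore hardware context */
    	return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
    	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };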
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f0491037080..464fa2147dfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
 
 comment "DEVFREQ Drivers"
 
+config ARM_EXYNOS4_BUS_DEVFREQ
+	bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+	depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+	select ARCH_HAS_OPP
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	help
+	  This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+	  and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+	  It reads PPMU counters of memory controllers and adjusts
+	  the operating frequencies and voltages with OPP support.
+	  To operate with optimal voltages, ASV support is required
+	  (CONFIG_EXYNOS_ASV).
+
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89e970a..8c464234f7e7 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)	+= governor_simpleondemand.o
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)	+= exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9cb8c5..c189b82f5ece 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 		if (!IS_ERR(devfreq)) {
 			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 			err = -EINVAL;
-			goto out;
+			goto err_out;
 		}
 	}
 
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 		dev_err(dev, "%s: Unable to create devfreq for the device\n",
 			__func__);
 		err = -ENOMEM;
-		goto out;
+		goto err_out;
 	}
 
 	mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
 				   devfreq->next_polling);
 	}
 	mutex_unlock(&devfreq_list_lock);
-	goto out;
+out:
+	return devfreq;
+
 err_init:
 	device_unregister(&devfreq->dev);
 err_dev:
 	mutex_unlock(&devfreq->lock);
 	kfree(devfreq);
-out:
-	if (err)
-		return ERR_PTR(err);
-	else
-		return devfreq;
+err_out:
+	return ERR_PTR(err);
 }
 
 /**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 000000000000..6460577d6701
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
1/* drivers/devfreq/exynos4_bus.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
8 * This version supports EXYNOS4210 only. This changes bus frequencies
9 * and vddint voltages. Exynos4412/4212 should be able to be supported
10 * with minor modifications.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/io.h>
19#include <linux/slab.h>
20#include <linux/mutex.h>
21#include <linux/suspend.h>
22#include <linux/opp.h>
23#include <linux/devfreq.h>
24#include <linux/platform_device.h>
25#include <linux/regulator/consumer.h>
26#include <linux/module.h>
27
28/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
29#ifdef CONFIG_EXYNOS_ASV
30extern unsigned int exynos4_result_of_asv;
31#endif
32
33#include <mach/regs-clock.h>
34
35#include <plat/map-s5p.h>
36
37#define MAX_SAFEVOLT 1200000 /* 1.2V */
38
39enum exynos4_busf_type {
40 TYPE_BUSF_EXYNOS4210,
41 TYPE_BUSF_EXYNOS4x12,
42};
43
44/* Assume that the bus is saturated if the utilization is 40% */
45#define BUS_SATURATION_RATIO 40
46
47enum ppmu_counter {
48 PPMU_PMNCNT0 = 0,
49 PPMU_PMCCNT1,
50 PPMU_PMNCNT2,
51 PPMU_PMNCNT3,
52 PPMU_PMNCNT_MAX,
53};
54struct exynos4_ppmu {
55 void __iomem *hw_base;
56 unsigned int ccnt;
57 unsigned int event;
58 unsigned int count[PPMU_PMNCNT_MAX];
59 bool ccnt_overflow;
60 bool count_overflow[PPMU_PMNCNT_MAX];
61};
62
63enum busclk_level_idx {
64 LV_0 = 0,
65 LV_1,
66 LV_2,
67 LV_3,
68 LV_4,
69 _LV_END
70};
71#define EX4210_LV_MAX LV_2
72#define EX4x12_LV_MAX LV_4
73#define EX4210_LV_NUM (LV_2 + 1)
74#define EX4x12_LV_NUM (LV_4 + 1)
75
76struct busfreq_data {
77 enum exynos4_busf_type type;
78 struct device *dev;
79 struct devfreq *devfreq;
80 bool disabled;
81 struct regulator *vdd_int;
82 struct regulator *vdd_mif; /* Exynos4412/4212 only */
83 struct opp *curr_opp;
84 struct exynos4_ppmu dmc[2];
85
86 struct notifier_block pm_notifier;
87 struct mutex lock;
88
89 /* Dividers calculated at boot/probe-time */
90 unsigned int dmc_divtable[_LV_END]; /* DMC0 */
91 unsigned int top_divtable[_LV_END];
92};
93
94struct bus_opp_table {
95 unsigned int idx;
96 unsigned long clk;
97 unsigned long volt;
98};
99
100/* 4210 controls clock of mif and voltage of int */
101static struct bus_opp_table exynos4210_busclk_table[] = {
102 {LV_0, 400000, 1150000},
103 {LV_1, 267000, 1050000},
104 {LV_2, 133000, 1025000},
105 {0, 0, 0},
106};
107
108/*
109 * MIF is the main control knob clock for exynos4x12; the MIF/INT
110 * clocks and the voltages of both mif/int are controlled through it.
111 */
112static struct bus_opp_table exynos4x12_mifclk_table[] = {
113 {LV_0, 400000, 1100000},
114 {LV_1, 267000, 1000000},
115 {LV_2, 160000, 950000},
116 {LV_3, 133000, 950000},
117 {LV_4, 100000, 950000},
118 {0, 0, 0},
119};
120
121/*
122 * INT is not the control knob of 4x12. LV_x is not meant to represent
123 * the current performance. (MIF does)
124 */
125static struct bus_opp_table exynos4x12_intclk_table[] = {
126 {LV_0, 200000, 1000000},
127 {LV_1, 160000, 950000},
128 {LV_2, 133000, 925000},
129 {LV_3, 100000, 900000},
130 {0, 0, 0},
131};
132
133/* TODO: asv volt definitions are "__initdata"? */
134/* Some chips have different operating voltages */
135static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
136 {1150000, 1050000, 1050000},
137 {1125000, 1025000, 1025000},
138 {1100000, 1000000, 1000000},
139 {1075000, 975000, 975000},
140 {1050000, 950000, 950000},
141};
142
143static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
144 /* 400 267 160 133 100 */
145 {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
146 {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
147 {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
148 {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
149 {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
150 {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
151 {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
152 {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
153 {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
154};
155
156static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
157 /* 200 160 133 100 */
158 {1000000, 950000, 925000, 900000}, /* ASV0 */
159 {975000, 925000, 925000, 900000}, /* ASV1 */
160 {950000, 925000, 900000, 875000}, /* ASV2 */
161 {950000, 900000, 900000, 875000}, /* ASV3 */
162 {925000, 875000, 875000, 875000}, /* ASV4 */
163 {900000, 850000, 850000, 850000}, /* ASV5 */
164 {900000, 850000, 850000, 850000}, /* ASV6 */
165 {900000, 850000, 850000, 850000}, /* ASV7 */
166 {900000, 850000, 850000, 850000}, /* ASV8 */
167};
168
169/*** Clock Divider Data for Exynos4210 ***/
170static unsigned int exynos4210_clkdiv_dmc0[][8] = {
171 /*
172 * Clock divider value for following
173 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
174 * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
175 */
176
177 /* DMC L0: 400MHz */
178 { 3, 1, 1, 1, 1, 1, 3, 1 },
179 /* DMC L1: 266.7MHz */
180 { 4, 1, 1, 2, 1, 1, 3, 1 },
181 /* DMC L2: 133MHz */
182 { 5, 1, 1, 5, 1, 1, 3, 1 },
183};
184static unsigned int exynos4210_clkdiv_top[][5] = {
185 /*
186 * Clock divider value for following
187 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
188 */
189 /* ACLK200 L0: 200MHz */
190 { 3, 7, 4, 5, 1 },
191 /* ACLK200 L1: 160MHz */
192 { 4, 7, 5, 6, 1 },
193 /* ACLK200 L2: 133MHz */
194 { 5, 7, 7, 7, 1 },
195};
196static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
197 /*
198 * Clock divider value for following
199 * { DIVGDL/R, DIVGPL/R }
200 */
201 /* ACLK_GDL/R L1: 200MHz */
202 { 3, 1 },
203 /* ACLK_GDL/R L2: 160MHz */
204 { 4, 1 },
205 /* ACLK_GDL/R L3: 133MHz */
206 { 5, 1 },
207};
208
209/*** Clock Divider Data for Exynos4212/4412 ***/
210static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
211 /*
212 * Clock divider value for following
213 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
214 * DIVDMCP}
215 */
216
217 /* DMC L0: 400MHz */
218 {3, 1, 1, 1, 1, 1},
219 /* DMC L1: 266.7MHz */
220 {4, 1, 1, 2, 1, 1},
221 /* DMC L2: 160MHz */
222 {5, 1, 1, 4, 1, 1},
223 /* DMC L3: 133MHz */
224 {5, 1, 1, 5, 1, 1},
225 /* DMC L4: 100MHz */
226 {7, 1, 1, 7, 1, 1},
227};
228static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
229 /*
230 * Clock divider value for following
231 * { G2DACP, DIVC2C, DIVC2C_ACLK }
232 */
233
234 /* DMC L0: 400MHz */
235 {3, 1, 1},
236 /* DMC L1: 266.7MHz */
237 {4, 2, 1},
238 /* DMC L2: 160MHz */
239 {5, 4, 1},
240 /* DMC L3: 133MHz */
241 {5, 5, 1},
242 /* DMC L4: 100MHz */
243 {7, 7, 1},
244};
245static unsigned int exynos4x12_clkdiv_top[][5] = {
246 /*
247 * Clock divider value for following
248 * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
249 DIVACLK133, DIVONENAND }
250 */
251
252 /* ACLK_GDL/R L0: 200MHz */
253 {2, 7, 4, 5, 1},
254 /* ACLK_GDL/R L1: 200MHz */
255 {2, 7, 4, 5, 1},
256 /* ACLK_GDL/R L2: 160MHz */
257 {4, 7, 5, 7, 1},
258 /* ACLK_GDL/R L3: 133MHz */
259 {4, 7, 5, 7, 1},
260 /* ACLK_GDL/R L4: 100MHz */
261 {7, 7, 7, 7, 1},
262};
263static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
264 /*
265 * Clock divider value for following
266 * { DIVGDL/R, DIVGPL/R }
267 */
268
269 /* ACLK_GDL/R L0: 200MHz */
270 {3, 1},
271 /* ACLK_GDL/R L1: 200MHz */
272 {3, 1},
273 /* ACLK_GDL/R L2: 160MHz */
274 {4, 1},
275 /* ACLK_GDL/R L3: 133MHz */
276 {5, 1},
277 /* ACLK_GDL/R L4: 100MHz */
278 {7, 1},
279};
280static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
281 /*
282 * Clock divider value for following
283 * { DIVMFC, DIVJPEG, DIVFIMC0~3}
284 */
285
286 /* SCLK_MFC: 200MHz */
287 {3, 3, 4},
288 /* SCLK_MFC: 200MHz */
289 {3, 3, 4},
290 /* SCLK_MFC: 160MHz */
291 {4, 4, 5},
292 /* SCLK_MFC: 133MHz */
293 {5, 5, 5},
294 /* SCLK_MFC: 100MHz */
295 {7, 7, 7},
296};
297
298
299static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
300{
301 unsigned int index;
302 unsigned int tmp;
303
304 for (index = LV_0; index < EX4210_LV_NUM; index++)
305 if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
306 break;
307
308 if (index == EX4210_LV_NUM)
309 return -EINVAL;
310
311 /* Change Divider - DMC0 */
312 tmp = data->dmc_divtable[index];
313
314 __raw_writel(tmp, S5P_CLKDIV_DMC0);
315
316 do {
317 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
318 } while (tmp & 0x11111111);
319
320 /* Change Divider - TOP */
321 tmp = data->top_divtable[index];
322
323 __raw_writel(tmp, S5P_CLKDIV_TOP);
324
325 do {
326 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
327 } while (tmp & 0x11111);
328
329 /* Change Divider - LEFTBUS */
330 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
331
332 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
333
334 tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
335 S5P_CLKDIV_BUS_GDLR_SHIFT) |
336 (exynos4210_clkdiv_lr_bus[index][1] <<
337 S5P_CLKDIV_BUS_GPLR_SHIFT));
338
339 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
340
341 do {
342 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
343 } while (tmp & 0x11);
344
345 /* Change Divider - RIGHTBUS */
346 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
347
348 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
349
350 tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
351 S5P_CLKDIV_BUS_GDLR_SHIFT) |
352 (exynos4210_clkdiv_lr_bus[index][1] <<
353 S5P_CLKDIV_BUS_GPLR_SHIFT));
354
355 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
356
357 do {
358 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
359 } while (tmp & 0x11);
360
361 return 0;
362}
363
364static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
365{
366 unsigned int index;
367 unsigned int tmp;
368
369 for (index = LV_0; index < EX4x12_LV_NUM; index++)
370 if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
371 break;
372
373 if (index == EX4x12_LV_NUM)
374 return -EINVAL;
375
376 /* Change Divider - DMC0 */
377 tmp = data->dmc_divtable[index];
378
379 __raw_writel(tmp, S5P_CLKDIV_DMC0);
380
381 do {
382 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
383 } while (tmp & 0x11111111);
384
385 /* Change Divider - DMC1 */
386 tmp = __raw_readl(S5P_CLKDIV_DMC1);
387
388 tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
389 S5P_CLKDIV_DMC1_C2C_MASK |
390 S5P_CLKDIV_DMC1_C2CACLK_MASK);
391
392 tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
393 S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
394 (exynos4x12_clkdiv_dmc1[index][1] <<
395 S5P_CLKDIV_DMC1_C2C_SHIFT) |
396 (exynos4x12_clkdiv_dmc1[index][2] <<
397 S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
398
399 __raw_writel(tmp, S5P_CLKDIV_DMC1);
400
401 do {
402 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
403 } while (tmp & 0x111111);
404
405 /* Change Divider - TOP */
406 tmp = __raw_readl(S5P_CLKDIV_TOP);
407
408 tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
409 S5P_CLKDIV_TOP_ACLK100_MASK |
410 S5P_CLKDIV_TOP_ACLK160_MASK |
411 S5P_CLKDIV_TOP_ACLK133_MASK |
412 S5P_CLKDIV_TOP_ONENAND_MASK);
413
414 tmp |= ((exynos4x12_clkdiv_top[index][0] <<
415 S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
416 (exynos4x12_clkdiv_top[index][1] <<
417 S5P_CLKDIV_TOP_ACLK100_SHIFT) |
418 (exynos4x12_clkdiv_top[index][2] <<
419 S5P_CLKDIV_TOP_ACLK160_SHIFT) |
420 (exynos4x12_clkdiv_top[index][3] <<
421 S5P_CLKDIV_TOP_ACLK133_SHIFT) |
422 (exynos4x12_clkdiv_top[index][4] <<
423 S5P_CLKDIV_TOP_ONENAND_SHIFT));
424
425 __raw_writel(tmp, S5P_CLKDIV_TOP);
426
427 do {
428 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
429 } while (tmp & 0x11111);
430
431 /* Change Divider - LEFTBUS */
432 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
433
434 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
435
436 tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
437 S5P_CLKDIV_BUS_GDLR_SHIFT) |
438 (exynos4x12_clkdiv_lr_bus[index][1] <<
439 S5P_CLKDIV_BUS_GPLR_SHIFT));
440
441 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
442
443 do {
444 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
445 } while (tmp & 0x11);
446
447 /* Change Divider - RIGHTBUS */
448 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
449
450 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
451
452 tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
453 S5P_CLKDIV_BUS_GDLR_SHIFT) |
454 (exynos4x12_clkdiv_lr_bus[index][1] <<
455 S5P_CLKDIV_BUS_GPLR_SHIFT));
456
457 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
458
459 do {
460 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
461 } while (tmp & 0x11);
462
463 /* Change Divider - MFC */
464 tmp = __raw_readl(S5P_CLKDIV_MFC);
465
466 tmp &= ~(S5P_CLKDIV_MFC_MASK);
467
468 tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
469 S5P_CLKDIV_MFC_SHIFT));
470
471 __raw_writel(tmp, S5P_CLKDIV_MFC);
472
473 do {
474 tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
475 } while (tmp & 0x1);
476
477 /* Change Divider - JPEG */
478 tmp = __raw_readl(S5P_CLKDIV_CAM1);
479
480 tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
481
482 tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
483 S5P_CLKDIV_CAM1_JPEG_SHIFT));
484
485 __raw_writel(tmp, S5P_CLKDIV_CAM1);
486
487 do {
488 tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
489 } while (tmp & 0x1);
490
491 /* Change Divider - FIMC0~3 */
492 tmp = __raw_readl(S5P_CLKDIV_CAM);
493
494 tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
495 S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
496
497 tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
498 S5P_CLKDIV_CAM_FIMC0_SHIFT) |
499 (exynos4x12_clkdiv_sclkip[index][2] <<
500 S5P_CLKDIV_CAM_FIMC1_SHIFT) |
501 (exynos4x12_clkdiv_sclkip[index][2] <<
502 S5P_CLKDIV_CAM_FIMC2_SHIFT) |
503 (exynos4x12_clkdiv_sclkip[index][2] <<
504 S5P_CLKDIV_CAM_FIMC3_SHIFT));
505
506 __raw_writel(tmp, S5P_CLKDIV_CAM);
507
508 do {
509 tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
510 } while (tmp & 0x1111);
511
512 return 0;
513}
514
515
516static void busfreq_mon_reset(struct busfreq_data *data)
517{
518 unsigned int i;
519
520 for (i = 0; i < 2; i++) {
521 void __iomem *ppmu_base = data->dmc[i].hw_base;
522
523 /* Reset PPMU */
524 __raw_writel(0x8000000f, ppmu_base + 0xf010);
525 __raw_writel(0x8000000f, ppmu_base + 0xf050);
526 __raw_writel(0x6, ppmu_base + 0xf000);
527 __raw_writel(0x0, ppmu_base + 0xf100);
528
529 /* Set PPMU Event */
530 data->dmc[i].event = 0x6;
531 __raw_writel(((data->dmc[i].event << 12) | 0x1),
532 ppmu_base + 0xfc);
533
534 /* Start PPMU */
535 __raw_writel(0x1, ppmu_base + 0xf000);
536 }
537}
538
539static void exynos4_read_ppmu(struct busfreq_data *data)
540{
541 int i, j;
542
543 for (i = 0; i < 2; i++) {
544 void __iomem *ppmu_base = data->dmc[i].hw_base;
545 u32 overflow;
546
547 /* Stop PPMU */
548 __raw_writel(0x0, ppmu_base + 0xf000);
549
550 /* Update local data from PPMU */
551 overflow = __raw_readl(ppmu_base + 0xf050);
552
553 data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
554 data->dmc[i].ccnt_overflow = overflow & (1 << 31);
555
556 for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
557 data->dmc[i].count[j] = __raw_readl(
558 ppmu_base + (0xf110 + (0x10 * j)));
559 data->dmc[i].count_overflow[j] = overflow & (1 << j);
560 }
561 }
562
563 busfreq_mon_reset(data);
564}
565
566static int exynos4x12_get_intspec(unsigned long mifclk)
567{
568 int i = 0;
569
570 while (exynos4x12_intclk_table[i].clk) {
571 if (exynos4x12_intclk_table[i].clk <= mifclk)
572 return i;
573 i++;
574 }
575
576 return -EINVAL;
577}
578
579static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
580 struct opp *oldopp)
581{
582 int err = 0, tmp;
583 unsigned long volt = opp_get_voltage(opp);
584
585 switch (data->type) {
586 case TYPE_BUSF_EXYNOS4210:
587 /* OPP represents DMC clock + INT voltage */
588 err = regulator_set_voltage(data->vdd_int, volt,
589 MAX_SAFEVOLT);
590 break;
591 case TYPE_BUSF_EXYNOS4x12:
592 /* OPP represents MIF clock + MIF voltage */
593 err = regulator_set_voltage(data->vdd_mif, volt,
594 MAX_SAFEVOLT);
595 if (err)
596 break;
597
598 tmp = exynos4x12_get_intspec(opp_get_freq(opp));
599 if (tmp < 0) {
600 err = tmp;
601 regulator_set_voltage(data->vdd_mif,
602 opp_get_voltage(oldopp),
603 MAX_SAFEVOLT);
604 break;
605 }
606 err = regulator_set_voltage(data->vdd_int,
607 exynos4x12_intclk_table[tmp].volt,
608 MAX_SAFEVOLT);
609 /* Try to recover */
610 if (err)
611 regulator_set_voltage(data->vdd_mif,
612 opp_get_voltage(oldopp),
613 MAX_SAFEVOLT);
614 break;
615 default:
616 err = -EINVAL;
617 }
618
619 return err;
620}
621
622static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
623{
624 int err = 0;
625 struct platform_device *pdev = container_of(dev, struct platform_device,
626 dev);
627 struct busfreq_data *data = platform_get_drvdata(pdev);
628 struct opp *opp = devfreq_recommended_opp(dev, _freq);
629 unsigned long old_freq = opp_get_freq(data->curr_opp);
630 unsigned long freq = opp_get_freq(opp);
631
632 if (old_freq == freq)
633 return 0;
634
635	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, opp_get_voltage(opp));
636
637 mutex_lock(&data->lock);
638
639 if (data->disabled)
640 goto out;
641
642 if (old_freq < freq)
643 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
644 if (err)
645 goto out;
646
647 if (old_freq != freq) {
648 switch (data->type) {
649 case TYPE_BUSF_EXYNOS4210:
650 err = exynos4210_set_busclk(data, opp);
651 break;
652 case TYPE_BUSF_EXYNOS4x12:
653 err = exynos4x12_set_busclk(data, opp);
654 break;
655 default:
656 err = -EINVAL;
657 }
658 }
659 if (err)
660 goto out;
661
662 if (old_freq > freq)
663 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
664 if (err)
665 goto out;
666
667 data->curr_opp = opp;
668out:
669 mutex_unlock(&data->lock);
670 return err;
671}
672
673static int exynos4_get_busier_dmc(struct busfreq_data *data)
674{
675 u64 p0 = data->dmc[0].count[0];
676 u64 p1 = data->dmc[1].count[0];
677
678 p0 *= data->dmc[1].ccnt;
679 p1 *= data->dmc[0].ccnt;
680
681 if (data->dmc[1].ccnt == 0)
682 return 0;
683
684 if (p0 > p1)
685 return 0;
686 return 1;
687}
688
689static int exynos4_bus_get_dev_status(struct device *dev,
690 struct devfreq_dev_status *stat)
691{
692 struct platform_device *pdev = container_of(dev, struct platform_device,
693 dev);
694 struct busfreq_data *data = platform_get_drvdata(pdev);
695 int busier_dmc;
696 int cycles_x2 = 2; /* 2 x cycles */
697 void __iomem *addr;
698 u32 timing;
699 u32 memctrl;
700
701 exynos4_read_ppmu(data);
702 busier_dmc = exynos4_get_busier_dmc(data);
703 stat->current_frequency = opp_get_freq(data->curr_opp);
704
705 if (busier_dmc)
706 addr = S5P_VA_DMC1;
707 else
708 addr = S5P_VA_DMC0;
709
710 memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
711 timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
712
713 switch ((memctrl >> 8) & 0xf) {
714 case 0x4: /* DDR2 */
715 cycles_x2 = ((timing >> 16) & 0xf) * 2;
716 break;
717 case 0x5: /* LPDDR2 */
718 case 0x6: /* DDR3 */
719 cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
720 break;
721 default:
722 pr_err("%s: Unknown Memory Type(%d).\n", __func__,
723 (memctrl >> 8) & 0xf);
724 return -EINVAL;
725 }
726
727 /* Number of cycles spent on memory access */
728 stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
729 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
730 stat->total_time = data->dmc[busier_dmc].ccnt;
731
732 /* If the counters have overflown, retry */
733 if (data->dmc[busier_dmc].ccnt_overflow ||
734 data->dmc[busier_dmc].count_overflow[0])
735 return -EAGAIN;
736
737 return 0;
738}
739
740static void exynos4_bus_exit(struct device *dev)
741{
742 struct platform_device *pdev = container_of(dev, struct platform_device,
743 dev);
744 struct busfreq_data *data = platform_get_drvdata(pdev);
745
746 devfreq_unregister_opp_notifier(dev, data->devfreq);
747}
748
749static struct devfreq_dev_profile exynos4_devfreq_profile = {
750 .initial_freq = 400000,
751 .polling_ms = 50,
752 .target = exynos4_bus_target,
753 .get_dev_status = exynos4_bus_get_dev_status,
754 .exit = exynos4_bus_exit,
755};
756
757static int exynos4210_init_tables(struct busfreq_data *data)
758{
759 u32 tmp;
760 int mgrp;
761 int i, err = 0;
762
763 tmp = __raw_readl(S5P_CLKDIV_DMC0);
764 for (i = LV_0; i < EX4210_LV_NUM; i++) {
765 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
766 S5P_CLKDIV_DMC0_ACPPCLK_MASK |
767 S5P_CLKDIV_DMC0_DPHY_MASK |
768 S5P_CLKDIV_DMC0_DMC_MASK |
769 S5P_CLKDIV_DMC0_DMCD_MASK |
770 S5P_CLKDIV_DMC0_DMCP_MASK |
771 S5P_CLKDIV_DMC0_COPY2_MASK |
772 S5P_CLKDIV_DMC0_CORETI_MASK);
773
774 tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
775 S5P_CLKDIV_DMC0_ACP_SHIFT) |
776 (exynos4210_clkdiv_dmc0[i][1] <<
777 S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
778 (exynos4210_clkdiv_dmc0[i][2] <<
779 S5P_CLKDIV_DMC0_DPHY_SHIFT) |
780 (exynos4210_clkdiv_dmc0[i][3] <<
781 S5P_CLKDIV_DMC0_DMC_SHIFT) |
782 (exynos4210_clkdiv_dmc0[i][4] <<
783 S5P_CLKDIV_DMC0_DMCD_SHIFT) |
784 (exynos4210_clkdiv_dmc0[i][5] <<
785 S5P_CLKDIV_DMC0_DMCP_SHIFT) |
786 (exynos4210_clkdiv_dmc0[i][6] <<
787 S5P_CLKDIV_DMC0_COPY2_SHIFT) |
788 (exynos4210_clkdiv_dmc0[i][7] <<
789 S5P_CLKDIV_DMC0_CORETI_SHIFT));
790
791 data->dmc_divtable[i] = tmp;
792 }
793
794 tmp = __raw_readl(S5P_CLKDIV_TOP);
795 for (i = LV_0; i < EX4210_LV_NUM; i++) {
796 tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
797 S5P_CLKDIV_TOP_ACLK100_MASK |
798 S5P_CLKDIV_TOP_ACLK160_MASK |
799 S5P_CLKDIV_TOP_ACLK133_MASK |
800 S5P_CLKDIV_TOP_ONENAND_MASK);
801
802 tmp |= ((exynos4210_clkdiv_top[i][0] <<
803 S5P_CLKDIV_TOP_ACLK200_SHIFT) |
804 (exynos4210_clkdiv_top[i][1] <<
805 S5P_CLKDIV_TOP_ACLK100_SHIFT) |
806 (exynos4210_clkdiv_top[i][2] <<
807 S5P_CLKDIV_TOP_ACLK160_SHIFT) |
808 (exynos4210_clkdiv_top[i][3] <<
809 S5P_CLKDIV_TOP_ACLK133_SHIFT) |
810 (exynos4210_clkdiv_top[i][4] <<
811 S5P_CLKDIV_TOP_ONENAND_SHIFT));
812
813 data->top_divtable[i] = tmp;
814 }
815
816#ifdef CONFIG_EXYNOS_ASV
817 tmp = exynos4_result_of_asv;
818#else
819 tmp = 0; /* Max voltages for the reliability of the unknown */
820#endif
821
822 pr_debug("ASV Group of Exynos4 is %d\n", tmp);
823 /* Use merged grouping for voltage */
824 switch (tmp) {
825 case 0:
826 mgrp = 0;
827 break;
828 case 1:
829 case 2:
830 mgrp = 1;
831 break;
832 case 3:
833 case 4:
834 mgrp = 2;
835 break;
836 case 5:
837 case 6:
838 mgrp = 3;
839 break;
840 case 7:
841 mgrp = 4;
842 break;
843 default:
844 pr_warn("Unknown ASV Group. Use max voltage.\n");
845 mgrp = 0;
846 }
847
848 for (i = LV_0; i < EX4210_LV_NUM; i++)
849 exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
850
851 for (i = LV_0; i < EX4210_LV_NUM; i++) {
852 err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
853 exynos4210_busclk_table[i].volt);
854 if (err) {
855 dev_err(data->dev, "Cannot add opp entries.\n");
856 return err;
857 }
858 }
859
860
861 return 0;
862}
863
864static int exynos4x12_init_tables(struct busfreq_data *data)
865{
866 unsigned int i;
867 unsigned int tmp;
868 int ret;
869
870 /* Enable pause function for DREX2 DVFS */
871 tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
872 tmp |= DMC_PAUSE_ENABLE;
873 __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
874
875 tmp = __raw_readl(S5P_CLKDIV_DMC0);
876
877 for (i = 0; i < EX4x12_LV_NUM; i++) {
878 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
879 S5P_CLKDIV_DMC0_ACPPCLK_MASK |
880 S5P_CLKDIV_DMC0_DPHY_MASK |
881 S5P_CLKDIV_DMC0_DMC_MASK |
882 S5P_CLKDIV_DMC0_DMCD_MASK |
883 S5P_CLKDIV_DMC0_DMCP_MASK);
884
885 tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
886 S5P_CLKDIV_DMC0_ACP_SHIFT) |
887 (exynos4x12_clkdiv_dmc0[i][1] <<
888 S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
889 (exynos4x12_clkdiv_dmc0[i][2] <<
890 S5P_CLKDIV_DMC0_DPHY_SHIFT) |
891 (exynos4x12_clkdiv_dmc0[i][3] <<
892 S5P_CLKDIV_DMC0_DMC_SHIFT) |
893 (exynos4x12_clkdiv_dmc0[i][4] <<
894 S5P_CLKDIV_DMC0_DMCD_SHIFT) |
895 (exynos4x12_clkdiv_dmc0[i][5] <<
896 S5P_CLKDIV_DMC0_DMCP_SHIFT));
897
898 data->dmc_divtable[i] = tmp;
899 }
900
901#ifdef CONFIG_EXYNOS_ASV
902 tmp = exynos4_result_of_asv;
903#else
904 tmp = 0; /* Max voltages for the reliability of the unknown */
905#endif
906
907 if (tmp > 8)
908 tmp = 0;
909 pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
910
911 for (i = 0; i < EX4x12_LV_NUM; i++) {
912 exynos4x12_mifclk_table[i].volt =
913 exynos4x12_mif_step_50[tmp][i];
914 exynos4x12_intclk_table[i].volt =
915 exynos4x12_int_volt[tmp][i];
916 }
917
918 for (i = 0; i < EX4x12_LV_NUM; i++) {
919 ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
920 exynos4x12_mifclk_table[i].volt);
921 if (ret) {
922			dev_err(data->dev, "Failed to add opp entries.\n");
923 return ret;
924 }
925 }
926
927 return 0;
928}
929
930static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
931 unsigned long event, void *ptr)
932{
933 struct busfreq_data *data = container_of(this, struct busfreq_data,
934 pm_notifier);
935 struct opp *opp;
936 unsigned long maxfreq = ULONG_MAX;
937 int err = 0;
938
939 switch (event) {
940 case PM_SUSPEND_PREPARE:
941 /* Set Fastest and Deactivate DVFS */
942 mutex_lock(&data->lock);
943
944 data->disabled = true;
945
946 opp = opp_find_freq_floor(data->dev, &maxfreq);
947
948 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
949 if (err)
950 goto unlock;
951
952 switch (data->type) {
953 case TYPE_BUSF_EXYNOS4210:
954 err = exynos4210_set_busclk(data, opp);
955 break;
956 case TYPE_BUSF_EXYNOS4x12:
957 err = exynos4x12_set_busclk(data, opp);
958 break;
959 default:
960 err = -EINVAL;
961 }
962 if (err)
963 goto unlock;
964
965 data->curr_opp = opp;
966unlock:
967 mutex_unlock(&data->lock);
968 if (err)
969 return err;
970 return NOTIFY_OK;
971 case PM_POST_RESTORE:
972 case PM_POST_SUSPEND:
973 /* Reactivate */
974 mutex_lock(&data->lock);
975 data->disabled = false;
976 mutex_unlock(&data->lock);
977 return NOTIFY_OK;
978 }
979
980 return NOTIFY_DONE;
981}
982
983static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
984{
985 struct busfreq_data *data;
986 struct opp *opp;
987 struct device *dev = &pdev->dev;
988 int err = 0;
989
990 data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
991 if (data == NULL) {
992 dev_err(dev, "Cannot allocate memory.\n");
993 return -ENOMEM;
994 }
995
996 data->type = pdev->id_entry->driver_data;
997 data->dmc[0].hw_base = S5P_VA_DMC0;
998 data->dmc[1].hw_base = S5P_VA_DMC1;
999 data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
1000 data->dev = dev;
1001 mutex_init(&data->lock);
1002
1003 switch (data->type) {
1004 case TYPE_BUSF_EXYNOS4210:
1005 err = exynos4210_init_tables(data);
1006 break;
1007 case TYPE_BUSF_EXYNOS4x12:
1008 err = exynos4x12_init_tables(data);
1009 break;
1010 default:
1011 dev_err(dev, "Cannot determine the device id %d\n", data->type);
1012 err = -EINVAL;
1013 }
1014 if (err)
1015 goto err_regulator;
1016
1017 data->vdd_int = regulator_get(dev, "vdd_int");
1018 if (IS_ERR(data->vdd_int)) {
1019 dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
1020 err = PTR_ERR(data->vdd_int);
1021 goto err_regulator;
1022 }
1023 if (data->type == TYPE_BUSF_EXYNOS4x12) {
1024 data->vdd_mif = regulator_get(dev, "vdd_mif");
1025 if (IS_ERR(data->vdd_mif)) {
1026 dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
1027 err = PTR_ERR(data->vdd_mif);
1028 regulator_put(data->vdd_int);
1029 goto err_regulator;
1030
1031 }
1032 }
1033
1034 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
1035 if (IS_ERR(opp)) {
1036 dev_err(dev, "Invalid initial frequency %lu kHz.\n",
1037 exynos4_devfreq_profile.initial_freq);
1038 err = PTR_ERR(opp);
1039 goto err_opp_add;
1040 }
1041 data->curr_opp = opp;
1042
1043 platform_set_drvdata(pdev, data);
1044
1045 busfreq_mon_reset(data);
1046
1047 data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
1048 &devfreq_simple_ondemand, NULL);
1049 if (IS_ERR(data->devfreq)) {
1050 err = PTR_ERR(data->devfreq);
1051 goto err_opp_add;
1052 }
1053
1054 devfreq_register_opp_notifier(dev, data->devfreq);
1055
1056 err = register_pm_notifier(&data->pm_notifier);
1057 if (err) {
1058 dev_err(dev, "Failed to setup pm notifier\n");
1059 goto err_devfreq_add;
1060 }
1061
1062 return 0;
1063err_devfreq_add:
1064 devfreq_remove_device(data->devfreq);
1065err_opp_add:
1066 if (data->vdd_mif)
1067 regulator_put(data->vdd_mif);
1068 regulator_put(data->vdd_int);
1069err_regulator:
1070 kfree(data);
1071 return err;
1072}
1073
1074static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
1075{
1076 struct busfreq_data *data = platform_get_drvdata(pdev);
1077
1078 unregister_pm_notifier(&data->pm_notifier);
1079 devfreq_remove_device(data->devfreq);
1080 regulator_put(data->vdd_int);
1081 if (data->vdd_mif)
1082 regulator_put(data->vdd_mif);
1083 kfree(data);
1084
1085 return 0;
1086}
1087
1088static int exynos4_busfreq_resume(struct device *dev)
1089{
1090 struct platform_device *pdev = container_of(dev, struct platform_device,
1091 dev);
1092 struct busfreq_data *data = platform_get_drvdata(pdev);
1093
1094 busfreq_mon_reset(data);
1095 return 0;
1096}
1097
1098static const struct dev_pm_ops exynos4_busfreq_pm = {
1099 .resume = exynos4_busfreq_resume,
1100};
1101
1102static const struct platform_device_id exynos4_busfreq_id[] = {
1103 { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
1104 { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
1105 { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
1106 { },
1107};
1108
1109static struct platform_driver exynos4_busfreq_driver = {
1110 .probe = exynos4_busfreq_probe,
1111 .remove = __devexit_p(exynos4_busfreq_remove),
1112 .id_table = exynos4_busfreq_id,
1113 .driver = {
1114 .name = "exynos4-busfreq",
1115 .owner = THIS_MODULE,
1116 .pm = &exynos4_busfreq_pm,
1117 },
1118};
1119
1120static int __init exynos4_busfreq_init(void)
1121{
1122 return platform_driver_register(&exynos4_busfreq_driver);
1123}
1124late_initcall(exynos4_busfreq_init);
1125
1126static void __exit exynos4_busfreq_exit(void)
1127{
1128 platform_driver_unregister(&exynos4_busfreq_driver);
1129}
1130module_exit(exynos4_busfreq_exit);
1131
1132MODULE_LICENSE("GPL");
1133MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
1134MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1135MODULE_ALIAS("exynos4-busfreq");
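The driver matches purely by name through its platform_device_id table, so a board or SoC setup file enables it by registering a correspondingly named platform device; a hypothetical sketch:

    #include <linux/platform_device.h>

    /* "exynos4210-busfreq", "exynos4212-busfreq" or "exynos4412-busfreq",
     * matching the id_table above; the id entry's driver_data selects the
     * TYPE_BUSF_* variant in probe. */
    static struct platform_device exynos4_busfreq_device = {
    	.name	= "exynos4210-busfreq",
    	.id	= -1,
    };

    static int __init board_add_busfreq(void)
    {
    	return platform_device_register(&exynos4_busfreq_device);
    }
    device_initcall(board_add_busfreq);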
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
 	int			i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
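The conversion replaces the hand-rolled freeze-and-retry loop around wait_for_completion_interruptible_timeout() with a plain freezable wait: set_freezable() (without the fake-signal variant) plus wait_event_freezable_timeout(), which lets the freezer pull the thread into the refrigerator while it sleeps. The same pattern in isolation, with hypothetical names:

    #include <linux/freezer.h>
    #include <linux/jiffies.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(foo_wait);
    static bool foo_done;

    static int foo_thread(void *data)
    {
    	set_freezable();	/* opt in to the freezer */

    	while (!kthread_should_stop()) {
    		/* sleeps freezably; wakes on foo_done or after 3 s */
    		wait_event_freezable_timeout(foo_wait, foo_done,
    					     msecs_to_jiffies(3000));
    		if (foo_done) {
    			foo_done = false;
    			/* ... process the completed work ... */
    		}
    	}
    	return 0;
    }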
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab371358b33..8825fe37d433 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -46,6 +47,7 @@ struct st1232_ts_data {
 	struct i2c_client *client;
 	struct input_dev *input_dev;
 	struct st1232_ts_finger finger[MAX_FINGERS];
+	struct dev_pm_qos_request low_latency_req;
 };
 
 static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
 	}
 
 	/* SYN_MT_REPORT only if no contact */
-	if (!count)
+	if (!count) {
 		input_mt_sync(input_dev);
+		if (ts->low_latency_req.dev) {
+			dev_pm_qos_remove_request(&ts->low_latency_req);
+			ts->low_latency_req.dev = NULL;
+		}
+	} else if (!ts->low_latency_req.dev) {
+		/* First contact, request 100 us latency. */
+		dev_pm_qos_add_ancestor_request(&ts->client->dev,
+						&ts->low_latency_req, 100);
+	}
 
 	/* SYN_REPORT */
 	input_sync(input_dev);
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb096..c6b456ad7342 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 212868eb6f5f..e6e59a078ef4 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
 			if (change_speed(stir, stir->speed))
 				break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 455e1522253e..62533c105da4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
 			t = 100;	/* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;
@@ -2528,10 +2529,6 @@ exit:
 static void hotkey_poll_stop_sync(void)
 {
 	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
 		mutex_lock(&hotkey_thread_mutex);
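kthread_freezable_should_stop() combines the stop check with entering the refrigerator and reports through its argument whether the thread was actually frozen, which is exactly what this polling loop previously used must_reset/try_to_freeze() for. The bare pattern, with hypothetical names:

    #include <linux/delay.h>
    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static int foo_poll_thread(void *data)
    {
    	bool was_frozen;

    	while (!kthread_freezable_should_stop(&was_frozen)) {
    		if (was_frozen) {
    			/* hardware state may have changed across
    			 * suspend/resume; drop any cached state */
    		}
    		/* ... poll the hardware ... */
    		msleep_interruptible(1000);
    	}
    	return 0;
    }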
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index e85512dd9c72..e53e449b4eca 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 	if (desc->force_enable)
 		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 
+	d->skip_suspend = desc->skip_syscore_suspend;
+
 	nr_intc_controllers++;
 
 	return 0;
@@ -386,6 +388,9 @@ static int intc_suspend(void)
 	list_for_each_entry(d, &intc_list, list) {
 		int irq;
 
+		if (d->skip_suspend)
+			continue;
+
 		/* enable wakeup irqs belonging to this intc controller */
 		for_each_active_irq(irq) {
 			struct irq_data *data;
@@ -409,6 +414,9 @@ static void intc_resume(void)
 	list_for_each_entry(d, &intc_list, list) {
 		int irq;
 
+		if (d->skip_suspend)
+			continue;
+
 		for_each_active_irq(irq) {
 			struct irq_data *data;
 			struct irq_chip *chip;
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 1c2722e5af3f..b0e9155ff739 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -67,6 +67,7 @@ struct intc_desc_int {
 	struct intc_window *window;
 	unsigned int nr_windows;
 	struct irq_chip chip;
+	bool skip_suspend;
 };
 
 
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 115635f95024..a7feb3e328a0 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a1..aa84b3d77274 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
 
 	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
 	/*
 	 * Wait for the timeout to expire or for a disconnect
 	 *
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
 	 * fail to freeze, but we can't be non-freezable either.  Nor can
 	 * khubd freeze while waiting for scanning to complete as it may
 	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
 	 */
 	if (delay_use > 0) {
 		dev_dbg(dev, "waiting for device to settle "
 				"before scanning\n");
 		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0b394580d860..0cc20b35c1c4 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -334,7 +334,7 @@ again:
 			if (freezing(current)) {
 				worker->working = 0;
 				spin_unlock_irq(&worker->lock);
-				refrigerator();
+				try_to_freeze();
 			} else {
 				spin_unlock_irq(&worker->lock);
 				if (!kthread_should_stop()) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f44b3928dc2d..f99a099a7747 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@ sleep:
1635 wake_up_process(root->fs_info->cleaner_kthread); 1633 wake_up_process(root->fs_info->cleaner_kthread);
1636 mutex_unlock(&root->fs_info->transaction_kthread_mutex); 1634 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1637 1635
1638 if (freezing(current)) { 1636 if (!try_to_freeze()) {
1639 refrigerator();
1640 } else {
1641 set_current_state(TASK_INTERRUPTIBLE); 1637 set_current_state(TASK_INTERRUPTIBLE);
1642 if (!kthread_should_stop() && 1638 if (!kthread_should_stop() &&
1643 !btrfs_transaction_blocked(root->fs_info)) 1639 !btrfs_transaction_blocked(root->fs_info))
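In the hunks above, the reworked try_to_freeze() returns true when the task was actually frozen, so the old freezing()/refrigerator() pair and its else branch collapse into a single test. A sketch of the resulting kthread idiom (hypothetical worker; not code from this patch):

	static int example_kthread(void *data)
	{
		while (!kthread_should_stop()) {
			/* ... do one round of periodic work ... */

			if (!try_to_freeze()) {
				/* No freeze happened; sleep until the next round. */
				set_current_state(TASK_INTERRUPTIBLE);
				if (!kthread_should_stop())
					schedule();
				__set_current_state(TASK_RUNNING);
			}
		}
		return 0;
	}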
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6733b3736b3b..64e2529ae9bb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2882,8 +2882,7 @@ cont_thread:
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 80a4574028f1..e2951506434d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -936,7 +936,7 @@ int bdi_writeback_thread(void *data)
 
 	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
 		/*
 		 * Remove own delayed wake-up timer, since we are already awake
 		 * and we'll take care of the preriodic write-back.
@@ -966,8 +966,6 @@ int bdi_writeback_thread(void *data)
 			 */
 			schedule();
 		}
-
-		try_to_freeze();
 	}
 
 	/* Flush any work that raced with us exiting */
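kthread_freezable_should_stop(), declared in the include/linux/kthread.h hunk further down, combines the stop test with try_to_freeze(), which is why the explicit try_to_freeze() call can be deleted from the loop above. A sketch of a freezable worker built on the helper (hypothetical thread; the optional bool out-parameter reports whether a freeze actually happened):

	static int example_flusher_thread(void *data)
	{
		bool was_frozen;

		set_freezable();
		while (!kthread_freezable_should_stop(&was_frozen)) {
			if (was_frozen)
				pr_debug("thawed, revalidating cached state\n");
			/* ... flush some work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}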
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 2731e657cf7f..756fae9eaf8f 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
 		wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 98a01db1f6dc..a45b21b03915 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1417,8 +1417,8 @@ int gfs2_quotad(void *data)
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index fea8dd661d2b..a96cff0c5f1d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -166,7 +166,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald\n");
 		spin_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0fa0123151d3..c0a5f9f1b127 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -173,7 +173,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald2\n");
 		write_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		write_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cc5f811ed383..2eb952c41a69 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index af9606057dde..bb8b661bcc50 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6f00086e340f..81db25e92e10 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,6 +38,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/freezer.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d4bc9ed91748..91943953a370 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -17,6 +17,7 @@
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
 #include <linux/nfs_mount.h>
+#include <linux/freezer.h>
 
 #include "iostat.h"
 #include "internal.h"
@@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d9f4d78c3413..dcda0ba7af60 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -55,6 +55,7 @@
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
+#include <linux/freezer.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f48125da198a..0c672588fe5a 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -41,6 +41,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
+#include <linux/freezer.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
@@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bb24ab6c282f..0e72ad6f22aa 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock(&sci->sc_state_lock);
-			refrigerator();
+			try_to_freeze();
 			spin_lock(&sci->sc_state_lock);
 		} else {
 			DEFINE_WAIT(wait);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2277bcae395f..ce6249dae90c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1702,7 +1702,7 @@ xfsbufd(
 		struct blk_plug plug;
 
 		if (unlikely(freezing(current)))
-			refrigerator();
+			try_to_freeze();
 
 		/* sleep for a long time if there is nothing to do. */
 		if (list_empty(&target->bt_delwri_queue))
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee756..0ab54e16a91f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
 
 /*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
  */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete. However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
  */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
 /*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
  */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	long __retval;							\
+	freezer_do_not_count();						\
+	__retval = schedule_timeout_killable(timeout);			\
+	freezer_count();						\
+	__retval;							\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq,			\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
+
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 	wait_event_interruptible(wq, condition)
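With the rewritten wait_event_freezable() above, a freeze no longer produces a spurious -ERESTARTSYS: the wait simply cycles through try_to_freeze() and retries, and an early return now means either the condition holds or a genuine signal arrived. A hedged usage sketch (hypothetical wait queue and condition, not part of this patch):

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
	static bool example_ready;

	static int example_consumer(void)
	{
		int ret;

		ret = wait_event_freezable(example_wq, example_ready);
		if (ret)
			return ret;	/* -ERESTARTSYS from a real signal */

		/* example_ready is true here; a freeze/thaw cycle may have
		 * happened transparently while we waited. */
		return 0;
	}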
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f65390734..722f477c4ef7 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b3a266..0714b24c0e45 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 5622fa24e97b..60e9994ef405 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -256,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare		NULL
-#define platform_pm_complete		NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend		NULL
 #define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
 #define platform_pm_poweroff		NULL
 #define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
 #else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3f3ed83a9aa5..e4982ac3fbbc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS	NULL
-#endif
-
 /**
  * PM_EVENT_ messages
  *
@@ -521,6 +508,8 @@ struct dev_pm_info {
 	unsigned long		active_jiffies;
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
+	ktime_t			suspend_time;
+	s64			max_time_suspended_ns;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	struct pm_qos_constraints *constraints;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 65633e5a2bc0..a03a0ad998b8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -10,6 +10,7 @@
 #define _LINUX_PM_DOMAIN_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {
 
 struct dev_power_governor {
 	bool (*power_down_ok)(struct dev_pm_domain *domain);
+	bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+	int (*start)(struct device *dev);
+	int (*stop)(struct device *dev);
+	int (*save_state)(struct device *dev);
+	int (*restore_state)(struct device *dev);
+	int (*suspend)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*resume)(struct device *dev);
+	int (*freeze)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*thaw)(struct device *dev);
+	bool (*active_wakeup)(struct device *dev);
 };
 
 struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
+	char *name;
 	unsigned int in_progress;	/* Number of devices being suspended now */
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
 	bool suspend_power_off;	/* Power status before system suspend */
 	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
+	s64 power_off_latency_ns;
 	int (*power_on)(struct generic_pm_domain *domain);
-	int (*start_device)(struct device *dev);
-	int (*stop_device)(struct device *dev);
-	bool (*active_wakeup)(struct device *dev);
+	s64 power_on_latency_ns;
+	struct gpd_dev_ops dev_ops;
+	s64 break_even_ns;	/* Power break even for the entire domain. */
+	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
+	ktime_t power_off_time;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
 	struct list_head slave_node;
 };
 
+struct gpd_timing_data {
+	s64 stop_latency_ns;
+	s64 start_latency_ns;
+	s64 save_state_latency_ns;
+	s64 restore_state_latency_ns;
+	s64 break_even_ns;
+};
+
 struct generic_pm_domain_data {
 	struct pm_domain_data base;
+	struct gpd_dev_ops ops;
+	struct gpd_timing_data td;
 	bool need_restore;
 };
 
@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
-			       struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+	return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+				 struct device *dev,
+				 struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+				  struct gpd_dev_ops *ops,
+				  struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
+
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
 #else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+					struct device *dev,
+					struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
 				      struct device *dev)
 {
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
-				 struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+					 struct gpd_dev_ops *ops,
+					 struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
+{
+}
 static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline bool default_stop_ok(struct device *dev)
+{
+	return false;
+}
+#define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+	return __pm_genpd_remove_callbacks(dev, true);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
 extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
 extern void pm_genpd_poweroff_unused(void);
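Taken together, the pm_domain.h hunks move per-device callbacks out of struct generic_pm_domain into the new gpd_dev_ops, attach optional latency data through gpd_timing_data, and turn pm_genpd_add_device() into a wrapper that passes td == NULL to __pm_genpd_add_device(). A sketch of platform code registering a domain against these declarations (domain name, callback and device are illustrative, not from this patch):

	static int example_pd_power_off(struct generic_pm_domain *domain)
	{
		/* ... cut power to the domain ... */
		return 0;
	}

	static struct generic_pm_domain example_pd = {
		.name		= "example",
		.power_off	= example_pd_power_off,
		/* .power_on, .dev_ops, latency fields, etc. as needed */
	};

	static void example_pd_setup(struct device *dev)
	{
		pm_genpd_init(&example_pd, &simple_qos_governor, true);
		pm_genpd_add_device(&example_pd, dev);	/* timing data NULL */
	}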
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 83b0ea302a80..e5bbcbaa6f57 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_request_active(struct pm_qos_request *req);
 s32 pm_qos_read_value(struct pm_qos_constraints *c);
 
+s32 __dev_pm_qos_read_value(struct device *dev);
 s32 dev_pm_qos_read_value(struct device *dev);
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value);
 #else
 static inline int pm_qos_update_target(struct pm_qos_constraints *c,
 				       struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
 static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
 			{ return 0; }
 
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+			{ return 0; }
 static inline s32 dev_pm_qos_read_value(struct device *dev)
 			{ return 0; }
 static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
 {
 	dev->power.power_state = PMSG_INVALID;
 }
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value)
+			{ return 0; }
 #endif
 
 #endif
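The new dev_pm_qos_add_ancestor_request() takes the same (device, request, value) triple as dev_pm_qos_add_request() but, as the name suggests, attaches the constraint to an ancestor of the given device rather than the device itself. A hedged sketch of a caller (hypothetical driver and latency value, not from this patch):

	static struct dev_pm_qos_request example_qos_req;

	static int example_probe(struct device *dev)
	{
		/* Constrain an ancestor (e.g. the parent controller). */
		return dev_pm_qos_add_ancestor_request(dev, &example_qos_req, 100);
	}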
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d3085e72a0ee..609daae7a014 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+						 s64 delta_ns);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+							s64 delta_ns) {}
+
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cf0eb342bcba..ad93e1ec8c65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)			\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1787,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1803,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 5812fefbcedf..b160645f5599 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -95,6 +95,7 @@ struct intc_desc {
 	unsigned int num_resources;
 	intc_enum force_enable;
 	intc_enum force_disable;
+	bool skip_syscore_suspend;
 	struct intc_hw_desc hw;
 };
 
100 101
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a692432f8a..95040cc33107 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*
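After this change lock_system_sleep() brackets pm_mutex with freezer_do_not_count()/freezer_count(), so a task sleeping on the mutex is skipped by the freezer instead of deadlocking freeze_processes(), and the helpers are now tied to CONFIG_PM_SLEEP rather than CONFIG_HIBERNATE_CALLBACKS. Usage stays the same (hypothetical caller, not code from this patch):

	static void example_exclude_from_sleep(void)
	{
		lock_system_sleep();	/* will not block the freezer */
		/* ... touch state that must not race with suspend/hibernate ... */
		unlock_system_sleep();
	}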
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 213c0351dad8..fcb93fca782d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
  * write_lock css_set_lock (cgroup iterator start)
  *  task->alloc_lock
  * read_lock css_set_lock (cgroup iterator start)
- *  task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *  task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  *   sighand->siglock
  */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
 
@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
 
@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (is_task_frozen_enough(task))
 			continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
-out:
+
 	spin_unlock_irq(&freezer->lock);
 
 	return retval;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5ca38d5d238a..2060c6e57027 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -470,7 +470,7 @@ out:
 	cpu_maps_update_done();
 }
 
-static int alloc_frozen_cpus(void)
+static int __init alloc_frozen_cpus(void)
 {
 	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
 		return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
 }
 
 
-int cpu_hotplug_pm_sync_init(void)
+static int __init cpu_hotplug_pm_sync_init(void)
 {
 	pm_notifier(cpu_hotplug_pm_callback, 0);
 	return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index d579a459309d..d9eab2e4b430 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
diff --git a/kernel/fork.c b/kernel/fork.c
index b058c5820ecd..f34f894c4b98 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c534397..9815b8d1eed5 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state. Can be
+ * called under any context. The freezers are responsible for ensuring the
+ * target tasks see the updated state.
  */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning. The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
  * freeze_task - send a freeze request to given task
  * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- *	PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
  *
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
+ *
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
  */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
-
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
+	/*
+	 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb. Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
+		wake_up_process(p);
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
 
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
  *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
  */
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
 {
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock. This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
 }
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);
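
The rework above pairs freezing_slow_path() with a fast path in include/linux/freezer.h (changed in this series, see the diffstat, but not shown in this section). Per that header change, the common no-freezer-active case costs a single atomic read; a sketch:

	/* Sketch of the freezing() fast path; the header itself is not shown here. */
	static inline bool freezing(struct task_struct *p)
	{
		if (likely(!atomic_read(&system_freezing_cnt)))
			return false;
		return freezing_slow_path(p);
	}
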
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc0829286..090ee10d9604 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
-		mutex_lock(&pm_mutex);
+		lock_system_sleep();
 		pm_prepare_console();
 		error = freeze_processes();
 		if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
 		thaw_processes();
  Restore_console:
 		pm_restore_console();
-		mutex_unlock(&pm_mutex);
+		unlock_system_sleep();
 	}
 #endif
 
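
lock_system_sleep()/unlock_system_sleep() replace raw pm_mutex locking so that a task blocked on the lock cannot make freezing fail; the helpers live in include/linux/suspend.h (not shown here). A sketch of what they amount to, assuming the v3.3-era definitions:

	/* Assumption: v3.3-era helper bodies; shown for orientation only. */
	static inline void lock_system_sleep(void)
	{
		current->flags |= PF_FREEZER_SKIP;	/* freezer may skip us while we wait */
		mutex_lock(&pm_mutex);
	}

	static inline void unlock_system_sleep(void)
	{
		current->flags &= ~PF_FREEZER_SKIP;
		mutex_unlock(&pm_mutex);
	}
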
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a4bea97c75b6..a0a88543934e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -36,6 +36,7 @@
 #include <linux/resource.h>
 #include <linux/notifier.h>
 #include <linux/suspend.h>
+#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -50,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
 static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
 static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
 
 #ifdef CONFIG_MODULES
 
@@ -275,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
  * (used for preventing user land processes from being created after the user
  * land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
  */
 static int usermodehelper_disabled = 1;
 
@@ -282,17 +285,29 @@ static int usermodehelper_disabled = 1;
 static atomic_t running_helpers = ATOMIC_INIT(0);
 
 /*
- * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * Wait queue head used by usermodehelper_disable() to wait for all running
  * helpers to finish.
  */
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
 * Time to wait for running_helpers to become zero before the setting of
- * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ * usermodehelper_disabled in usermodehelper_disable() fails
 */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
+void read_lock_usermodehelper(void)
+{
+	down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+	up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
 /**
  * usermodehelper_disable - prevent new helpers from being started
  */
@@ -300,8 +315,10 @@ int usermodehelper_disable(void)
 {
 	long retval;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 1;
-	smp_mb();
+	up_write(&umhelper_sem);
+
 	/*
 	 * From now on call_usermodehelper_exec() won't start any new
 	 * helpers, so it is sufficient if running_helpers turns out to
@@ -314,7 +331,9 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 	return -EAGAIN;
 }
 
@@ -323,7 +342,9 @@ int usermodehelper_disable(void)
  */
 void usermodehelper_enable(void)
 {
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 }
 
 /**
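
With usermodehelper_disabled now only written under umhelper_sem, a subsystem can hold the new read lock across both the check and the helper invocation, so neither can race with usermodehelper_disable(). A hypothetical caller (names and helper path are illustrative; the firmware loader in the diffstat uses the same pattern):

	#include <linux/kmod.h>

	static int example_run_helper(void)
	{
		static char *argv[] = { "/sbin/example-helper", NULL };
		static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
		int ret = -EBUSY;

		read_lock_usermodehelper();
		/* usermodehelper_disabled cannot flip while we hold the read lock */
		if (!usermodehelper_is_disabled())
			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		read_unlock_usermodehelper();
		return ret;
	}
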
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a92639..3d3de633702e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@ int kthread_should_stop(void)
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary. This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
  *
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
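
A minimal freezable kthread main loop written against the new helper (the worker and its body are illustrative, not part of the patch):

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int example_thread_fn(void *data)
	{
		bool was_frozen;

		set_freezable();	/* clears PF_NOFREEZE under freezer_lock */
		while (!kthread_freezable_should_stop(&was_frozen)) {
			if (was_frozen)
				pr_debug("woke from refrigerator\n");
			/* ... one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}

Because __refrigerator(true) re-checks kthread_should_stop() while frozen, kthread_stop() on such a thread can no longer deadlock against the freezer.
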
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a6b0503574ee..6d6d28870335 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
 enum {
 	HIBERNATION_INVALID,
 	HIBERNATION_PLATFORM,
-	HIBERNATION_TEST,
-	HIBERNATION_TESTPROC,
 	HIBERNATION_SHUTDOWN,
 	HIBERNATION_REBOOT,
 	/* keep last */
@@ -55,7 +53,7 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static bool freezer_test_done;
+bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 		WARN_ON(1);
 		return;
 	}
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	hibernation_ops = ops;
 	if (ops)
 		hibernation_mode = HIBERNATION_PLATFORM;
 	else if (hibernation_mode == HIBERNATION_PLATFORM)
 		hibernation_mode = HIBERNATION_SHUTDOWN;
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 
 static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
 	mdelay(5000);
 }
 
-static int hibernation_testmode(int mode)
-{
-	if (hibernation_mode == mode) {
-		hibernation_debug_sleep();
-		return 1;
-	}
-	return 0;
-}
-
 static int hibernation_test(int level)
 {
 	if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
 	return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
 		goto Platform_finish;
 
 	error = disable_nonboot_cpus();
-	if (error || hibernation_test(TEST_CPUS)
-	    || hibernation_testmode(HIBERNATION_TEST))
+	if (error || hibernation_test(TEST_CPUS))
 		goto Enable_cpus;
 
 	local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
  */
 int hibernation_snapshot(int platform_mode)
 {
-	pm_message_t msg = PMSG_RECOVER;
+	pm_message_t msg;
 	int error;
 
 	error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
 	if (error)
 		goto Cleanup;
 
-	if (hibernation_test(TEST_FREEZER) ||
-	    hibernation_testmode(HIBERNATION_TESTPROC)) {
+	if (hibernation_test(TEST_FREEZER)) {
 
 		/*
 		 * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)
 
 	error = dpm_prepare(PMSG_FREEZE);
 	if (error) {
-		dpm_complete(msg);
+		dpm_complete(PMSG_RECOVER);
 		goto Cleanup;
 	}
 
 	suspend_console();
 	pm_restrict_gfp_mask();
+
 	error = dpm_suspend(PMSG_FREEZE);
-	if (error)
-		goto Recover_platform;
 
-	if (hibernation_test(TEST_DEVICES))
-		goto Recover_platform;
+	if (error || hibernation_test(TEST_DEVICES))
+		platform_recover(platform_mode);
+	else
+		error = create_image(platform_mode);
 
-	error = create_image(platform_mode);
 	/*
-	 * Control returns here (1) after the image has been created or the
+	 * In the case that we call create_image() above, the control
+	 * returns here (1) after the image has been created or the
 	 * image creation has failed and (2) after a successful restore.
 	 */
 
- Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
 	platform_end(platform_mode);
 	return error;
 
- Recover_platform:
-	platform_recover(platform_mode);
-	goto Resume_devices;
-
  Cleanup:
 	swsusp_free();
 	goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
 static void power_down(void)
 {
 	switch (hibernation_mode) {
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
-		break;
 	case HIBERNATION_REBOOT:
 		kernel_restart(NULL);
 		break;
@@ -611,17 +590,6 @@ static void power_down(void)
 	while(1);
 }
 
-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -629,7 +597,7 @@ int hibernate(void)
 {
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");
 
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;
 
@@ -697,7 +665,7 @@ int hibernate(void)
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error;
 }
 
@@ -811,11 +779,13 @@ static int software_resume(void)
 		goto close_finish;
 
 	error = create_basic_memory_bitmaps();
-	if (error)
+	if (error) {
+		usermodehelper_enable();
 		goto close_finish;
+	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
 	[HIBERNATION_PLATFORM]	= "platform",
 	[HIBERNATION_SHUTDOWN]	= "shutdown",
 	[HIBERNATION_REBOOT]	= "reboot",
-	[HIBERNATION_TEST]	= "test",
-	[HIBERNATION_TESTPROC]	= "testproc",
 };
 
 /*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
  * Hibernation can be handled in several ways. There are a few different ways
 * to put the system into the sleep state: using the platform driver (e.g. ACPI
 * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
  *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 5 modes that can be supported:
+ * to be printed. There are 3 modes that can be supported:
 *
 * 'platform'
 * 'shutdown'
 * 'reboot'
- * 'test'
- * 'testproc'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 		switch (i) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-		case HIBERNATION_TEST:
-		case HIBERNATION_TESTPROC:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (len == strlen(hibernation_modes[i])
 		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	switch (mode) {
 	case HIBERNATION_SHUTDOWN:
 	case HIBERNATION_REBOOT:
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
 		hibernation_mode = mode;
 		break;
 	case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (!error)
 		pr_debug("PM: Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error ? error : n;
 }
 
@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	swsusp_resume_device = res;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f0903c32..9824b41e5a18 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2003 Patrick Mochel
  * Copyright (c) 2003 Open Source Development Lab
- * 
+ *
  * This file is released under the GPLv2
  *
  */
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	level = TEST_FIRST;
 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 			break;
 		}
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error ? error : n;
 }
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
  *	'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
  *	'disk' (Suspend-to-Disk).
  *
- * store() accepts one of those strings, translates it into the 
+ * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/* First, check if we are requested to hibernate */
 	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
-		goto Exit; 
+		goto Exit;
 	}
 
 #ifdef CONFIG_SUSPEND
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 23a2db1ec442..0c4defe6d3b8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
 
 /* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5531bc..77274c9ba2f1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
 
 	end_time = jiffies + TIMEOUT;
 
-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();
 
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;
 
 			/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
 	elapsed_csecs = elapsed_csecs64;
 
 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);
 
-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
 	int error;
 
+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
 	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
 {
 	struct task_struct *g, *p;
 
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
 
-		if (nosig_only && should_send_signal(p))
-			continue;
+	oom_killer_enable();
+
+	printk("Restarting tasks ... ");
 
-		if (cgroup_freezing_or_frozen(p))
-			continue;
+	thaw_workqueues();
 
-		thaw_process(p);
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
-}
 
-void thaw_processes(void)
-{
-	oom_killer_enable();
-
-	printk("Restarting tasks ... ");
-	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
 	schedule();
 	printk("done.\n");
 }
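
Note the changed calling convention: try_to_freeze_tasks() no longer cancels leftover freeze requests on failure; instead freeze_processes() and freeze_kernel_threads() call thaw_processes() themselves, which is why prepare_processes() in hibernate.c and the error-path suspend_thaw_processes() call could be dropped. The resulting caller pattern is simply (illustrative sketch):

	static int example_enter_sleep(void)
	{
		int error;

		error = freeze_processes();	/* fully thaws on failure */
		if (error)
			return error;		/* no thaw needed here */

		/* ... suspend devices, enter the sleep state, resume ... */

		thaw_processes();		/* only the success path thaws */
		return 0;
	}
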
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4953dc054c53..4fd51beed879 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
  */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	suspend_ops = ops;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);
 
@@ -106,13 +106,11 @@ static int suspend_prepare(void)
 		goto Finish;
 
 	error = suspend_freeze_processes();
-	if (error) {
-		suspend_stats.failed_freeze++;
-		dpm_save_failed_step(SUSPEND_FREEZE);
-	} else
+	if (!error)
 		return 0;
 
-	suspend_thaw_processes();
+	suspend_stats.failed_freeze++;
+	dpm_save_failed_step(SUSPEND_FREEZE);
 	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6d8f535c2b88..6b1ab7a88522 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -21,6 +21,7 @@
 #include <linux/swapops.h>
 #include <linux/pm.h>
 #include <linux/fs.h>
+#include <linux/compat.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
@@ -30,28 +31,6 @@
 
 #include "power.h"
 
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future. They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE	_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS	_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE	1
-#define PMOPS_ENTER	2
-#define PMOPS_FINISH	3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones. They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT	_IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE	_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP	_IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE	_IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
 
 #define SNAPSHOT_MINOR	231
 
@@ -71,7 +50,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	struct snapshot_data *data;
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -123,7 +102,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	data->platform_support = 0;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error;
 }
@@ -132,7 +111,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	swsusp_free();
 	free_basic_memory_bitmaps();
@@ -146,7 +125,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
 	atomic_inc(&snapshot_device_available);
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return 0;
 }
@@ -158,7 +137,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
 	if (!data->ready) {
@@ -179,7 +158,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 		*offp += res;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
@@ -191,7 +170,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
 
@@ -208,20 +187,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	if (res > 0)
 		*offp += res;
 unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
 
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
-	if (printk_ratelimit())
-		printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
-				"be removed soon, update your suspend-to-disk "
-				"utilities\n",
-				__builtin_return_address(0), cmd);
-}
-
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
@@ -257,11 +227,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		break;
 
 		error = freeze_processes();
-		if (error) {
-			thaw_processes();
+		if (error)
 			usermodehelper_enable();
-		}
-		if (!error)
+		else
 			data->frozen = 1;
 		break;
 
@@ -274,8 +242,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->frozen = 0;
 		break;
 
-	case SNAPSHOT_ATOMIC_SNAPSHOT:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_CREATE_IMAGE:
 		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
 			error = -EPERM;
@@ -283,10 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error)
+		if (!error) {
 			error = put_user(in_suspend, (int __user *)arg);
-		if (!error)
-			data->ready = 1;
+			if (!error && !freezer_test_done)
+				data->ready = 1;
+			if (freezer_test_done) {
+				freezer_test_done = false;
+				thaw_processes();
+			}
+		}
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +276,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->ready = 0;
 		break;
 
-	case SNAPSHOT_SET_IMAGE_SIZE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_PREF_IMAGE_SIZE:
 		image_size = arg;
 		break;
@@ -321,16 +290,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_AVAIL_SWAP:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_AVAIL_SWAP_SIZE:
 		size = count_swap_pages(data->swap, 1);
 		size <<= PAGE_SHIFT;
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_GET_SWAP_PAGE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_ALLOC_SWAP_PAGE:
 		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
 			error = -ENODEV;
@@ -353,27 +318,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		free_all_swap_pages(data->swap);
 		break;
 
-	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		if (!swsusp_swap_in_use()) {
-			/*
-			 * User space encodes device types as two-byte values,
-			 * so we need to recode them
-			 */
-			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg),
-							  0, NULL);
-				if (data->swap < 0)
-					error = -ENODEV;
-			} else {
-				data->swap = -1;
-				error = -EINVAL;
-			}
-		} else {
-			error = -EPERM;
-		}
-		break;
-
 	case SNAPSHOT_S2RAM:
 		if (!data->frozen) {
 			error = -EPERM;
@@ -396,33 +340,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = hibernation_platform_enter();
 		break;
 
-	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		error = -EINVAL;
-
-		switch (arg) {
-
-		case PMOPS_PREPARE:
-			data->platform_support = 1;
-			error = 0;
-			break;
-
-		case PMOPS_ENTER:
-			if (data->platform_support)
-				error = hibernation_platform_enter();
-			break;
-
-		case PMOPS_FINISH:
-			if (data->platform_support)
-				error = 0;
-			break;
-
-		default:
-			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
-		}
-		break;
-
 	case SNAPSHOT_SET_SWAP_AREA:
 		if (swsusp_swap_in_use()) {
 			error = -EPERM;
@@ -464,6 +381,66 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+
+struct compat_resume_swap_area {
+	compat_loff_t offset;
+	u32 dev;
+} __packed;
+
+static long
+snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+	switch (cmd) {
+	case SNAPSHOT_GET_IMAGE_SIZE:
+	case SNAPSHOT_AVAIL_SWAP_SIZE:
+	case SNAPSHOT_ALLOC_SWAP_PAGE: {
+		compat_loff_t __user *uoffset = compat_ptr(arg);
+		loff_t offset;
+		mm_segment_t old_fs;
+		int err;
+
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
+		set_fs(old_fs);
+		if (!err && put_user(offset, uoffset))
+			err = -EFAULT;
+		return err;
+	}
+
+	case SNAPSHOT_CREATE_IMAGE:
+		return snapshot_ioctl(file, cmd,
+				      (unsigned long) compat_ptr(arg));
+
+	case SNAPSHOT_SET_SWAP_AREA: {
+		struct compat_resume_swap_area __user *u_swap_area =
+			compat_ptr(arg);
+		struct resume_swap_area swap_area;
+		mm_segment_t old_fs;
+		int err;
+
+		err = get_user(swap_area.offset, &u_swap_area->offset);
+		err |= get_user(swap_area.dev, &u_swap_area->dev);
+		if (err)
+			return -EFAULT;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
+				     (unsigned long) &swap_area);
+		set_fs(old_fs);
+		return err;
+	}
+
+	default:
+		return snapshot_ioctl(file, cmd, arg);
+	}
+}
+
+#endif /* CONFIG_COMPAT */
+
 static const struct file_operations snapshot_fops = {
 	.open = snapshot_open,
 	.release = snapshot_release,
@@ -471,6 +448,9 @@ static const struct file_operations snapshot_fops = {
 	.write = snapshot_write,
 	.llseek = no_llseek,
 	.unlocked_ioctl = snapshot_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = snapshot_compat_ioctl,
+#endif
 };
 
 static struct miscdevice snapshot_device = {
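
The compat handler mainly converts 32-bit user pointers with compat_ptr() and copies the 64-bit loff_t results back safely. A hypothetical 32-bit userspace caller it serves (illustrative only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/suspend_ioctls.h>	/* SNAPSHOT_GET_IMAGE_SIZE */

	int main(void)
	{
		long long size;	/* filled by the kernel via put_user() */
		int fd = open("/dev/snapshot", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) == 0)
			printf("snapshot image size: %lld bytes\n", size);
		return 0;
	}
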
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 71034f41a2ba..7ba8feae11b8 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 
 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility. Force
-	 * unfreeze of the thread before calling kthread_stop(), otherwise
-	 * it would never exet if it is currently stuck in the refrigerator.
+	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task) {
-		thaw_process(bdi->wb.task);
+	if (bdi->wb.task)
 		kthread_stop(bdi->wb.task);
-	}
 }
 
 /*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 069b64e521fc..eeb27e27dce3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
-				thaw_process(p);
+				__thaw_task(p);
 			return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 00a1a2acd587..3341d8962786 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <linux/sunrpc/clnt.h>
 
@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
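
freezable_schedule() comes from the include/linux/freezer.h change in this same series (see the diffstat): it lets rpc_wait_bit_killable() sleep without blocking the freezer by marking the task as skippable for the duration of the sleep. Per the header change it is essentially:

	static inline void freezable_schedule(void)
	{
		freezer_do_not_count();	/* freezer treats us as already frozen */
		schedule();
		freezer_count();	/* leave skip mode; also calls try_to_freeze() */
	}
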