author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 09:18:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 09:18:39 -0400
commit     7e0bb71e75020348bee523720a0c2f04cc72f540 (patch)
tree       1a22d65bbce34e8cc0f82c543c9486ffb58332f7
parent     b9e2780d576a010d4aba1e69f247170bf3718d6b (diff)
parent     0ab1e79b825a5cd8aeb3b34d89c9a89dea900056 (diff)
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (63 commits)
PM / Clocks: Remove redundant NULL checks before kfree()
PM / Documentation: Update docs about suspend and CPU hotplug
ACPI / PM: Add Sony VGN-FW21E to nonvs blacklist.
ARM: mach-shmobile: sh7372 A4R support (v4)
ARM: mach-shmobile: sh7372 A3SP support (v4)
PM / Sleep: Mark devices involved in wakeup signaling during suspend
PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image
PM / Hibernate: Do not initialize static and extern variables to 0
PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too
PM / Hibernate: Add resumedelay kernel param in addition to resumewait
MAINTAINERS: Update linux-pm list address
PM / ACPI: Blacklist Vaio VGN-FW520F machine known to require acpi_sleep=nonvs
PM / ACPI: Blacklist Sony Vaio known to require acpi_sleep=nonvs
PM / Hibernate: Add resumewait param to support MMC-like devices as resume file
PM / Hibernate: Fix typo in a kerneldoc comment
PM / Hibernate: Freeze kernel threads after preallocating memory
PM: Update the policy on default wakeup settings
PM / VT: Cleanup #if defined uglyness and fix compile error
PM / Suspend: Off by one in pm_suspend()
PM / Hibernate: Include storage keys in hibernation image on s390
...
99 files changed, 4720 insertions, 1044 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
new file mode 100644
index 00000000000..23d78b5aab1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -0,0 +1,52 @@ | |||
1 | What: /sys/class/devfreq/.../ | ||
2 | Date: September 2011 | ||
3 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
4 | Description: | ||
5 | Provide a place in sysfs for the devfreq objects. | ||
6 | This allows accessing various devfreq specific variables. | ||
7 | The name of the devfreq object, denoted as ..., is the same | ||
8 | as the name of the device using devfreq. | ||
9 | |||
10 | What: /sys/class/devfreq/.../governor | ||
11 | Date: September 2011 | ||
12 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
13 | Description: | ||
14 | The /sys/class/devfreq/.../governor shows the name of the | ||
15 | governor used by the corresponding devfreq object. | ||
16 | |||
17 | What: /sys/class/devfreq/.../cur_freq | ||
18 | Date: September 2011 | ||
19 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
20 | Description: | ||
21 | The /sys/class/devfreq/.../cur_freq shows the current | ||
22 | frequency of the corresponding devfreq object. | ||
23 | |||
24 | What: /sys/class/devfreq/.../central_polling | ||
25 | Date: September 2011 | ||
26 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
27 | Description: | ||
28 | The /sys/class/devfreq/.../central_polling shows whether | ||
29 | the devfreq object is using devfreq-provided central | ||
30 | polling mechanism or not. | ||
31 | |||
32 | What: /sys/class/devfreq/.../polling_interval | ||
33 | Date: September 2011 | ||
34 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
35 | Description: | ||
36 | The /sys/class/devfreq/.../polling_interval shows and sets | ||
37 | the requested polling interval of the corresponding devfreq | ||
38 | object. The values are represented in ms. If the value is | ||
39 | less than 1 jiffy, it is considered to be 0, which means | ||
40 | no polling. This value is meaningless if the governor is | ||
41 | not polling. Likewise, if the governor is not using the | ||
42 | devfreq-provided central polling mechanism | ||
43 | (/sys/class/devfreq/.../central_polling is 0), this value | ||
44 | may have no effect. | ||
45 | |||
46 | What: /sys/class/devfreq/.../userspace/set_freq | ||
47 | Date: September 2011 | ||
48 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
49 | Description: | ||
50 | The /sys/class/devfreq/.../userspace/set_freq shows and | ||
51 | sets the requested frequency for the devfreq object if | ||
52 | userspace governor is in effect. | ||
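A minimal userspace sketch of driving the set_freq attribute just described, assuming the userspace governor is active; the device name "mydev" and the 200 MHz target are hypothetical, and the value is assumed to be in Hz:

    /* sketch: request a frequency through the devfreq userspace governor */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/devfreq/mydev/userspace/set_freq", "w");

        if (!f)
            return 1;   /* no such device, or governor not active */
        fprintf(f, "%lu\n", 200000000UL);   /* hypothetical target (Hz) */
        fclose(f);
        return 0;
    }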
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0e6a1290f04..2af94a23a6a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2246,6 +2246,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2246 | in <PAGE_SIZE> units (needed only for swap files). | 2246 | in <PAGE_SIZE> units (needed only for swap files). |
2247 | See Documentation/power/swsusp-and-swap-files.txt | 2247 | See Documentation/power/swsusp-and-swap-files.txt |
2248 | 2248 | ||
2249 | resumedelay= [HIBERNATION] Delay (in seconds) to pause before attempting to | ||
2250 | read the resume files | ||
2251 | |||
2252 | resumewait [HIBERNATION] Wait (indefinitely) for resume device to show up. | ||
2253 | Useful for devices that are detected asynchronously | ||
2254 | (e.g. USB and MMC devices). | ||
2255 | |||
2249 | hibernate= [HIBERNATION] | 2256 | hibernate= [HIBERNATION] |
2250 | noresume Don't check if there's a hibernation image | 2257 | noresume Don't check if there's a hibernation image |
2251 | present during boot. | 2258 | present during boot. |
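For example, a board whose hibernation image lives on an asynchronously probed MMC card might combine the two parameters added above with resume= on the kernel command line (device path hypothetical):

    resume=/dev/mmcblk0p2 resumewait resumedelay=5

The kernel then waits indefinitely for /dev/mmcblk0p2 to appear and pauses five seconds before reading the image.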
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index 45e9d4a9128..a4d682f5423 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -26,6 +26,8 @@ s2ram.txt | |||
26 | - How to get suspend to ram working (and debug it when it isn't) | 26 | - How to get suspend to ram working (and debug it when it isn't) |
27 | states.txt | 27 | states.txt |
28 | - System power management states | 28 | - System power management states |
29 | suspend-and-cpuhotplug.txt | ||
30 | - Explains the interaction between Suspend-to-RAM (S3) and CPU hotplug | ||
29 | swsusp-and-swap-files.txt | 31 | swsusp-and-swap-files.txt |
30 | - Using swap files with software suspend (to disk) | 32 | - Using swap files with software suspend (to disk) |
31 | swsusp-dmcrypt.txt | 33 | swsusp-dmcrypt.txt |
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index 05a7fe76232..40a4c65f380 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -201,3 +201,27 @@ case, you may be able to search for failing drivers by following the procedure | |||
201 | analogous to the one described in section 1. If you find some failing drivers, | 201 | analogous to the one described in section 1. If you find some failing drivers, |
202 | you will have to unload them every time before an STR transition (ie. before | 202 | you will have to unload them every time before an STR transition (ie. before |
203 | you run s2ram), and please report the problems with them. | 203 | you run s2ram), and please report the problems with them. |
204 | |||
205 | There is a debugfs entry which shows the suspend to RAM statistics. Here is an | ||
206 | example of its output. | ||
207 | # mount -t debugfs none /sys/kernel/debug | ||
208 | # cat /sys/kernel/debug/suspend_stats | ||
209 | success: 20 | ||
210 | fail: 5 | ||
211 | failed_freeze: 0 | ||
212 | failed_prepare: 0 | ||
213 | failed_suspend: 5 | ||
214 | failed_suspend_noirq: 0 | ||
215 | failed_resume: 0 | ||
216 | failed_resume_noirq: 0 | ||
217 | failures: | ||
218 | last_failed_dev: alarm | ||
219 | adc | ||
220 | last_failed_errno: -16 | ||
221 | -16 | ||
222 | last_failed_step: suspend | ||
223 | suspend | ||
224 | The success field is the number of successful suspend-to-RAM cycles, and the | ||
225 | fail field is the number of failures. The other fields count failures in the | ||
226 | individual steps of suspend to RAM. suspend_stats lists only the last two | ||
227 | failed devices, error numbers and failed suspend steps. | ||
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3384d5996be..646a89e0c07 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -152,7 +152,9 @@ try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag; | |||
152 | for the most part drivers should not change its value. The initial value of | 152 | for the most part drivers should not change its value. The initial value of |
153 | should_wakeup is supposed to be false for the majority of devices; the major | 153 | should_wakeup is supposed to be false for the majority of devices; the major |
154 | exceptions are power buttons, keyboards, and Ethernet adapters whose WoL | 154 | exceptions are power buttons, keyboards, and Ethernet adapters whose WoL |
155 | (wake-on-LAN) feature has been set up with ethtool. | 155 | (wake-on-LAN) feature has been set up with ethtool. It should also default |
156 | to true for devices that don't generate wakeup requests on their own but merely | ||
157 | forward wakeup requests from one bus to another (like PCI bridges). | ||
156 | 158 | ||
157 | Whether or not a device is capable of issuing wakeup events is a hardware | 159 | Whether or not a device is capable of issuing wakeup events is a hardware |
158 | matter, and the kernel is responsible for keeping track of it. By contrast, | 160 | matter, and the kernel is responsible for keeping track of it. By contrast, |
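A minimal driver-side sketch of the two flags discussed above, using the device_set_wakeup_*() helpers this document names; the probe function and its device pointer are hypothetical:

    #include <linux/pm_wakeup.h>

    static int mydrv_probe(struct device *dev)
    {
        /* hardware can wake the system: sets can_wakeup */
        device_set_wakeup_capable(dev, true);
        /* and should do so by default, e.g. for a power button or a
         * WoL-configured NIC: sets should_wakeup */
        device_set_wakeup_enable(dev, true);
        return 0;
    }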
@@ -279,10 +281,6 @@ When the system goes into the standby or memory sleep state, the phases are: | |||
279 | time.) Unlike the other suspend-related phases, during the prepare | 281 | time.) Unlike the other suspend-related phases, during the prepare |
280 | phase the device tree is traversed top-down. | 282 | phase the device tree is traversed top-down. |
281 | 283 | ||
282 | In addition to that, if device drivers need to allocate additional | ||
283 | memory to be able to hadle device suspend correctly, that should be | ||
284 | done in the prepare phase. | ||
285 | |||
286 | After the prepare callback method returns, no new children may be | 284 | After the prepare callback method returns, no new children may be |
287 | registered below the device. The method may also prepare the device or | 285 | registered below the device. The method may also prepare the device or |
288 | driver in some way for the upcoming system power transition (for | 286 | driver in some way for the upcoming system power transition (for |
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index bfed898a03f..17e130a8034 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -4,14 +4,19 @@ This interface provides a kernel and user mode interface for registering | |||
4 | performance expectations by drivers, subsystems and user space applications on | 4 | performance expectations by drivers, subsystems and user space applications on |
5 | one of the parameters. | 5 | one of the parameters. |
6 | 6 | ||
7 | Currently we have {cpu_dma_latency, network_latency, network_throughput} as the | 7 | Two different PM QoS frameworks are available: |
8 | initial set of pm_qos parameters. | 8 | 1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput. |
9 | 2. The per-device PM QoS framework provides the API to manage per-device | ||
10 | latency constraints. | ||
9 | 11 | ||
10 | Each parameter has defined units: | 12 | Each parameter has defined units:
11 | * latency: usec | 13 | * latency: usec |
12 | * timeout: usec | 14 | * timeout: usec |
13 | * throughput: kbs (kilo bit / sec) | 15 | * throughput: kbs (kilo bit / sec) |
14 | 16 | ||
17 | |||
18 | 1. PM QoS framework | ||
19 | |||
15 | The infrastructure exposes multiple misc device nodes one per implemented | 20 | The infrastructure exposes multiple misc device nodes one per implemented |
16 | parameter. The set of parameters implemented is defined by pm_qos_power_init() | 21 | parameter. The set of parameters implemented is defined by pm_qos_power_init()
17 | and pm_qos_params.h. This is done because having the available parameters | 22 | and pm_qos_params.h. This is done because having the available parameters |
@@ -23,14 +28,18 @@ an aggregated target value. The aggregated target value is updated with | |||
23 | changes to the request list or elements of the list. Typically the | 28 | changes to the request list or elements of the list. Typically the |
24 | aggregated target value is simply the max or min of the request values held | 29 | aggregated target value is simply the max or min of the request values held |
25 | in the parameter list elements. | 30 | in the parameter list elements. |
31 | Note: the aggregated target value is implemented as an atomic variable so that | ||
32 | reading the aggregated value does not require any locking mechanism. | ||
33 | |||
26 | 34 | ||
27 | From kernel mode the use of this interface is simple: | 35 | From kernel mode the use of this interface is simple: |
28 | 36 | ||
29 | handle = pm_qos_add_request(param_class, target_value): | 37 | void pm_qos_add_request(handle, param_class, target_value): |
30 | Will insert an element into the list for that identified PM_QOS class with the | 38 | Will insert an element into the list for that identified PM QoS class with the |
31 | target value. Upon change to this list the new target is recomputed and any | 39 | target value. Upon change to this list the new target is recomputed and any |
32 | registered notifiers are called only if the target value is now different. | 40 | registered notifiers are called only if the target value is now different. |
33 | Clients of pm_qos need to save the returned handle. | 41 | Clients of pm_qos need to save the returned handle for future use in other |
42 | pm_qos API functions. | ||
34 | 43 | ||
35 | void pm_qos_update_request(handle, new_target_value): | 44 | void pm_qos_update_request(handle, new_target_value): |
36 | Will update the list element pointed to by the handle with the new target value | 45 | Will update the list element pointed to by the handle with the new target value |
@@ -42,6 +51,20 @@ Will remove the element. After removal it will update the aggregate target and | |||
42 | call the notification tree if the target was changed as a result of removing | 51 | call the notification tree if the target was changed as a result of removing |
43 | the request. | 52 | the request. |
44 | 53 | ||
54 | int pm_qos_request(param_class): | ||
55 | Returns the aggregated value for a given PM QoS class. | ||
56 | |||
57 | int pm_qos_request_active(handle): | ||
58 | Returns whether the request is still active, i.e. it has not been removed from | ||
59 | a PM QoS class constraints list. | ||
60 | |||
61 | int pm_qos_add_notifier(param_class, notifier): | ||
62 | Adds a notification callback function to the PM QoS class. The callback is | ||
63 | called when the aggregated value for the PM QoS class is changed. | ||
64 | |||
65 | int pm_qos_remove_notifier(int param_class, notifier): | ||
66 | Removes the notification callback function for the PM QoS class. | ||
67 | |||
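A sketch of the kernel-mode calls above, assuming the request handle is a struct pm_qos_request as introduced by this series; the driver functions and the 20-usec value are invented, and PM_QOS_CPU_DMA_LATENCY is the class for the cpu_dma_latency parameter named earlier:

    #include <linux/pm_qos.h>

    static struct pm_qos_request my_qos_req;    /* the saved handle */

    static void mydrv_start_streaming(void)
    {
        /* keep CPU DMA latency at or below 20 usec while streaming */
        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
    }

    static void mydrv_stop_streaming(void)
    {
        /* both calls shown for illustration; remove alone would suffice */
        pm_qos_update_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
        pm_qos_remove_request(&my_qos_req);
    }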
45 | 68 | ||
46 | From user mode: | 69 | From user mode: |
47 | Only processes can register a pm_qos request. To provide for automatic | 70 | Only processes can register a pm_qos request. To provide for automatic |
@@ -63,4 +86,63 @@ To remove the user mode request for a target value simply close the device | |||
63 | node. | 86 | node. |
64 | 87 | ||
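For instance, a process could hold a cpu_dma_latency request through the misc device node and rely on the close-to-remove semantics just described (the 20-usec value is arbitrary):

    /* sketch: hold a user-space PM QoS request via the misc device node */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/cpu_dma_latency", O_WRONLY);
        int32_t usec = 20;              /* request <= 20 usec */

        if (fd < 0)
            return 1;
        write(fd, &usec, sizeof(usec));
        /* ... latency-sensitive work; the request stays active ... */
        close(fd);                      /* closing the node removes it */
        return 0;
    }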
65 | 88 | ||
89 | 2. PM QoS per-device latency framework | ||
90 | |||
91 | For each device a list of performance requests is maintained along with | ||
92 | an aggregated target value. The aggregated target value is updated with | ||
93 | changes to the request list or elements of the list. Typically the | ||
94 | aggregated target value is simply the max or min of the request values held | ||
95 | in the parameter list elements. | ||
96 | Note: the aggregated target value is implemented as an atomic variable so that | ||
97 | reading the aggregated value does not require any locking mechanism. | ||
98 | |||
99 | |||
100 | From kernel mode the use of this interface is the following: | ||
101 | |||
102 | int dev_pm_qos_add_request(device, handle, value): | ||
103 | Will insert an element into the list for that identified device with the | ||
104 | target value. Upon change to this list the new target is recomputed and any | ||
105 | registered notifiers are called only if the target value is now different. | ||
106 | Clients of dev_pm_qos need to save the handle for future use in other | ||
107 | dev_pm_qos API functions. | ||
108 | |||
109 | int dev_pm_qos_update_request(handle, new_value): | ||
110 | Will update the list element pointed to by the handle with the new target value | ||
111 | and recompute the new aggregated target, calling the notification trees if the | ||
112 | target is changed. | ||
113 | |||
114 | int dev_pm_qos_remove_request(handle): | ||
115 | Will remove the element. After removal it will update the aggregate target and | ||
116 | call the notification trees if the target was changed as a result of removing | ||
117 | the request. | ||
118 | |||
119 | s32 dev_pm_qos_read_value(device): | ||
120 | Returns the aggregated value for a given device's constraints list. | ||
121 | |||
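A minimal sketch of the per-device calls above, assuming the handle is a struct dev_pm_qos_request as added by this series; the driver functions and the latency values are illustrative:

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request my_dev_req;

    static void mydrv_constrain(struct device *dev)
    {
        /* cap this device's acceptable latency at 100 usec */
        dev_pm_qos_add_request(dev, &my_dev_req, 100);
    }

    static void mydrv_relax(void)
    {
        dev_pm_qos_update_request(&my_dev_req, 500); /* loosen to 500 usec */
        dev_pm_qos_remove_request(&my_dev_req);
    }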
122 | |||
123 | Notification mechanisms: | ||
124 | The per-device PM QoS framework has two distinct notification trees: | ||
125 | a per-device notification tree and a global notification tree. | ||
126 | |||
127 | int dev_pm_qos_add_notifier(device, notifier): | ||
128 | Adds a notification callback function for the device. | ||
129 | The callback is called when the aggregated value of the device constraints list | ||
130 | is changed. | ||
131 | |||
132 | int dev_pm_qos_remove_notifier(device, notifier): | ||
133 | Removes the notification callback function for the device. | ||
134 | |||
135 | int dev_pm_qos_add_global_notifier(notifier): | ||
136 | Adds a notification callback function in the global notification tree of the | ||
137 | framework. | ||
138 | The callback is called when the aggregated value for any device is changed. | ||
139 | |||
140 | int dev_pm_qos_remove_global_notifier(notifier): | ||
141 | Removes the notification callback function from the global notification tree | ||
142 | of the framework. | ||
143 | |||
144 | |||
145 | From user mode: | ||
146 | No API for user space access to the per-device latency constraints is provided | ||
147 | yet - still under discussion. | ||
66 | 148 | ||
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 6066e3a6b9a..0e856088db7 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -43,13 +43,18 @@ struct dev_pm_ops { | |||
43 | ... | 43 | ... |
44 | }; | 44 | }; |
45 | 45 | ||
46 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are | 46 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks |
47 | executed by the PM core for either the device type, or the class (if the device | 47 | are executed by the PM core for either the power domain, or the device type |
48 | type's struct dev_pm_ops object does not exist), or the bus type (if the | 48 | (if the device power domain's struct dev_pm_ops does not exist), or the class |
49 | device type's and class' struct dev_pm_ops objects do not exist) of the given | 49 | (if the device power domain's and type's struct dev_pm_ops object does not |
50 | device (this allows device types to override callbacks provided by bus types or | 50 | exist), or the bus type (if the device power domain's, type's and class' |
51 | classes if necessary). The bus type, device type and class callbacks are | 51 | struct dev_pm_ops objects do not exist) of the given device. The priority
52 | referred to as subsystem-level callbacks in what follows. | 52 | order of callbacks, from highest to lowest, is: power domain, device type,
53 | class, and bus type; a higher-priority callback takes precedence over a
54 | lower-priority one. The bus type, device type and class callbacks are
55 | referred to as subsystem-level callbacks in what follows; generally
56 | speaking, power domain callbacks are used to represent power domains
57 | within a SoC.
53 | 58 | ||
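The lookup order described above can be sketched in C; this is an illustration of the rule, not the PM core's actual helper:

    #include <linux/device.h>
    #include <linux/pm.h>

    /* pick the dev_pm_ops whose runtime callbacks will be used */
    static const struct dev_pm_ops *pick_pm_ops(struct device *dev)
    {
        if (dev->pm_domain)
            return &dev->pm_domain->ops;    /* power domain first */
        if (dev->type && dev->type->pm)
            return dev->type->pm;           /* then device type */
        if (dev->class && dev->class->pm)
            return dev->class->pm;          /* then class */
        if (dev->bus && dev->bus->pm)
            return dev->bus->pm;            /* bus type last */
        return NULL;
    }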
54 | By default, the callbacks are always invoked in process context with interrupts | 59 | By default, the callbacks are always invoked in process context with interrupts |
55 | enabled. However, subsystems can use the pm_runtime_irq_safe() helper function | 60 | enabled. However, subsystems can use the pm_runtime_irq_safe() helper function |
@@ -477,12 +482,14 @@ pm_runtime_autosuspend_expiration() | |||
477 | If pm_runtime_irq_safe() has been called for a device then the following helper | 482 | If pm_runtime_irq_safe() has been called for a device then the following helper |
478 | functions may also be used in interrupt context: | 483 | functions may also be used in interrupt context: |
479 | 484 | ||
485 | pm_runtime_idle() | ||
480 | pm_runtime_suspend() | 486 | pm_runtime_suspend() |
481 | pm_runtime_autosuspend() | 487 | pm_runtime_autosuspend() |
482 | pm_runtime_resume() | 488 | pm_runtime_resume() |
483 | pm_runtime_get_sync() | 489 | pm_runtime_get_sync() |
484 | pm_runtime_put_sync() | 490 | pm_runtime_put_sync() |
485 | pm_runtime_put_sync_suspend() | 491 | pm_runtime_put_sync_suspend() |
492 | pm_runtime_put_sync_autosuspend() | ||
486 | 493 | ||
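A hedged sketch of the pattern this enables (driver names invented): call pm_runtime_irq_safe() once at probe time, after which the helpers listed above may be used from the interrupt handler:

    #include <linux/interrupt.h>
    #include <linux/pm_runtime.h>

    static int mydrv_probe(struct device *dev)
    {
        pm_runtime_irq_safe(dev);   /* opt in to IRQ-context calls */
        pm_runtime_enable(dev);
        return 0;
    }

    static irqreturn_t mydrv_irq(int irq, void *data)
    {
        struct device *dev = data;

        pm_runtime_get_sync(dev);   /* legal here thanks to irq_safe */
        /* ... handle the event ... */
        pm_runtime_put_sync(dev);
        return IRQ_HANDLED;
    }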
487 | 5. Runtime PM Initialization, Device Probing and Removal | 494 | 5. Runtime PM Initialization, Device Probing and Removal |
488 | 495 | ||
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
new file mode 100644
index 00000000000..f28f9a6f034
--- /dev/null
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -0,0 +1,275 @@ | |||
1 | Interaction of Suspend code (S3) with the CPU hotplug infrastructure | ||
2 | |||
3 | (C) 2011 Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> | ||
4 | |||
5 | |||
6 | I. How does the regular CPU hotplug code differ from how the Suspend-to-RAM | ||
7 | infrastructure uses it internally? And where do they share common code? | ||
8 | |||
9 | Well, a picture is worth a thousand words... So ASCII art follows :-) | ||
10 | |||
11 | [This depicts the current design in the kernel, and focusses only on the | ||
12 | interactions involving the freezer and CPU hotplug and also tries to explain | ||
13 | the locking involved. It outlines the notifications involved as well. | ||
14 | But please note that here, only the call paths are illustrated, with the aim | ||
15 | of describing where they take different paths and where they share code. | ||
16 | What happens when regular CPU hotplug and Suspend-to-RAM race with each other | ||
17 | is not depicted here.] | ||
18 | |||
19 | On a high level, the suspend-resume cycle goes like this: | ||
20 | |||
21 | |Freeze| -> |Disable nonboot| -> |Do suspend| -> |Enable nonboot| -> |Thaw | | ||
22 | |tasks | | cpus | | | | cpus | |tasks| | ||
23 | |||
24 | |||
25 | More details follow: | ||
26 | |||
27 | Suspend call path | ||
28 | ----------------- | ||
29 | |||
30 | Write 'mem' to | ||
31 | /sys/power/state | ||
32 | sysfs file | ||
33 | | | ||
34 | v | ||
35 | Acquire pm_mutex lock | ||
36 | | | ||
37 | v | ||
38 | Send PM_SUSPEND_PREPARE | ||
39 | notifications | ||
40 | | | ||
41 | v | ||
42 | Freeze tasks | ||
43 | | | ||
44 | | | ||
45 | v | ||
46 | disable_nonboot_cpus() | ||
47 | /* start */ | ||
48 | | | ||
49 | v | ||
50 | Acquire cpu_add_remove_lock | ||
51 | | | ||
52 | v | ||
53 | Iterate over CURRENTLY | ||
54 | online CPUs | ||
55 | | | ||
56 | | | ||
57 | | ---------- | ||
58 | v | L | ||
59 | ======> _cpu_down() | | ||
60 | | [This takes cpuhotplug.lock | | ||
61 | Common | before taking down the CPU | | ||
62 | code | and releases it when done] | O | ||
63 | | While it is at it, notifications | | ||
64 | | are sent when notable events occur, | | ||
65 | ======> by running all registered callbacks. | | ||
66 | | | O | ||
67 | | | | ||
68 | | | | ||
69 | v | | ||
70 | Note down these cpus in | P | ||
71 | frozen_cpus mask ---------- | ||
72 | | | ||
73 | v | ||
74 | Disable regular cpu hotplug | ||
75 | by setting cpu_hotplug_disabled=1 | ||
76 | | | ||
77 | v | ||
78 | Release cpu_add_remove_lock | ||
79 | | | ||
80 | v | ||
81 | /* disable_nonboot_cpus() complete */ | ||
82 | | | ||
83 | v | ||
84 | Do suspend | ||
85 | |||
86 | |||
87 | |||
88 | Resume works likewise, with the counterparts being (in the order of | ||
89 | execution during resume): | ||
90 | * enable_nonboot_cpus() which involves: | ||
91 | | Acquire cpu_add_remove_lock | ||
92 | | Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug | ||
93 | | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop] | ||
94 | | Release cpu_add_remove_lock | ||
95 | v | ||
96 | |||
97 | * thaw tasks | ||
98 | * send PM_POST_SUSPEND notifications | ||
99 | * Release pm_mutex lock. | ||
100 | |||
101 | |||
102 | Note that the pm_mutex lock is acquired at the very beginning of suspend | ||
103 | and released only after the entire cycle is complete (i.e., suspend + | ||
104 | resume). | ||
105 | |||
106 | |||
107 | |||
108 | Regular CPU hotplug call path | ||
109 | ----------------------------- | ||
110 | |||
111 | Write 0 (or 1) to | ||
112 | /sys/devices/system/cpu/cpu*/online | ||
113 | sysfs file | ||
114 | | | ||
115 | | | ||
116 | v | ||
117 | cpu_down() | ||
118 | | | ||
119 | v | ||
120 | Acquire cpu_add_remove_lock | ||
121 | | | ||
122 | v | ||
123 | If cpu_hotplug_disabled is 1 | ||
124 | return gracefully | ||
125 | | | ||
126 | | | ||
127 | v | ||
128 | ======> _cpu_down() | ||
129 | | [This takes cpuhotplug.lock | ||
130 | Common | before taking down the CPU | ||
131 | code | and releases it when done] | ||
132 | | While it is at it, notifications | ||
133 | | are sent when notable events occur, | ||
134 | ======> by running all registered callbacks. | ||
135 | | | ||
136 | | | ||
137 | v | ||
138 | Release cpu_add_remove_lock | ||
139 | [That's it!, for | ||
140 | regular CPU hotplug] | ||
141 | |||
142 | |||
143 | |||
144 | So, as can be seen from the two diagrams (the parts marked as "Common code"), | ||
145 | regular CPU hotplug and the suspend code path converge at the _cpu_down() and | ||
146 | _cpu_up() functions. They differ in the arguments passed to these functions, | ||
147 | in that during regular CPU hotplug, 0 is passed for the 'tasks_frozen' | ||
148 | argument. But during suspend, since the tasks are already frozen by the time | ||
149 | the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called | ||
150 | with the 'tasks_frozen' argument set to 1. | ||
151 | [See below for some known issues regarding this.] | ||
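For illustration, a CPU hotplug callback typically keys off the _FROZEN event variants that the 'tasks_frozen' argument selects; the callback name and the per-CPU bookkeeping are invented:

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
        switch (action) {
        case CPU_DEAD:          /* regular hotplug: tasks_frozen == 0 */
            /* e.g. release per-CPU state for good */
            break;
        case CPU_DEAD_FROZEN:   /* suspend/hibernate: tasks_frozen == 1 */
            /* keep state around for the resume path */
            break;
        }
        return NOTIFY_OK;
    }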
152 | |||
153 | |||
154 | Important files and functions/entry points: | ||
155 | ------------------------------------------ | ||
156 | |||
157 | kernel/power/process.c : freeze_processes(), thaw_processes() | ||
158 | kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish() | ||
159 | kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus() | ||
160 | |||
161 | |||
162 | |||
163 | II. What are the issues involved in CPU hotplug? | ||
164 | ------------------------------------------- | ||
165 | |||
166 | There are some interesting situations involving CPU hotplug and microcode | ||
167 | update on the CPUs, as discussed below: | ||
168 | |||
169 | [Please bear in mind that the kernel requests the microcode images from | ||
170 | userspace, using the request_firmware() function defined in | ||
171 | drivers/base/firmware_class.c] | ||
172 | |||
173 | |||
174 | a. When all the CPUs are identical: | ||
175 | |||
176 | This is the most common situation and it is quite straightforward: we want | ||
177 | to apply the same microcode revision to each of the CPUs. | ||
178 | To give an example of x86, the collect_cpu_info() function defined in | ||
179 | arch/x86/kernel/microcode_core.c helps in discovering the type of the CPU | ||
180 | and thereby in applying the correct microcode revision to it. | ||
181 | But note that the kernel does not maintain a common microcode image for | ||
182 | all the CPUs, in order to handle case 'b' described below. | ||
183 | |||
184 | |||
185 | b. When some of the CPUs are different from the rest: | ||
186 | |||
187 | In this case since we probably need to apply different microcode revisions | ||
188 | to different CPUs, the kernel maintains a copy of the correct microcode | ||
189 | image for each CPU (after appropriate CPU type/model discovery using | ||
190 | functions such as collect_cpu_info()). | ||
191 | |||
192 | |||
193 | c. When a CPU is physically hot-unplugged and a new (and possibly different | ||
194 | type of) CPU is hot-plugged into the system: | ||
195 | |||
196 | In the current design of the kernel, whenever a CPU is taken offline during | ||
197 | a regular CPU hotplug operation, upon receiving the CPU_DEAD notification | ||
198 | (which is sent by the CPU hotplug code), the microcode update driver's | ||
199 | callback for that event reacts by freeing the kernel's copy of the | ||
200 | microcode image for that CPU. | ||
201 | |||
202 | Hence, when a new CPU is brought online, since the kernel finds that it | ||
203 | doesn't have the microcode image, it does the CPU type/model discovery | ||
204 | afresh and then requests the userspace for the appropriate microcode image | ||
205 | for that CPU, which is subsequently applied. | ||
206 | |||
207 | For example, in x86, the mc_cpu_callback() function (which is the microcode | ||
208 | update driver's callback registered for CPU hotplug events) calls | ||
209 | microcode_update_cpu() which would call microcode_init_cpu() in this case, | ||
210 | instead of microcode_resume_cpu() when it finds that the kernel doesn't | ||
211 | have a valid microcode image. This ensures that the CPU type/model | ||
212 | discovery is performed and the right microcode is applied to the CPU after | ||
213 | getting it from userspace. | ||
214 | |||
215 | |||
216 | d. Handling microcode update during suspend/hibernate: | ||
217 | |||
218 | Strictly speaking, during a CPU hotplug operation which does not involve | ||
219 | physically removing or inserting CPUs, the CPUs are not actually powered | ||
220 | off during a CPU offline. They are just put to the lowest C-states possible. | ||
221 | Hence, in such a case, it is not really necessary to re-apply microcode | ||
222 | when the CPUs are brought back online, since they wouldn't have lost the | ||
223 | image during the CPU offline operation. | ||
224 | |||
225 | This is the usual scenario encountered during a resume after a suspend. | ||
226 | However, in the case of hibernation, since all the CPUs are completely | ||
227 | powered off, during restore it becomes necessary to apply the microcode | ||
228 | images to all the CPUs. | ||
229 | |||
230 | [Note that we don't expect someone to physically pull out nodes and insert | ||
231 | nodes with a different type of CPUs in-between a suspend-resume or a | ||
232 | hibernate/restore cycle.] | ||
233 | |||
234 | In the current design of the kernel however, during a CPU offline operation | ||
235 | as part of the suspend/hibernate cycle (the CPU_DEAD_FROZEN notification), | ||
236 | the existing copy of microcode image in the kernel is not freed up. | ||
237 | And during the CPU online operations (during resume/restore), since the | ||
238 | kernel finds that it already has copies of the microcode images for all the | ||
239 | CPUs, it just applies them to the CPUs, avoiding any re-discovery of CPU | ||
240 | type/model and the need for validating whether the microcode revisions are | ||
241 | right for the CPUs or not (due to the above assumption that physical CPU | ||
242 | hotplug will not be done in-between suspend/resume or hibernate/restore | ||
243 | cycles). | ||
244 | |||
245 | |||
246 | III. Are there any known problems when regular CPU hotplug and suspend race | ||
247 | with each other? | ||
248 | |||
249 | Yes, they are listed below: | ||
250 | |||
251 | 1. When invoking regular CPU hotplug, the 'tasks_frozen' argument passed to | ||
252 | the _cpu_down() and _cpu_up() functions is *always* 0. | ||
253 | This might not reflect the true current state of the system, since the | ||
254 | tasks could have been frozen by an out-of-band event such as a suspend | ||
255 | operation in progress. Hence, it will lead to wrong notifications being | ||
256 | sent during the cpu online/offline events (eg, CPU_ONLINE notification | ||
257 | instead of CPU_ONLINE_FROZEN) which in turn will lead to execution of | ||
258 | inappropriate code by the callbacks registered for such CPU hotplug events. | ||
259 | |||
260 | 2. If a regular CPU hotplug stress test happens to race with the freezer due | ||
261 | to a suspend operation in progress at the same time, then we could hit the | ||
262 | situation described below: | ||
263 | |||
264 | * A regular cpu online operation continues its journey from userspace | ||
265 | into the kernel, since the freezing has not yet begun. | ||
266 | * Then freezer gets to work and freezes userspace. | ||
267 | * If cpu online has not yet completed the microcode update stuff by now, | ||
268 | it will now start waiting on the frozen userspace in the | ||
269 | TASK_UNINTERRUPTIBLE state, in order to get the microcode image. | ||
270 | * Now the freezer continues and tries to freeze the remaining tasks. But | ||
271 | due to this wait mentioned above, the freezer won't be able to freeze | ||
272 | the cpu online hotplug task and hence freezing of tasks fails. | ||
273 | |||
274 | As a result of this task freezing failure, the suspend operation gets | ||
275 | aborted. | ||
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 9d85d96ec6e..12511c98cc4 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -439,10 +439,10 @@ cause autosuspends to fail with -EBUSY if the driver needs to use the | |||
439 | device. | 439 | device. |
440 | 440 | ||
441 | External suspend calls should never be allowed to fail in this way, | 441 | External suspend calls should never be allowed to fail in this way, |
442 | only autosuspend calls. The driver can tell them apart by checking | 442 | only autosuspend calls. The driver can tell them apart by applying |
443 | the PM_EVENT_AUTO bit in the message.event argument to the suspend | 443 | the PMSG_IS_AUTO() macro to the message argument to the suspend |
444 | method; this bit will be set for internal PM events (autosuspend) and | 444 | method; it will return True for internal PM events (autosuspend) and |
445 | clear for external PM events. | 445 | False for external PM events. |
446 | 446 | ||
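A short sketch of that check (driver internals such as mydrv_quiesce() are invented; PMSG_IS_AUTO() is the macro named above):

    #include <linux/usb.h>

    static int mydrv_suspend(struct usb_interface *intf, pm_message_t message)
    {
        struct mydrv *priv = usb_get_intfdata(intf);

        if (PMSG_IS_AUTO(message) && priv->busy)
            return -EBUSY;      /* refuse autosuspend only */

        /* external (system) suspend must not fail this way */
        mydrv_quiesce(priv);
        return 0;
    }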
447 | 447 | ||
448 | Mutual exclusion | 448 | Mutual exclusion |
diff --git a/MAINTAINERS b/MAINTAINERS
index 04c0dd8d992..566ab1c23a3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2760,7 +2760,7 @@ F: fs/freevxfs/ | |||
2760 | FREEZER | 2760 | FREEZER |
2761 | M: Pavel Machek <pavel@ucw.cz> | 2761 | M: Pavel Machek <pavel@ucw.cz> |
2762 | M: "Rafael J. Wysocki" <rjw@sisk.pl> | 2762 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
2763 | L: linux-pm@lists.linux-foundation.org | 2763 | L: linux-pm@vger.kernel.org |
2764 | S: Supported | 2764 | S: Supported |
2765 | F: Documentation/power/freezing-of-tasks.txt | 2765 | F: Documentation/power/freezing-of-tasks.txt |
2766 | F: include/linux/freezer.h | 2766 | F: include/linux/freezer.h |
@@ -3022,7 +3022,7 @@ F: drivers/video/hgafb.c | |||
3022 | HIBERNATION (aka Software Suspend, aka swsusp) | 3022 | HIBERNATION (aka Software Suspend, aka swsusp) |
3023 | M: Pavel Machek <pavel@ucw.cz> | 3023 | M: Pavel Machek <pavel@ucw.cz> |
3024 | M: "Rafael J. Wysocki" <rjw@sisk.pl> | 3024 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
3025 | L: linux-pm@lists.linux-foundation.org | 3025 | L: linux-pm@vger.kernel.org |
3026 | S: Supported | 3026 | S: Supported |
3027 | F: arch/x86/power/ | 3027 | F: arch/x86/power/ |
3028 | F: drivers/base/power/ | 3028 | F: drivers/base/power/ |
@@ -3217,7 +3217,7 @@ F: drivers/ide/ide-cd* | |||
3217 | 3217 | ||
3218 | IDLE-I7300 | 3218 | IDLE-I7300 |
3219 | M: Andy Henroid <andrew.d.henroid@intel.com> | 3219 | M: Andy Henroid <andrew.d.henroid@intel.com> |
3220 | L: linux-pm@lists.linux-foundation.org | 3220 | L: linux-pm@vger.kernel.org |
3221 | S: Supported | 3221 | S: Supported |
3222 | F: drivers/idle/i7300_idle.c | 3222 | F: drivers/idle/i7300_idle.c |
3223 | 3223 | ||
@@ -3300,7 +3300,7 @@ F: firmware/isci/ | |||
3300 | 3300 | ||
3301 | INTEL IDLE DRIVER | 3301 | INTEL IDLE DRIVER |
3302 | M: Len Brown <lenb@kernel.org> | 3302 | M: Len Brown <lenb@kernel.org> |
3303 | L: linux-pm@lists.linux-foundation.org | 3303 | L: linux-pm@vger.kernel.org |
3304 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git | 3304 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git |
3305 | S: Supported | 3305 | S: Supported |
3306 | F: drivers/idle/intel_idle.c | 3306 | F: drivers/idle/intel_idle.c |
@@ -3397,7 +3397,7 @@ F: drivers/net/ethernet/intel/ | |||
3397 | 3397 | ||
3398 | INTEL MRST PMU DRIVER | 3398 | INTEL MRST PMU DRIVER |
3399 | M: Len Brown <len.brown@intel.com> | 3399 | M: Len Brown <len.brown@intel.com> |
3400 | L: linux-pm@lists.linux-foundation.org | 3400 | L: linux-pm@vger.kernel.org |
3401 | S: Supported | 3401 | S: Supported |
3402 | F: arch/x86/platform/mrst/pmu.* | 3402 | F: arch/x86/platform/mrst/pmu.* |
3403 | 3403 | ||
@@ -6338,7 +6338,7 @@ SUSPEND TO RAM | |||
6338 | M: Len Brown <len.brown@intel.com> | 6338 | M: Len Brown <len.brown@intel.com> |
6339 | M: Pavel Machek <pavel@ucw.cz> | 6339 | M: Pavel Machek <pavel@ucw.cz> |
6340 | M: "Rafael J. Wysocki" <rjw@sisk.pl> | 6340 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
6341 | L: linux-pm@lists.linux-foundation.org | 6341 | L: linux-pm@vger.kernel.org |
6342 | S: Supported | 6342 | S: Supported |
6343 | F: Documentation/power/ | 6343 | F: Documentation/power/ |
6344 | F: arch/x86/kernel/acpi/ | 6344 | F: arch/x86/kernel/acpi/ |
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 22a53766962..d9145dfc2a3 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/pm_qos_params.h> | 21 | #include <linux/pm_qos.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 943072d5a1d..7868e75ad07 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/pm_runtime.h> | 15 | #include <linux/pm_runtime.h> |
16 | #include <linux/pm_clock.h> | ||
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/mutex.h> | 18 | #include <linux/mutex.h> |
18 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 523f608eb8c..7e90d064ebc 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/leds.h> | 42 | #include <linux/leds.h> |
43 | #include <linux/input/sh_keysc.h> | 43 | #include <linux/input/sh_keysc.h> |
44 | #include <linux/usb/r8a66597.h> | 44 | #include <linux/usb/r8a66597.h> |
45 | #include <linux/pm_clock.h> | ||
45 | 46 | ||
46 | #include <media/sh_mobile_ceu.h> | 47 | #include <media/sh_mobile_ceu.h> |
47 | #include <media/sh_mobile_csi2.h> | 48 | #include <media/sh_mobile_csi2.h> |
@@ -1408,6 +1409,11 @@ static void __init ap4evb_init(void) | |||
1408 | sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); | 1409 | sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); |
1409 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); | 1410 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); |
1410 | 1411 | ||
1412 | sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); | ||
1413 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); | ||
1414 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device); | ||
1415 | sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device); | ||
1416 | |||
1411 | hdmi_init_pm_clock(); | 1417 | hdmi_init_pm_clock(); |
1412 | fsi_init_pm_clock(); | 1418 | fsi_init_pm_clock(); |
1413 | sh7372_pm_init(); | 1419 | sh7372_pm_init(); |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 802aa58ef9d..00273dad5bf 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,7 +39,7 @@ | |||
39 | #include <linux/mtd/mtd.h> | 39 | #include <linux/mtd/mtd.h> |
40 | #include <linux/mtd/partitions.h> | 40 | #include <linux/mtd/partitions.h> |
41 | #include <linux/mtd/physmap.h> | 41 | #include <linux/mtd/physmap.h> |
42 | #include <linux/pm_runtime.h> | 42 | #include <linux/pm_clock.h> |
43 | #include <linux/smsc911x.h> | 43 | #include <linux/smsc911x.h> |
44 | #include <linux/sh_intc.h> | 44 | #include <linux/sh_intc.h> |
45 | #include <linux/tca6416_keypad.h> | 45 | #include <linux/tca6416_keypad.h> |
@@ -1589,6 +1589,15 @@ static void __init mackerel_init(void) | |||
1589 | sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); | 1589 | sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); |
1590 | sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); | 1590 | sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); |
1591 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); | 1591 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); |
1592 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device); | ||
1593 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device); | ||
1594 | sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); | ||
1595 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); | ||
1596 | #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) | ||
1597 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device); | ||
1598 | #endif | ||
1599 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi2_device); | ||
1600 | sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device); | ||
1592 | 1601 | ||
1593 | hdmi_init_pm_clock(); | 1602 | hdmi_init_pm_clock(); |
1594 | sh7372_pm_init(); | 1603 | sh7372_pm_init(); |
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 06aecb31d9c..c0cdbf997c9 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void); | |||
35 | extern void sh7372_clock_init(void); | 35 | extern void sh7372_clock_init(void); |
36 | extern void sh7372_pinmux_init(void); | 36 | extern void sh7372_pinmux_init(void); |
37 | extern void sh7372_pm_init(void); | 37 | extern void sh7372_pm_init(void); |
38 | extern void sh7372_cpu_suspend(void); | 38 | extern void sh7372_resume_core_standby_a3sm(void); |
39 | extern void sh7372_cpu_resume(void); | 39 | extern int sh7372_do_idle_a3sm(unsigned long unused); |
40 | extern struct clk sh7372_extal1_clk; | 40 | extern struct clk sh7372_extal1_clk; |
41 | extern struct clk sh7372_extal2_clk; | 41 | extern struct clk sh7372_extal2_clk; |
42 | 42 | ||
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 24e63a85e66..84532f9629b 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -479,7 +479,12 @@ struct platform_device; | |||
479 | 479 | ||
480 | struct sh7372_pm_domain { | 480 | struct sh7372_pm_domain { |
481 | struct generic_pm_domain genpd; | 481 | struct generic_pm_domain genpd; |
482 | struct dev_power_governor *gov; | ||
483 | void (*suspend)(void); | ||
484 | void (*resume)(void); | ||
482 | unsigned int bit_shift; | 485 | unsigned int bit_shift; |
486 | bool no_debug; | ||
487 | bool stay_on; | ||
483 | }; | 488 | }; |
484 | 489 | ||
485 | static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) | 490 | static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) |
@@ -491,16 +496,24 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) | |||
491 | extern struct sh7372_pm_domain sh7372_a4lc; | 496 | extern struct sh7372_pm_domain sh7372_a4lc; |
492 | extern struct sh7372_pm_domain sh7372_a4mp; | 497 | extern struct sh7372_pm_domain sh7372_a4mp; |
493 | extern struct sh7372_pm_domain sh7372_d4; | 498 | extern struct sh7372_pm_domain sh7372_d4; |
499 | extern struct sh7372_pm_domain sh7372_a4r; | ||
494 | extern struct sh7372_pm_domain sh7372_a3rv; | 500 | extern struct sh7372_pm_domain sh7372_a3rv; |
495 | extern struct sh7372_pm_domain sh7372_a3ri; | 501 | extern struct sh7372_pm_domain sh7372_a3ri; |
502 | extern struct sh7372_pm_domain sh7372_a3sp; | ||
496 | extern struct sh7372_pm_domain sh7372_a3sg; | 503 | extern struct sh7372_pm_domain sh7372_a3sg; |
497 | 504 | ||
498 | extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); | 505 | extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); |
499 | extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, | 506 | extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, |
500 | struct platform_device *pdev); | 507 | struct platform_device *pdev); |
508 | extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, | ||
509 | struct sh7372_pm_domain *sh7372_sd); | ||
501 | #else | 510 | #else |
502 | #define sh7372_init_pm_domain(pd) do { } while(0) | 511 | #define sh7372_init_pm_domain(pd) do { } while(0) |
503 | #define sh7372_add_device_to_domain(pd, pdev) do { } while(0) | 512 | #define sh7372_add_device_to_domain(pd, pdev) do { } while(0) |
513 | #define sh7372_pm_add_subdomain(pd, sd) do { } while(0) | ||
504 | #endif /* CONFIG_PM */ | 514 | #endif /* CONFIG_PM */ |
505 | 515 | ||
516 | extern void sh7372_intcs_suspend(void); | ||
517 | extern void sh7372_intcs_resume(void); | ||
518 | |||
506 | #endif /* __ASM_SH7372_H__ */ | 519 | #endif /* __ASM_SH7372_H__ */ |
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 739315e30eb..29cdc0522d9 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -606,9 +606,16 @@ static void intcs_demux(unsigned int irq, struct irq_desc *desc) | |||
606 | generic_handle_irq(intcs_evt2irq(evtcodeas)); | 606 | generic_handle_irq(intcs_evt2irq(evtcodeas)); |
607 | } | 607 | } |
608 | 608 | ||
609 | static void __iomem *intcs_ffd2; | ||
610 | static void __iomem *intcs_ffd5; | ||
611 | |||
609 | void __init sh7372_init_irq(void) | 612 | void __init sh7372_init_irq(void) |
610 | { | 613 | { |
611 | void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); | 614 | void __iomem *intevtsa; |
615 | |||
616 | intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE); | ||
617 | intevtsa = intcs_ffd2 + 0x100; | ||
618 | intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE); | ||
612 | 619 | ||
613 | register_intc_controller(&intca_desc); | 620 | register_intc_controller(&intca_desc); |
614 | register_intc_controller(&intcs_desc); | 621 | register_intc_controller(&intcs_desc); |
@@ -617,3 +624,46 @@ void __init sh7372_init_irq(void) | |||
617 | irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); | 624 | irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); |
618 | irq_set_chained_handler(evt2irq(0xf80), intcs_demux); | 625 | irq_set_chained_handler(evt2irq(0xf80), intcs_demux); |
619 | } | 626 | } |
627 | |||
628 | static unsigned short ffd2[0x200]; | ||
629 | static unsigned short ffd5[0x100]; | ||
630 | |||
631 | void sh7372_intcs_suspend(void) | ||
632 | { | ||
633 | int k; | ||
634 | |||
635 | for (k = 0x00; k <= 0x30; k += 4) | ||
636 | ffd2[k] = __raw_readw(intcs_ffd2 + k); | ||
637 | |||
638 | for (k = 0x80; k <= 0xb0; k += 4) | ||
639 | ffd2[k] = __raw_readb(intcs_ffd2 + k); | ||
640 | |||
641 | for (k = 0x180; k <= 0x188; k += 4) | ||
642 | ffd2[k] = __raw_readb(intcs_ffd2 + k); | ||
643 | |||
644 | for (k = 0x00; k <= 0x3c; k += 4) | ||
645 | ffd5[k] = __raw_readw(intcs_ffd5 + k); | ||
646 | |||
647 | for (k = 0x80; k <= 0x9c; k += 4) | ||
648 | ffd5[k] = __raw_readb(intcs_ffd5 + k); | ||
649 | } | ||
650 | |||
651 | void sh7372_intcs_resume(void) | ||
652 | { | ||
653 | int k; | ||
654 | |||
655 | for (k = 0x00; k <= 0x30; k += 4) | ||
656 | __raw_writew(ffd2[k], intcs_ffd2 + k); | ||
657 | |||
658 | for (k = 0x80; k <= 0xb0; k += 4) | ||
659 | __raw_writeb(ffd2[k], intcs_ffd2 + k); | ||
660 | |||
661 | for (k = 0x180; k <= 0x188; k += 4) | ||
662 | __raw_writeb(ffd2[k], intcs_ffd2 + k); | ||
663 | |||
664 | for (k = 0x00; k <= 0x3c; k += 4) | ||
665 | __raw_writew(ffd5[k], intcs_ffd5 + k); | ||
666 | |||
667 | for (k = 0x80; k <= 0x9c; k += 4) | ||
668 | __raw_writeb(ffd5[k], intcs_ffd5 + k); | ||
669 | } | ||
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 933fb411be0..79612737c5b 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,23 +15,61 @@ | |||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_clock.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/irq.h> | ||
22 | #include <linux/bitrev.h> | ||
21 | #include <asm/system.h> | 23 | #include <asm/system.h> |
22 | #include <asm/io.h> | 24 | #include <asm/io.h> |
23 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/suspend.h> | ||
24 | #include <mach/common.h> | 27 | #include <mach/common.h> |
25 | #include <mach/sh7372.h> | 28 | #include <mach/sh7372.h> |
26 | 29 | ||
27 | #define SMFRAM 0xe6a70000 | 30 | /* DBG */ |
28 | #define SYSTBCR 0xe6150024 | 31 | #define DBGREG1 0xe6100020 |
29 | #define SBAR 0xe6180020 | 32 | #define DBGREG9 0xe6100040 |
30 | #define APARMBAREA 0xe6f10020 | ||
31 | 33 | ||
34 | /* CPGA */ | ||
35 | #define SYSTBCR 0xe6150024 | ||
36 | #define MSTPSR0 0xe6150030 | ||
37 | #define MSTPSR1 0xe6150038 | ||
38 | #define MSTPSR2 0xe6150040 | ||
39 | #define MSTPSR3 0xe6150048 | ||
40 | #define MSTPSR4 0xe615004c | ||
41 | #define PLLC01STPCR 0xe61500c8 | ||
42 | |||
43 | /* SYSC */ | ||
32 | #define SPDCR 0xe6180008 | 44 | #define SPDCR 0xe6180008 |
33 | #define SWUCR 0xe6180014 | 45 | #define SWUCR 0xe6180014 |
46 | #define SBAR 0xe6180020 | ||
47 | #define WUPRMSK 0xe6180028 | ||
48 | #define WUPSMSK 0xe618002c | ||
49 | #define WUPSMSK2 0xe6180048 | ||
34 | #define PSTR 0xe6180080 | 50 | #define PSTR 0xe6180080 |
51 | #define WUPSFAC 0xe6180098 | ||
52 | #define IRQCR 0xe618022c | ||
53 | #define IRQCR2 0xe6180238 | ||
54 | #define IRQCR3 0xe6180244 | ||
55 | #define IRQCR4 0xe6180248 | ||
56 | #define PDNSEL 0xe6180254 | ||
57 | |||
58 | /* INTC */ | ||
59 | #define ICR1A 0xe6900000 | ||
60 | #define ICR2A 0xe6900004 | ||
61 | #define ICR3A 0xe6900008 | ||
62 | #define ICR4A 0xe690000c | ||
63 | #define INTMSK00A 0xe6900040 | ||
64 | #define INTMSK10A 0xe6900044 | ||
65 | #define INTMSK20A 0xe6900048 | ||
66 | #define INTMSK30A 0xe690004c | ||
67 | |||
68 | /* MFIS */ | ||
69 | #define SMFRAM 0xe6a70000 | ||
70 | |||
71 | /* AP-System Core */ | ||
72 | #define APARMBAREA 0xe6f10020 | ||
35 | 73 | ||
36 | #define PSTR_RETRIES 100 | 74 | #define PSTR_RETRIES 100 |
37 | #define PSTR_DELAY_US 10 | 75 | #define PSTR_DELAY_US 10 |
@@ -43,6 +81,12 @@ static int pd_power_down(struct generic_pm_domain *genpd) | |||
43 | struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); | 81 | struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); |
44 | unsigned int mask = 1 << sh7372_pd->bit_shift; | 82 | unsigned int mask = 1 << sh7372_pd->bit_shift; |
45 | 83 | ||
84 | if (sh7372_pd->suspend) | ||
85 | sh7372_pd->suspend(); | ||
86 | |||
87 | if (sh7372_pd->stay_on) | ||
88 | return 0; | ||
89 | |||
46 | if (__raw_readl(PSTR) & mask) { | 90 | if (__raw_readl(PSTR) & mask) { |
47 | unsigned int retry_count; | 91 | unsigned int retry_count; |
48 | 92 | ||
@@ -55,8 +99,9 @@ static int pd_power_down(struct generic_pm_domain *genpd) | |||
55 | } | 99 | } |
56 | } | 100 | } |
57 | 101 | ||
58 | pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", | 102 | if (!sh7372_pd->no_debug) |
59 | mask, __raw_readl(PSTR)); | 103 | pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", |
104 | mask, __raw_readl(PSTR)); | ||
60 | 105 | ||
61 | return 0; | 106 | return 0; |
62 | } | 107 | } |
@@ -68,6 +113,9 @@ static int pd_power_up(struct generic_pm_domain *genpd) | |||
68 | unsigned int retry_count; | 113 | unsigned int retry_count; |
69 | int ret = 0; | 114 | int ret = 0; |
70 | 115 | ||
116 | if (sh7372_pd->stay_on) | ||
117 | goto out; | ||
118 | |||
71 | if (__raw_readl(PSTR) & mask) | 119 | if (__raw_readl(PSTR) & mask) |
72 | goto out; | 120 | goto out; |
73 | 121 | ||
@@ -84,66 +132,48 @@ static int pd_power_up(struct generic_pm_domain *genpd) | |||
84 | if (__raw_readl(SWUCR) & mask) | 132 | if (__raw_readl(SWUCR) & mask) |
85 | ret = -EIO; | 133 | ret = -EIO; |
86 | 134 | ||
135 | if (!sh7372_pd->no_debug) | ||
136 | pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", | ||
137 | mask, __raw_readl(PSTR)); | ||
138 | |||
87 | out: | 139 | out: |
88 | pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", | 140 | if (ret == 0 && sh7372_pd->resume) |
89 | mask, __raw_readl(PSTR)); | 141 | sh7372_pd->resume(); |
90 | 142 | ||
91 | return ret; | 143 | return ret; |
92 | } | 144 | } |
93 | 145 | ||
94 | static int pd_power_up_a3rv(struct generic_pm_domain *genpd) | 146 | static void sh7372_a4r_suspend(void) |
95 | { | 147 | { |
96 | int ret = pd_power_up(genpd); | 148 | sh7372_intcs_suspend(); |
97 | 149 | __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */ | |
98 | /* force A4LC on after A3RV has been requested on */ | ||
99 | pm_genpd_poweron(&sh7372_a4lc.genpd); | ||
100 | |||
101 | return ret; | ||
102 | } | 150 | } |
103 | 151 | ||
104 | static int pd_power_down_a3rv(struct generic_pm_domain *genpd) | 152 | static bool pd_active_wakeup(struct device *dev) |
105 | { | 153 | { |
106 | int ret = pd_power_down(genpd); | 154 | return true; |
107 | |||
108 | /* try to power down A4LC after A3RV is requested off */ | ||
109 | genpd_queue_power_off_work(&sh7372_a4lc.genpd); | ||
110 | |||
111 | return ret; | ||
112 | } | 155 | } |
113 | 156 | ||
114 | static int pd_power_down_a4lc(struct generic_pm_domain *genpd) | 157 | static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain) |
115 | { | 158 | { |
116 | /* only power down A4LC if A3RV is off */ | 159 | return false; |
117 | if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) | ||
118 | return pd_power_down(genpd); | ||
119 | |||
120 | return -EBUSY; | ||
121 | } | 160 | } |
122 | 161 | ||
123 | static bool pd_active_wakeup(struct device *dev) | 162 | struct dev_power_governor sh7372_always_on_gov = { |
124 | { | 163 | .power_down_ok = sh7372_power_down_forbidden, |
125 | return true; | 164 | }; |
126 | } | ||
127 | 165 | ||
128 | void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) | 166 | void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) |
129 | { | 167 | { |
130 | struct generic_pm_domain *genpd = &sh7372_pd->genpd; | 168 | struct generic_pm_domain *genpd = &sh7372_pd->genpd; |
131 | 169 | ||
132 | pm_genpd_init(genpd, NULL, false); | 170 | pm_genpd_init(genpd, sh7372_pd->gov, false); |
133 | genpd->stop_device = pm_clk_suspend; | 171 | genpd->stop_device = pm_clk_suspend; |
134 | genpd->start_device = pm_clk_resume; | 172 | genpd->start_device = pm_clk_resume; |
173 | genpd->dev_irq_safe = true; | ||
135 | genpd->active_wakeup = pd_active_wakeup; | 174 | genpd->active_wakeup = pd_active_wakeup; |
136 | 175 | genpd->power_off = pd_power_down; | |
137 | if (sh7372_pd == &sh7372_a4lc) { | 176 | genpd->power_on = pd_power_up; |
138 | genpd->power_off = pd_power_down_a4lc; | ||
139 | genpd->power_on = pd_power_up; | ||
140 | } else if (sh7372_pd == &sh7372_a3rv) { | ||
141 | genpd->power_off = pd_power_down_a3rv; | ||
142 | genpd->power_on = pd_power_up_a3rv; | ||
143 | } else { | ||
144 | genpd->power_off = pd_power_down; | ||
145 | genpd->power_on = pd_power_up; | ||
146 | } | ||
147 | genpd->power_on(&sh7372_pd->genpd); | 177 | genpd->power_on(&sh7372_pd->genpd); |
148 | } | 178 | } |
149 | 179 | ||
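With the per-domain branching gone, every domain shares pd_power_down()/pd_power_up() and domain-specific behavior moves into data. A hedged sketch of a descriptor using the fields this patch reads (bit_shift, gov, suspend, resume, stay_on); the struct layout is inferred from the usage here, and the domain itself is invented for illustration:

```c
/* Illustrative descriptor only; bit 5 and the callbacks mirror the
 * sh7372_a4r definition added later in this patch. */
static struct sh7372_pm_domain example_pd = {
	.bit_shift = 5,			/* bit in PSTR/SPDCR/SWUCR */
	.gov       = &sh7372_always_on_gov,
	.suspend   = sh7372_a4r_suspend,
	.resume    = sh7372_intcs_resume,
	.stay_on   = true,		/* keep powered at runtime */
};

static void __init example_pd_setup(void)
{
	sh7372_init_pm_domain(&example_pd);
}
```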
@@ -152,11 +182,15 @@ void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, | |||
152 | { | 182 | { |
153 | struct device *dev = &pdev->dev; | 183 | struct device *dev = &pdev->dev; |
154 | 184 | ||
155 | if (!dev->power.subsys_data) { | ||
156 | pm_clk_init(dev); | ||
157 | pm_clk_add(dev, NULL); | ||
158 | } | ||
159 | pm_genpd_add_device(&sh7372_pd->genpd, dev); | 185 | pm_genpd_add_device(&sh7372_pd->genpd, dev); |
186 | if (pm_clk_no_clocks(dev)) | ||
187 | pm_clk_add(dev, NULL); | ||
188 | } | ||
189 | |||
190 | void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, | ||
191 | struct sh7372_pm_domain *sh7372_sd) | ||
192 | { | ||
193 | pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd); | ||
160 | } | 194 | } |
161 | 195 | ||
162 | struct sh7372_pm_domain sh7372_a4lc = { | 196 | struct sh7372_pm_domain sh7372_a4lc = { |
@@ -171,6 +205,14 @@ struct sh7372_pm_domain sh7372_d4 = { | |||
171 | .bit_shift = 3, | 205 | .bit_shift = 3, |
172 | }; | 206 | }; |
173 | 207 | ||
208 | struct sh7372_pm_domain sh7372_a4r = { | ||
209 | .bit_shift = 5, | ||
210 | .gov = &sh7372_always_on_gov, | ||
211 | .suspend = sh7372_a4r_suspend, | ||
212 | .resume = sh7372_intcs_resume, | ||
213 | .stay_on = true, | ||
214 | }; | ||
215 | |||
174 | struct sh7372_pm_domain sh7372_a3rv = { | 216 | struct sh7372_pm_domain sh7372_a3rv = { |
175 | .bit_shift = 6, | 217 | .bit_shift = 6, |
176 | }; | 218 | }; |
@@ -179,39 +221,187 @@ struct sh7372_pm_domain sh7372_a3ri = { | |||
179 | .bit_shift = 8, | 221 | .bit_shift = 8, |
180 | }; | 222 | }; |
181 | 223 | ||
224 | struct sh7372_pm_domain sh7372_a3sp = { | ||
225 | .bit_shift = 11, | ||
226 | .gov = &sh7372_always_on_gov, | ||
227 | .no_debug = true, | ||
228 | }; | ||
229 | |||
182 | struct sh7372_pm_domain sh7372_a3sg = { | 230 | struct sh7372_pm_domain sh7372_a3sg = { |
183 | .bit_shift = 13, | 231 | .bit_shift = 13, |
184 | }; | 232 | }; |
185 | 233 | ||
186 | #endif /* CONFIG_PM */ | 234 | #endif /* CONFIG_PM */ |
187 | 235 | ||
236 | #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) | ||
237 | static int sh7372_do_idle_core_standby(unsigned long unused) | ||
238 | { | ||
239 | cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */ | ||
240 | return 0; | ||
241 | } | ||
242 | |||
188 | static void sh7372_enter_core_standby(void) | 243 | static void sh7372_enter_core_standby(void) |
189 | { | 244 | { |
190 | void __iomem *smfram = (void __iomem *)SMFRAM; | 245 | /* set reset vector, translate 4k */ |
246 | __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); | ||
247 | __raw_writel(0, APARMBAREA); | ||
191 | 248 | ||
192 | __raw_writel(0, APARMBAREA); /* translate 4k */ | 249 | /* enter sleep mode with SYSTBCR set to 0x10 */ |
193 | __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */ | 250 | __raw_writel(0x10, SYSTBCR); |
194 | __raw_writel(0x10, SYSTBCR); /* enable core standby */ | 251 | cpu_suspend(0, sh7372_do_idle_core_standby); |
252 | __raw_writel(0, SYSTBCR); | ||
195 | 253 | ||
196 | __raw_writel(0, smfram + 0x3c); /* clear page table address */ | 254 | /* disable reset vector translation */ |
255 | __raw_writel(0, SBAR); | ||
256 | } | ||
257 | #endif | ||
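sh7372_enter_core_standby() now goes through the generic ARM cpu_suspend() path instead of the hand-rolled context save/restore this patch deletes. The general shape of that idiom, as a sketch (the finisher name is invented):

```c
#include <asm/suspend.h>

/* cpu_suspend(arg, fn) saves CPU context, then calls fn. If fn returns,
 * no power was lost and cpu_suspend() returns normally; if the core is
 * powered off, execution restarts at the reset vector (here pointed at
 * cpu_resume via SBAR), which restores the saved context. */
static int example_finisher(unsigned long arg)
{
	cpu_do_idle();		/* WFI; wakes here if power was retained */
	return 0;
}

static void example_enter_low_power(void)
{
	cpu_suspend(0, example_finisher);
	/* reached via plain return, or via cpu_resume after power loss */
}
```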
258 | |||
259 | #ifdef CONFIG_SUSPEND | ||
260 | static void sh7372_enter_a3sm_common(int pllc0_on) | ||
261 | { | ||
262 | /* set reset vector, translate 4k */ | ||
263 | __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); | ||
264 | __raw_writel(0, APARMBAREA); | ||
265 | |||
266 | if (pllc0_on) | ||
267 | __raw_writel(0, PLLC01STPCR); | ||
268 | else | ||
269 | __raw_writel(1 << 28, PLLC01STPCR); | ||
270 | |||
271 | __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */ | ||
272 | __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */ | ||
273 | cpu_suspend(0, sh7372_do_idle_a3sm); | ||
274 | __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */ | ||
275 | |||
276 | /* disable reset vector translation */ | ||
277 | __raw_writel(0, SBAR); | ||
278 | } | ||
279 | |||
280 | static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p) | ||
281 | { | ||
282 | unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4; | ||
283 | unsigned long msk, msk2; | ||
284 | |||
285 | /* check active clocks to determine potential wakeup sources */ | ||
286 | |||
287 | mstpsr0 = __raw_readl(MSTPSR0); | ||
288 | if ((mstpsr0 & 0x00000003) != 0x00000003) { | ||
289 | pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | mstpsr1 = __raw_readl(MSTPSR1); | ||
294 | if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) { | ||
295 | pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1); | ||
296 | return 0; | ||
297 | } | ||
197 | 298 | ||
198 | sh7372_cpu_suspend(); | 299 | mstpsr2 = __raw_readl(MSTPSR2); |
199 | cpu_init(); | 300 | if ((mstpsr2 & 0x000741ff) != 0x000741ff) { |
301 | pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2); | ||
302 | return 0; | ||
303 | } | ||
200 | 304 | ||
201 | /* if page table address is non-NULL then we have been powered down */ | 305 | mstpsr3 = __raw_readl(MSTPSR3); |
202 | if (__raw_readl(smfram + 0x3c)) { | 306 | if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) { |
203 | __raw_writel(__raw_readl(smfram + 0x40), | 307 | pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3); |
204 | __va(__raw_readl(smfram + 0x3c))); | 308 | return 0; |
309 | } | ||
205 | 310 | ||
206 | flush_tlb_all(); | 311 | mstpsr4 = __raw_readl(MSTPSR4); |
207 | set_cr(__raw_readl(smfram + 0x38)); | 312 | if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) { |
313 | pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4); | ||
314 | return 0; | ||
208 | } | 315 | } |
209 | 316 | ||
210 | __raw_writel(0, SYSTBCR); /* disable core standby */ | 317 | msk = 0; |
211 | __raw_writel(0, SBAR); /* disable reset vector translation */ | 318 | msk2 = 0; |
319 | |||
320 | /* make bitmaps of a limited number of wakeup sources */ | ||
321 | |||
322 | if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */ | ||
323 | msk |= 1 << 31; | ||
324 | |||
325 | if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */ | ||
326 | msk |= 1 << 21; | ||
327 | |||
328 | if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */ | ||
329 | msk |= 1 << 2; | ||
330 | |||
331 | if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */ | ||
332 | msk |= 1 << 1; | ||
333 | |||
334 | if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */ | ||
335 | msk |= 1 << 1; | ||
336 | |||
337 | if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */ | ||
338 | msk |= 1 << 1; | ||
339 | |||
340 | if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */ | ||
341 | msk2 |= 1 << 17; | ||
342 | |||
343 | *mskp = msk; | ||
344 | *msk2p = msk2; | ||
345 | |||
346 | return 1; | ||
347 | } | ||
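Each MSTPSRn test above asks the same question: are all modules in a given mask clock-stopped, i.e. do their status bits all read 1? Factored out, the check is just a mask comparison; a sketch with an invented helper name:

```c
/* All bits in 'mask' must read back 1 for the modules to count as
 * stopped; any 0 bit means a module is still clocked and A3SM is not
 * a safe target. */
static bool modules_stopped(unsigned long status, unsigned long mask)
{
	return (status & mask) == mask;
}

/* e.g.: modules_stopped(__raw_readl(MSTPSR1), 0xff079b7f) */
```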
348 | |||
349 | static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p) | ||
350 | { | ||
351 | u16 tmp, irqcr1, irqcr2; | ||
352 | int k; | ||
353 | |||
354 | irqcr1 = 0; | ||
355 | irqcr2 = 0; | ||
356 | |||
357 | /* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */ | ||
358 | for (k = 0; k <= 7; k++) { | ||
359 | tmp = (icr >> ((7 - k) * 4)) & 0xf; | ||
360 | irqcr1 |= (tmp & 0x03) << (k * 2); | ||
361 | irqcr2 |= (tmp >> 2) << (k * 2); | ||
362 | } | ||
363 | |||
364 | *irqcr1p = irqcr1; | ||
365 | *irqcr2p = irqcr2; | ||
366 | } | ||
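The loop above splits each 4-bit ICR sense field into a 2-bit low half (IRQCR) and a 2-bit high half (IRQCR2), reversing the field order along the way. A standalone, hosted C demo of the same transform for quick verification (the input value is arbitrary):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t icr = 0x12345678;	/* arbitrary example value */
	uint16_t irqcr1 = 0, irqcr2 = 0;

	for (int k = 0; k <= 7; k++) {
		uint16_t tmp = (icr >> ((7 - k) * 4)) & 0xf;
		irqcr1 |= (tmp & 0x03) << (k * 2);	/* low 2 bits */
		irqcr2 |= (tmp >> 2) << (k * 2);	/* high 2 bits */
	}
	printf("irqcr1=0x%04x irqcr2=0x%04x\n", irqcr1, irqcr2);
	return 0;
}
```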
367 | |||
368 | static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2) | ||
369 | { | ||
370 | u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high; | ||
371 | unsigned long tmp; | ||
372 | |||
373 | /* read IRQ0A -> IRQ15A mask */ | ||
374 | tmp = bitrev8(__raw_readb(INTMSK00A)); | ||
375 | tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8; | ||
376 | |||
377 | /* setup WUPSMSK from clocks and external IRQ mask */ | ||
378 | msk = (~msk & 0xc030000f) | (tmp << 4); | ||
379 | __raw_writel(msk, WUPSMSK); | ||
380 | |||
381 | /* propagate level/edge trigger for external IRQ 0->15 */ | ||
382 | sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low); | ||
383 | sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high); | ||
384 | __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR); | ||
385 | __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2); | ||
386 | |||
387 | /* read IRQ16A -> IRQ31A mask */ | ||
388 | tmp = bitrev8(__raw_readb(INTMSK20A)); | ||
389 | tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8; | ||
390 | |||
391 | /* setup WUPSMSK2 from clocks and external IRQ mask */ | ||
392 | msk2 = (~msk2 & 0x00030000) | tmp; | ||
393 | __raw_writel(msk2, WUPSMSK2); | ||
394 | |||
395 | /* propagate level/edge trigger for external IRQ 16->31 */ | ||
396 | sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low); | ||
397 | sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high); | ||
398 | __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3); | ||
399 | __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4); | ||
212 | } | 400 | } |
401 | #endif | ||
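sh7372_setup_a3sm() combines two sources into each wakeup mask: clock-derived bits (the inverted msk within a fixed field) and the bit-reversed external IRQ mask bytes shifted into place. A compressed sketch of the WUPSMSK composition, assuming the constants from the code above (the helper name is invented):

```c
#include <linux/bitrev.h>

/* lo/hi are the INTMSK00A/INTMSK10A bytes; 0xc030000f selects the
 * clock-derived wakeup bits as in sh7372_setup_a3sm(). */
static unsigned long example_build_wupsmsk(unsigned long msk, u8 lo, u8 hi)
{
	unsigned long irqmask = bitrev8(lo) | (bitrev8(hi) << 8);

	return (~msk & 0xc030000f) | (irqmask << 4);
}
```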
213 | 402 | ||
214 | #ifdef CONFIG_CPU_IDLE | 403 | #ifdef CONFIG_CPU_IDLE |
404 | |||
215 | static void sh7372_cpuidle_setup(struct cpuidle_device *dev) | 405 | static void sh7372_cpuidle_setup(struct cpuidle_device *dev) |
216 | { | 406 | { |
217 | struct cpuidle_state *state; | 407 | struct cpuidle_state *state; |
@@ -239,9 +429,25 @@ static void sh7372_cpuidle_init(void) {} | |||
239 | #endif | 429 | #endif |
240 | 430 | ||
241 | #ifdef CONFIG_SUSPEND | 431 | #ifdef CONFIG_SUSPEND |
432 | |||
242 | static int sh7372_enter_suspend(suspend_state_t suspend_state) | 433 | static int sh7372_enter_suspend(suspend_state_t suspend_state) |
243 | { | 434 | { |
244 | sh7372_enter_core_standby(); | 435 | unsigned long msk, msk2; |
436 | |||
437 | /* check active clocks to determine potential wakeup sources */ | ||
438 | if (sh7372_a3sm_valid(&msk, &msk2)) { | ||
439 | |||
440 | /* convert INTC mask and sense to SYSC mask and sense */ | ||
441 | sh7372_setup_a3sm(msk, msk2); | ||
442 | |||
443 | /* enter A3SM sleep with PLLC0 off */ | ||
444 | pr_debug("entering A3SM\n"); | ||
445 | sh7372_enter_a3sm_common(0); | ||
446 | } else { | ||
447 | /* default to Core Standby that supports all wakeup sources */ | ||
448 | pr_debug("entering Core Standby\n"); | ||
449 | sh7372_enter_core_standby(); | ||
450 | } | ||
245 | return 0; | 451 | return 0; |
246 | } | 452 | } |
247 | 453 | ||
@@ -253,9 +459,6 @@ static void sh7372_suspend_init(void) | |||
253 | static void sh7372_suspend_init(void) {} | 459 | static void sh7372_suspend_init(void) {} |
254 | #endif | 460 | #endif |
255 | 461 | ||
256 | #define DBGREG1 0xe6100020 | ||
257 | #define DBGREG9 0xe6100040 | ||
258 | |||
259 | void __init sh7372_pm_init(void) | 462 | void __init sh7372_pm_init(void) |
260 | { | 463 | { |
261 | /* enable DBG hardware block to kick SYSC */ | 464 | /* enable DBG hardware block to kick SYSC */ |
@@ -263,6 +466,9 @@ void __init sh7372_pm_init(void) | |||
263 | __raw_writel(0x0000a501, DBGREG9); | 466 | __raw_writel(0x0000a501, DBGREG9); |
264 | __raw_writel(0x00000000, DBGREG1); | 467 | __raw_writel(0x00000000, DBGREG1); |
265 | 468 | ||
469 | /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */ | ||
470 | __raw_writel(0, PDNSEL); | ||
471 | |||
266 | sh7372_suspend_init(); | 472 | sh7372_suspend_init(); |
267 | sh7372_cpuidle_init(); | 473 | sh7372_cpuidle_init(); |
268 | } | 474 | } |
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 6ec454e1e06..bd5c6a3b8c5 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/pm_runtime.h> | 16 | #include <linux/pm_runtime.h> |
17 | #include <linux/pm_domain.h> | 17 | #include <linux/pm_domain.h> |
18 | #include <linux/pm_clock.h> | ||
18 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
20 | #include <linux/sh_clk.h> | 21 | #include <linux/sh_clk.h> |
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 2d9b1b1a253..2380389e6ac 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/sh_dma.h> | 30 | #include <linux/sh_dma.h> |
31 | #include <linux/sh_intc.h> | 31 | #include <linux/sh_intc.h> |
32 | #include <linux/sh_timer.h> | 32 | #include <linux/sh_timer.h> |
33 | #include <linux/pm_domain.h> | ||
33 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
34 | #include <mach/sh7372.h> | 35 | #include <mach/sh7372.h> |
35 | #include <asm/mach-types.h> | 36 | #include <asm/mach-types.h> |
@@ -990,9 +991,14 @@ void __init sh7372_add_standard_devices(void) | |||
990 | sh7372_init_pm_domain(&sh7372_a4lc); | 991 | sh7372_init_pm_domain(&sh7372_a4lc); |
991 | sh7372_init_pm_domain(&sh7372_a4mp); | 992 | sh7372_init_pm_domain(&sh7372_a4mp); |
992 | sh7372_init_pm_domain(&sh7372_d4); | 993 | sh7372_init_pm_domain(&sh7372_d4); |
994 | sh7372_init_pm_domain(&sh7372_a4r); | ||
993 | sh7372_init_pm_domain(&sh7372_a3rv); | 995 | sh7372_init_pm_domain(&sh7372_a3rv); |
994 | sh7372_init_pm_domain(&sh7372_a3ri); | 996 | sh7372_init_pm_domain(&sh7372_a3ri); |
995 | sh7372_init_pm_domain(&sh7372_a3sg); | 997 | sh7372_init_pm_domain(&sh7372_a3sg); |
998 | sh7372_init_pm_domain(&sh7372_a3sp); | ||
999 | |||
1000 | sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv); | ||
1001 | sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc); | ||
996 | 1002 | ||
997 | platform_add_devices(sh7372_early_devices, | 1003 | platform_add_devices(sh7372_early_devices, |
998 | ARRAY_SIZE(sh7372_early_devices)); | 1004 | ARRAY_SIZE(sh7372_early_devices)); |
@@ -1003,6 +1009,25 @@ void __init sh7372_add_standard_devices(void) | |||
1003 | sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); | 1009 | sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); |
1004 | sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); | 1010 | sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); |
1005 | sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); | 1011 | sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); |
1012 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif0_device); | ||
1013 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif1_device); | ||
1014 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif2_device); | ||
1015 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif3_device); | ||
1016 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif4_device); | ||
1017 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif5_device); | ||
1018 | sh7372_add_device_to_domain(&sh7372_a3sp, &scif6_device); | ||
1019 | sh7372_add_device_to_domain(&sh7372_a3sp, &iic1_device); | ||
1020 | sh7372_add_device_to_domain(&sh7372_a3sp, &dma0_device); | ||
1021 | sh7372_add_device_to_domain(&sh7372_a3sp, &dma1_device); | ||
1022 | sh7372_add_device_to_domain(&sh7372_a3sp, &dma2_device); | ||
1023 | sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma0_device); | ||
1024 | sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma1_device); | ||
1025 | sh7372_add_device_to_domain(&sh7372_a4r, &iic0_device); | ||
1026 | sh7372_add_device_to_domain(&sh7372_a4r, &veu0_device); | ||
1027 | sh7372_add_device_to_domain(&sh7372_a4r, &veu1_device); | ||
1028 | sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device); | ||
1029 | sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device); | ||
1030 | sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device); | ||
1006 | } | 1031 | } |
1007 | 1032 | ||
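Taken together, the calls added to sh7372_add_standard_devices() build a small power topology: A4R is a master of A4LC, which in turn is a master of A3RV, and peripherals attach to the leaf domains. A condensed excerpt of that wiring (names as in the hunks above, error handling omitted):

```c
static void __init example_pm_topology(void)
{
	sh7372_init_pm_domain(&sh7372_a4r);
	sh7372_init_pm_domain(&sh7372_a4lc);
	sh7372_init_pm_domain(&sh7372_a3rv);

	/* powering up A3RV now pulls A4LC, then A4R, up first */
	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
	sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);

	sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
}
```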
1008 | void __init sh7372_add_early_devices(void) | 1033 | void __init sh7372_add_early_devices(void) |
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S index d37d3ca4d18..f3ab3c5810e 100644 --- a/arch/arm/mach-shmobile/sleep-sh7372.S +++ b/arch/arm/mach-shmobile/sleep-sh7372.S | |||
@@ -30,58 +30,20 @@ | |||
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
33 | #include <linux/init.h> | ||
34 | #include <asm/memory.h> | ||
33 | #include <asm/assembler.h> | 35 | #include <asm/assembler.h> |
34 | 36 | ||
35 | #define SMFRAM 0xe6a70000 | 37 | #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) |
36 | 38 | .align 12 | |
37 | .align | 39 | .text |
38 | kernel_flush: | 40 | .global sh7372_resume_core_standby_a3sm |
39 | .word v7_flush_dcache_all | 41 | sh7372_resume_core_standby_a3sm: |
40 | 42 | ldr pc, 1f | |
41 | .align 3 | 43 | 1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET |
42 | ENTRY(sh7372_cpu_suspend) | ||
43 | stmfd sp!, {r0-r12, lr} @ save registers on stack | ||
44 | |||
45 | ldr r8, =SMFRAM | ||
46 | |||
47 | mov r4, sp @ Store sp | ||
48 | mrs r5, spsr @ Store spsr | ||
49 | mov r6, lr @ Store lr | ||
50 | stmia r8!, {r4-r6} | ||
51 | |||
52 | mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register | ||
53 | mrc p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
54 | mrc p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
55 | mrc p15, 0, r7, c2, c0, 2 @ TTBCR | ||
56 | stmia r8!, {r4-r7} | ||
57 | |||
58 | mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
59 | mrc p15, 0, r5, c10, c2, 0 @ PRRR | ||
60 | mrc p15, 0, r6, c10, c2, 1 @ NMRR | ||
61 | stmia r8!,{r4-r6} | ||
62 | |||
63 | mrc p15, 0, r4, c13, c0, 1 @ Context ID | ||
64 | mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
65 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
66 | mrs r7, cpsr @ Store current cpsr | ||
67 | stmia r8!, {r4-r7} | ||
68 | |||
69 | mrc p15, 0, r4, c1, c0, 0 @ save control register | ||
70 | stmia r8!, {r4} | ||
71 | |||
72 | /* | ||
73 | * jump out to kernel flush routine | ||
74 | * - reuse that code is better | ||
75 | * - it executes in a cached space so is faster than refetch per-block | ||
76 | * - should be faster and will change with kernel | ||
77 | * - 'might' have to copy address, load and jump to it | ||
78 | * Flush all data from the L1 data cache before disabling | ||
79 | * SCTLR.C bit. | ||
80 | */ | ||
81 | ldr r1, kernel_flush | ||
82 | mov lr, pc | ||
83 | bx r1 | ||
84 | 44 | ||
45 | .global sh7372_do_idle_a3sm | ||
46 | sh7372_do_idle_a3sm: | ||
85 | /* | 47 | /* |
86 | * Clear the SCTLR.C bit to prevent further data cache | 48 | * Clear the SCTLR.C bit to prevent further data cache |
87 | * allocation. Clearing SCTLR.C would make all the data accesses | 49 | * allocation. Clearing SCTLR.C would make all the data accesses |
@@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend) | |||
92 | mcr p15, 0, r0, c1, c0, 0 | 54 | mcr p15, 0, r0, c1, c0, 0 |
93 | isb | 55 | isb |
94 | 56 | ||
57 | /* disable L2 cache in the aux control register */ | ||
58 | mrc p15, 0, r10, c1, c0, 1 | ||
59 | bic r10, r10, #2 | ||
60 | mcr p15, 0, r10, c1, c0, 1 | ||
61 | |||
95 | /* | 62 | /* |
96 | * Invalidate L1 data cache. Even though only invalidate is | 63 | * Invalidate data cache again. |
97 | * necessary exported flush API is used here. Doing clean | ||
98 | * on already clean cache would be almost NOP. | ||
99 | */ | 64 | */ |
100 | ldr r1, kernel_flush | 65 | ldr r1, kernel_flush |
101 | blx r1 | 66 | blx r1 |
@@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend) | |||
115 | dsb | 80 | dsb |
116 | dmb | 81 | dmb |
117 | 82 | ||
118 | /* | 83 | #define SPDCR 0xe6180008 |
119 | * =================================== | 84 | #define A3SM (1 << 12) |
120 | * == WFI instruction => Enter idle == | ||
121 | * =================================== | ||
122 | */ | ||
123 | wfi @ wait for interrupt | ||
124 | |||
125 | /* | ||
126 | * =================================== | ||
127 | * == Resume path for non-OFF modes == | ||
128 | * =================================== | ||
129 | */ | ||
130 | mrc p15, 0, r0, c1, c0, 0 | ||
131 | tst r0, #(1 << 2) @ Check C bit enabled? | ||
132 | orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared | ||
133 | mcreq p15, 0, r0, c1, c0, 0 | ||
134 | isb | ||
135 | |||
136 | /* | ||
137 | * =================================== | ||
138 | * == Exit point from non-OFF modes == | ||
139 | * =================================== | ||
140 | */ | ||
141 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | ||
142 | 85 | ||
143 | .pool | 86 | /* A3SM power down */ |
87 | ldr r0, =SPDCR | ||
88 | ldr r1, =A3SM | ||
89 | str r1, [r0] | ||
90 | 1: | ||
91 | b 1b | ||
144 | 92 | ||
145 | .align 12 | 93 | kernel_flush: |
146 | .text | 94 | .word v7_flush_dcache_all |
147 | .global sh7372_cpu_resume | 95 | #endif |
148 | sh7372_cpu_resume: | ||
149 | |||
150 | mov r1, #0 | ||
151 | /* | ||
152 | * Invalidate all instruction caches to PoU | ||
153 | * and flush branch target cache | ||
154 | */ | ||
155 | mcr p15, 0, r1, c7, c5, 0 | ||
156 | |||
157 | ldr r3, =SMFRAM | ||
158 | |||
159 | ldmia r3!, {r4-r6} | ||
160 | mov sp, r4 @ Restore sp | ||
161 | msr spsr_cxsf, r5 @ Restore spsr | ||
162 | mov lr, r6 @ Restore lr | ||
163 | |||
164 | ldmia r3!, {r4-r7} | ||
165 | mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register | ||
166 | mcr p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
167 | mcr p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
168 | mcr p15, 0, r7, c2, c0, 2 @ TTBCR | ||
169 | |||
170 | ldmia r3!,{r4-r6} | ||
171 | mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
172 | mcr p15, 0, r5, c10, c2, 0 @ PRRR | ||
173 | mcr p15, 0, r6, c10, c2, 1 @ NMRR | ||
174 | |||
175 | ldmia r3!,{r4-r7} | ||
176 | mcr p15, 0, r4, c13, c0, 1 @ Context ID | ||
177 | mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
178 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
179 | msr cpsr, r7 @ store cpsr | ||
180 | |||
181 | /* Starting to enable MMU here */ | ||
182 | mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl | ||
183 | /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ | ||
184 | and r7, #0x7 | ||
185 | cmp r7, #0x0 | ||
186 | beq usettbr0 | ||
187 | ttbr_error: | ||
188 | /* | ||
189 | * More work needs to be done to support N[0:2] value other than 0 | ||
190 | * So looping here so that the error can be detected | ||
191 | */ | ||
192 | b ttbr_error | ||
193 | |||
194 | .align | ||
195 | cache_pred_disable_mask: | ||
196 | .word 0xFFFFE7FB | ||
197 | ttbrbit_mask: | ||
198 | .word 0xFFFFC000 | ||
199 | table_index_mask: | ||
200 | .word 0xFFF00000 | ||
201 | table_entry: | ||
202 | .word 0x00000C02 | ||
203 | usettbr0: | ||
204 | |||
205 | mrc p15, 0, r2, c2, c0, 0 | ||
206 | ldr r5, ttbrbit_mask | ||
207 | and r2, r5 | ||
208 | mov r4, pc | ||
209 | ldr r5, table_index_mask | ||
210 | and r4, r5 @ r4 = 31 to 20 bits of pc | ||
211 | /* Extract the value to be written to table entry */ | ||
212 | ldr r6, table_entry | ||
213 | /* r6 has the value to be written to table entry */ | ||
214 | add r6, r6, r4 | ||
215 | /* Getting the address of table entry to modify */ | ||
216 | lsr r4, #18 | ||
217 | /* r2 has the location which needs to be modified */ | ||
218 | add r2, r4 | ||
219 | ldr r4, [r2] | ||
220 | str r6, [r2] /* modify the table entry */ | ||
221 | |||
222 | mov r7, r6 | ||
223 | mov r5, r2 | ||
224 | mov r6, r4 | ||
225 | /* r5 = original page table address */ | ||
226 | /* r6 = original page table data */ | ||
227 | |||
228 | mov r0, #0 | ||
229 | mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer | ||
230 | mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array | ||
231 | mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB | ||
232 | mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB | ||
233 | |||
234 | /* | ||
235 | * Restore control register. This enables the MMU. | ||
236 | * The caches and prediction are not enabled here, they | ||
237 | * will be enabled after restoring the MMU table entry. | ||
238 | */ | ||
239 | ldmia r3!, {r4} | ||
240 | stmia r3!, {r5} /* save original page table address */ | ||
241 | stmia r3!, {r6} /* save original page table data */ | ||
242 | stmia r3!, {r7} /* save modified page table data */ | ||
243 | |||
244 | ldr r2, cache_pred_disable_mask | ||
245 | and r4, r2 | ||
246 | mcr p15, 0, r4, c1, c0, 0 | ||
247 | dsb | ||
248 | isb | ||
249 | |||
250 | ldr r0, =restoremmu_on | ||
251 | bx r0 | ||
252 | |||
253 | /* | ||
254 | * ============================== | ||
255 | * == Exit point from OFF mode == | ||
256 | * ============================== | ||
257 | */ | ||
258 | restoremmu_on: | ||
259 | |||
260 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | ||
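The new resume stub stores a physical address because the MMU is off when the reset vector runs, so `cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET` is the kernel's linear virtual-to-physical translation done by hand at assembly time. The same arithmetic in C, as a sketch (for mapped addresses the kernel's own __pa()/virt_to_phys() do this):

```c
#include <asm/memory.h>

/* Valid only for addresses inside the kernel's linear mapping. */
static inline unsigned long example_virt_to_phys(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET + PLAT_PHYS_OFFSET;
}
```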
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ed5cb5af528..6b99fc3f9b6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -91,6 +91,7 @@ config S390 | |||
91 | select HAVE_ARCH_MUTEX_CPU_RELAX | 91 | select HAVE_ARCH_MUTEX_CPU_RELAX |
92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | 92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 |
93 | select HAVE_RCU_TABLE_FREE if SMP | 93 | select HAVE_RCU_TABLE_FREE if SMP |
94 | select ARCH_SAVE_PAGE_KEYS if HIBERNATION | ||
94 | select ARCH_INLINE_SPIN_TRYLOCK | 95 | select ARCH_INLINE_SPIN_TRYLOCK |
95 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 96 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
96 | select ARCH_INLINE_SPIN_LOCK | 97 | select ARCH_INLINE_SPIN_LOCK |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index cf9e5c6d552..b6f9afed74e 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
10 | #include <linux/mm.h> | ||
10 | #include <asm/system.h> | 11 | #include <asm/system.h> |
11 | 12 | ||
12 | /* | 13 | /* |
@@ -14,6 +15,123 @@ | |||
14 | */ | 15 | */ |
15 | extern const void __nosave_begin, __nosave_end; | 16 | extern const void __nosave_begin, __nosave_end; |
16 | 17 | ||
18 | /* | ||
19 | * The restore of the saved pages in a hibernation image will set | ||
20 | * the change and referenced bits in the storage key for each page. | ||
21 | * Overindication of the referenced bits after a hibernation cycle | ||
22 | * does not cause any harm, but the overindication of the change bits | ||
23 | * would cause trouble. | ||
24 | * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each | ||
25 | * page to the most significant byte of the associated page frame | ||
26 | * number in the hibernation image. | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * Key storage is allocated as a linked list of pages. | ||
31 | * The size of the keys array is (PAGE_SIZE - sizeof(struct page_key_data *)) | ||
32 | */ | ||
33 | struct page_key_data { | ||
34 | struct page_key_data *next; | ||
35 | unsigned char data[]; | ||
36 | }; | ||
37 | |||
38 | #define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *)) | ||
39 | |||
40 | static struct page_key_data *page_key_data; | ||
41 | static struct page_key_data *page_key_rp, *page_key_wp; | ||
42 | static unsigned long page_key_rx, page_key_wx; | ||
43 | |||
44 | /* | ||
45 | * For each page in the hibernation image one additional byte is | ||
46 | * stored in the most significant byte of the page frame number. | ||
47 | * On suspend no additional memory is required but on resume the | ||
48 | * keys need to be memorized until the page data has been restored. | ||
49 | * Only then can the storage keys be set to their old state. | ||
50 | */ | ||
51 | unsigned long page_key_additional_pages(unsigned long pages) | ||
52 | { | ||
53 | return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
54 | } | ||
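Since each list page holds PAGE_SIZE minus one pointer's worth of key bytes, the overhead is tiny: one extra page per ~4088 image pages on a 4 KiB-page LP64 system. A hosted demo of the arithmetic (the image size is illustrative):

```c
#include <stdio.h>

int main(void)
{
	unsigned long pages = 262144;			/* 1 GiB of 4 KiB pages */
	unsigned long slot = 4096 - sizeof(void *);	/* PAGE_KEY_DATA_SIZE */

	/* DIV_ROUND_UP(pages, slot) */
	printf("extra key pages: %lu\n", (pages + slot - 1) / slot);
	return 0;	/* prints 65 on LP64 */
}
```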
55 | |||
56 | /* | ||
57 | * Free page_key_data list of arrays. | ||
58 | */ | ||
59 | void page_key_free(void) | ||
60 | { | ||
61 | struct page_key_data *pkd; | ||
62 | |||
63 | while (page_key_data) { | ||
64 | pkd = page_key_data; | ||
65 | page_key_data = pkd->next; | ||
66 | free_page((unsigned long) pkd); | ||
67 | } | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Allocate page_key_data list of arrays with enough room to store | ||
72 | * one byte for each page in the hibernation image. | ||
73 | */ | ||
74 | int page_key_alloc(unsigned long pages) | ||
75 | { | ||
76 | struct page_key_data *pk; | ||
77 | unsigned long size; | ||
78 | |||
79 | size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
80 | while (size--) { | ||
81 | pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL); | ||
82 | if (!pk) { | ||
83 | page_key_free(); | ||
84 | return -ENOMEM; | ||
85 | } | ||
86 | pk->next = page_key_data; | ||
87 | page_key_data = pk; | ||
88 | } | ||
89 | page_key_rp = page_key_wp = page_key_data; | ||
90 | page_key_rx = page_key_wx = 0; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Save the storage key into the upper 8 bits of the page frame number. | ||
96 | */ | ||
97 | void page_key_read(unsigned long *pfn) | ||
98 | { | ||
99 | unsigned long addr; | ||
100 | |||
101 | addr = (unsigned long) page_address(pfn_to_page(*pfn)); | ||
102 | *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Extract the storage key from the upper 8 bits of the page frame number | ||
107 | * and store it in the page_key_data list of arrays. | ||
108 | */ | ||
109 | void page_key_memorize(unsigned long *pfn) | ||
110 | { | ||
111 | page_key_wp->data[page_key_wx] = *(unsigned char *) pfn; | ||
112 | *(unsigned char *) pfn = 0; | ||
113 | if (++page_key_wx < PAGE_KEY_DATA_SIZE) | ||
114 | return; | ||
115 | page_key_wp = page_key_wp->next; | ||
116 | page_key_wx = 0; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Get the next key from the page_key_data list of arrays and set the | ||
121 | * storage key of the page referred to by @address. If @address refers to | ||
122 | * a "safe" page the swsusp_arch_resume code will transfer the storage | ||
123 | * key from the buffer page to the original page. | ||
124 | */ | ||
125 | void page_key_write(void *address) | ||
126 | { | ||
127 | page_set_storage_key((unsigned long) address, | ||
128 | page_key_rp->data[page_key_rx], 0); | ||
129 | if (++page_key_rx < PAGE_KEY_DATA_SIZE) | ||
130 | return; | ||
131 | page_key_rp = page_key_rp->next; | ||
132 | page_key_rx = 0; | ||
133 | } | ||
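page_key_memorize() and page_key_write() advance a write cursor and a read cursor through the same list of key arrays, and the step is identical in both: bump the index, and only when the current array is full move to the next list element and reset. A factored-out sketch of that step (the helper name is invented):

```c
/* Advance one (list pointer, index) cursor over the key arrays. */
static void example_page_key_advance(struct page_key_data **pkp,
				     unsigned long *idx)
{
	if (++*idx < PAGE_KEY_DATA_SIZE)
		return;		/* still room in the current array */
	*pkp = (*pkp)->next;
	*idx = 0;
}
```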
134 | |||
17 | int pfn_is_nosave(unsigned long pfn) | 135 | int pfn_is_nosave(unsigned long pfn) |
18 | { | 136 | { |
19 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); | 137 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 51bcdb50a23..acb78cdee89 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -136,11 +136,14 @@ ENTRY(swsusp_arch_resume) | |||
136 | 0: | 136 | 0: |
137 | lg %r2,8(%r1) | 137 | lg %r2,8(%r1) |
138 | lg %r4,0(%r1) | 138 | lg %r4,0(%r1) |
139 | iske %r0,%r4 | ||
139 | lghi %r3,PAGE_SIZE | 140 | lghi %r3,PAGE_SIZE |
140 | lghi %r5,PAGE_SIZE | 141 | lghi %r5,PAGE_SIZE |
141 | 1: | 142 | 1: |
142 | mvcle %r2,%r4,0 | 143 | mvcle %r2,%r4,0 |
143 | jo 1b | 144 | jo 1b |
145 | lg %r2,8(%r1) | ||
146 | sske %r0,%r2 | ||
144 | lg %r1,16(%r1) | 147 | lg %r1,16(%r1) |
145 | ltgr %r1,%r1 | 148 | ltgr %r1,%r1 |
146 | jnz 0b | 149 | jnz 0b |
diff --git a/drivers/Kconfig b/drivers/Kconfig index e73aaaee013..6268167a1bb 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -132,4 +132,6 @@ source "drivers/iommu/Kconfig" | |||
132 | 132 | ||
133 | source "drivers/virt/Kconfig" | 133 | source "drivers/virt/Kconfig" |
134 | 134 | ||
135 | source "drivers/devfreq/Kconfig" | ||
136 | |||
135 | endmenu | 137 | endmenu |
diff --git a/drivers/Makefile b/drivers/Makefile index e7afb3acbc6..755eaf7a728 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -129,3 +129,5 @@ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ | |||
129 | 129 | ||
130 | # Virtualization drivers | 130 | # Virtualization drivers |
131 | obj-$(CONFIG_VIRT_DRIVERS) += virt/ | 131 | obj-$(CONFIG_VIRT_DRIVERS) += virt/ |
132 | |||
133 | obj-$(CONFIG_PM_DEVFREQ) += devfreq/ | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 431ab11c8c1..2e69e09ff03 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/dmi.h> | 37 | #include <linux/dmi.h> |
38 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
39 | #include <linux/sched.h> /* need_resched() */ | 39 | #include <linux/sched.h> /* need_resched() */ |
40 | #include <linux/pm_qos_params.h> | 40 | #include <linux/pm_qos.h> |
41 | #include <linux/clockchips.h> | 41 | #include <linux/clockchips.h> |
42 | #include <linux/cpuidle.h> | 42 | #include <linux/cpuidle.h> |
43 | #include <linux/irqflags.h> | 43 | #include <linux/irqflags.h> |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 3ed80b2ca90..0e46faef1d3 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -390,6 +390,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
390 | }, | 390 | }, |
391 | { | 391 | { |
392 | .callback = init_nvs_nosave, | 392 | .callback = init_nvs_nosave, |
393 | .ident = "Sony Vaio VGN-FW21E", | ||
394 | .matches = { | ||
395 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
396 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), | ||
397 | }, | ||
398 | }, | ||
399 | { | ||
400 | .callback = init_nvs_nosave, | ||
393 | .ident = "Sony Vaio VGN-SR11M", | 401 | .ident = "Sony Vaio VGN-SR11M", |
394 | .matches = { | 402 | .matches = { |
395 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | 403 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), |
@@ -444,6 +452,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
444 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), | 452 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), |
445 | }, | 453 | }, |
446 | }, | 454 | }, |
455 | { | ||
456 | .callback = init_nvs_nosave, | ||
457 | .ident = "Sony Vaio VGN-SR26GN_P", | ||
458 | .matches = { | ||
459 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
460 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), | ||
461 | }, | ||
462 | }, | ||
463 | { | ||
464 | .callback = init_nvs_nosave, | ||
465 | .ident = "Sony Vaio VGN-FW520F", | ||
466 | .matches = { | ||
467 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
468 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), | ||
469 | }, | ||
470 | }, | ||
447 | {}, | 471 | {}, |
448 | }; | 472 | }; |
449 | #endif /* CONFIG_SUSPEND */ | 473 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2639ae79a37..81676dd1790 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o | 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o |
2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o |
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP) += opp.o | |||
6 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o | 6 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o |
7 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o | 7 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o |
8 | 8 | ||
9 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file | 9 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index b97294e2d95..5f0f85d5c57 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -10,18 +10,13 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
13 | #include <linux/pm_runtime.h> | 13 | #include <linux/pm_clock.h> |
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | 17 | ||
18 | #ifdef CONFIG_PM | 18 | #ifdef CONFIG_PM |
19 | 19 | ||
20 | struct pm_clk_data { | ||
21 | struct list_head clock_list; | ||
22 | spinlock_t lock; | ||
23 | }; | ||
24 | |||
25 | enum pce_status { | 20 | enum pce_status { |
26 | PCE_STATUS_NONE = 0, | 21 | PCE_STATUS_NONE = 0, |
27 | PCE_STATUS_ACQUIRED, | 22 | PCE_STATUS_ACQUIRED, |
@@ -36,11 +31,6 @@ struct pm_clock_entry { | |||
36 | enum pce_status status; | 31 | enum pce_status status; |
37 | }; | 32 | }; |
38 | 33 | ||
39 | static struct pm_clk_data *__to_pcd(struct device *dev) | ||
40 | { | ||
41 | return dev ? dev->power.subsys_data : NULL; | ||
42 | } | ||
43 | |||
44 | /** | 34 | /** |
45 | * pm_clk_acquire - Acquire a device clock. | 35 | * pm_clk_acquire - Acquire a device clock. |
46 | * @dev: Device whose clock is to be acquired. | 36 | * @dev: Device whose clock is to be acquired. |
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | |||
67 | */ | 57 | */ |
68 | int pm_clk_add(struct device *dev, const char *con_id) | 58 | int pm_clk_add(struct device *dev, const char *con_id) |
69 | { | 59 | { |
70 | struct pm_clk_data *pcd = __to_pcd(dev); | 60 | struct pm_subsys_data *psd = dev_to_psd(dev); |
71 | struct pm_clock_entry *ce; | 61 | struct pm_clock_entry *ce; |
72 | 62 | ||
73 | if (!pcd) | 63 | if (!psd) |
74 | return -EINVAL; | 64 | return -EINVAL; |
75 | 65 | ||
76 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | 66 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); |
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
91 | 81 | ||
92 | pm_clk_acquire(dev, ce); | 82 | pm_clk_acquire(dev, ce); |
93 | 83 | ||
94 | spin_lock_irq(&pcd->lock); | 84 | spin_lock_irq(&psd->lock); |
95 | list_add_tail(&ce->node, &pcd->clock_list); | 85 | list_add_tail(&ce->node, &psd->clock_list); |
96 | spin_unlock_irq(&pcd->lock); | 86 | spin_unlock_irq(&psd->lock); |
97 | return 0; | 87 | return 0; |
98 | } | 88 | } |
99 | 89 | ||
@@ -114,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
114 | clk_put(ce->clk); | 104 | clk_put(ce->clk); |
115 | } | 105 | } |
116 | 106 | ||
117 | if (ce->con_id) | 107 | kfree(ce->con_id); |
118 | kfree(ce->con_id); | ||
119 | |||
120 | kfree(ce); | 108 | kfree(ce); |
121 | } | 109 | } |
122 | 110 | ||
@@ -130,15 +118,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
130 | */ | 118 | */ |
131 | void pm_clk_remove(struct device *dev, const char *con_id) | 119 | void pm_clk_remove(struct device *dev, const char *con_id) |
132 | { | 120 | { |
133 | struct pm_clk_data *pcd = __to_pcd(dev); | 121 | struct pm_subsys_data *psd = dev_to_psd(dev); |
134 | struct pm_clock_entry *ce; | 122 | struct pm_clock_entry *ce; |
135 | 123 | ||
136 | if (!pcd) | 124 | if (!psd) |
137 | return; | 125 | return; |
138 | 126 | ||
139 | spin_lock_irq(&pcd->lock); | 127 | spin_lock_irq(&psd->lock); |
140 | 128 | ||
141 | list_for_each_entry(ce, &pcd->clock_list, node) { | 129 | list_for_each_entry(ce, &psd->clock_list, node) { |
142 | if (!con_id && !ce->con_id) | 130 | if (!con_id && !ce->con_id) |
143 | goto remove; | 131 | goto remove; |
144 | else if (!con_id || !ce->con_id) | 132 | else if (!con_id || !ce->con_id) |
@@ -147,12 +135,12 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
147 | goto remove; | 135 | goto remove; |
148 | } | 136 | } |
149 | 137 | ||
150 | spin_unlock_irq(&pcd->lock); | 138 | spin_unlock_irq(&psd->lock); |
151 | return; | 139 | return; |
152 | 140 | ||
153 | remove: | 141 | remove: |
154 | list_del(&ce->node); | 142 | list_del(&ce->node); |
155 | spin_unlock_irq(&pcd->lock); | 143 | spin_unlock_irq(&psd->lock); |
156 | 144 | ||
157 | __pm_clk_remove(ce); | 145 | __pm_clk_remove(ce); |
158 | } | 146 | } |
@@ -161,23 +149,27 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
161 | * pm_clk_init - Initialize a device's list of power management clocks. | 149 | * pm_clk_init - Initialize a device's list of power management clocks. |
162 | * @dev: Device to initialize the list of PM clocks for. | 150 | * @dev: Device to initialize the list of PM clocks for. |
163 | * | 151 | * |
164 | * Allocate a struct pm_clk_data object, initialize its lock member and | 152 | * Initialize the lock and clock_list members of the device's pm_subsys_data |
165 | * make the @dev's power.subsys_data field point to it. | 153 | * object. |
166 | */ | 154 | */ |
167 | int pm_clk_init(struct device *dev) | 155 | void pm_clk_init(struct device *dev) |
168 | { | 156 | { |
169 | struct pm_clk_data *pcd; | 157 | struct pm_subsys_data *psd = dev_to_psd(dev); |
170 | 158 | if (psd) | |
171 | pcd = kzalloc(sizeof(*pcd), GFP_KERNEL); | 159 | INIT_LIST_HEAD(&psd->clock_list); |
172 | if (!pcd) { | 160 | } |
173 | dev_err(dev, "Not enough memory for PM clock data.\n"); | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | 161 | ||
177 | INIT_LIST_HEAD(&pcd->clock_list); | 162 | /** |
178 | spin_lock_init(&pcd->lock); | 163 | * pm_clk_create - Create and initialize a device's list of PM clocks. |
179 | dev->power.subsys_data = pcd; | 164 | * @dev: Device to create and initialize the list of PM clocks for. |
180 | return 0; | 165 | * |
166 | * Allocate a struct pm_subsys_data object, initialize its lock and clock_list | ||
167 | * members and make the @dev's power.subsys_data field point to it. | ||
168 | */ | ||
169 | int pm_clk_create(struct device *dev) | ||
170 | { | ||
171 | int ret = dev_pm_get_subsys_data(dev); | ||
172 | return ret < 0 ? ret : 0; | ||
181 | } | 173 | } |
182 | 174 | ||
183 | /** | 175 | /** |
@@ -185,29 +177,28 @@ int pm_clk_init(struct device *dev) | |||
185 | * @dev: Device to destroy the list of PM clocks for. | 177 | * @dev: Device to destroy the list of PM clocks for. |
186 | * | 178 | * |
187 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | 179 | * Clear the @dev's power.subsys_data field, remove the list of clock entries |
188 | * from the struct pm_clk_data object pointed to by it before and free | 180 | * from the struct pm_subsys_data object pointed to by it before and free |
189 | * that object. | 181 | * that object. |
190 | */ | 182 | */ |
191 | void pm_clk_destroy(struct device *dev) | 183 | void pm_clk_destroy(struct device *dev) |
192 | { | 184 | { |
193 | struct pm_clk_data *pcd = __to_pcd(dev); | 185 | struct pm_subsys_data *psd = dev_to_psd(dev); |
194 | struct pm_clock_entry *ce, *c; | 186 | struct pm_clock_entry *ce, *c; |
195 | struct list_head list; | 187 | struct list_head list; |
196 | 188 | ||
197 | if (!pcd) | 189 | if (!psd) |
198 | return; | 190 | return; |
199 | 191 | ||
200 | dev->power.subsys_data = NULL; | ||
201 | INIT_LIST_HEAD(&list); | 192 | INIT_LIST_HEAD(&list); |
202 | 193 | ||
203 | spin_lock_irq(&pcd->lock); | 194 | spin_lock_irq(&psd->lock); |
204 | 195 | ||
205 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) | 196 | list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) |
206 | list_move(&ce->node, &list); | 197 | list_move(&ce->node, &list); |
207 | 198 | ||
208 | spin_unlock_irq(&pcd->lock); | 199 | spin_unlock_irq(&psd->lock); |
209 | 200 | ||
210 | kfree(pcd); | 201 | dev_pm_put_subsys_data(dev); |
211 | 202 | ||
212 | list_for_each_entry_safe_reverse(ce, c, &list, node) { | 203 | list_for_each_entry_safe_reverse(ce, c, &list, node) { |
213 | list_del(&ce->node); | 204 | list_del(&ce->node); |
@@ -225,25 +216,25 @@ void pm_clk_destroy(struct device *dev) | |||
225 | */ | 216 | */ |
226 | int pm_clk_suspend(struct device *dev) | 217 | int pm_clk_suspend(struct device *dev) |
227 | { | 218 | { |
228 | struct pm_clk_data *pcd = __to_pcd(dev); | 219 | struct pm_subsys_data *psd = dev_to_psd(dev); |
229 | struct pm_clock_entry *ce; | 220 | struct pm_clock_entry *ce; |
230 | unsigned long flags; | 221 | unsigned long flags; |
231 | 222 | ||
232 | dev_dbg(dev, "%s()\n", __func__); | 223 | dev_dbg(dev, "%s()\n", __func__); |
233 | 224 | ||
234 | if (!pcd) | 225 | if (!psd) |
235 | return 0; | 226 | return 0; |
236 | 227 | ||
237 | spin_lock_irqsave(&pcd->lock, flags); | 228 | spin_lock_irqsave(&psd->lock, flags); |
238 | 229 | ||
239 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { | 230 | list_for_each_entry_reverse(ce, &psd->clock_list, node) { |
240 | if (ce->status < PCE_STATUS_ERROR) { | 231 | if (ce->status < PCE_STATUS_ERROR) { |
241 | clk_disable(ce->clk); | 232 | clk_disable(ce->clk); |
242 | ce->status = PCE_STATUS_ACQUIRED; | 233 | ce->status = PCE_STATUS_ACQUIRED; |
243 | } | 234 | } |
244 | } | 235 | } |
245 | 236 | ||
246 | spin_unlock_irqrestore(&pcd->lock, flags); | 237 | spin_unlock_irqrestore(&psd->lock, flags); |
247 | 238 | ||
248 | return 0; | 239 | return 0; |
249 | } | 240 | } |
@@ -254,25 +245,25 @@ int pm_clk_suspend(struct device *dev) | |||
254 | */ | 245 | */ |
255 | int pm_clk_resume(struct device *dev) | 246 | int pm_clk_resume(struct device *dev) |
256 | { | 247 | { |
257 | struct pm_clk_data *pcd = __to_pcd(dev); | 248 | struct pm_subsys_data *psd = dev_to_psd(dev); |
258 | struct pm_clock_entry *ce; | 249 | struct pm_clock_entry *ce; |
259 | unsigned long flags; | 250 | unsigned long flags; |
260 | 251 | ||
261 | dev_dbg(dev, "%s()\n", __func__); | 252 | dev_dbg(dev, "%s()\n", __func__); |
262 | 253 | ||
263 | if (!pcd) | 254 | if (!psd) |
264 | return 0; | 255 | return 0; |
265 | 256 | ||
266 | spin_lock_irqsave(&pcd->lock, flags); | 257 | spin_lock_irqsave(&psd->lock, flags); |
267 | 258 | ||
268 | list_for_each_entry(ce, &pcd->clock_list, node) { | 259 | list_for_each_entry(ce, &psd->clock_list, node) { |
269 | if (ce->status < PCE_STATUS_ERROR) { | 260 | if (ce->status < PCE_STATUS_ERROR) { |
270 | clk_enable(ce->clk); | 261 | clk_enable(ce->clk); |
271 | ce->status = PCE_STATUS_ENABLED; | 262 | ce->status = PCE_STATUS_ENABLED; |
272 | } | 263 | } |
273 | } | 264 | } |
274 | 265 | ||
275 | spin_unlock_irqrestore(&pcd->lock, flags); | 266 | spin_unlock_irqrestore(&psd->lock, flags); |
276 | 267 | ||
277 | return 0; | 268 | return 0; |
278 | } | 269 | } |
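For a platform that manages its device clocks through this API, the probe-time sequence after the rename is create-then-add; a hedged sketch (device and function names invented):

```c
#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);	/* allocates subsys_data */
	if (ret)
		return ret;

	ret = pm_clk_add(&pdev->dev, NULL);	/* default (con_id-less) clock */
	if (ret)
		pm_clk_destroy(&pdev->dev);

	return ret;
}
```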
@@ -310,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
310 | if (dev->pm_domain) | 301 | if (dev->pm_domain) |
311 | break; | 302 | break; |
312 | 303 | ||
313 | error = pm_clk_init(dev); | 304 | error = pm_clk_create(dev); |
314 | if (error) | 305 | if (error) |
315 | break; | 306 | break; |
316 | 307 | ||
@@ -345,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
345 | */ | 336 | */ |
346 | int pm_clk_suspend(struct device *dev) | 337 | int pm_clk_suspend(struct device *dev) |
347 | { | 338 | { |
348 | struct pm_clk_data *pcd = __to_pcd(dev); | 339 | struct pm_subsys_data *psd = dev_to_psd(dev); |
349 | struct pm_clock_entry *ce; | 340 | struct pm_clock_entry *ce; |
350 | unsigned long flags; | 341 | unsigned long flags; |
351 | 342 | ||
352 | dev_dbg(dev, "%s()\n", __func__); | 343 | dev_dbg(dev, "%s()\n", __func__); |
353 | 344 | ||
354 | /* If there is no driver, the clocks are already disabled. */ | 345 | /* If there is no driver, the clocks are already disabled. */ |
355 | if (!pcd || !dev->driver) | 346 | if (!psd || !dev->driver) |
356 | return 0; | 347 | return 0; |
357 | 348 | ||
358 | spin_lock_irqsave(&pcd->lock, flags); | 349 | spin_lock_irqsave(&psd->lock, flags); |
359 | 350 | ||
360 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) | 351 | list_for_each_entry_reverse(ce, &psd->clock_list, node) |
361 | clk_disable(ce->clk); | 352 | clk_disable(ce->clk); |
362 | 353 | ||
363 | spin_unlock_irqrestore(&pcd->lock, flags); | 354 | spin_unlock_irqrestore(&psd->lock, flags); |
364 | 355 | ||
365 | return 0; | 356 | return 0; |
366 | } | 357 | } |
@@ -371,22 +362,22 @@ int pm_clk_suspend(struct device *dev) | |||
371 | */ | 362 | */ |
372 | int pm_clk_resume(struct device *dev) | 363 | int pm_clk_resume(struct device *dev) |
373 | { | 364 | { |
374 | struct pm_clk_data *pcd = __to_pcd(dev); | 365 | struct pm_subsys_data *psd = dev_to_psd(dev); |
375 | struct pm_clock_entry *ce; | 366 | struct pm_clock_entry *ce; |
376 | unsigned long flags; | 367 | unsigned long flags; |
377 | 368 | ||
378 | dev_dbg(dev, "%s()\n", __func__); | 369 | dev_dbg(dev, "%s()\n", __func__); |
379 | 370 | ||
380 | /* If there is no driver, the clocks should remain disabled. */ | 371 | /* If there is no driver, the clocks should remain disabled. */ |
381 | if (!pcd || !dev->driver) | 372 | if (!psd || !dev->driver) |
382 | return 0; | 373 | return 0; |
383 | 374 | ||
384 | spin_lock_irqsave(&pcd->lock, flags); | 375 | spin_lock_irqsave(&psd->lock, flags); |
385 | 376 | ||
386 | list_for_each_entry(ce, &pcd->clock_list, node) | 377 | list_for_each_entry(ce, &psd->clock_list, node) |
387 | clk_enable(ce->clk); | 378 | clk_enable(ce->clk); |
388 | 379 | ||
389 | spin_unlock_irqrestore(&pcd->lock, flags); | 380 | spin_unlock_irqrestore(&psd->lock, flags); |
390 | 381 | ||
391 | return 0; | 382 | return 0; |
392 | } | 383 | } |
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c new file mode 100644 index 00000000000..29820c39618 --- /dev/null +++ b/drivers/base/power/common.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * drivers/base/power/common.c - Common device power management code. | ||
3 | * | ||
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/pm_clock.h> | ||
14 | |||
15 | /** | ||
16 | * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. | ||
17 | * @dev: Device to handle. | ||
18 | * | ||
19 | * If power.subsys_data is NULL, point it to a new object, otherwise increment | ||
20 | * its reference counter. Return 1 if a new object has been created, otherwise | ||
21 | * return 0 or error code. | ||
22 | */ | ||
23 | int dev_pm_get_subsys_data(struct device *dev) | ||
24 | { | ||
25 | struct pm_subsys_data *psd; | ||
26 | int ret = 0; | ||
27 | |||
28 | psd = kzalloc(sizeof(*psd), GFP_KERNEL); | ||
29 | if (!psd) | ||
30 | return -ENOMEM; | ||
31 | |||
32 | spin_lock_irq(&dev->power.lock); | ||
33 | |||
34 | if (dev->power.subsys_data) { | ||
35 | dev->power.subsys_data->refcount++; | ||
36 | } else { | ||
37 | spin_lock_init(&psd->lock); | ||
38 | psd->refcount = 1; | ||
39 | dev->power.subsys_data = psd; | ||
40 | pm_clk_init(dev); | ||
41 | psd = NULL; | ||
42 | ret = 1; | ||
43 | } | ||
44 | |||
45 | spin_unlock_irq(&dev->power.lock); | ||
46 | |||
47 | /* kfree() safely ignores a NULL argument. */ | ||
48 | kfree(psd); | ||
49 | |||
50 | return ret; | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | ||
53 | |||
54 | /** | ||
55 | * dev_pm_put_subsys_data - Drop reference to power.subsys_data. | ||
56 | * @dev: Device to handle. | ||
57 | * | ||
58 | * If the reference counter of power.subsys_data is zero after dropping the | ||
59 | * reference, power.subsys_data is removed. Return 1 if that happens or 0 | ||
60 | * otherwise. | ||
61 | */ | ||
62 | int dev_pm_put_subsys_data(struct device *dev) | ||
63 | { | ||
64 | struct pm_subsys_data *psd; | ||
65 | int ret = 0; | ||
66 | |||
67 | spin_lock_irq(&dev->power.lock); | ||
68 | |||
69 | psd = dev_to_psd(dev); | ||
70 | if (!psd) { | ||
71 | ret = -EINVAL; | ||
72 | goto out; | ||
73 | } | ||
74 | |||
75 | if (--psd->refcount == 0) { | ||
76 | dev->power.subsys_data = NULL; | ||
77 | kfree(psd); | ||
78 | ret = 1; | ||
79 | } | ||
80 | |||
81 | out: | ||
82 | spin_unlock_irq(&dev->power.lock); | ||
83 | |||
84 | return ret; | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); | ||
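The refcounting lets several users (clock lists, PM domains) share one power.subsys_data without coordinating teardown. A sketch of paired usage (the subsystem hooks are invented):

```c
static int example_attach(struct device *dev)
{
	int ret = dev_pm_get_subsys_data(dev);	/* 1 = newly allocated */

	if (ret < 0)
		return ret;
	return pm_clk_add(dev, NULL);	/* clock_list is now initialized */
}

static void example_detach(struct device *dev)
{
	pm_clk_remove(dev, NULL);
	dev_pm_put_subsys_data(dev);	/* frees on the last reference */
}
```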
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1c374579407..6790cf7eba5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) | |||
29 | return pd_to_genpd(dev->pm_domain); | 29 | return pd_to_genpd(dev->pm_domain); |
30 | } | 30 | } |
31 | 31 | ||
32 | static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) | 32 | static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) |
33 | { | 33 | { |
34 | if (!WARN_ON(genpd->sd_count == 0)) | 34 | bool ret = false; |
35 | genpd->sd_count--; | 35 | |
36 | if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) | ||
37 | ret = !!atomic_dec_and_test(&genpd->sd_count); | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) | ||
43 | { | ||
44 | atomic_inc(&genpd->sd_count); | ||
45 | smp_mb__after_atomic_inc(); | ||
36 | } | 46 | } |
37 | 47 | ||
38 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) | 48 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) |
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd) | |||
71 | } | 81 | } |
72 | 82 | ||
73 | /** | 83 | /** |
74 | * pm_genpd_poweron - Restore power to a given PM domain and its parents. | 84 | * __pm_genpd_poweron - Restore power to a given PM domain and its masters. |
75 | * @genpd: PM domain to power up. | 85 | * @genpd: PM domain to power up. |
76 | * | 86 | * |
77 | * Restore power to @genpd and all of its parents so that it is possible to | 87 | * Restore power to @genpd and all of its masters so that it is possible to |
78 | * resume a device belonging to it. | 88 | * resume a device belonging to it. |
79 | */ | 89 | */ |
80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | 90 | int __pm_genpd_poweron(struct generic_pm_domain *genpd) |
91 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
81 | { | 92 | { |
82 | struct generic_pm_domain *parent = genpd->parent; | 93 | struct gpd_link *link; |
94 | DEFINE_WAIT(wait); | ||
83 | int ret = 0; | 95 | int ret = 0; |
84 | 96 | ||
85 | start: | 97 | /* If the domain's master is being waited for, we have to wait too. */ |
86 | if (parent) { | 98 | for (;;) { |
87 | genpd_acquire_lock(parent); | 99 | prepare_to_wait(&genpd->status_wait_queue, &wait, |
88 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | 100 | TASK_UNINTERRUPTIBLE); |
89 | } else { | 101 | if (genpd->status != GPD_STATE_WAIT_MASTER) |
102 | break; | ||
103 | mutex_unlock(&genpd->lock); | ||
104 | |||
105 | schedule(); | ||
106 | |||
90 | mutex_lock(&genpd->lock); | 107 | mutex_lock(&genpd->lock); |
91 | } | 108 | } |
109 | finish_wait(&genpd->status_wait_queue, &wait); | ||
92 | 110 | ||
93 | if (genpd->status == GPD_STATE_ACTIVE | 111 | if (genpd->status == GPD_STATE_ACTIVE |
94 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) | 112 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) |
95 | goto out; | 113 | return 0; |
96 | 114 | ||
97 | if (genpd->status != GPD_STATE_POWER_OFF) { | 115 | if (genpd->status != GPD_STATE_POWER_OFF) { |
98 | genpd_set_active(genpd); | 116 | genpd_set_active(genpd); |
99 | goto out; | 117 | return 0; |
100 | } | 118 | } |
101 | 119 | ||
102 | if (parent && parent->status != GPD_STATE_ACTIVE) { | 120 | /* |
121 | * The list is guaranteed not to change while the loop below is being | ||
122 | * executed, unless one of the masters' .power_on() callbacks fiddles | ||
123 | * with it. | ||
124 | */ | ||
125 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | ||
126 | genpd_sd_counter_inc(link->master); | ||
127 | genpd->status = GPD_STATE_WAIT_MASTER; | ||
128 | |||
103 | mutex_unlock(&genpd->lock); | 129 | mutex_unlock(&genpd->lock); |
104 | genpd_release_lock(parent); | ||
105 | 130 | ||
106 | ret = pm_genpd_poweron(parent); | 131 | ret = pm_genpd_poweron(link->master); |
107 | if (ret) | ||
108 | return ret; | ||
109 | 132 | ||
110 | goto start; | 133 | mutex_lock(&genpd->lock); |
134 | |||
135 | /* | ||
136 | * The "wait for master" status is guaranteed not to change | ||
137 | * while the master is powering on. | ||
138 | */ | ||
139 | genpd->status = GPD_STATE_POWER_OFF; | ||
140 | wake_up_all(&genpd->status_wait_queue); | ||
141 | if (ret) { | ||
142 | genpd_sd_counter_dec(link->master); | ||
143 | goto err; | ||
144 | } | ||
111 | } | 145 | } |
112 | 146 | ||
113 | if (genpd->power_on) { | 147 | if (genpd->power_on) { |
114 | ret = genpd->power_on(genpd); | 148 | ret = genpd->power_on(genpd); |
115 | if (ret) | 149 | if (ret) |
116 | goto out; | 150 | goto err; |
117 | } | 151 | } |
118 | 152 | ||
119 | genpd_set_active(genpd); | 153 | genpd_set_active(genpd); |
120 | if (parent) | ||
121 | parent->sd_count++; | ||
122 | 154 | ||
123 | out: | 155 | return 0; |
124 | mutex_unlock(&genpd->lock); | 156 | |
125 | if (parent) | 157 | err: |
126 | genpd_release_lock(parent); | 158 | list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) |
159 | genpd_sd_counter_dec(link->master); | ||
127 | 160 | ||
128 | return ret; | 161 | return ret; |
129 | } | 162 | } |
130 | 163 | ||
164 | /** | ||
165 | * pm_genpd_poweron - Restore power to a given PM domain and its masters. | ||
166 | * @genpd: PM domain to power up. | ||
167 | */ | ||
168 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | ||
169 | { | ||
170 | int ret; | ||
171 | |||
172 | mutex_lock(&genpd->lock); | ||
173 | ret = __pm_genpd_poweron(genpd); | ||
174 | mutex_unlock(&genpd->lock); | ||
175 | return ret; | ||
176 | } | ||
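For reference, a hedged sketch of the call side: pm_genpd_poweron() takes the domain lock and delegates to __pm_genpd_poweron(), which walks slave_links and powers up every master first. The domain and function names below are hypothetical:

    #include <linux/kernel.h>
    #include <linux/pm_domain.h>

    static struct generic_pm_domain my_domain;  /* made-up example domain */

    static int my_platform_wake_domain(void)
    {
        int ret;

        /* Powers up my_domain and, recursively, all of its masters first. */
        ret = pm_genpd_poweron(&my_domain);
        if (ret)
            pr_err("PM domain power-on failed: %d\n", ret);

        return ret;
    }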
177 | |||
131 | #endif /* CONFIG_PM */ | 178 | #endif /* CONFIG_PM */ |
132 | 179 | ||
133 | #ifdef CONFIG_PM_RUNTIME | 180 | #ifdef CONFIG_PM_RUNTIME |
134 | 181 | ||
135 | /** | 182 | /** |
136 | * __pm_genpd_save_device - Save the pre-suspend state of a device. | 183 | * __pm_genpd_save_device - Save the pre-suspend state of a device. |
137 | * @dle: Device list entry of the device to save the state of. | 184 | * @pdd: Domain data of the device to save the state of. |
138 | * @genpd: PM domain the device belongs to. | 185 | * @genpd: PM domain the device belongs to. |
139 | */ | 186 | */ |
140 | static int __pm_genpd_save_device(struct dev_list_entry *dle, | 187 | static int __pm_genpd_save_device(struct pm_domain_data *pdd, |
141 | struct generic_pm_domain *genpd) | 188 | struct generic_pm_domain *genpd) |
142 | __releases(&genpd->lock) __acquires(&genpd->lock) | 189 | __releases(&genpd->lock) __acquires(&genpd->lock) |
143 | { | 190 | { |
144 | struct device *dev = dle->dev; | 191 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
192 | struct device *dev = pdd->dev; | ||
145 | struct device_driver *drv = dev->driver; | 193 | struct device_driver *drv = dev->driver; |
146 | int ret = 0; | 194 | int ret = 0; |
147 | 195 | ||
148 | if (dle->need_restore) | 196 | if (gpd_data->need_restore) |
149 | return 0; | 197 | return 0; |
150 | 198 | ||
151 | mutex_unlock(&genpd->lock); | 199 | mutex_unlock(&genpd->lock); |
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle, | |||
163 | mutex_lock(&genpd->lock); | 211 | mutex_lock(&genpd->lock); |
164 | 212 | ||
165 | if (!ret) | 213 | if (!ret) |
166 | dle->need_restore = true; | 214 | gpd_data->need_restore = true; |
167 | 215 | ||
168 | return ret; | 216 | return ret; |
169 | } | 217 | } |
170 | 218 | ||
171 | /** | 219 | /** |
172 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. | 220 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. |
173 | * @dle: Device list entry of the device to restore the state of. | 221 | * @pdd: Domain data of the device to restore the state of. |
174 | * @genpd: PM domain the device belongs to. | 222 | * @genpd: PM domain the device belongs to. |
175 | */ | 223 | */ |
176 | static void __pm_genpd_restore_device(struct dev_list_entry *dle, | 224 | static void __pm_genpd_restore_device(struct pm_domain_data *pdd, |
177 | struct generic_pm_domain *genpd) | 225 | struct generic_pm_domain *genpd) |
178 | __releases(&genpd->lock) __acquires(&genpd->lock) | 226 | __releases(&genpd->lock) __acquires(&genpd->lock) |
179 | { | 227 | { |
180 | struct device *dev = dle->dev; | 228 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
229 | struct device *dev = pdd->dev; | ||
181 | struct device_driver *drv = dev->driver; | 230 | struct device_driver *drv = dev->driver; |
182 | 231 | ||
183 | if (!dle->need_restore) | 232 | if (!gpd_data->need_restore) |
184 | return; | 233 | return; |
185 | 234 | ||
186 | mutex_unlock(&genpd->lock); | 235 | mutex_unlock(&genpd->lock); |
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, | |||
197 | 246 | ||
198 | mutex_lock(&genpd->lock); | 247 | mutex_lock(&genpd->lock); |
199 | 248 | ||
200 | dle->need_restore = false; | 249 | gpd_data->need_restore = false; |
201 | } | 250 | } |
202 | 251 | ||
203 | /** | 252 | /** |
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, | |||
211 | */ | 260 | */ |
212 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) | 261 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) |
213 | { | 262 | { |
214 | return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | 263 | return genpd->status == GPD_STATE_WAIT_MASTER |
264 | || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | ||
215 | } | 265 | } |
216 | 266 | ||
217 | /** | 267 | /** |
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | |||
238 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | 288 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) |
239 | __releases(&genpd->lock) __acquires(&genpd->lock) | 289 | __releases(&genpd->lock) __acquires(&genpd->lock) |
240 | { | 290 | { |
241 | struct generic_pm_domain *parent; | 291 | struct pm_domain_data *pdd; |
242 | struct dev_list_entry *dle; | 292 | struct gpd_link *link; |
243 | unsigned int not_suspended; | 293 | unsigned int not_suspended; |
244 | int ret = 0; | 294 | int ret = 0; |
245 | 295 | ||
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
247 | /* | 297 | /* |
248 | * Do not try to power off the domain in the following situations: | 298 | * Do not try to power off the domain in the following situations: |
249 | * (1) The domain is already in the "power off" state. | 299 | * (1) The domain is already in the "power off" state. |
250 | * (2) System suspend is in progress. | 300 | * (2) The domain is waiting for its master to power up. |
251 | * (3) One of the domain's devices is being resumed right now. | 301 | * (3) One of the domain's devices is being resumed right now. |
302 | * (4) System suspend is in progress. | ||
252 | */ | 303 | */ |
253 | if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 | 304 | if (genpd->status == GPD_STATE_POWER_OFF |
254 | || genpd->resume_count > 0) | 305 | || genpd->status == GPD_STATE_WAIT_MASTER |
306 | || genpd->resume_count > 0 || genpd->prepared_count > 0) | ||
255 | return 0; | 307 | return 0; |
256 | 308 | ||
257 | if (genpd->sd_count > 0) | 309 | if (atomic_read(&genpd->sd_count) > 0) |
258 | return -EBUSY; | 310 | return -EBUSY; |
259 | 311 | ||
260 | not_suspended = 0; | 312 | not_suspended = 0; |
261 | list_for_each_entry(dle, &genpd->dev_list, node) | 313 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
262 | if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) | 314 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
315 | || pdd->dev->power.irq_safe)) | ||
263 | not_suspended++; | 316 | not_suspended++; |
264 | 317 | ||
265 | if (not_suspended > genpd->in_progress) | 318 | if (not_suspended > genpd->in_progress) |
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
282 | genpd->status = GPD_STATE_BUSY; | 335 | genpd->status = GPD_STATE_BUSY; |
283 | genpd->poweroff_task = current; | 336 | genpd->poweroff_task = current; |
284 | 337 | ||
285 | list_for_each_entry_reverse(dle, &genpd->dev_list, node) { | 338 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { |
286 | ret = __pm_genpd_save_device(dle, genpd); | 339 | ret = atomic_read(&genpd->sd_count) == 0 ? |
340 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; | ||
341 | |||
342 | if (genpd_abort_poweroff(genpd)) | ||
343 | goto out; | ||
344 | |||
287 | if (ret) { | 345 | if (ret) { |
288 | genpd_set_active(genpd); | 346 | genpd_set_active(genpd); |
289 | goto out; | 347 | goto out; |
290 | } | 348 | } |
291 | 349 | ||
292 | if (genpd_abort_poweroff(genpd)) | ||
293 | goto out; | ||
294 | |||
295 | if (genpd->status == GPD_STATE_REPEAT) { | 350 | if (genpd->status == GPD_STATE_REPEAT) { |
296 | genpd->poweroff_task = NULL; | 351 | genpd->poweroff_task = NULL; |
297 | goto start; | 352 | goto start; |
298 | } | 353 | } |
299 | } | 354 | } |
300 | 355 | ||
301 | parent = genpd->parent; | 356 | if (genpd->power_off) { |
302 | if (parent) { | 357 | if (atomic_read(&genpd->sd_count) > 0) { |
303 | mutex_unlock(&genpd->lock); | 358 | ret = -EBUSY; |
304 | |||
305 | genpd_acquire_lock(parent); | ||
306 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
307 | |||
308 | if (genpd_abort_poweroff(genpd)) { | ||
309 | genpd_release_lock(parent); | ||
310 | goto out; | 359 | goto out; |
311 | } | 360 | } |
312 | } | ||
313 | 361 | ||
314 | if (genpd->power_off) { | 362 | /* |
363 | * If sd_count > 0 at this point, one of the subdomains hasn't | ||
364 | * managed to call pm_genpd_poweron() for the master yet after | ||
365 | * incrementing it. In that case pm_genpd_poweron() will wait | ||
366 | * for us to drop the lock, so we can call .power_off() and let | ||
367 | * pm_genpd_poweron() restore power for us (this shouldn't | ||
368 | * happen very often). | ||
369 | */ | ||
315 | ret = genpd->power_off(genpd); | 370 | ret = genpd->power_off(genpd); |
316 | if (ret == -EBUSY) { | 371 | if (ret == -EBUSY) { |
317 | genpd_set_active(genpd); | 372 | genpd_set_active(genpd); |
318 | if (parent) | ||
319 | genpd_release_lock(parent); | ||
320 | |||
321 | goto out; | 373 | goto out; |
322 | } | 374 | } |
323 | } | 375 | } |
324 | 376 | ||
325 | genpd->status = GPD_STATE_POWER_OFF; | 377 | genpd->status = GPD_STATE_POWER_OFF; |
326 | 378 | ||
327 | if (parent) { | 379 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
328 | genpd_sd_counter_dec(parent); | 380 | genpd_sd_counter_dec(link->master); |
329 | if (parent->sd_count == 0) | 381 | genpd_queue_power_off_work(link->master); |
330 | genpd_queue_power_off_work(parent); | ||
331 | |||
332 | genpd_release_lock(parent); | ||
333 | } | 382 | } |
334 | 383 | ||
335 | out: | 384 | out: |
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
371 | if (IS_ERR(genpd)) | 420 | if (IS_ERR(genpd)) |
372 | return -EINVAL; | 421 | return -EINVAL; |
373 | 422 | ||
423 | might_sleep_if(!genpd->dev_irq_safe); | ||
424 | |||
374 | if (genpd->stop_device) { | 425 | if (genpd->stop_device) { |
375 | int ret = genpd->stop_device(dev); | 426 | int ret = genpd->stop_device(dev); |
376 | if (ret) | 427 | if (ret) |
377 | return ret; | 428 | return ret; |
378 | } | 429 | } |
379 | 430 | ||
431 | /* | ||
432 | * If power.irq_safe is set, this routine will be run with interrupts | ||
433 | * off, so it can't use mutexes. | ||
434 | */ | ||
435 | if (dev->power.irq_safe) | ||
436 | return 0; | ||
437 | |||
380 | mutex_lock(&genpd->lock); | 438 | mutex_lock(&genpd->lock); |
381 | genpd->in_progress++; | 439 | genpd->in_progress++; |
382 | pm_genpd_poweroff(genpd); | 440 | pm_genpd_poweroff(genpd); |
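The new power.irq_safe handling means an IRQ-safe device is only stopped here; the domain is left powered because the mutex cannot be taken with interrupts off. A sketch, assuming a hypothetical driver probe, of how a device opts into that behavior:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int my_probe(struct device *dev)  /* hypothetical probe routine */
    {
        /*
         * Mark the device's runtime PM callbacks as safe to run with
         * interrupts disabled; genpd will then only stop/start the device
         * and never power its domain off from these paths.
         */
        pm_runtime_irq_safe(dev);
        pm_runtime_enable(dev);
        return 0;
    }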
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
387 | } | 445 | } |
388 | 446 | ||
389 | /** | 447 | /** |
390 | * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
391 | * @dev: Device to resume. | ||
392 | * @genpd: PM domain the device belongs to. | ||
393 | */ | ||
394 | static void __pm_genpd_runtime_resume(struct device *dev, | ||
395 | struct generic_pm_domain *genpd) | ||
396 | { | ||
397 | struct dev_list_entry *dle; | ||
398 | |||
399 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
400 | if (dle->dev == dev) { | ||
401 | __pm_genpd_restore_device(dle, genpd); | ||
402 | break; | ||
403 | } | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | 448 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. |
409 | * @dev: Device to resume. | 449 | * @dev: Device to resume. |
410 | * | 450 | * |
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
424 | if (IS_ERR(genpd)) | 464 | if (IS_ERR(genpd)) |
425 | return -EINVAL; | 465 | return -EINVAL; |
426 | 466 | ||
427 | ret = pm_genpd_poweron(genpd); | 467 | might_sleep_if(!genpd->dev_irq_safe); |
428 | if (ret) | 468 | |
429 | return ret; | 469 | /* If power.irq_safe, the PM domain is never powered off. */ |
470 | if (dev->power.irq_safe) | ||
471 | goto out; | ||
430 | 472 | ||
431 | mutex_lock(&genpd->lock); | 473 | mutex_lock(&genpd->lock); |
474 | ret = __pm_genpd_poweron(genpd); | ||
475 | if (ret) { | ||
476 | mutex_unlock(&genpd->lock); | ||
477 | return ret; | ||
478 | } | ||
432 | genpd->status = GPD_STATE_BUSY; | 479 | genpd->status = GPD_STATE_BUSY; |
433 | genpd->resume_count++; | 480 | genpd->resume_count++; |
434 | for (;;) { | 481 | for (;;) { |
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
448 | mutex_lock(&genpd->lock); | 495 | mutex_lock(&genpd->lock); |
449 | } | 496 | } |
450 | finish_wait(&genpd->status_wait_queue, &wait); | 497 | finish_wait(&genpd->status_wait_queue, &wait); |
451 | __pm_genpd_runtime_resume(dev, genpd); | 498 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); |
452 | genpd->resume_count--; | 499 | genpd->resume_count--; |
453 | genpd_set_active(genpd); | 500 | genpd_set_active(genpd); |
454 | wake_up_all(&genpd->status_wait_queue); | 501 | wake_up_all(&genpd->status_wait_queue); |
455 | mutex_unlock(&genpd->lock); | 502 | mutex_unlock(&genpd->lock); |
456 | 503 | ||
504 | out: | ||
457 | if (genpd->start_device) | 505 | if (genpd->start_device) |
458 | genpd->start_device(dev); | 506 | genpd->start_device(dev); |
459 | 507 | ||
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void) | |||
478 | #else | 526 | #else |
479 | 527 | ||
480 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 528 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
481 | static inline void __pm_genpd_runtime_resume(struct device *dev, | ||
482 | struct generic_pm_domain *genpd) {} | ||
483 | 529 | ||
484 | #define pm_genpd_runtime_suspend NULL | 530 | #define pm_genpd_runtime_suspend NULL |
485 | #define pm_genpd_runtime_resume NULL | 531 | #define pm_genpd_runtime_resume NULL |
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
489 | #ifdef CONFIG_PM_SLEEP | 535 | #ifdef CONFIG_PM_SLEEP |
490 | 536 | ||
491 | /** | 537 | /** |
492 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. | 538 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. |
493 | * @genpd: PM domain to power off, if possible. | 539 | * @genpd: PM domain to power off, if possible. |
494 | * | 540 | * |
495 | * Check if the given PM domain can be powered off (during system suspend or | 541 | * Check if the given PM domain can be powered off (during system suspend or |
496 | * hibernation) and do that if so. Also, in that case propagate to its parent. | 542 | * hibernation) and do that if so. Also, in that case propagate to its masters. |
497 | * | 543 | * |
498 | * This function is only called in "noirq" stages of system power transitions, | 544 | * This function is only called in "noirq" stages of system power transitions, |
499 | * so it need not acquire locks (all of the "noirq" callbacks are executed | 545 | * so it need not acquire locks (all of the "noirq" callbacks are executed |
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
501 | */ | 547 | */ |
502 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | 548 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) |
503 | { | 549 | { |
504 | struct generic_pm_domain *parent = genpd->parent; | 550 | struct gpd_link *link; |
505 | 551 | ||
506 | if (genpd->status == GPD_STATE_POWER_OFF) | 552 | if (genpd->status == GPD_STATE_POWER_OFF) |
507 | return; | 553 | return; |
508 | 554 | ||
509 | if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) | 555 | if (genpd->suspended_count != genpd->device_count |
556 | || atomic_read(&genpd->sd_count) > 0) | ||
510 | return; | 557 | return; |
511 | 558 | ||
512 | if (genpd->power_off) | 559 | if (genpd->power_off) |
513 | genpd->power_off(genpd); | 560 | genpd->power_off(genpd); |
514 | 561 | ||
515 | genpd->status = GPD_STATE_POWER_OFF; | 562 | genpd->status = GPD_STATE_POWER_OFF; |
516 | if (parent) { | 563 | |
517 | genpd_sd_counter_dec(parent); | 564 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
518 | pm_genpd_sync_poweroff(parent); | 565 | genpd_sd_counter_dec(link->master); |
566 | pm_genpd_sync_poweroff(link->master); | ||
519 | } | 567 | } |
520 | } | 568 | } |
521 | 569 | ||
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) | |||
666 | if (ret) | 714 | if (ret) |
667 | return ret; | 715 | return ret; |
668 | 716 | ||
669 | if (device_may_wakeup(dev) | 717 | if (dev->power.wakeup_path |
670 | && genpd->active_wakeup && genpd->active_wakeup(dev)) | 718 | && genpd->active_wakeup && genpd->active_wakeup(dev)) |
671 | return 0; | 719 | return 0; |
672 | 720 | ||
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev) | |||
890 | if (ret) | 938 | if (ret) |
891 | return ret; | 939 | return ret; |
892 | 940 | ||
893 | if (device_may_wakeup(dev) | 941 | if (dev->power.wakeup_path |
894 | && genpd->active_wakeup && genpd->active_wakeup(dev)) | 942 | && genpd->active_wakeup && genpd->active_wakeup(dev)) |
895 | return 0; | 943 | return 0; |
896 | 944 | ||
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev) | |||
1034 | */ | 1082 | */ |
1035 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | 1083 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) |
1036 | { | 1084 | { |
1037 | struct dev_list_entry *dle; | 1085 | struct generic_pm_domain_data *gpd_data; |
1086 | struct pm_domain_data *pdd; | ||
1038 | int ret = 0; | 1087 | int ret = 0; |
1039 | 1088 | ||
1040 | dev_dbg(dev, "%s()\n", __func__); | 1089 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1054 | goto out; | 1103 | goto out; |
1055 | } | 1104 | } |
1056 | 1105 | ||
1057 | list_for_each_entry(dle, &genpd->dev_list, node) | 1106 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
1058 | if (dle->dev == dev) { | 1107 | if (pdd->dev == dev) { |
1059 | ret = -EINVAL; | 1108 | ret = -EINVAL; |
1060 | goto out; | 1109 | goto out; |
1061 | } | 1110 | } |
1062 | 1111 | ||
1063 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); | 1112 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
1064 | if (!dle) { | 1113 | if (!gpd_data) { |
1065 | ret = -ENOMEM; | 1114 | ret = -ENOMEM; |
1066 | goto out; | 1115 | goto out; |
1067 | } | 1116 | } |
1068 | 1117 | ||
1069 | dle->dev = dev; | ||
1070 | dle->need_restore = false; | ||
1071 | list_add_tail(&dle->node, &genpd->dev_list); | ||
1072 | genpd->device_count++; | 1118 | genpd->device_count++; |
1073 | 1119 | ||
1074 | spin_lock_irq(&dev->power.lock); | ||
1075 | dev->pm_domain = &genpd->domain; | 1120 | dev->pm_domain = &genpd->domain; |
1076 | spin_unlock_irq(&dev->power.lock); | 1121 | dev_pm_get_subsys_data(dev); |
1122 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1123 | gpd_data->base.dev = dev; | ||
1124 | gpd_data->need_restore = false; | ||
1125 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | ||
1077 | 1126 | ||
1078 | out: | 1127 | out: |
1079 | genpd_release_lock(genpd); | 1128 | genpd_release_lock(genpd); |
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1089 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1138 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
1090 | struct device *dev) | 1139 | struct device *dev) |
1091 | { | 1140 | { |
1092 | struct dev_list_entry *dle; | 1141 | struct pm_domain_data *pdd; |
1093 | int ret = -EINVAL; | 1142 | int ret = -EINVAL; |
1094 | 1143 | ||
1095 | dev_dbg(dev, "%s()\n", __func__); | 1144 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1104 | goto out; | 1153 | goto out; |
1105 | } | 1154 | } |
1106 | 1155 | ||
1107 | list_for_each_entry(dle, &genpd->dev_list, node) { | 1156 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
1108 | if (dle->dev != dev) | 1157 | if (pdd->dev != dev) |
1109 | continue; | 1158 | continue; |
1110 | 1159 | ||
1111 | spin_lock_irq(&dev->power.lock); | 1160 | list_del_init(&pdd->list_node); |
1161 | pdd->dev = NULL; | ||
1162 | dev_pm_put_subsys_data(dev); | ||
1112 | dev->pm_domain = NULL; | 1163 | dev->pm_domain = NULL; |
1113 | spin_unlock_irq(&dev->power.lock); | 1164 | kfree(to_gpd_data(pdd)); |
1114 | 1165 | ||
1115 | genpd->device_count--; | 1166 | genpd->device_count--; |
1116 | list_del(&dle->node); | ||
1117 | kfree(dle); | ||
1118 | 1167 | ||
1119 | ret = 0; | 1168 | ret = 0; |
1120 | break; | 1169 | break; |
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1129 | /** | 1178 | /** |
1130 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1179 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1131 | * @genpd: Master PM domain to add the subdomain to. | 1180 | * @genpd: Master PM domain to add the subdomain to. |
1132 | * @new_subdomain: Subdomain to be added. | 1181 | * @subdomain: Subdomain to be added. |
1133 | */ | 1182 | */ |
1134 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1183 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
1135 | struct generic_pm_domain *new_subdomain) | 1184 | struct generic_pm_domain *subdomain) |
1136 | { | 1185 | { |
1137 | struct generic_pm_domain *subdomain; | 1186 | struct gpd_link *link; |
1138 | int ret = 0; | 1187 | int ret = 0; |
1139 | 1188 | ||
1140 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) | 1189 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1141 | return -EINVAL; | 1190 | return -EINVAL; |
1142 | 1191 | ||
1143 | start: | 1192 | start: |
1144 | genpd_acquire_lock(genpd); | 1193 | genpd_acquire_lock(genpd); |
1145 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); | 1194 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
1146 | 1195 | ||
1147 | if (new_subdomain->status != GPD_STATE_POWER_OFF | 1196 | if (subdomain->status != GPD_STATE_POWER_OFF |
1148 | && new_subdomain->status != GPD_STATE_ACTIVE) { | 1197 | && subdomain->status != GPD_STATE_ACTIVE) { |
1149 | mutex_unlock(&new_subdomain->lock); | 1198 | mutex_unlock(&subdomain->lock); |
1150 | genpd_release_lock(genpd); | 1199 | genpd_release_lock(genpd); |
1151 | goto start; | 1200 | goto start; |
1152 | } | 1201 | } |
1153 | 1202 | ||
1154 | if (genpd->status == GPD_STATE_POWER_OFF | 1203 | if (genpd->status == GPD_STATE_POWER_OFF |
1155 | && new_subdomain->status != GPD_STATE_POWER_OFF) { | 1204 | && subdomain->status != GPD_STATE_POWER_OFF) { |
1156 | ret = -EINVAL; | 1205 | ret = -EINVAL; |
1157 | goto out; | 1206 | goto out; |
1158 | } | 1207 | } |
1159 | 1208 | ||
1160 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1209 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
1161 | if (subdomain == new_subdomain) { | 1210 | if (link->slave == subdomain && link->master == genpd) { |
1162 | ret = -EINVAL; | 1211 | ret = -EINVAL; |
1163 | goto out; | 1212 | goto out; |
1164 | } | 1213 | } |
1165 | } | 1214 | } |
1166 | 1215 | ||
1167 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); | 1216 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
1168 | new_subdomain->parent = genpd; | 1217 | if (!link) { |
1218 | ret = -ENOMEM; | ||
1219 | goto out; | ||
1220 | } | ||
1221 | link->master = genpd; | ||
1222 | list_add_tail(&link->master_node, &genpd->master_links); | ||
1223 | link->slave = subdomain; | ||
1224 | list_add_tail(&link->slave_node, &subdomain->slave_links); | ||
1169 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1225 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1170 | genpd->sd_count++; | 1226 | genpd_sd_counter_inc(genpd); |
1171 | 1227 | ||
1172 | out: | 1228 | out: |
1173 | mutex_unlock(&new_subdomain->lock); | 1229 | mutex_unlock(&subdomain->lock); |
1174 | genpd_release_lock(genpd); | 1230 | genpd_release_lock(genpd); |
1175 | 1231 | ||
1176 | return ret; | 1232 | return ret; |
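With the parent pointer replaced by gpd_link objects, a domain can now have several masters; each link is allocated here and threaded onto both the master's master_links and the subdomain's slave_links lists. A hedged sketch of wiring two made-up domains together:

    #include <linux/init.h>
    #include <linux/pm_domain.h>

    static struct generic_pm_domain soc_domain;  /* hypothetical master */
    static struct generic_pm_domain gfx_domain;  /* hypothetical subdomain */

    static int __init my_domains_init(void)
    {
        pm_genpd_init(&soc_domain, NULL, false);  /* starts active */
        pm_genpd_init(&gfx_domain, NULL, true);   /* starts powered off */

        /* Allocates the gpd_link tying gfx_domain beneath soc_domain. */
        return pm_genpd_add_subdomain(&soc_domain, &gfx_domain);
    }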
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
1179 | /** | 1235 | /** |
1180 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1236 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
1181 | * @genpd: Master PM domain to remove the subdomain from. | 1237 | * @genpd: Master PM domain to remove the subdomain from. |
1182 | * @target: Subdomain to be removed. | 1238 | * @subdomain: Subdomain to be removed. |
1183 | */ | 1239 | */ |
1184 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1240 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
1185 | struct generic_pm_domain *target) | 1241 | struct generic_pm_domain *subdomain) |
1186 | { | 1242 | { |
1187 | struct generic_pm_domain *subdomain; | 1243 | struct gpd_link *link; |
1188 | int ret = -EINVAL; | 1244 | int ret = -EINVAL; |
1189 | 1245 | ||
1190 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) | 1246 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1191 | return -EINVAL; | 1247 | return -EINVAL; |
1192 | 1248 | ||
1193 | start: | 1249 | start: |
1194 | genpd_acquire_lock(genpd); | 1250 | genpd_acquire_lock(genpd); |
1195 | 1251 | ||
1196 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1252 | list_for_each_entry(link, &genpd->master_links, master_node) { |
1197 | if (subdomain != target) | 1253 | if (link->slave != subdomain) |
1198 | continue; | 1254 | continue; |
1199 | 1255 | ||
1200 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1256 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
1206 | goto start; | 1262 | goto start; |
1207 | } | 1263 | } |
1208 | 1264 | ||
1209 | list_del(&subdomain->sd_node); | 1265 | list_del(&link->master_node); |
1210 | subdomain->parent = NULL; | 1266 | list_del(&link->slave_node); |
1267 | kfree(link); | ||
1211 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1268 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1212 | genpd_sd_counter_dec(genpd); | 1269 | genpd_sd_counter_dec(genpd); |
1213 | 1270 | ||
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1234 | if (IS_ERR_OR_NULL(genpd)) | 1291 | if (IS_ERR_OR_NULL(genpd)) |
1235 | return; | 1292 | return; |
1236 | 1293 | ||
1237 | INIT_LIST_HEAD(&genpd->sd_node); | 1294 | INIT_LIST_HEAD(&genpd->master_links); |
1238 | genpd->parent = NULL; | 1295 | INIT_LIST_HEAD(&genpd->slave_links); |
1239 | INIT_LIST_HEAD(&genpd->dev_list); | 1296 | INIT_LIST_HEAD(&genpd->dev_list); |
1240 | INIT_LIST_HEAD(&genpd->sd_list); | ||
1241 | mutex_init(&genpd->lock); | 1297 | mutex_init(&genpd->lock); |
1242 | genpd->gov = gov; | 1298 | genpd->gov = gov; |
1243 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1299 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
1244 | genpd->in_progress = 0; | 1300 | genpd->in_progress = 0; |
1245 | genpd->sd_count = 0; | 1301 | atomic_set(&genpd->sd_count, 0); |
1246 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1302 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
1247 | init_waitqueue_head(&genpd->status_wait_queue); | 1303 | init_waitqueue_head(&genpd->status_wait_queue); |
1248 | genpd->poweroff_task = NULL; | 1304 | genpd->poweroff_task = NULL; |
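pm_genpd_init() now seeds the two link lists and the atomic subdomain count. A minimal sketch of a platform defining its domain, with hypothetical callback names; the .power_on/.power_off fields are the real genpd members exercised above:

    #include <linux/pm_domain.h>

    static int my_rail_on(struct generic_pm_domain *genpd)
    {
        /* enable the domain's power rail or clocks here */
        return 0;
    }

    static int my_rail_off(struct generic_pm_domain *genpd)
    {
        /* gate the rail; returning -EBUSY keeps the domain active */
        return 0;
    }

    static struct generic_pm_domain my_pd = {
        .power_on = my_rail_on,
        .power_off = my_rail_off,
    };

    static void my_pd_setup(void)
    {
        /* is_off = true: start in GPD_STATE_POWER_OFF */
        pm_genpd_init(&my_pd, NULL, true);
    }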
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a85459126bc..59f8ab23548 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list); | |||
46 | LIST_HEAD(dpm_suspended_list); | 46 | LIST_HEAD(dpm_suspended_list); |
47 | LIST_HEAD(dpm_noirq_list); | 47 | LIST_HEAD(dpm_noirq_list); |
48 | 48 | ||
49 | struct suspend_stats suspend_stats; | ||
49 | static DEFINE_MUTEX(dpm_list_mtx); | 50 | static DEFINE_MUTEX(dpm_list_mtx); |
50 | static pm_message_t pm_transition; | 51 | static pm_message_t pm_transition; |
51 | 52 | ||
@@ -65,6 +66,7 @@ void device_pm_init(struct device *dev) | |||
65 | spin_lock_init(&dev->power.lock); | 66 | spin_lock_init(&dev->power.lock); |
66 | pm_runtime_init(dev); | 67 | pm_runtime_init(dev); |
67 | INIT_LIST_HEAD(&dev->power.entry); | 68 | INIT_LIST_HEAD(&dev->power.entry); |
69 | dev->power.power_state = PMSG_INVALID; | ||
68 | } | 70 | } |
69 | 71 | ||
70 | /** | 72 | /** |
@@ -96,6 +98,7 @@ void device_pm_add(struct device *dev) | |||
96 | dev_warn(dev, "parent %s should not be sleeping\n", | 98 | dev_warn(dev, "parent %s should not be sleeping\n", |
97 | dev_name(dev->parent)); | 99 | dev_name(dev->parent)); |
98 | list_add_tail(&dev->power.entry, &dpm_list); | 100 | list_add_tail(&dev->power.entry, &dpm_list); |
101 | dev_pm_qos_constraints_init(dev); | ||
99 | mutex_unlock(&dpm_list_mtx); | 102 | mutex_unlock(&dpm_list_mtx); |
100 | } | 103 | } |
101 | 104 | ||
@@ -109,6 +112,7 @@ void device_pm_remove(struct device *dev) | |||
109 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 112 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
110 | complete_all(&dev->power.completion); | 113 | complete_all(&dev->power.completion); |
111 | mutex_lock(&dpm_list_mtx); | 114 | mutex_lock(&dpm_list_mtx); |
115 | dev_pm_qos_constraints_destroy(dev); | ||
112 | list_del_init(&dev->power.entry); | 116 | list_del_init(&dev->power.entry); |
113 | mutex_unlock(&dpm_list_mtx); | 117 | mutex_unlock(&dpm_list_mtx); |
114 | device_wakeup_disable(dev); | 118 | device_wakeup_disable(dev); |
@@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state) | |||
464 | mutex_unlock(&dpm_list_mtx); | 468 | mutex_unlock(&dpm_list_mtx); |
465 | 469 | ||
466 | error = device_resume_noirq(dev, state); | 470 | error = device_resume_noirq(dev, state); |
467 | if (error) | 471 | if (error) { |
472 | suspend_stats.failed_resume_noirq++; | ||
473 | dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); | ||
474 | dpm_save_failed_dev(dev_name(dev)); | ||
468 | pm_dev_err(dev, state, " early", error); | 475 | pm_dev_err(dev, state, " early", error); |
476 | } | ||
469 | 477 | ||
470 | mutex_lock(&dpm_list_mtx); | 478 | mutex_lock(&dpm_list_mtx); |
471 | put_device(dev); | 479 | put_device(dev); |
@@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state) | |||
626 | mutex_unlock(&dpm_list_mtx); | 634 | mutex_unlock(&dpm_list_mtx); |
627 | 635 | ||
628 | error = device_resume(dev, state, false); | 636 | error = device_resume(dev, state, false); |
629 | if (error) | 637 | if (error) { |
638 | suspend_stats.failed_resume++; | ||
639 | dpm_save_failed_step(SUSPEND_RESUME); | ||
640 | dpm_save_failed_dev(dev_name(dev)); | ||
630 | pm_dev_err(dev, state, "", error); | 641 | pm_dev_err(dev, state, "", error); |
642 | } | ||
631 | 643 | ||
632 | mutex_lock(&dpm_list_mtx); | 644 | mutex_lock(&dpm_list_mtx); |
633 | } | 645 | } |
@@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state) | |||
802 | mutex_lock(&dpm_list_mtx); | 814 | mutex_lock(&dpm_list_mtx); |
803 | if (error) { | 815 | if (error) { |
804 | pm_dev_err(dev, state, " late", error); | 816 | pm_dev_err(dev, state, " late", error); |
817 | suspend_stats.failed_suspend_noirq++; | ||
818 | dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); | ||
819 | dpm_save_failed_dev(dev_name(dev)); | ||
805 | put_device(dev); | 820 | put_device(dev); |
806 | break; | 821 | break; |
807 | } | 822 | } |
@@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
902 | } | 917 | } |
903 | 918 | ||
904 | End: | 919 | End: |
905 | dev->power.is_suspended = !error; | 920 | if (!error) { |
921 | dev->power.is_suspended = true; | ||
922 | if (dev->power.wakeup_path && dev->parent) | ||
923 | dev->parent->power.wakeup_path = true; | ||
924 | } | ||
906 | 925 | ||
907 | device_unlock(dev); | 926 | device_unlock(dev); |
908 | complete_all(&dev->power.completion); | 927 | complete_all(&dev->power.completion); |
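The new power.wakeup_path flag is seeded from device_may_wakeup() in device_prepare() below and, as shown above, propagated to the parent when a child suspends along a wakeup path; genpd's noirq callbacks now test it instead of calling device_may_wakeup() directly. A hedged sketch of the driver side that feeds this flag:

    #include <linux/device.h>
    #include <linux/pm_wakeup.h>

    static int my_wakeup_capable_probe(struct device *dev)  /* hypothetical */
    {
        /*
         * device_may_wakeup(dev) becomes true, so device_prepare() seeds
         * power.wakeup_path; the flag then bubbles up to parents during
         * suspend and lets genpd keep the domain powered for wakeup.
         */
        device_init_wakeup(dev, true);
        return 0;
    }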
@@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie) | |||
923 | int error; | 942 | int error; |
924 | 943 | ||
925 | error = __device_suspend(dev, pm_transition, true); | 944 | error = __device_suspend(dev, pm_transition, true); |
926 | if (error) | 945 | if (error) { |
946 | dpm_save_failed_dev(dev_name(dev)); | ||
927 | pm_dev_err(dev, pm_transition, " async", error); | 947 | pm_dev_err(dev, pm_transition, " async", error); |
948 | } | ||
928 | 949 | ||
929 | put_device(dev); | 950 | put_device(dev); |
930 | } | 951 | } |
@@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state) | |||
967 | mutex_lock(&dpm_list_mtx); | 988 | mutex_lock(&dpm_list_mtx); |
968 | if (error) { | 989 | if (error) { |
969 | pm_dev_err(dev, state, "", error); | 990 | pm_dev_err(dev, state, "", error); |
991 | dpm_save_failed_dev(dev_name(dev)); | ||
970 | put_device(dev); | 992 | put_device(dev); |
971 | break; | 993 | break; |
972 | } | 994 | } |
@@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state) | |||
980 | async_synchronize_full(); | 1002 | async_synchronize_full(); |
981 | if (!error) | 1003 | if (!error) |
982 | error = async_error; | 1004 | error = async_error; |
983 | if (!error) | 1005 | if (error) { |
1006 | suspend_stats.failed_suspend++; | ||
1007 | dpm_save_failed_step(SUSPEND_SUSPEND); | ||
1008 | } else | ||
984 | dpm_show_time(starttime, state, NULL); | 1009 | dpm_show_time(starttime, state, NULL); |
985 | return error; | 1010 | return error; |
986 | } | 1011 | } |
@@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
999 | 1024 | ||
1000 | device_lock(dev); | 1025 | device_lock(dev); |
1001 | 1026 | ||
1027 | dev->power.wakeup_path = device_may_wakeup(dev); | ||
1028 | |||
1002 | if (dev->pm_domain) { | 1029 | if (dev->pm_domain) { |
1003 | pm_dev_dbg(dev, state, "preparing power domain "); | 1030 | pm_dev_dbg(dev, state, "preparing power domain "); |
1004 | if (dev->pm_domain->ops.prepare) | 1031 | if (dev->pm_domain->ops.prepare) |
@@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state) | |||
1088 | int error; | 1115 | int error; |
1089 | 1116 | ||
1090 | error = dpm_prepare(state); | 1117 | error = dpm_prepare(state); |
1091 | if (!error) | 1118 | if (error) { |
1119 | suspend_stats.failed_prepare++; | ||
1120 | dpm_save_failed_step(SUSPEND_PREPARE); | ||
1121 | } else | ||
1092 | error = dpm_suspend(state); | 1122 | error = dpm_suspend(state); |
1093 | return error; | 1123 | return error; |
1094 | } | 1124 | } |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index b23de185cb0..434a6c01167 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -73,6 +73,7 @@ struct opp { | |||
73 | * RCU usage: nodes are not modified in the list of device_opp, | 73 | * RCU usage: nodes are not modified in the list of device_opp, |
74 | * however addition is possible and is secured by dev_opp_list_lock | 74 | * however addition is possible and is secured by dev_opp_list_lock |
75 | * @dev: device pointer | 75 | * @dev: device pointer |
76 | * @head: notifier head used to notify of OPP availability changes. | ||
76 | * @opp_list: list of opps | 77 | * @opp_list: list of opps |
77 | * | 78 | * |
78 | * This is an internal data structure maintaining the link to opps attached to | 79 | * This is an internal data structure maintaining the link to opps attached to |
@@ -83,6 +84,7 @@ struct device_opp { | |||
83 | struct list_head node; | 84 | struct list_head node; |
84 | 85 | ||
85 | struct device *dev; | 86 | struct device *dev; |
87 | struct srcu_notifier_head head; | ||
86 | struct list_head opp_list; | 88 | struct list_head opp_list; |
87 | }; | 89 | }; |
88 | 90 | ||
@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
404 | } | 406 | } |
405 | 407 | ||
406 | dev_opp->dev = dev; | 408 | dev_opp->dev = dev; |
409 | srcu_init_notifier_head(&dev_opp->head); | ||
407 | INIT_LIST_HEAD(&dev_opp->opp_list); | 410 | INIT_LIST_HEAD(&dev_opp->opp_list); |
408 | 411 | ||
409 | /* Secure the device list modification */ | 412 | /* Secure the device list modification */ |
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
428 | list_add_rcu(&new_opp->node, head); | 431 | list_add_rcu(&new_opp->node, head); |
429 | mutex_unlock(&dev_opp_list_lock); | 432 | mutex_unlock(&dev_opp_list_lock); |
430 | 433 | ||
434 | /* | ||
435 | * Notify listeners of the change in the availability of the | ||
436 | * operable frequency/voltage list. | ||
437 | */ | ||
438 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); | ||
431 | return 0; | 439 | return 0; |
432 | } | 440 | } |
433 | 441 | ||
@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
504 | mutex_unlock(&dev_opp_list_lock); | 512 | mutex_unlock(&dev_opp_list_lock); |
505 | synchronize_rcu(); | 513 | synchronize_rcu(); |
506 | 514 | ||
515 | /* Notify the change of the OPP availability */ | ||
516 | if (availability_req) | ||
517 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, | ||
518 | new_opp); | ||
519 | else | ||
520 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, | ||
521 | new_opp); | ||
522 | |||
507 | /* clean up old opp */ | 523 | /* clean up old opp */ |
508 | new_opp = opp; | 524 | new_opp = opp; |
509 | goto out; | 525 | goto out; |
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev, | |||
643 | *table = NULL; | 659 | *table = NULL; |
644 | } | 660 | } |
645 | #endif /* CONFIG_CPU_FREQ */ | 661 | #endif /* CONFIG_CPU_FREQ */ |
662 | |||
663 | /** | ||
664 | * opp_get_notifier() - find the notifier head of the device's OPPs | ||
665 | * @dev: device pointer used to lookup device OPPs. | ||
666 | */ | ||
667 | struct srcu_notifier_head *opp_get_notifier(struct device *dev) | ||
668 | { | ||
669 | struct device_opp *dev_opp = find_device_opp(dev); | ||
670 | |||
671 | if (IS_ERR(dev_opp)) | ||
672 | return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ | ||
673 | |||
674 | return &dev_opp->head; | ||
675 | } | ||
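The SRCU notifier head gives consumers a way to track OPP list changes; opp_add() and opp_set_availability() fire OPP_EVENT_ADD/ENABLE/DISABLE on it. A sketch of a hypothetical consumer, assuming the OPP_EVENT_* constants this series introduces in linux/opp.h:

    #include <linux/err.h>
    #include <linux/notifier.h>
    #include <linux/opp.h>

    static int my_opp_event(struct notifier_block *nb, unsigned long event,
                            void *data)
    {
        /* event: OPP_EVENT_ADD/ENABLE/DISABLE; data: the affected opp */
        return NOTIFY_OK;
    }

    static struct notifier_block my_opp_nb = {
        .notifier_call = my_opp_event,
    };

    static int my_watch_opps(struct device *dev)  /* hypothetical helper */
    {
        struct srcu_notifier_head *nh = opp_get_notifier(dev);

        if (IS_ERR(nh))
            return PTR_ERR(nh);

        return srcu_notifier_chain_register(nh, &my_opp_nb);
    }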
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f2a25f18fde..9bf62323aaf 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -1,3 +1,5 @@ | |||
1 | #include <linux/pm_qos.h> | ||
2 | |||
1 | #ifdef CONFIG_PM_RUNTIME | 3 | #ifdef CONFIG_PM_RUNTIME |
2 | 4 | ||
3 | extern void pm_runtime_init(struct device *dev); | 5 | extern void pm_runtime_init(struct device *dev); |
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *); | |||
35 | static inline void device_pm_init(struct device *dev) | 37 | static inline void device_pm_init(struct device *dev) |
36 | { | 38 | { |
37 | spin_lock_init(&dev->power.lock); | 39 | spin_lock_init(&dev->power.lock); |
40 | dev->power.power_state = PMSG_INVALID; | ||
38 | pm_runtime_init(dev); | 41 | pm_runtime_init(dev); |
39 | } | 42 | } |
40 | 43 | ||
44 | static inline void device_pm_add(struct device *dev) | ||
45 | { | ||
46 | dev_pm_qos_constraints_init(dev); | ||
47 | } | ||
48 | |||
41 | static inline void device_pm_remove(struct device *dev) | 49 | static inline void device_pm_remove(struct device *dev) |
42 | { | 50 | { |
51 | dev_pm_qos_constraints_destroy(dev); | ||
43 | pm_runtime_remove(dev); | 52 | pm_runtime_remove(dev); |
44 | } | 53 | } |
45 | 54 | ||
46 | static inline void device_pm_add(struct device *dev) {} | ||
47 | static inline void device_pm_move_before(struct device *deva, | 55 | static inline void device_pm_move_before(struct device *deva, |
48 | struct device *devb) {} | 56 | struct device *devb) {} |
49 | static inline void device_pm_move_after(struct device *deva, | 57 | static inline void device_pm_move_after(struct device *deva, |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c new file mode 100644 index 00000000000..91e06141738 --- /dev/null +++ b/drivers/base/power/qos.c | |||
@@ -0,0 +1,419 @@ | |||
1 | /* | ||
2 | * Devices PM QoS constraints management | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * | ||
11 | * This module exposes the interface to kernel space for specifying | ||
12 | * per-device PM QoS dependencies. It provides infrastructure for registration | ||
13 | * of: | ||
14 | * | ||
15 | * Dependents on a QoS value: register requests | ||
16 | * Watchers of QoS value: get notified when target QoS value changes | ||
17 | * | ||
18 | * This QoS design is best effort based. Dependents register their QoS needs. | ||
19 | * Watchers register to keep track of the current QoS needs of the system. | ||
20 | * Watchers can register different types of notification callbacks: | ||
21 | * . a per-device notification callback using the dev_pm_qos_*_notifier API. | ||
22 | * The notification chain data is stored in the per-device constraint | ||
23 | * data struct. | ||
24 | * . a system-wide notification callback using the dev_pm_qos_*_global_notifier | ||
25 | * API. The notification chain data is stored in a static variable. | ||
26 | * | ||
27 | * Note about the per-device constraint data struct allocation: | ||
28 | * . The per-device constraints data struct ptr is stored in the device's | ||
29 | * dev_pm_info. | ||
30 | * . To minimize the data usage by the per-device constraints, the data struct | ||
31 | * is only allocated at the first call to dev_pm_qos_add_request. | ||
32 | * . The data is later free'd when the device is removed from the system. | ||
33 | * . A global mutex protects the constraints users from the data being | ||
34 | * allocated and free'd. | ||
35 | */ | ||
36 | |||
37 | #include <linux/pm_qos.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/mutex.h> | ||
42 | |||
43 | |||
44 | static DEFINE_MUTEX(dev_pm_qos_mtx); | ||
45 | |||
46 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | ||
47 | |||
48 | /** | ||
49 | * dev_pm_qos_read_value - Get PM QoS constraint for a given device. | ||
50 | * @dev: Device to get the PM QoS constraint value for. | ||
51 | */ | ||
52 | s32 dev_pm_qos_read_value(struct device *dev) | ||
53 | { | ||
54 | struct pm_qos_constraints *c; | ||
55 | unsigned long flags; | ||
56 | s32 ret = 0; | ||
57 | |||
58 | spin_lock_irqsave(&dev->power.lock, flags); | ||
59 | |||
60 | c = dev->power.constraints; | ||
61 | if (c) | ||
62 | ret = pm_qos_read_value(c); | ||
63 | |||
64 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * apply_constraint | ||
71 | * @req: constraint request to apply | ||
72 | * @action: action to perform add/update/remove, of type enum pm_qos_req_action | ||
73 | * @value: defines the qos request | ||
74 | * | ||
75 | * Internal function to update the constraints list using the PM QoS core | ||
76 | * code and if needed call the per-device and the global notification | ||
77 | * callbacks | ||
78 | */ | ||
79 | static int apply_constraint(struct dev_pm_qos_request *req, | ||
80 | enum pm_qos_req_action action, int value) | ||
81 | { | ||
82 | int ret, curr_value; | ||
83 | |||
84 | ret = pm_qos_update_target(req->dev->power.constraints, | ||
85 | &req->node, action, value); | ||
86 | |||
87 | if (ret) { | ||
88 | /* Call the global callbacks if needed */ | ||
89 | curr_value = pm_qos_read_value(req->dev->power.constraints); | ||
90 | blocking_notifier_call_chain(&dev_pm_notifiers, | ||
91 | (unsigned long)curr_value, | ||
92 | req); | ||
93 | } | ||
94 | |||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * dev_pm_qos_constraints_allocate | ||
100 | * @dev: device to allocate data for | ||
101 | * | ||
102 | * Called at the first call to add_request, for constraint data allocation | ||
103 | * Must be called with the dev_pm_qos_mtx mutex held | ||
104 | */ | ||
105 | static int dev_pm_qos_constraints_allocate(struct device *dev) | ||
106 | { | ||
107 | struct pm_qos_constraints *c; | ||
108 | struct blocking_notifier_head *n; | ||
109 | |||
110 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
111 | if (!c) | ||
112 | return -ENOMEM; | ||
113 | |||
114 | n = kzalloc(sizeof(*n), GFP_KERNEL); | ||
115 | if (!n) { | ||
116 | kfree(c); | ||
117 | return -ENOMEM; | ||
118 | } | ||
119 | BLOCKING_INIT_NOTIFIER_HEAD(n); | ||
120 | |||
121 | plist_head_init(&c->list); | ||
122 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
123 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
124 | c->type = PM_QOS_MIN; | ||
125 | c->notifiers = n; | ||
126 | |||
127 | spin_lock_irq(&dev->power.lock); | ||
128 | dev->power.constraints = c; | ||
129 | spin_unlock_irq(&dev->power.lock); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | /** | ||
135 | * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer. | ||
136 | * @dev: target device | ||
137 | * | ||
138 | * Called from the device PM subsystem during device insertion under | ||
139 | * device_pm_lock(). | ||
140 | */ | ||
141 | void dev_pm_qos_constraints_init(struct device *dev) | ||
142 | { | ||
143 | mutex_lock(&dev_pm_qos_mtx); | ||
144 | dev->power.constraints = NULL; | ||
145 | dev->power.power_state = PMSG_ON; | ||
146 | mutex_unlock(&dev_pm_qos_mtx); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * dev_pm_qos_constraints_destroy - Free a device's PM QoS constraints data. | ||
151 | * @dev: target device | ||
152 | * | ||
153 | * Called from the device PM subsystem on device removal under device_pm_lock(). | ||
154 | */ | ||
155 | void dev_pm_qos_constraints_destroy(struct device *dev) | ||
156 | { | ||
157 | struct dev_pm_qos_request *req, *tmp; | ||
158 | struct pm_qos_constraints *c; | ||
159 | |||
160 | mutex_lock(&dev_pm_qos_mtx); | ||
161 | |||
162 | dev->power.power_state = PMSG_INVALID; | ||
163 | c = dev->power.constraints; | ||
164 | if (!c) | ||
165 | goto out; | ||
166 | |||
167 | /* Flush the constraints list for the device */ | ||
168 | plist_for_each_entry_safe(req, tmp, &c->list, node) { | ||
169 | /* | ||
170 | * Update constraints list and call the notification | ||
171 | * callbacks if needed | ||
172 | */ | ||
173 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | ||
174 | memset(req, 0, sizeof(*req)); | ||
175 | } | ||
176 | |||
177 | spin_lock_irq(&dev->power.lock); | ||
178 | dev->power.constraints = NULL; | ||
179 | spin_unlock_irq(&dev->power.lock); | ||
180 | |||
181 | kfree(c->notifiers); | ||
182 | kfree(c); | ||
183 | |||
184 | out: | ||
185 | mutex_unlock(&dev_pm_qos_mtx); | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * dev_pm_qos_add_request - inserts new qos request into the list | ||
190 | * @dev: target device for the constraint | ||
191 | * @req: pointer to a preallocated handle | ||
192 | * @value: defines the qos request | ||
193 | * | ||
194 | * This function inserts a new entry in the device constraints list of | ||
195 | * requested qos performance characteristics. It recomputes the aggregate | ||
196 | * QoS expectations of parameters and initializes the dev_pm_qos_request | ||
197 | * handle. The caller must save this handle for later use in updates and | ||
198 | * removal. | ||
199 | * | ||
200 | * Returns 1 if the aggregated constraint value has changed, | ||
201 | * 0 if the aggregated constraint value has not changed, | ||
202 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory | ||
203 | * to allocate for data structures, -ENODEV if the device has just been removed | ||
204 | * from the system. | ||
205 | */ | ||
206 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | ||
207 | s32 value) | ||
208 | { | ||
209 | int ret = 0; | ||
210 | |||
211 | if (!dev || !req) /* guard against callers passing in null */ | ||
212 | return -EINVAL; | ||
213 | |||
214 | if (dev_pm_qos_request_active(req)) { | ||
215 | WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " | ||
216 | "added request\n"); | ||
217 | return -EINVAL; | ||
218 | } | ||
219 | |||
220 | req->dev = dev; | ||
221 | |||
222 | mutex_lock(&dev_pm_qos_mtx); | ||
223 | |||
224 | if (!dev->power.constraints) { | ||
225 | if (dev->power.power_state.event == PM_EVENT_INVALID) { | ||
226 | /* The device has been removed from the system. */ | ||
227 | req->dev = NULL; | ||
228 | ret = -ENODEV; | ||
229 | goto out; | ||
230 | } else { | ||
231 | /* | ||
232 | * Allocate the constraints data on the first call to | ||
233 | * add_request, i.e. only if the data is not already | ||
234 | * allocated and if the device has not been removed. | ||
235 | */ | ||
236 | ret = dev_pm_qos_constraints_allocate(dev); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | if (!ret) | ||
241 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); | ||
242 | |||
243 | out: | ||
244 | mutex_unlock(&dev_pm_qos_mtx); | ||
245 | |||
246 | return ret; | ||
247 | } | ||
248 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | ||
249 | |||
250 | /** | ||
251 | * dev_pm_qos_update_request - modifies an existing qos request | ||
252 | * @req : handle to list element holding a dev_pm_qos request to use | ||
253 | * @new_value: defines the qos request | ||
254 | * | ||
255 | * Updates an existing dev PM qos request along with updating the | ||
256 | * target value. | ||
257 | * | ||
258 | * Attempts are made to make this code callable on hot code paths. | ||
259 | * | ||
260 | * Returns 1 if the aggregated constraint value has changed, | ||
261 | * 0 if the aggregated constraint value has not changed, | ||
262 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
263 | * removed from the system | ||
264 | */ | ||
265 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
266 | s32 new_value) | ||
267 | { | ||
268 | int ret = 0; | ||
269 | |||
270 | if (!req) /* guard against callers passing in null */ | ||
271 | return -EINVAL; | ||
272 | |||
273 | if (!dev_pm_qos_request_active(req)) { | ||
274 | WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " | ||
275 | "unknown object\n"); | ||
276 | return -EINVAL; | ||
277 | } | ||
278 | |||
279 | mutex_lock(&dev_pm_qos_mtx); | ||
280 | |||
281 | if (req->dev->power.constraints) { | ||
282 | if (new_value != req->node.prio) | ||
283 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, | ||
284 | new_value); | ||
285 | } else { | ||
286 | /* Return if the device has been removed */ | ||
287 | ret = -ENODEV; | ||
288 | } | ||
289 | |||
290 | mutex_unlock(&dev_pm_qos_mtx); | ||
291 | return ret; | ||
292 | } | ||
293 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | ||
294 | |||
295 | /** | ||
296 | * dev_pm_qos_remove_request - removes an existing qos request | ||
297 | * @req: handle to request list element | ||
298 | * | ||
299 | * Will remove pm qos request from the list of constraints and | ||
300 | * recompute the current target value. Call this on slow code paths. | ||
301 | * | ||
302 | * Returns 1 if the aggregated constraint value has changed, | ||
303 | * 0 if the aggregated constraint value has not changed, | ||
304 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
305 | * removed from the system | ||
306 | */ | ||
307 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | ||
308 | { | ||
309 | int ret = 0; | ||
310 | |||
311 | if (!req) /* guard against callers passing in null */ | ||
312 | return -EINVAL; | ||
313 | |||
314 | if (!dev_pm_qos_request_active(req)) { | ||
315 | WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " | ||
316 | "unknown object\n"); | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | |||
320 | mutex_lock(&dev_pm_qos_mtx); | ||
321 | |||
322 | if (req->dev->power.constraints) { | ||
323 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | ||
324 | PM_QOS_DEFAULT_VALUE); | ||
325 | memset(req, 0, sizeof(*req)); | ||
326 | } else { | ||
327 | /* Return if the device has been removed */ | ||
328 | ret = -ENODEV; | ||
329 | } | ||
330 | |||
331 | mutex_unlock(&dev_pm_qos_mtx); | ||
332 | return ret; | ||
333 | } | ||
334 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); | ||
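Taken together, the three request calls form the intended lifecycle: add allocates the per-device constraint data on first use, update re-aggregates only when the value changes, and remove drops the request and clears the handle. A hedged usage sketch; the names and values are illustrative, and since the constraint class is PM_QOS_MIN the aggregate is the smallest requested value:

    #include <linux/device.h>
    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request my_req;  /* hypothetical handle */

    static int my_constrain(struct device *dev)
    {
        int ret;

        /* First request for the device also allocates its constraints. */
        ret = dev_pm_qos_add_request(dev, &my_req, 100);
        if (ret < 0)
            return ret;

        /* Tighten the constraint; a no-op unless the value differs. */
        dev_pm_qos_update_request(&my_req, 20);

        /* Drop it again; the handle is memset to zero on removal. */
        dev_pm_qos_remove_request(&my_req);
        return 0;
    }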
335 | |||
336 | /** | ||
337 | * dev_pm_qos_add_notifier - sets notification entry for changes to target value | ||
338 | * of per-device PM QoS constraints | ||
339 | * | ||
340 | * @dev: target device for the constraint | ||
341 | * @notifier: notifier block managed by caller. | ||
342 | * | ||
343 | * Will register the notifier into a notification chain that gets called | ||
344 | * upon changes to the target value for the device. | ||
345 | */ | ||
346 | int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | ||
347 | { | ||
348 | int retval = 0; | ||
349 | |||
350 | mutex_lock(&dev_pm_qos_mtx); | ||
351 | |||
352 | /* Silently return if the constraints object is not present. */ | ||
353 | if (dev->power.constraints) | ||
354 | retval = blocking_notifier_chain_register( | ||
355 | dev->power.constraints->notifiers, | ||
356 | notifier); | ||
357 | |||
358 | mutex_unlock(&dev_pm_qos_mtx); | ||
359 | return retval; | ||
360 | } | ||
361 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); | ||
362 | |||
363 | /** | ||
364 | * dev_pm_qos_remove_notifier - deletes notification for changes to target value | ||
365 | * of per-device PM QoS constraints | ||
366 | * | ||
367 | * @dev: target device for the constraint | ||
368 | * @notifier: notifier block to be removed. | ||
369 | * | ||
370 | * Will remove the notifier from the notification chain that gets called | ||
371 | * upon changes to the target value. | ||
372 | */ | ||
373 | int dev_pm_qos_remove_notifier(struct device *dev, | ||
374 | struct notifier_block *notifier) | ||
375 | { | ||
376 | int retval = 0; | ||
377 | |||
378 | mutex_lock(&dev_pm_qos_mtx); | ||
379 | |||
380 | /* Silently return if the constraints object is not present. */ | ||
381 | if (dev->power.constraints) | ||
382 | retval = blocking_notifier_chain_unregister( | ||
383 | dev->power.constraints->notifiers, | ||
384 | notifier); | ||
385 | |||
386 | mutex_unlock(&dev_pm_qos_mtx); | ||
387 | return retval; | ||
388 | } | ||
389 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); | ||
390 | |||
391 | /** | ||
392 | * dev_pm_qos_add_global_notifier - sets notification entry for changes to | ||
393 | * target value of the PM QoS constraints for any device | ||
394 | * | ||
395 | * @notifier: notifier block managed by caller. | ||
396 | * | ||
397 | * Will register the notifier into a notification chain that gets called | ||
398 | * upon changes to the target value for any device. | ||
399 | */ | ||
400 | int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) | ||
401 | { | ||
402 | return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); | ||
403 | } | ||
404 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); | ||
405 | |||
406 | /** | ||
407 | * dev_pm_qos_remove_global_notifier - deletes notification for changes to | ||
408 | * target value of PM QoS constraints for any device | ||
409 | * | ||
410 | * @notifier: notifier block to be removed. | ||
411 | * | ||
412 | * Will remove the notifier from the notification chain that gets called | ||
413 | * upon changes to the target value for any device. | ||
414 | */ | ||
415 | int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) | ||
416 | { | ||
417 | return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); | ||
418 | } | ||
419 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); | ||
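The per-device notifier API above pairs with a small amount of driver code. A minimal usage sketch follows (all my_* names are hypothetical; the notifier payload is assumed to carry the new aggregate target value, as passed by the constraint-update path):

	#include <linux/pm_qos.h>
	#include <linux/notifier.h>

	static int my_qos_notify(struct notifier_block *nb, unsigned long value,
				 void *data)
	{
		/* "value" is assumed to be the new PM QoS target for the device. */
		pr_info("PM QoS target is now %lu\n", value);
		return NOTIFY_OK;
	}

	static struct notifier_block my_qos_nb = {
		.notifier_call = my_qos_notify,
	};

	/* Register in probe, unregister in remove: */
	dev_pm_qos_add_notifier(dev, &my_qos_nb);
	/* later */
	dev_pm_qos_remove_notifier(dev, &my_qos_nb);

Note that, per the code above, dev_pm_qos_add_notifier() silently returns 0 when the device has no constraints object yet, so registration order matters.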
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index acb3f83b807..6bb3aafa85e 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/pm_runtime.h> | 11 | #include <linux/pm_runtime.h> |
12 | #include <trace/events/rpm.h> | ||
12 | #include "power.h" | 13 | #include "power.h" |
13 | 14 | ||
14 | static int rpm_resume(struct device *dev, int rpmflags); | 15 | static int rpm_resume(struct device *dev, int rpmflags); |
@@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
155 | } | 156 | } |
156 | 157 | ||
157 | /** | 158 | /** |
159 | * __rpm_callback - Run a given runtime PM callback for a given device. | ||
160 | * @cb: Runtime PM callback to run. | ||
161 | * @dev: Device to run the callback for. | ||
162 | */ | ||
163 | static int __rpm_callback(int (*cb)(struct device *), struct device *dev) | ||
164 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
165 | { | ||
166 | int retval; | ||
167 | |||
168 | if (dev->power.irq_safe) | ||
169 | spin_unlock(&dev->power.lock); | ||
170 | else | ||
171 | spin_unlock_irq(&dev->power.lock); | ||
172 | |||
173 | retval = cb(dev); | ||
174 | |||
175 | if (dev->power.irq_safe) | ||
176 | spin_lock(&dev->power.lock); | ||
177 | else | ||
178 | spin_lock_irq(&dev->power.lock); | ||
179 | |||
180 | return retval; | ||
181 | } | ||
182 | |||
183 | /** | ||
158 | * rpm_idle - Notify device bus type if the device can be suspended. | 184 | * rpm_idle - Notify device bus type if the device can be suspended. |
159 | * @dev: Device to notify the bus type about. | 185 | * @dev: Device to notify the bus type about. |
160 | * @rpmflags: Flag bits. | 186 | * @rpmflags: Flag bits. |
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
171 | int (*callback)(struct device *); | 197 | int (*callback)(struct device *); |
172 | int retval; | 198 | int retval; |
173 | 199 | ||
200 | trace_rpm_idle(dev, rpmflags); | ||
174 | retval = rpm_check_suspend_allowed(dev); | 201 | retval = rpm_check_suspend_allowed(dev); |
175 | if (retval < 0) | 202 | if (retval < 0) |
176 | ; /* Conditions are wrong. */ | 203 | ; /* Conditions are wrong. */ |
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
225 | else | 252 | else |
226 | callback = NULL; | 253 | callback = NULL; |
227 | 254 | ||
228 | if (callback) { | 255 | if (callback) |
229 | if (dev->power.irq_safe) | 256 | __rpm_callback(callback, dev); |
230 | spin_unlock(&dev->power.lock); | ||
231 | else | ||
232 | spin_unlock_irq(&dev->power.lock); | ||
233 | |||
234 | callback(dev); | ||
235 | |||
236 | if (dev->power.irq_safe) | ||
237 | spin_lock(&dev->power.lock); | ||
238 | else | ||
239 | spin_lock_irq(&dev->power.lock); | ||
240 | } | ||
241 | 257 | ||
242 | dev->power.idle_notification = false; | 258 | dev->power.idle_notification = false; |
243 | wake_up_all(&dev->power.wait_queue); | 259 | wake_up_all(&dev->power.wait_queue); |
244 | 260 | ||
245 | out: | 261 | out: |
262 | trace_rpm_return_int(dev, _THIS_IP_, retval); | ||
246 | return retval; | 263 | return retval; |
247 | } | 264 | } |
248 | 265 | ||
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
252 | * @dev: Device to run the callback for. | 269 | * @dev: Device to run the callback for. |
253 | */ | 270 | */ |
254 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) | 271 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) |
255 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
256 | { | 272 | { |
257 | int retval; | 273 | int retval; |
258 | 274 | ||
259 | if (!cb) | 275 | if (!cb) |
260 | return -ENOSYS; | 276 | return -ENOSYS; |
261 | 277 | ||
262 | if (dev->power.irq_safe) { | 278 | retval = __rpm_callback(cb, dev); |
263 | retval = cb(dev); | ||
264 | } else { | ||
265 | spin_unlock_irq(&dev->power.lock); | ||
266 | |||
267 | retval = cb(dev); | ||
268 | 279 | ||
269 | spin_lock_irq(&dev->power.lock); | ||
270 | } | ||
271 | dev->power.runtime_error = retval; | 280 | dev->power.runtime_error = retval; |
272 | return retval != -EACCES ? retval : -EIO; | 281 | return retval != -EACCES ? retval : -EIO; |
273 | } | 282 | } |
@@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) | |||
277 | * @dev: Device to suspend. | 286 | * @dev: Device to suspend. |
278 | * @rpmflags: Flag bits. | 287 | * @rpmflags: Flag bits. |
279 | * | 288 | * |
280 | * Check if the device's runtime PM status allows it to be suspended. If | 289 | * Check if the device's runtime PM status allows it to be suspended. |
281 | * another suspend has been started earlier, either return immediately or wait | 290 | * Cancel a pending idle notification, autosuspend or suspend. If |
282 | * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a | 291 | * another suspend has been started earlier, either return immediately |
283 | * pending idle notification. If the RPM_ASYNC flag is set then queue a | 292 | * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC |
284 | * suspend request; otherwise run the ->runtime_suspend() callback directly. | 293 | * flags. If the RPM_ASYNC flag is set then queue a suspend request; |
285 | * If a deferred resume was requested while the callback was running then carry | 294 | * otherwise run the ->runtime_suspend() callback directly. When |
286 | * it out; otherwise send an idle notification for the device (if the suspend | 295 | * ->runtime_suspend succeeded, if a deferred resume was requested while |
287 | * failed) or for its parent (if the suspend succeeded). | 296 | * the callback was running then carry it out, otherwise send an idle |
297 | * notification for its parent (if the suspend succeeded and both | ||
298 | * ignore_children of parent->power and irq_safe of dev->power are not set). | ||
288 | * | 299 | * |
289 | * This function must be called under dev->power.lock with interrupts disabled. | 300 | * This function must be called under dev->power.lock with interrupts disabled. |
290 | */ | 301 | */ |
@@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
295 | struct device *parent = NULL; | 306 | struct device *parent = NULL; |
296 | int retval; | 307 | int retval; |
297 | 308 | ||
298 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); | 309 | trace_rpm_suspend(dev, rpmflags); |
299 | 310 | ||
300 | repeat: | 311 | repeat: |
301 | retval = rpm_check_suspend_allowed(dev); | 312 | retval = rpm_check_suspend_allowed(dev); |
@@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
347 | goto out; | 358 | goto out; |
348 | } | 359 | } |
349 | 360 | ||
361 | if (dev->power.irq_safe) { | ||
362 | spin_unlock(&dev->power.lock); | ||
363 | |||
364 | cpu_relax(); | ||
365 | |||
366 | spin_lock(&dev->power.lock); | ||
367 | goto repeat; | ||
368 | } | ||
369 | |||
350 | /* Wait for the other suspend running in parallel with us. */ | 370 | /* Wait for the other suspend running in parallel with us. */ |
351 | for (;;) { | 371 | for (;;) { |
352 | prepare_to_wait(&dev->power.wait_queue, &wait, | 372 | prepare_to_wait(&dev->power.wait_queue, &wait, |
@@ -400,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
400 | dev->power.runtime_error = 0; | 420 | dev->power.runtime_error = 0; |
401 | else | 421 | else |
402 | pm_runtime_cancel_pending(dev); | 422 | pm_runtime_cancel_pending(dev); |
403 | } else { | 423 | wake_up_all(&dev->power.wait_queue); |
424 | goto out; | ||
425 | } | ||
404 | no_callback: | 426 | no_callback: |
405 | __update_runtime_status(dev, RPM_SUSPENDED); | 427 | __update_runtime_status(dev, RPM_SUSPENDED); |
406 | pm_runtime_deactivate_timer(dev); | 428 | pm_runtime_deactivate_timer(dev); |
407 | 429 | ||
408 | if (dev->parent) { | 430 | if (dev->parent) { |
409 | parent = dev->parent; | 431 | parent = dev->parent; |
410 | atomic_add_unless(&parent->power.child_count, -1, 0); | 432 | atomic_add_unless(&parent->power.child_count, -1, 0); |
411 | } | ||
412 | } | 433 | } |
413 | wake_up_all(&dev->power.wait_queue); | 434 | wake_up_all(&dev->power.wait_queue); |
414 | 435 | ||
@@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
430 | } | 451 | } |
431 | 452 | ||
432 | out: | 453 | out: |
433 | dev_dbg(dev, "%s returns %d\n", __func__, retval); | 454 | trace_rpm_return_int(dev, _THIS_IP_, retval); |
434 | 455 | ||
435 | return retval; | 456 | return retval; |
436 | } | 457 | } |
@@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
459 | struct device *parent = NULL; | 480 | struct device *parent = NULL; |
460 | int retval = 0; | 481 | int retval = 0; |
461 | 482 | ||
462 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); | 483 | trace_rpm_resume(dev, rpmflags); |
463 | 484 | ||
464 | repeat: | 485 | repeat: |
465 | if (dev->power.runtime_error) | 486 | if (dev->power.runtime_error) |
@@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
496 | goto out; | 517 | goto out; |
497 | } | 518 | } |
498 | 519 | ||
520 | if (dev->power.irq_safe) { | ||
521 | spin_unlock(&dev->power.lock); | ||
522 | |||
523 | cpu_relax(); | ||
524 | |||
525 | spin_lock(&dev->power.lock); | ||
526 | goto repeat; | ||
527 | } | ||
528 | |||
499 | /* Wait for the operation carried out in parallel with us. */ | 529 | /* Wait for the operation carried out in parallel with us. */ |
500 | for (;;) { | 530 | for (;;) { |
501 | prepare_to_wait(&dev->power.wait_queue, &wait, | 531 | prepare_to_wait(&dev->power.wait_queue, &wait, |
@@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
615 | spin_lock_irq(&dev->power.lock); | 645 | spin_lock_irq(&dev->power.lock); |
616 | } | 646 | } |
617 | 647 | ||
618 | dev_dbg(dev, "%s returns %d\n", __func__, retval); | 648 | trace_rpm_return_int(dev, _THIS_IP_, retval); |
619 | 649 | ||
620 | return retval; | 650 | return retval; |
621 | } | 651 | } |
@@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend); | |||
732 | * return immediately if it is larger than zero. Then carry out an idle | 762 | * return immediately if it is larger than zero. Then carry out an idle |
733 | * notification, either synchronous or asynchronous. | 763 | * notification, either synchronous or asynchronous. |
734 | * | 764 | * |
735 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 765 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
766 | * or if pm_runtime_irq_safe() has been called. | ||
736 | */ | 767 | */ |
737 | int __pm_runtime_idle(struct device *dev, int rpmflags) | 768 | int __pm_runtime_idle(struct device *dev, int rpmflags) |
738 | { | 769 | { |
739 | unsigned long flags; | 770 | unsigned long flags; |
740 | int retval; | 771 | int retval; |
741 | 772 | ||
773 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
774 | |||
742 | if (rpmflags & RPM_GET_PUT) { | 775 | if (rpmflags & RPM_GET_PUT) { |
743 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 776 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
744 | return 0; | 777 | return 0; |
@@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle); | |||
761 | * return immediately if it is larger than zero. Then carry out a suspend, | 794 | * return immediately if it is larger than zero. Then carry out a suspend, |
762 | * either synchronous or asynchronous. | 795 | * either synchronous or asynchronous. |
763 | * | 796 | * |
764 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 797 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
798 | * or if pm_runtime_irq_safe() has been called. | ||
765 | */ | 799 | */ |
766 | int __pm_runtime_suspend(struct device *dev, int rpmflags) | 800 | int __pm_runtime_suspend(struct device *dev, int rpmflags) |
767 | { | 801 | { |
768 | unsigned long flags; | 802 | unsigned long flags; |
769 | int retval; | 803 | int retval; |
770 | 804 | ||
805 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
806 | |||
771 | if (rpmflags & RPM_GET_PUT) { | 807 | if (rpmflags & RPM_GET_PUT) { |
772 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 808 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
773 | return 0; | 809 | return 0; |
@@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend); | |||
789 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then | 825 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then |
790 | * carry out a resume, either synchronous or asynchronous. | 826 | * carry out a resume, either synchronous or asynchronous. |
791 | * | 827 | * |
792 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 828 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
829 | * or if pm_runtime_irq_safe() has been called. | ||
793 | */ | 830 | */ |
794 | int __pm_runtime_resume(struct device *dev, int rpmflags) | 831 | int __pm_runtime_resume(struct device *dev, int rpmflags) |
795 | { | 832 | { |
796 | unsigned long flags; | 833 | unsigned long flags; |
797 | int retval; | 834 | int retval; |
798 | 835 | ||
836 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
837 | |||
799 | if (rpmflags & RPM_GET_PUT) | 838 | if (rpmflags & RPM_GET_PUT) |
800 | atomic_inc(&dev->power.usage_count); | 839 | atomic_inc(&dev->power.usage_count); |
801 | 840 | ||
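The irq_safe handling added above changes what callers may do: once a driver has called pm_runtime_irq_safe(), the synchronous helpers spin (cpu_relax()) instead of sleeping when another runtime PM operation is in flight, and the new might_sleep_if() checks no longer fire. A hedged usage sketch (hypothetical driver; pm_runtime_irq_safe(), pm_runtime_get_sync() and pm_runtime_put() are the real helpers):

	#include <linux/pm_runtime.h>

	/* In probe: declare runtime PM callbacks safe for atomic context. */
	pm_runtime_irq_safe(dev);

	/* Later, possibly from an interrupt handler or under a spinlock: */
	pm_runtime_get_sync(dev);	/* resumes without sleeping */
	/* ... access the hardware ... */
	pm_runtime_put(dev);		/* asynchronous put, atomic-safe */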
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 84f7c7d5a09..14ee07e9cc4 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | |||
276 | * | 276 | * |
277 | * By default, most devices should leave wakeup disabled. The exceptions are | 277 | * By default, most devices should leave wakeup disabled. The exceptions are |
278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, | 278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, |
279 | * possibly network interfaces, etc. | 279 | * possibly network interfaces, etc. Also, devices that don't generate their |
280 | * own wakeup requests but merely forward requests from one bus to another | ||
281 | * (like PCI bridges) should have wakeup enabled by default. | ||
280 | */ | 282 | */ |
281 | int device_init_wakeup(struct device *dev, bool enable) | 283 | int device_init_wakeup(struct device *dev, bool enable) |
282 | { | 284 | { |
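As a hedged illustration of the updated default policy (device and variable names hypothetical), a bridge-style driver that merely forwards wake requests from one bus to another would now opt in during probe:

	/* PCI bridges and similar forwarders should default to wakeup enabled. */
	device_init_wakeup(&pdev->dev, true);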
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 675246a6f7e..f9b726091ad 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -1118,7 +1118,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) | |||
1118 | return 0; | 1118 | return 0; |
1119 | 1119 | ||
1120 | spin_lock_irq(&data->txlock); | 1120 | spin_lock_irq(&data->txlock); |
1121 | if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { | 1121 | if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { |
1122 | set_bit(BTUSB_SUSPENDING, &data->flags); | 1122 | set_bit(BTUSB_SUSPENDING, &data->flags); |
1123 | spin_unlock_irq(&data->txlock); | 1123 | spin_unlock_irq(&data->txlock); |
1124 | } else { | 1124 | } else { |
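This and the later PMSG_IS_AUTO() conversions are behavior-preserving: as best as can be inferred from the rest of this series, the helper introduced in include/linux/pm.h reduces to the same bitwise test the drivers used to spell out:

	/* Sketch of the helper this series introduces (include/linux/pm.h). */
	#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)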
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index d4c54237288..0df01411009 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/notifier.h> | 14 | #include <linux/notifier.h> |
15 | #include <linux/pm_qos_params.h> | 15 | #include <linux/pm_qos.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/cpuidle.h> | 17 | #include <linux/cpuidle.h> |
18 | #include <linux/ktime.h> | 18 | #include <linux/ktime.h> |
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 12c98900dcf..f62fde21e96 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/cpuidle.h> | 16 | #include <linux/cpuidle.h> |
17 | #include <linux/pm_qos_params.h> | 17 | #include <linux/pm_qos.h> |
18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
20 | 20 | ||
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c47f3d09c1e..3600f1955e4 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/cpuidle.h> | 14 | #include <linux/cpuidle.h> |
15 | #include <linux/pm_qos_params.h> | 15 | #include <linux/pm_qos.h> |
16 | #include <linux/time.h> | 16 | #include <linux/time.h> |
17 | #include <linux/ktime.h> | 17 | #include <linux/ktime.h> |
18 | #include <linux/hrtimer.h> | 18 | #include <linux/hrtimer.h> |
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig new file mode 100644 index 00000000000..643b055ed3c --- /dev/null +++ b/drivers/devfreq/Kconfig | |||
@@ -0,0 +1,75 @@ | |||
1 | config ARCH_HAS_DEVFREQ | ||
2 | bool | ||
3 | depends on ARCH_HAS_OPP | ||
4 | help | ||
5 | Denotes that the architecture supports DEVFREQ. If the architecture | ||
6 | supports multiple OPP entries per device and the frequency of the | ||
7 | devices with OPPs may be altered dynamically, the architecture | ||
8 | supports DEVFREQ. | ||
9 | |||
10 | menuconfig PM_DEVFREQ | ||
11 | bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" | ||
12 | depends on PM_OPP && ARCH_HAS_DEVFREQ | ||
13 | help | ||
14 | With OPP support, a device may have a list of frequencies and | ||
15 | voltages available. DEVFREQ, a generic DVFS framework, can be | ||
16 | registered for a device with OPP support in order to let the | ||
17 | governor provided to DEVFREQ choose an operating frequency | ||
18 | based on the OPP list and the policy given with DEVFREQ. | ||
19 | |||
20 | Each device may have its own governor and policy. DEVFREQ can | ||
21 | reevaluate the device state periodically and/or based on the | ||
22 | OPP list changes (each frequency/voltage pair in OPP may be | ||
23 | disabled or enabled). | ||
24 | |||
25 | Like some CPUs with CPUFREQ, a device may have multiple clocks. | ||
26 | However, because the clock frequencies of a single device are | ||
27 | determined by that device's state, an instance of DEVFREQ | ||
28 | is attached to a single device and returns a "representative" | ||
29 | clock frequency from the device's OPP table, which is also | ||
30 | attached to the device 1-to-1. The device registering DEVFREQ | ||
31 | takes the responsibility to "interpret" the frequency listed in | ||
32 | the OPP table and to set every one of its clocks accordingly, | ||
33 | via the "target" callback given to DEVFREQ. | ||
34 | |||
35 | if PM_DEVFREQ | ||
36 | |||
37 | comment "DEVFREQ Governors" | ||
38 | |||
39 | config DEVFREQ_GOV_SIMPLE_ONDEMAND | ||
40 | bool "Simple Ondemand" | ||
41 | help | ||
42 | Chooses frequency based on the recent load on the device. Works | ||
43 | similarly to the ONDEMAND governor of CPUFREQ. A device with | ||
44 | Simple-Ondemand should be able to provide busy/total counter | ||
45 | values that indicate the usage rate. A device may provide tuned | ||
46 | values to the governor with the data field of devfreq_add_device(). | ||
47 | |||
48 | config DEVFREQ_GOV_PERFORMANCE | ||
49 | bool "Performance" | ||
50 | help | ||
51 | Sets the frequency to the maximum available frequency. | ||
52 | This governor always returns UINT_MAX as frequency so that | ||
53 | the DEVFREQ framework returns the highest frequency available | ||
54 | at any time. | ||
55 | |||
56 | config DEVFREQ_GOV_POWERSAVE | ||
57 | bool "Powersave" | ||
58 | help | ||
59 | Sets the frequency to the minimum available frequency. | ||
60 | This governor always returns 0 as frequency so that | ||
61 | the DEVFREQ framework returns the lowest frequency available | ||
62 | at any time. | ||
63 | |||
64 | config DEVFREQ_GOV_USERSPACE | ||
65 | bool "Userspace" | ||
66 | help | ||
67 | Sets the frequency to the value specified by the user. | ||
68 | This governor returns the user-configured frequency if one | ||
69 | has been written to /sys/class/devfreq/.../userspace/set_freq. | ||
70 | Otherwise, the governor does not change the frequency | ||
71 | given at initialization. | ||
72 | |||
73 | comment "DEVFREQ Drivers" | ||
74 | |||
75 | endif # PM_DEVFREQ | ||
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile new file mode 100644 index 00000000000..4564a89e970 --- /dev/null +++ b/drivers/devfreq/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-$(CONFIG_PM_DEVFREQ) += devfreq.o | ||
2 | obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o | ||
3 | obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o | ||
4 | obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o | ||
5 | obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o | ||
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c new file mode 100644 index 00000000000..5d15b812377 --- /dev/null +++ b/drivers/devfreq/devfreq.c | |||
@@ -0,0 +1,601 @@ | |||
1 | /* | ||
2 | * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework | ||
3 | * for Non-CPU Devices. | ||
4 | * | ||
5 | * Copyright (C) 2011 Samsung Electronics | ||
6 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/opp.h> | ||
20 | #include <linux/devfreq.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/printk.h> | ||
25 | #include <linux/hrtimer.h> | ||
26 | #include "governor.h" | ||
27 | |||
28 | struct class *devfreq_class; | ||
29 | |||
30 | /* | ||
31 | * devfreq_work periodically monitors every registered device. | ||
32 | * The minimum polling interval is one jiffy. The polling interval is | ||
33 | * determined by the minimum polling period among all polling devfreq | ||
34 | * devices. The resolution of polling interval is one jiffy. | ||
35 | */ | ||
36 | static bool polling; | ||
37 | static struct workqueue_struct *devfreq_wq; | ||
38 | static struct delayed_work devfreq_work; | ||
39 | |||
40 | /* Prevent removal of this device while devfreq_monitor uses it */ | ||
41 | static struct devfreq *wait_remove_device; | ||
42 | |||
43 | /* The list of all device-devfreq */ | ||
44 | static LIST_HEAD(devfreq_list); | ||
45 | static DEFINE_MUTEX(devfreq_list_lock); | ||
46 | |||
47 | /** | ||
48 | * find_device_devfreq() - find devfreq struct using device pointer | ||
49 | * @dev: device pointer used to lookup device devfreq. | ||
50 | * | ||
51 | * Search the list of device devfreqs and return the matched device's | ||
52 | * devfreq info. devfreq_list_lock should be held by the caller. | ||
53 | */ | ||
54 | static struct devfreq *find_device_devfreq(struct device *dev) | ||
55 | { | ||
56 | struct devfreq *tmp_devfreq; | ||
57 | |||
58 | if (unlikely(IS_ERR_OR_NULL(dev))) { | ||
59 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | ||
60 | return ERR_PTR(-EINVAL); | ||
61 | } | ||
62 | WARN(!mutex_is_locked(&devfreq_list_lock), | ||
63 | "devfreq_list_lock must be locked."); | ||
64 | |||
65 | list_for_each_entry(tmp_devfreq, &devfreq_list, node) { | ||
66 | if (tmp_devfreq->dev.parent == dev) | ||
67 | return tmp_devfreq; | ||
68 | } | ||
69 | |||
70 | return ERR_PTR(-ENODEV); | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * update_devfreq() - Reevaluate the device and configure frequency. | ||
75 | * @devfreq: the devfreq instance. | ||
76 | * | ||
77 | * Note: Lock devfreq->lock before calling update_devfreq | ||
78 | * This function is exported for governors. | ||
79 | */ | ||
80 | int update_devfreq(struct devfreq *devfreq) | ||
81 | { | ||
82 | unsigned long freq; | ||
83 | int err = 0; | ||
84 | |||
85 | if (!mutex_is_locked(&devfreq->lock)) { | ||
86 | WARN(true, "devfreq->lock must be locked by the caller.\n"); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | /* Reevaluate the proper frequency */ | ||
91 | err = devfreq->governor->get_target_freq(devfreq, &freq); | ||
92 | if (err) | ||
93 | return err; | ||
94 | |||
95 | err = devfreq->profile->target(devfreq->dev.parent, &freq); | ||
96 | if (err) | ||
97 | return err; | ||
98 | |||
99 | devfreq->previous_freq = freq; | ||
100 | return err; | ||
101 | } | ||
102 | |||
103 | /** | ||
104 | * devfreq_notifier_call() - Notify that the device frequency requirements | ||
105 | * have been changed outside of the devfreq framework. | ||
106 | * @nb: the notifier_block (supposed to be devfreq->nb) | ||
107 | * @type: not used | ||
108 | * @devp: not used | ||
109 | * | ||
110 | * Called by a notifier that uses devfreq->nb. | ||
111 | */ | ||
112 | static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, | ||
113 | void *devp) | ||
114 | { | ||
115 | struct devfreq *devfreq = container_of(nb, struct devfreq, nb); | ||
116 | int ret; | ||
117 | |||
118 | mutex_lock(&devfreq->lock); | ||
119 | ret = update_devfreq(devfreq); | ||
120 | mutex_unlock(&devfreq->lock); | ||
121 | |||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * _remove_devfreq() - Remove devfreq from the device. | ||
127 | * @devfreq: the devfreq struct | ||
128 | * @skip: skip calling device_unregister(). | ||
129 | * | ||
130 | * Note that the caller should lock devfreq->lock before calling | ||
131 | * this. _remove_devfreq() will unlock it and free devfreq | ||
132 | * internally. devfreq_list_lock should be locked by the caller | ||
133 | * as well (not released at return) | ||
134 | * | ||
135 | * Lock usage: | ||
136 | * devfreq->lock: locked before call. | ||
137 | * unlocked at return (and freed) | ||
138 | * devfreq_list_lock: locked before call. | ||
139 | * kept locked at return. | ||
140 | * if devfreq is centrally polled. | ||
141 | * | ||
142 | * Freed memory: | ||
143 | * devfreq | ||
144 | */ | ||
145 | static void _remove_devfreq(struct devfreq *devfreq, bool skip) | ||
146 | { | ||
147 | if (!mutex_is_locked(&devfreq->lock)) { | ||
148 | WARN(true, "devfreq->lock must be locked by the caller.\n"); | ||
149 | return; | ||
150 | } | ||
151 | if (!devfreq->governor->no_central_polling && | ||
152 | !mutex_is_locked(&devfreq_list_lock)) { | ||
153 | WARN(true, "devfreq_list_lock must be locked by the caller.\n"); | ||
154 | return; | ||
155 | } | ||
156 | |||
157 | if (devfreq->being_removed) | ||
158 | return; | ||
159 | |||
160 | devfreq->being_removed = true; | ||
161 | |||
162 | if (devfreq->profile->exit) | ||
163 | devfreq->profile->exit(devfreq->dev.parent); | ||
164 | |||
165 | if (devfreq->governor->exit) | ||
166 | devfreq->governor->exit(devfreq); | ||
167 | |||
168 | if (!skip && get_device(&devfreq->dev)) { | ||
169 | device_unregister(&devfreq->dev); | ||
170 | put_device(&devfreq->dev); | ||
171 | } | ||
172 | |||
173 | if (!devfreq->governor->no_central_polling) | ||
174 | list_del(&devfreq->node); | ||
175 | |||
176 | mutex_unlock(&devfreq->lock); | ||
177 | mutex_destroy(&devfreq->lock); | ||
178 | |||
179 | kfree(devfreq); | ||
180 | } | ||
181 | |||
182 | /** | ||
183 | * devfreq_dev_release() - Callback for struct device to release the device. | ||
184 | * @dev: the devfreq device | ||
185 | * | ||
186 | * This calls _remove_devfreq() if _remove_devfreq() is not called. | ||
187 | * Note that devfreq_dev_release() could be called by _remove_devfreq() as | ||
188 | * well as by others unregistering the device. | ||
189 | */ | ||
190 | static void devfreq_dev_release(struct device *dev) | ||
191 | { | ||
192 | struct devfreq *devfreq = to_devfreq(dev); | ||
193 | bool central_polling = !devfreq->governor->no_central_polling; | ||
194 | |||
195 | /* | ||
196 | * If devfreq_dev_release() was called by device_unregister() of | ||
197 | * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and | ||
198 | * being_removed is already set. This also partially checks the case | ||
199 | * where devfreq_dev_release() is called from a thread other than | ||
200 | * the one that called _remove_devfreq(); however, this case is | ||
201 | * handled completely by the second being_removed check below. | ||
202 | * | ||
203 | * Because being_removed is never | ||
204 | * unset, we do not need to worry about race conditions on | ||
205 | * being_removed. | ||
206 | */ | ||
207 | if (devfreq->being_removed) | ||
208 | return; | ||
209 | |||
210 | if (central_polling) | ||
211 | mutex_lock(&devfreq_list_lock); | ||
212 | |||
213 | mutex_lock(&devfreq->lock); | ||
214 | |||
215 | /* | ||
216 | * Check being_removed flag again for the case where | ||
217 | * devfreq_dev_release() was called in a thread other than the one | ||
218 | * that possibly called _remove_devfreq(). | ||
219 | */ | ||
220 | if (devfreq->being_removed) { | ||
221 | mutex_unlock(&devfreq->lock); | ||
222 | goto out; | ||
223 | } | ||
224 | |||
225 | /* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */ | ||
226 | _remove_devfreq(devfreq, true); | ||
227 | |||
228 | out: | ||
229 | if (central_polling) | ||
230 | mutex_unlock(&devfreq_list_lock); | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * devfreq_monitor() - Periodically poll devfreq objects. | ||
235 | * @work: the work struct used to run devfreq_monitor periodically. | ||
236 | * | ||
237 | */ | ||
238 | static void devfreq_monitor(struct work_struct *work) | ||
239 | { | ||
240 | static unsigned long last_polled_at; | ||
241 | struct devfreq *devfreq, *tmp; | ||
242 | int error; | ||
243 | unsigned long jiffies_passed; | ||
244 | unsigned long next_jiffies = ULONG_MAX, now = jiffies; | ||
245 | struct device *dev; | ||
246 | |||
247 | /* Initially last_polled_at == 0, so every device is polled at bootup */ | ||
248 | jiffies_passed = now - last_polled_at; | ||
249 | last_polled_at = now; | ||
250 | if (jiffies_passed == 0) | ||
251 | jiffies_passed = 1; | ||
252 | |||
253 | mutex_lock(&devfreq_list_lock); | ||
254 | list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) { | ||
255 | mutex_lock(&devfreq->lock); | ||
256 | dev = devfreq->dev.parent; | ||
257 | |||
258 | /* Prevent tmp from being removed while we use it */ | ||
259 | wait_remove_device = tmp; | ||
260 | |||
261 | if (devfreq->governor->no_central_polling || | ||
262 | devfreq->next_polling == 0) { | ||
263 | mutex_unlock(&devfreq->lock); | ||
264 | continue; | ||
265 | } | ||
266 | mutex_unlock(&devfreq_list_lock); | ||
267 | |||
268 | /* | ||
269 | * Reduce next_polling further if devfreq_wq took an extra | ||
270 | * delay (i.e., the CPU has been idle). | ||
271 | */ | ||
272 | if (devfreq->next_polling <= jiffies_passed) { | ||
273 | error = update_devfreq(devfreq); | ||
274 | |||
275 | /* Remove a devfreq with an error. */ | ||
276 | if (error && error != -EAGAIN) { | ||
277 | |||
278 | dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n", | ||
279 | error, devfreq->governor->name); | ||
280 | |||
281 | /* | ||
282 | * Unlock devfreq before locking the list | ||
283 | * in order to avoid deadlock with | ||
284 | * find_device_devfreq or others | ||
285 | */ | ||
286 | mutex_unlock(&devfreq->lock); | ||
287 | mutex_lock(&devfreq_list_lock); | ||
288 | /* Check if devfreq is already removed */ | ||
289 | if (IS_ERR(find_device_devfreq(dev))) | ||
290 | continue; | ||
291 | mutex_lock(&devfreq->lock); | ||
292 | /* This unlocks devfreq->lock and free it */ | ||
293 | _remove_devfreq(devfreq, false); | ||
294 | continue; | ||
295 | } | ||
296 | devfreq->next_polling = devfreq->polling_jiffies; | ||
297 | } else { | ||
298 | devfreq->next_polling -= jiffies_passed; | ||
299 | } | ||
300 | |||
301 | if (devfreq->next_polling) | ||
302 | next_jiffies = (next_jiffies > devfreq->next_polling) ? | ||
303 | devfreq->next_polling : next_jiffies; | ||
304 | |||
305 | mutex_unlock(&devfreq->lock); | ||
306 | mutex_lock(&devfreq_list_lock); | ||
307 | } | ||
308 | wait_remove_device = NULL; | ||
309 | mutex_unlock(&devfreq_list_lock); | ||
310 | |||
311 | if (next_jiffies > 0 && next_jiffies < ULONG_MAX) { | ||
312 | polling = true; | ||
313 | queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies); | ||
314 | } else { | ||
315 | polling = false; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * devfreq_add_device() - Add devfreq feature to the device | ||
321 | * @dev: the device to add devfreq feature. | ||
322 | * @profile: device-specific profile to run devfreq. | ||
323 | * @governor: the policy to choose frequency. | ||
324 | * @data: private data for the governor. The devfreq framework does not | ||
325 | * touch this value. | ||
326 | */ | ||
327 | struct devfreq *devfreq_add_device(struct device *dev, | ||
328 | struct devfreq_dev_profile *profile, | ||
329 | const struct devfreq_governor *governor, | ||
330 | void *data) | ||
331 | { | ||
332 | struct devfreq *devfreq; | ||
333 | int err = 0; | ||
334 | |||
335 | if (!dev || !profile || !governor) { | ||
336 | dev_err(dev, "%s: Invalid parameters.\n", __func__); | ||
337 | return ERR_PTR(-EINVAL); | ||
338 | } | ||
339 | |||
340 | |||
341 | if (!governor->no_central_polling) { | ||
342 | mutex_lock(&devfreq_list_lock); | ||
343 | devfreq = find_device_devfreq(dev); | ||
344 | mutex_unlock(&devfreq_list_lock); | ||
345 | if (!IS_ERR(devfreq)) { | ||
346 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); | ||
347 | err = -EINVAL; | ||
348 | goto out; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); | ||
353 | if (!devfreq) { | ||
354 | dev_err(dev, "%s: Unable to create devfreq for the device\n", | ||
355 | __func__); | ||
356 | err = -ENOMEM; | ||
357 | goto out; | ||
358 | } | ||
359 | |||
360 | mutex_init(&devfreq->lock); | ||
361 | mutex_lock(&devfreq->lock); | ||
362 | devfreq->dev.parent = dev; | ||
363 | devfreq->dev.class = devfreq_class; | ||
364 | devfreq->dev.release = devfreq_dev_release; | ||
365 | devfreq->profile = profile; | ||
366 | devfreq->governor = governor; | ||
367 | devfreq->previous_freq = profile->initial_freq; | ||
368 | devfreq->data = data; | ||
369 | devfreq->next_polling = devfreq->polling_jiffies | ||
370 | = msecs_to_jiffies(devfreq->profile->polling_ms); | ||
371 | devfreq->nb.notifier_call = devfreq_notifier_call; | ||
372 | |||
373 | dev_set_name(&devfreq->dev, dev_name(dev)); | ||
374 | err = device_register(&devfreq->dev); | ||
375 | if (err) { | ||
376 | put_device(&devfreq->dev); | ||
377 | goto err_dev; | ||
378 | } | ||
379 | |||
380 | if (governor->init) | ||
381 | err = governor->init(devfreq); | ||
382 | if (err) | ||
383 | goto err_init; | ||
384 | |||
385 | mutex_unlock(&devfreq->lock); | ||
386 | |||
387 | if (governor->no_central_polling) | ||
388 | goto out; | ||
389 | |||
390 | mutex_lock(&devfreq_list_lock); | ||
391 | |||
392 | list_add(&devfreq->node, &devfreq_list); | ||
393 | |||
394 | if (devfreq_wq && devfreq->next_polling && !polling) { | ||
395 | polling = true; | ||
396 | queue_delayed_work(devfreq_wq, &devfreq_work, | ||
397 | devfreq->next_polling); | ||
398 | } | ||
399 | mutex_unlock(&devfreq_list_lock); | ||
400 | goto out; | ||
401 | err_init: | ||
402 | device_unregister(&devfreq->dev); | ||
403 | err_dev: | ||
404 | mutex_unlock(&devfreq->lock); | ||
405 | kfree(devfreq); | ||
406 | out: | ||
407 | if (err) | ||
408 | return ERR_PTR(err); | ||
409 | else | ||
410 | return devfreq; | ||
411 | } | ||
412 | |||
413 | /** | ||
414 | * devfreq_remove_device() - Remove devfreq feature from a device. | ||
415 | * @devfreq: the devfreq instance to be removed | ||
416 | */ | ||
417 | int devfreq_remove_device(struct devfreq *devfreq) | ||
418 | { | ||
419 | if (!devfreq) | ||
420 | return -EINVAL; | ||
421 | |||
422 | if (!devfreq->governor->no_central_polling) { | ||
423 | mutex_lock(&devfreq_list_lock); | ||
424 | while (wait_remove_device == devfreq) { | ||
425 | mutex_unlock(&devfreq_list_lock); | ||
426 | schedule(); | ||
427 | mutex_lock(&devfreq_list_lock); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | mutex_lock(&devfreq->lock); | ||
432 | _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ | ||
433 | |||
434 | if (!devfreq->governor->no_central_polling) | ||
435 | mutex_unlock(&devfreq_list_lock); | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | static ssize_t show_governor(struct device *dev, | ||
441 | struct device_attribute *attr, char *buf) | ||
442 | { | ||
443 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); | ||
444 | } | ||
445 | |||
446 | static ssize_t show_freq(struct device *dev, | ||
447 | struct device_attribute *attr, char *buf) | ||
448 | { | ||
449 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); | ||
450 | } | ||
451 | |||
452 | static ssize_t show_polling_interval(struct device *dev, | ||
453 | struct device_attribute *attr, char *buf) | ||
454 | { | ||
455 | return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms); | ||
456 | } | ||
457 | |||
458 | static ssize_t store_polling_interval(struct device *dev, | ||
459 | struct device_attribute *attr, | ||
460 | const char *buf, size_t count) | ||
461 | { | ||
462 | struct devfreq *df = to_devfreq(dev); | ||
463 | unsigned int value; | ||
464 | int ret; | ||
465 | |||
466 | ret = sscanf(buf, "%u", &value); | ||
467 | if (ret != 1) | ||
468 | goto out; | ||
469 | |||
470 | mutex_lock(&df->lock); | ||
471 | df->profile->polling_ms = value; | ||
472 | df->next_polling = df->polling_jiffies | ||
473 | = msecs_to_jiffies(value); | ||
474 | mutex_unlock(&df->lock); | ||
475 | |||
476 | ret = count; | ||
477 | |||
478 | if (df->governor->no_central_polling) | ||
479 | goto out; | ||
480 | |||
481 | mutex_lock(&devfreq_list_lock); | ||
482 | if (df->next_polling > 0 && !polling) { | ||
483 | polling = true; | ||
484 | queue_delayed_work(devfreq_wq, &devfreq_work, | ||
485 | df->next_polling); | ||
486 | } | ||
487 | mutex_unlock(&devfreq_list_lock); | ||
488 | out: | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | static ssize_t show_central_polling(struct device *dev, | ||
493 | struct device_attribute *attr, char *buf) | ||
494 | { | ||
495 | return sprintf(buf, "%d\n", | ||
496 | !to_devfreq(dev)->governor->no_central_polling); | ||
497 | } | ||
498 | |||
499 | static struct device_attribute devfreq_attrs[] = { | ||
500 | __ATTR(governor, S_IRUGO, show_governor, NULL), | ||
501 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), | ||
502 | __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), | ||
503 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, | ||
504 | store_polling_interval), | ||
505 | { }, | ||
506 | }; | ||
507 | |||
508 | /** | ||
509 | * devfreq_start_polling() - Initialize data structures for the devfreq framework | ||
510 | * and start polling registered devfreq devices. | ||
511 | */ | ||
512 | static int __init devfreq_start_polling(void) | ||
513 | { | ||
514 | mutex_lock(&devfreq_list_lock); | ||
515 | polling = false; | ||
516 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | ||
517 | INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor); | ||
518 | mutex_unlock(&devfreq_list_lock); | ||
519 | |||
520 | devfreq_monitor(&devfreq_work.work); | ||
521 | return 0; | ||
522 | } | ||
523 | late_initcall(devfreq_start_polling); | ||
524 | |||
525 | static int __init devfreq_init(void) | ||
526 | { | ||
527 | devfreq_class = class_create(THIS_MODULE, "devfreq"); | ||
528 | if (IS_ERR(devfreq_class)) { | ||
529 | pr_err("%s: couldn't create class\n", __FILE__); | ||
530 | return PTR_ERR(devfreq_class); | ||
531 | } | ||
532 | devfreq_class->dev_attrs = devfreq_attrs; | ||
533 | return 0; | ||
534 | } | ||
535 | subsys_initcall(devfreq_init); | ||
536 | |||
537 | static void __exit devfreq_exit(void) | ||
538 | { | ||
539 | class_destroy(devfreq_class); | ||
540 | } | ||
541 | module_exit(devfreq_exit); | ||
542 | |||
543 | /* | ||
544 | * The following are helper functions for devfreq user device drivers with | ||
545 | * OPP framework. | ||
546 | */ | ||
547 | |||
548 | /** | ||
549 | * devfreq_recommended_opp() - Helper function to get proper OPP for the | ||
550 | * freq value given to the target callback. | ||
551 | * @dev: The devfreq user device. (parent of devfreq) | ||
552 | * @freq: The frequency given to the target callback | ||
553 | * | ||
554 | */ | ||
555 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq) | ||
556 | { | ||
557 | struct opp *opp = opp_find_freq_ceil(dev, freq); | ||
558 | |||
559 | if (opp == ERR_PTR(-ENODEV)) | ||
560 | opp = opp_find_freq_floor(dev, freq); | ||
561 | return opp; | ||
562 | } | ||
563 | |||
564 | /** | ||
565 | * devfreq_register_opp_notifier() - Helper function to get devfreq notified | ||
566 | * of any changes in the OPP | ||
567 | * availability. | ||
568 | * @dev: The devfreq user device. (parent of devfreq) | ||
569 | * @devfreq: The devfreq object. | ||
570 | */ | ||
571 | int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) | ||
572 | { | ||
573 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | ||
574 | |||
575 | if (IS_ERR(nh)) | ||
576 | return PTR_ERR(nh); | ||
577 | return srcu_notifier_chain_register(nh, &devfreq->nb); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq | ||
582 | * notified of any changes in the OPP | ||
583 | * availability. | ||
584 | * @dev: The devfreq user device. (parent of devfreq) | ||
585 | * @devfreq: The devfreq object. | ||
586 | * | ||
587 | * This must be called from the exit() callback of devfreq_dev_profile if | ||
588 | * devfreq_register_opp_notifier() has been used. | ||
589 | */ | ||
590 | int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) | ||
591 | { | ||
592 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | ||
593 | |||
594 | if (IS_ERR(nh)) | ||
595 | return PTR_ERR(nh); | ||
596 | return srcu_notifier_chain_unregister(nh, &devfreq->nb); | ||
597 | } | ||
598 | |||
599 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | ||
600 | MODULE_DESCRIPTION("devfreq class support"); | ||
601 | MODULE_LICENSE("GPL"); | ||
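To make the registration path above concrete, here is a minimal client sketch. All mydev_* names are hypothetical; the profile fields, callback signatures and ERR_PTR() error handling follow the devfreq.c code above, and devfreq_simple_ondemand is assumed to be declared in linux/devfreq.h:

	#include <linux/devfreq.h>
	#include <linux/opp.h>
	#include <linux/platform_device.h>

	static int mydev_target(struct device *dev, unsigned long *freq)
	{
		struct opp *opp = devfreq_recommended_opp(dev, freq);

		if (IS_ERR(opp))
			return PTR_ERR(opp);
		/* Program the device's clocks/voltage for *freq here. */
		return 0;
	}

	static int mydev_get_status(struct device *dev,
				    struct devfreq_dev_status *stat)
	{
		/* Fill in counters from hardware; constants used for brevity. */
		stat->busy_time = 30;
		stat->total_time = 100;
		stat->current_frequency = 200000000;
		return 0;
	}

	static struct devfreq_dev_profile mydev_profile = {
		.initial_freq	= 200000000,
		.polling_ms	= 100,
		.target		= mydev_target,
		.get_dev_status	= mydev_get_status,
	};

	static int mydev_probe(struct platform_device *pdev)
	{
		struct devfreq *df;

		df = devfreq_add_device(&pdev->dev, &mydev_profile,
					&devfreq_simple_ondemand, NULL);
		if (IS_ERR(df))	/* failure returns ERR_PTR(), never NULL */
			return PTR_ERR(df);
		return 0;
	}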
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h new file mode 100644 index 00000000000..ea7f13c58de --- /dev/null +++ b/drivers/devfreq/governor.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * governor.h - internal header for devfreq governors. | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This header is for devfreq governors in drivers/devfreq/ | ||
12 | */ | ||
13 | |||
14 | #ifndef _GOVERNOR_H | ||
15 | #define _GOVERNOR_H | ||
16 | |||
17 | #include <linux/devfreq.h> | ||
18 | |||
19 | #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) | ||
20 | |||
21 | /* Caution: devfreq->lock must be locked before calling update_devfreq */ | ||
22 | extern int update_devfreq(struct devfreq *devfreq); | ||
23 | |||
24 | #endif /* _GOVERNOR_H */ | ||
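A hedged reminder of the locking contract stated in the comment above: a governor that triggers a reevaluation from its own code paths must take the per-device lock around the call, mirroring what devfreq_notifier_call() does in devfreq.c:

	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);	/* reevaluate and set the frequency */
	mutex_unlock(&devfreq->lock);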
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c new file mode 100644 index 00000000000..c0596b29176 --- /dev/null +++ b/drivers/devfreq/governor_performance.c | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * linux/drivers/devfreq/governor_performance.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/devfreq.h> | ||
13 | |||
14 | static int devfreq_performance_func(struct devfreq *df, | ||
15 | unsigned long *freq) | ||
16 | { | ||
17 | /* | ||
18 | * target callback should be able to get floor value as | ||
19 | * said in devfreq.h | ||
20 | */ | ||
21 | *freq = UINT_MAX; | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | const struct devfreq_governor devfreq_performance = { | ||
26 | .name = "performance", | ||
27 | .get_target_freq = devfreq_performance_func, | ||
28 | .no_central_polling = true, | ||
29 | }; | ||
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c new file mode 100644 index 00000000000..2483a85a266 --- /dev/null +++ b/drivers/devfreq/governor_powersave.c | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * linux/drivers/devfreq/governor_powersave.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/devfreq.h> | ||
13 | |||
14 | static int devfreq_powersave_func(struct devfreq *df, | ||
15 | unsigned long *freq) | ||
16 | { | ||
17 | /* | ||
18 | * The target callback should be able to handle a ceiling value, as | ||
19 | * described in devfreq.h. | ||
20 | */ | ||
21 | *freq = 0; | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | const struct devfreq_governor devfreq_powersave = { | ||
26 | .name = "powersave", | ||
27 | .get_target_freq = devfreq_powersave_func, | ||
28 | .no_central_polling = true, | ||
29 | }; | ||
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c new file mode 100644 index 00000000000..efad8dcf902 --- /dev/null +++ b/drivers/devfreq/governor_simpleondemand.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * linux/drivers/devfreq/governor_simpleondemand.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <linux/devfreq.h> | ||
14 | #include <linux/math64.h> | ||
15 | |||
16 | /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ | ||
17 | #define DFSO_UPTHRESHOLD (90) | ||
18 | #define DFSO_DOWNDIFFERENTIAL (5) | ||
19 | static int devfreq_simple_ondemand_func(struct devfreq *df, | ||
20 | unsigned long *freq) | ||
21 | { | ||
22 | struct devfreq_dev_status stat; | ||
23 | int err = df->profile->get_dev_status(df->dev.parent, &stat); | ||
24 | unsigned long long a, b; | ||
25 | unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; | ||
26 | unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL; | ||
27 | struct devfreq_simple_ondemand_data *data = df->data; | ||
28 | |||
29 | if (err) | ||
30 | return err; | ||
31 | |||
32 | if (data) { | ||
33 | if (data->upthreshold) | ||
34 | dfso_upthreshold = data->upthreshold; | ||
35 | if (data->downdifferential) | ||
36 | dfso_downdifferential = data->downdifferential; | ||
37 | } | ||
38 | if (dfso_upthreshold > 100 || | ||
39 | dfso_upthreshold < dfso_downdifferential) | ||
40 | return -EINVAL; | ||
41 | |||
42 | /* Assume MAX if it is going to be divided by zero */ | ||
43 | if (stat.total_time == 0) { | ||
44 | *freq = UINT_MAX; | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | /* Prevent overflow */ | ||
49 | if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) { | ||
50 | stat.busy_time >>= 7; | ||
51 | stat.total_time >>= 7; | ||
52 | } | ||
53 | |||
54 | /* Set MAX if it's busy enough */ | ||
55 | if (stat.busy_time * 100 > | ||
56 | stat.total_time * dfso_upthreshold) { | ||
57 | *freq = UINT_MAX; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | /* Set MAX if we do not know the initial frequency */ | ||
62 | if (stat.current_frequency == 0) { | ||
63 | *freq = UINT_MAX; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | /* Keep the current frequency */ | ||
68 | if (stat.busy_time * 100 > | ||
69 | stat.total_time * (dfso_upthreshold - dfso_downdifferential)) { | ||
70 | *freq = stat.current_frequency; | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | /* Set the desired frequency based on the load */ | ||
75 | a = stat.busy_time; | ||
76 | a *= stat.current_frequency; | ||
77 | b = div_u64(a, stat.total_time); | ||
78 | b *= 100; | ||
79 | b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); | ||
80 | *freq = (unsigned long) b; | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | const struct devfreq_governor devfreq_simple_ondemand = { | ||
86 | .name = "simple_ondemand", | ||
87 | .get_target_freq = devfreq_simple_ondemand_func, | ||
88 | }; | ||
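Working the proportional branch above with illustrative numbers: busy_time = 60, total_time = 100, current_frequency = 200000000, and the default tunables (upthreshold 90, downdifferential 5). Neither the "busy enough" test (6000 > 9000 is false) nor the "keep current" test (6000 > 8500 is false) fires, so a = 60 * 200000000 = 12000000000, b = a / 100 = 120000000, b * 100 = 12000000000, and b / (90 - 5/2) = 12000000000 / 88 (5/2 is integer division, giving 2) is roughly 136363636. The requested frequency lands near 136 MHz, re-centering the 60% load just under the up-threshold.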
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c new file mode 100644 index 00000000000..4f8b563da78 --- /dev/null +++ b/drivers/devfreq/governor_userspace.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * linux/drivers/devfreq/governor_userspace.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/slab.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/devfreq.h> | ||
15 | #include <linux/pm.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include "governor.h" | ||
18 | |||
19 | struct userspace_data { | ||
20 | unsigned long user_frequency; | ||
21 | bool valid; | ||
22 | }; | ||
23 | |||
24 | static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) | ||
25 | { | ||
26 | struct userspace_data *data = df->data; | ||
27 | |||
28 | if (!data->valid) | ||
29 | *freq = df->previous_freq; /* No user freq specified yet */ | ||
30 | else | ||
31 | *freq = data->user_frequency; | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | static ssize_t store_freq(struct device *dev, struct device_attribute *attr, | ||
36 | const char *buf, size_t count) | ||
37 | { | ||
38 | struct devfreq *devfreq = to_devfreq(dev); | ||
39 | struct userspace_data *data; | ||
40 | unsigned long wanted; | ||
41 | int err = 0; | ||
42 | |||
43 | |||
44 | mutex_lock(&devfreq->lock); | ||
45 | data = devfreq->data; | ||
46 | |||
47 | sscanf(buf, "%lu", &wanted); | ||
48 | data->user_frequency = wanted; | ||
49 | data->valid = true; | ||
50 | err = update_devfreq(devfreq); | ||
51 | if (err == 0) | ||
52 | err = count; | ||
53 | mutex_unlock(&devfreq->lock); | ||
54 | return err; | ||
55 | } | ||
56 | |||
57 | static ssize_t show_freq(struct device *dev, struct device_attribute *attr, | ||
58 | char *buf) | ||
59 | { | ||
60 | struct devfreq *devfreq = to_devfreq(dev); | ||
61 | struct userspace_data *data; | ||
62 | int err = 0; | ||
63 | |||
64 | mutex_lock(&devfreq->lock); | ||
65 | data = devfreq->data; | ||
66 | |||
67 | if (data->valid) | ||
68 | err = sprintf(buf, "%lu\n", data->user_frequency); | ||
69 | else | ||
70 | err = sprintf(buf, "undefined\n"); | ||
71 | mutex_unlock(&devfreq->lock); | ||
72 | return err; | ||
73 | } | ||
74 | |||
75 | static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq); | ||
76 | static struct attribute *dev_entries[] = { | ||
77 | &dev_attr_set_freq.attr, | ||
78 | NULL, | ||
79 | }; | ||
80 | static struct attribute_group dev_attr_group = { | ||
81 | .name = "userspace", | ||
82 | .attrs = dev_entries, | ||
83 | }; | ||
84 | |||
85 | static int userspace_init(struct devfreq *devfreq) | ||
86 | { | ||
87 | int err = 0; | ||
88 | struct userspace_data *data = kzalloc(sizeof(struct userspace_data), | ||
89 | GFP_KERNEL); | ||
90 | |||
91 | if (!data) { | ||
92 | err = -ENOMEM; | ||
93 | goto out; | ||
94 | } | ||
95 | data->valid = false; | ||
96 | devfreq->data = data; | ||
97 | |||
98 | err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); | ||
99 | out: | ||
100 | return err; | ||
101 | } | ||
102 | |||
103 | static void userspace_exit(struct devfreq *devfreq) | ||
104 | { | ||
105 | sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); | ||
106 | kfree(devfreq->data); | ||
107 | devfreq->data = NULL; | ||
108 | } | ||
109 | |||
110 | const struct devfreq_governor devfreq_userspace = { | ||
111 | .name = "userspace", | ||
112 | .get_target_freq = devfreq_userspace_func, | ||
113 | .init = userspace_init, | ||
114 | .exit = userspace_exit, | ||
115 | .no_central_polling = true, | ||
116 | }; | ||
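For completeness, a note inferred from the code above rather than from separate documentation: after a device is registered with &devfreq_userspace as its governor, user space drives the frequency by writing a value to the "userspace/set_freq" attribute under the devfreq class device (i.e. /sys/class/devfreq/<device-name>/userspace/set_freq); reading it back returns the last value written, or "undefined" before the first write.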
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c index 9d8710f8bc7..1782693819f 100644 --- a/drivers/hid/hid-picolcd.c +++ b/drivers/hid/hid-picolcd.c | |||
@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev, | |||
2409 | #ifdef CONFIG_PM | 2409 | #ifdef CONFIG_PM |
2410 | static int picolcd_suspend(struct hid_device *hdev, pm_message_t message) | 2410 | static int picolcd_suspend(struct hid_device *hdev, pm_message_t message) |
2411 | { | 2411 | { |
2412 | if (message.event & PM_EVENT_AUTO) | 2412 | if (PMSG_IS_AUTO(message)) |
2413 | return 0; | 2413 | return 0; |
2414 | 2414 | ||
2415 | picolcd_suspend_backlight(hid_get_drvdata(hdev)); | 2415 | picolcd_suspend_backlight(hid_get_drvdata(hdev)); |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 77e705c2209..b403fcef0b8 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -1332,7 +1332,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) | |||
1332 | struct usbhid_device *usbhid = hid->driver_data; | 1332 | struct usbhid_device *usbhid = hid->driver_data; |
1333 | int status; | 1333 | int status; |
1334 | 1334 | ||
1335 | if (message.event & PM_EVENT_AUTO) { | 1335 | if (PMSG_IS_AUTO(message)) { |
1336 | spin_lock_irq(&usbhid->lock); /* Sync with error handler */ | 1336 | spin_lock_irq(&usbhid->lock); /* Sync with error handler */ |
1337 | if (!test_bit(HID_RESET_PENDING, &usbhid->iofl) | 1337 | if (!test_bit(HID_RESET_PENDING, &usbhid->iofl) |
1338 | && !test_bit(HID_CLEAR_HALT, &usbhid->iofl) | 1338 | && !test_bit(HID_CLEAR_HALT, &usbhid->iofl) |
@@ -1367,7 +1367,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) | |||
1367 | return -EIO; | 1367 | return -EIO; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | if (!ignoreled && (message.event & PM_EVENT_AUTO)) { | 1370 | if (!ignoreled && PMSG_IS_AUTO(message)) { |
1371 | spin_lock_irq(&usbhid->lock); | 1371 | spin_lock_irq(&usbhid->lock); |
1372 | if (test_bit(HID_LED_ON, &usbhid->iofl)) { | 1372 | if (test_bit(HID_LED_ON, &usbhid->iofl)) { |
1373 | spin_unlock_irq(&usbhid->lock); | 1373 | spin_unlock_irq(&usbhid->lock); |
@@ -1380,8 +1380,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) | |||
1380 | hid_cancel_delayed_stuff(usbhid); | 1380 | hid_cancel_delayed_stuff(usbhid); |
1381 | hid_cease_io(usbhid); | 1381 | hid_cease_io(usbhid); |
1382 | 1382 | ||
1383 | if ((message.event & PM_EVENT_AUTO) && | 1383 | if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { |
1384 | test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { | ||
1385 | /* lost race against keypresses */ | 1384 | /* lost race against keypresses */ |
1386 | status = hid_start_in(hid); | 1385 | status = hid_start_in(hid); |
1387 | if (status < 0) | 1386 | if (status < 0) |
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index bb7f17f2a33..cbf13d09b4a 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <media/videobuf-dma-sg.h> | 21 | #include <media/videobuf-dma-sg.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
24 | #include <linux/pm_qos_params.h> | 24 | #include <linux/pm_qos.h> |
25 | #include <linux/via-core.h> | 25 | #include <linux/via-core.h> |
26 | #include <linux/via-gpio.h> | 26 | #include <linux/via-gpio.h> |
27 | #include <linux/via_i2c.h> | 27 | #include <linux/via_i2c.h> |
@@ -69,7 +69,7 @@ struct via_camera { | |||
69 | struct mutex lock; | 69 | struct mutex lock; |
70 | enum viacam_opstate opstate; | 70 | enum viacam_opstate opstate; |
71 | unsigned long flags; | 71 | unsigned long flags; |
72 | struct pm_qos_request_list qos_request; | 72 | struct pm_qos_request qos_request; |
73 | /* | 73 | /* |
74 | * GPIO info for power/reset management | 74 | * GPIO info for power/reset management |
75 | */ | 75 | */ |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 680312710a7..a855db1ad24 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <linux/if_vlan.h> | 47 | #include <linux/if_vlan.h> |
48 | #include <linux/cpu.h> | 48 | #include <linux/cpu.h> |
49 | #include <linux/smp.h> | 49 | #include <linux/smp.h> |
50 | #include <linux/pm_qos_params.h> | 50 | #include <linux/pm_qos.h> |
51 | #include <linux/pm_runtime.h> | 51 | #include <linux/pm_runtime.h> |
52 | #include <linux/aer.h> | 52 | #include <linux/aer.h> |
53 | #include <linux/prefetch.h> | 53 | #include <linux/prefetch.h> |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index cdb958875ba..7d6082160bc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1476,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) | |||
1476 | if (!dev->suspend_count++) { | 1476 | if (!dev->suspend_count++) { |
1477 | spin_lock_irq(&dev->txq.lock); | 1477 | spin_lock_irq(&dev->txq.lock); |
1478 | /* don't autosuspend while transmitting */ | 1478 | /* don't autosuspend while transmitting */ |
1479 | if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) { | 1479 | if (dev->txq.qlen && PMSG_IS_AUTO(message)) { |
1480 | spin_unlock_irq(&dev->txq.lock); | 1480 | spin_unlock_irq(&dev->txq.lock); |
1481 | return -EBUSY; | 1481 | return -EBUSY; |
1482 | } else { | 1482 | } else { |
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 298f2b0b631..9a644d052f1 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface) | |||
599 | * | 599 | * |
600 | * As well, the device might refuse going to sleep for whichever | 600 | * As well, the device might refuse going to sleep for whichever |
601 | * reason. In this case we just fail. For system suspend/hibernate, | 601 | * reason. In this case we just fail. For system suspend/hibernate, |
602 | * we *can't* fail. We check PM_EVENT_AUTO to see if the | 602 | * we *can't* fail. We check PMSG_IS_AUTO to see if the |
603 | * suspend call comes from the USB stack or from the system and act | 603 | * suspend call comes from the USB stack or from the system and act |
604 | * in consequence. | 604 | * in consequence. |
605 | * | 605 | * |
@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) | |||
615 | struct i2400m *i2400m = &i2400mu->i2400m; | 615 | struct i2400m *i2400m = &i2400mu->i2400m; |
616 | 616 | ||
617 | #ifdef CONFIG_PM | 617 | #ifdef CONFIG_PM |
618 | if (pm_msg.event & PM_EVENT_AUTO) | 618 | if (PMSG_IS_AUTO(pm_msg)) |
619 | is_autosuspend = 1; | 619 | is_autosuspend = 1; |
620 | #endif | 620 | #endif |
621 | 621 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index ef9ad79d1bf..127e9c63bea 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -161,7 +161,7 @@ that only one external action is invoked at a time. | |||
161 | #include <linux/firmware.h> | 161 | #include <linux/firmware.h> |
162 | #include <linux/acpi.h> | 162 | #include <linux/acpi.h> |
163 | #include <linux/ctype.h> | 163 | #include <linux/ctype.h> |
164 | #include <linux/pm_qos_params.h> | 164 | #include <linux/pm_qos.h> |
165 | 165 | ||
166 | #include <net/lib80211.h> | 166 | #include <net/lib80211.h> |
167 | 167 | ||
@@ -174,7 +174,7 @@ that only one external action is invoked at a time. | |||
174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" | 174 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" |
175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" | 175 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
176 | 176 | ||
177 | static struct pm_qos_request_list ipw2100_pm_qos_req; | 177 | static struct pm_qos_request ipw2100_pm_qos_req; |
178 | 178 | ||
179 | /* Debugging stuff */ | 179 | /* Debugging stuff */ |
180 | #ifdef CONFIG_IPW2100_DEBUG | 180 | #ifdef CONFIG_IPW2100_DEBUG |
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index f462fa5f937..33175504bb3 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig | |||
@@ -60,6 +60,10 @@ config VT_CONSOLE | |||
60 | 60 | ||
61 | If unsure, say Y. | 61 | If unsure, say Y. |
62 | 62 | ||
63 | config VT_CONSOLE_SLEEP | ||
64 | def_bool y | ||
65 | depends on VT_CONSOLE && PM_SLEEP | ||
66 | |||
63 | config HW_CONSOLE | 67 | config HW_CONSOLE |
64 | bool | 68 | bool |
65 | depends on VT && !UML | 69 | depends on VT && !UML |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3ec6699ab72..6960715c506 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1305,7 +1305,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) | |||
1305 | struct acm *acm = usb_get_intfdata(intf); | 1305 | struct acm *acm = usb_get_intfdata(intf); |
1306 | int cnt; | 1306 | int cnt; |
1307 | 1307 | ||
1308 | if (message.event & PM_EVENT_AUTO) { | 1308 | if (PMSG_IS_AUTO(message)) { |
1309 | int b; | 1309 | int b; |
1310 | 1310 | ||
1311 | spin_lock_irq(&acm->write_lock); | 1311 | spin_lock_irq(&acm->write_lock); |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 1d26a7135dd..efe684908c1 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -798,11 +798,11 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) | |||
798 | dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); | 798 | dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); |
799 | 799 | ||
800 | /* if this is an autosuspend the caller does the locking */ | 800 | /* if this is an autosuspend the caller does the locking */ |
801 | if (!(message.event & PM_EVENT_AUTO)) | 801 | if (!PMSG_IS_AUTO(message)) |
802 | mutex_lock(&desc->lock); | 802 | mutex_lock(&desc->lock); |
803 | spin_lock_irq(&desc->iuspin); | 803 | spin_lock_irq(&desc->iuspin); |
804 | 804 | ||
805 | if ((message.event & PM_EVENT_AUTO) && | 805 | if (PMSG_IS_AUTO(message) && |
806 | (test_bit(WDM_IN_USE, &desc->flags) | 806 | (test_bit(WDM_IN_USE, &desc->flags) |
807 | || test_bit(WDM_RESPONDING, &desc->flags))) { | 807 | || test_bit(WDM_RESPONDING, &desc->flags))) { |
808 | spin_unlock_irq(&desc->iuspin); | 808 | spin_unlock_irq(&desc->iuspin); |
@@ -815,7 +815,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) | |||
815 | kill_urbs(desc); | 815 | kill_urbs(desc); |
816 | cancel_work_sync(&desc->rxwork); | 816 | cancel_work_sync(&desc->rxwork); |
817 | } | 817 | } |
818 | if (!(message.event & PM_EVENT_AUTO)) | 818 | if (!PMSG_IS_AUTO(message)) |
819 | mutex_unlock(&desc->lock); | 819 | mutex_unlock(&desc->lock); |
820 | 820 | ||
821 | return rv; | 821 | return rv; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index adf5ca8a239..3b029a0a478 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) | |||
1046 | /* Non-root devices on a full/low-speed bus must wait for their | 1046 | /* Non-root devices on a full/low-speed bus must wait for their |
1047 | * companion high-speed root hub, in case a handoff is needed. | 1047 | * companion high-speed root hub, in case a handoff is needed. |
1048 | */ | 1048 | */ |
1049 | if (!(msg.event & PM_EVENT_AUTO) && udev->parent && | 1049 | if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion) |
1050 | udev->bus->hs_companion) | ||
1051 | device_pm_wait_for_dev(&udev->dev, | 1050 | device_pm_wait_for_dev(&udev->dev, |
1052 | &udev->bus->hs_companion->root_hub->dev); | 1051 | &udev->bus->hs_companion->root_hub->dev); |
1053 | 1052 | ||
@@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev, | |||
1075 | 1074 | ||
1076 | if (driver->suspend) { | 1075 | if (driver->suspend) { |
1077 | status = driver->suspend(intf, msg); | 1076 | status = driver->suspend(intf, msg); |
1078 | if (status && !(msg.event & PM_EVENT_AUTO)) | 1077 | if (status && !PMSG_IS_AUTO(msg)) |
1079 | dev_err(&intf->dev, "%s error %d\n", | 1078 | dev_err(&intf->dev, "%s error %d\n", |
1080 | "suspend", status); | 1079 | "suspend", status); |
1081 | } else { | 1080 | } else { |
@@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1189 | status = usb_suspend_interface(udev, intf, msg); | 1188 | status = usb_suspend_interface(udev, intf, msg); |
1190 | 1189 | ||
1191 | /* Ignore errors during system sleep transitions */ | 1190 | /* Ignore errors during system sleep transitions */ |
1192 | if (!(msg.event & PM_EVENT_AUTO)) | 1191 | if (!PMSG_IS_AUTO(msg)) |
1193 | status = 0; | 1192 | status = 0; |
1194 | if (status != 0) | 1193 | if (status != 0) |
1195 | break; | 1194 | break; |
@@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1199 | status = usb_suspend_device(udev, msg); | 1198 | status = usb_suspend_device(udev, msg); |
1200 | 1199 | ||
1201 | /* Again, ignore errors during system sleep transitions */ | 1200 | /* Again, ignore errors during system sleep transitions */ |
1202 | if (!(msg.event & PM_EVENT_AUTO)) | 1201 | if (!PMSG_IS_AUTO(msg)) |
1203 | status = 0; | 1202 | status = 0; |
1204 | } | 1203 | } |
1205 | 1204 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index b3b7d062906..13222d352a6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1975,8 +1975,9 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) | |||
1975 | int status; | 1975 | int status; |
1976 | int old_state = hcd->state; | 1976 | int old_state = hcd->state; |
1977 | 1977 | ||
1978 | dev_dbg(&rhdev->dev, "bus %s%s\n", | 1978 | dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n", |
1979 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend"); | 1979 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), |
1980 | rhdev->do_remote_wakeup); | ||
1980 | if (HCD_DEAD(hcd)) { | 1981 | if (HCD_DEAD(hcd)) { |
1981 | dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); | 1982 | dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); |
1982 | return 0; | 1983 | return 0; |
@@ -2011,8 +2012,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) | |||
2011 | int status; | 2012 | int status; |
2012 | int old_state = hcd->state; | 2013 | int old_state = hcd->state; |
2013 | 2014 | ||
2014 | dev_dbg(&rhdev->dev, "usb %s%s\n", | 2015 | dev_dbg(&rhdev->dev, "usb %sresume\n", |
2015 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); | 2016 | (PMSG_IS_AUTO(msg) ? "auto-" : "")); |
2016 | if (HCD_DEAD(hcd)) { | 2017 | if (HCD_DEAD(hcd)) { |
2017 | dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); | 2018 | dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); |
2018 | return 0; | 2019 | return 0; |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d6cc8324934..96f05b29c9a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2369,8 +2369,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
2369 | int port1 = udev->portnum; | 2369 | int port1 = udev->portnum; |
2370 | int status; | 2370 | int status; |
2371 | 2371 | ||
2372 | // dev_dbg(hub->intfdev, "suspend port %d\n", port1); | ||
2373 | |||
2374 | /* enable remote wakeup when appropriate; this lets the device | 2372 | /* enable remote wakeup when appropriate; this lets the device |
2375 | * wake up the upstream hub (including maybe the root hub). | 2373 | * wake up the upstream hub (including maybe the root hub). |
2376 | * | 2374 | * |
@@ -2387,7 +2385,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
2387 | dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", | 2385 | dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", |
2388 | status); | 2386 | status); |
2389 | /* bail if autosuspend is requested */ | 2387 | /* bail if autosuspend is requested */ |
2390 | if (msg.event & PM_EVENT_AUTO) | 2388 | if (PMSG_IS_AUTO(msg)) |
2391 | return status; | 2389 | return status; |
2392 | } | 2390 | } |
2393 | } | 2391 | } |
@@ -2416,12 +2414,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
2416 | USB_CTRL_SET_TIMEOUT); | 2414 | USB_CTRL_SET_TIMEOUT); |
2417 | 2415 | ||
2418 | /* System sleep transitions should never fail */ | 2416 | /* System sleep transitions should never fail */ |
2419 | if (!(msg.event & PM_EVENT_AUTO)) | 2417 | if (!PMSG_IS_AUTO(msg)) |
2420 | status = 0; | 2418 | status = 0; |
2421 | } else { | 2419 | } else { |
2422 | /* device has up to 10 msec to fully suspend */ | 2420 | /* device has up to 10 msec to fully suspend */ |
2423 | dev_dbg(&udev->dev, "usb %ssuspend\n", | 2421 | dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", |
2424 | (msg.event & PM_EVENT_AUTO ? "auto-" : "")); | 2422 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), |
2423 | udev->do_remote_wakeup); | ||
2425 | usb_set_device_state(udev, USB_STATE_SUSPENDED); | 2424 | usb_set_device_state(udev, USB_STATE_SUSPENDED); |
2426 | msleep(10); | 2425 | msleep(10); |
2427 | } | 2426 | } |
@@ -2572,7 +2571,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) | |||
2572 | } else { | 2571 | } else { |
2573 | /* drive resume for at least 20 msec */ | 2572 | /* drive resume for at least 20 msec */ |
2574 | dev_dbg(&udev->dev, "usb %sresume\n", | 2573 | dev_dbg(&udev->dev, "usb %sresume\n", |
2575 | (msg.event & PM_EVENT_AUTO ? "auto-" : "")); | 2574 | (PMSG_IS_AUTO(msg) ? "auto-" : "")); |
2576 | msleep(25); | 2575 | msleep(25); |
2577 | 2576 | ||
2578 | /* Virtual root hubs can trigger on GET_PORT_STATUS to | 2577 | /* Virtual root hubs can trigger on GET_PORT_STATUS to |
@@ -2679,7 +2678,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg) | |||
2679 | udev = hdev->children [port1-1]; | 2678 | udev = hdev->children [port1-1]; |
2680 | if (udev && udev->can_submit) { | 2679 | if (udev && udev->can_submit) { |
2681 | dev_warn(&intf->dev, "port %d nyet suspended\n", port1); | 2680 | dev_warn(&intf->dev, "port %d nyet suspended\n", port1); |
2682 | if (msg.event & PM_EVENT_AUTO) | 2681 | if (PMSG_IS_AUTO(msg)) |
2683 | return -EBUSY; | 2682 | return -EBUSY; |
2684 | } | 2683 | } |
2685 | } | 2684 | } |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d5d136a53b6..b18179bda0d 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message) | |||
1009 | struct sierra_intf_private *intfdata; | 1009 | struct sierra_intf_private *intfdata; |
1010 | int b; | 1010 | int b; |
1011 | 1011 | ||
1012 | if (message.event & PM_EVENT_AUTO) { | 1012 | if (PMSG_IS_AUTO(message)) { |
1013 | intfdata = serial->private; | 1013 | intfdata = serial->private; |
1014 | spin_lock_irq(&intfdata->susp_lock); | 1014 | spin_lock_irq(&intfdata->susp_lock); |
1015 | b = intfdata->in_flight; | 1015 | b = intfdata->in_flight; |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index e4fad5e643d..d555ca9567b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) | |||
651 | 651 | ||
652 | dbg("%s entered", __func__); | 652 | dbg("%s entered", __func__); |
653 | 653 | ||
654 | if (message.event & PM_EVENT_AUTO) { | 654 | if (PMSG_IS_AUTO(message)) { |
655 | spin_lock_irq(&intfdata->susp_lock); | 655 | spin_lock_irq(&intfdata->susp_lock); |
656 | b = intfdata->in_flight; | 656 | b = intfdata->in_flight; |
657 | spin_unlock_irq(&intfdata->susp_lock); | 657 | spin_unlock_irq(&intfdata->susp_lock); |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h new file mode 100644 index 00000000000..afb94583960 --- /dev/null +++ b/include/linux/devfreq.h | |||
@@ -0,0 +1,238 @@ | |||
1 | /* | ||
2 | * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework | ||
3 | * for Non-CPU Devices. | ||
4 | * | ||
5 | * Copyright (C) 2011 Samsung Electronics | ||
6 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __LINUX_DEVFREQ_H__ | ||
14 | #define __LINUX_DEVFREQ_H__ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/opp.h> | ||
19 | |||
20 | #define DEVFREQ_NAME_LEN 16 | ||
21 | |||
22 | struct devfreq; | ||
23 | |||
24 | /** | ||
25 | * struct devfreq_dev_status - Data given from devfreq user device to | ||
26 | * governors. Represents the performance | ||
27 | * statistics. | ||
28 | * @total_time The total time represented by this instance of | ||
29 | * devfreq_dev_status | ||
30 | * @busy_time The portion of total_time during which the | ||
31 | * device was busy. | ||
32 | * @current_frequency The operating frequency. | ||
33 | * @private_data An entry not specified by the devfreq framework. | ||
34 | * A device and a specific governor may have their | ||
35 | * own protocol with private_data. However, because | ||
36 | * this is governor-specific, a governor using this | ||
37 | * will only be compatible with devices aware of it. | ||
38 | */ | ||
39 | struct devfreq_dev_status { | ||
40 | /* both since the last measure */ | ||
41 | unsigned long total_time; | ||
42 | unsigned long busy_time; | ||
43 | unsigned long current_frequency; | ||
44 | void *private_data; | ||
45 | }; | ||
46 | |||
47 | /** | ||
48 | * struct devfreq_dev_profile - Devfreq's user device profile | ||
49 | * @initial_freq The operating frequency when devfreq_add_device() is | ||
50 | * called. | ||
51 | * @polling_ms The polling interval in ms. 0 disables polling. | ||
52 | * @target The device should set its operating frequency at | ||
53 | * freq, or at the lowest supported frequency above freq. | ||
54 | * If freq is higher than any operable frequency, set the | ||
55 | * maximum. Before returning, the target function should | ||
56 | * set freq to the frequency actually configured. | ||
57 | * @get_dev_status The device should provide the current performance | ||
58 | * status to devfreq, which is used by governors. | ||
59 | * @exit An optional callback that is called when devfreq | ||
60 | * is removing the devfreq object due to error or | ||
61 | * from devfreq_remove_device() call. If the user | ||
62 | * has registered devfreq->nb at a notifier-head, | ||
63 | * this is the time to unregister it. | ||
64 | */ | ||
65 | struct devfreq_dev_profile { | ||
66 | unsigned long initial_freq; | ||
67 | unsigned int polling_ms; | ||
68 | |||
69 | int (*target)(struct device *dev, unsigned long *freq); | ||
70 | int (*get_dev_status)(struct device *dev, | ||
71 | struct devfreq_dev_status *stat); | ||
72 | void (*exit)(struct device *dev); | ||
73 | }; | ||
74 | |||
75 | /** | ||
76 | * struct devfreq_governor - Devfreq policy governor | ||
77 | * @name Governor's name | ||
78 | * @get_target_freq Returns desired operating frequency for the device. | ||
79 | * Basically, get_target_freq will run | ||
80 | * devfreq_dev_profile.get_dev_status() to get the | ||
81 | * status of the device (load = busy_time / total_time). | ||
82 | * If no_central_polling is set, this callback is called | ||
83 | * only when update_devfreq() is triggered by an OPP notification. | ||
84 | * @init Called when the devfreq is being attached to a device | ||
85 | * @exit Called when the devfreq is being removed from a | ||
86 | * device. Governor should stop any internal routines | ||
87 | * before return because related data may be | ||
88 | * freed after exit(). | ||
89 | * @no_central_polling Do not use devfreq's central polling mechanism. | ||
90 | * When this is set, devfreq will not call | ||
91 | * get_target_freq with devfreq_monitor(). However, | ||
92 | * devfreq will call get_target_freq from | ||
93 | * update_devfreq() when notified by the OPP framework. | ||
94 | * | ||
95 | * Note that the callbacks are called with devfreq->lock locked by devfreq. | ||
96 | */ | ||
97 | struct devfreq_governor { | ||
98 | const char name[DEVFREQ_NAME_LEN]; | ||
99 | int (*get_target_freq)(struct devfreq *this, unsigned long *freq); | ||
100 | int (*init)(struct devfreq *this); | ||
101 | void (*exit)(struct devfreq *this); | ||
102 | const bool no_central_polling; | ||
103 | }; | ||
104 | |||
105 | /** | ||
106 | * struct devfreq - Device devfreq structure | ||
107 | * @node list node - contains the devices with devfreq that have been | ||
108 | * registered. | ||
109 | * @lock a mutex to protect accessing devfreq. | ||
110 | * @dev device registered by devfreq class. dev.parent is the device | ||
111 | * using devfreq. | ||
112 | * @profile device-specific devfreq profile | ||
113 | * @governor method how to choose frequency based on the usage. | ||
114 | * @nb notifier block used to notify devfreq object that it should | ||
115 | * reevaluate operable frequencies. Devfreq users may register | ||
116 | * devfreq.nb with the corresponding notifier call chain. | ||
117 | * @polling_jiffies interval in jiffies. | ||
118 | * @previous_freq previously configured frequency value. | ||
119 | * @next_polling the number of remaining jiffies to poll with | ||
120 | * "devfreq_monitor" executions to reevaluate | ||
121 | * frequency/voltage of the device. Set by | ||
122 | * profile's polling_ms interval. | ||
123 | * @data Private data of the governor. The devfreq framework does not | ||
124 | * touch this. | ||
125 | * @being_removed a flag to mark that this object is being removed in | ||
126 | * order to prevent trying to remove the object multiple times. | ||
127 | * | ||
128 | * This structure stores the devfreq information for a given device. | ||
129 | * | ||
130 | * Note that when a governor accesses entries in struct devfreq in its | ||
131 | * functions except for the context of callbacks defined in struct | ||
132 | * devfreq_governor, the governor should protect its access with the | ||
133 | * struct mutex lock in struct devfreq. A governor may use this mutex | ||
134 | * to protect its own private data in void *data as well. | ||
135 | */ | ||
136 | struct devfreq { | ||
137 | struct list_head node; | ||
138 | |||
139 | struct mutex lock; | ||
140 | struct device dev; | ||
141 | struct devfreq_dev_profile *profile; | ||
142 | const struct devfreq_governor *governor; | ||
143 | struct notifier_block nb; | ||
144 | |||
145 | unsigned long polling_jiffies; | ||
146 | unsigned long previous_freq; | ||
147 | unsigned int next_polling; | ||
148 | |||
149 | void *data; /* private data for governors */ | ||
150 | |||
151 | bool being_removed; | ||
152 | }; | ||
153 | |||
154 | #if defined(CONFIG_PM_DEVFREQ) | ||
155 | extern struct devfreq *devfreq_add_device(struct device *dev, | ||
156 | struct devfreq_dev_profile *profile, | ||
157 | const struct devfreq_governor *governor, | ||
158 | void *data); | ||
159 | extern int devfreq_remove_device(struct devfreq *devfreq); | ||
160 | |||
161 | /* Helper functions for devfreq user device driver with OPP. */ | ||
162 | extern struct opp *devfreq_recommended_opp(struct device *dev, | ||
163 | unsigned long *freq); | ||
164 | extern int devfreq_register_opp_notifier(struct device *dev, | ||
165 | struct devfreq *devfreq); | ||
166 | extern int devfreq_unregister_opp_notifier(struct device *dev, | ||
167 | struct devfreq *devfreq); | ||
168 | |||
169 | #ifdef CONFIG_DEVFREQ_GOV_POWERSAVE | ||
170 | extern const struct devfreq_governor devfreq_powersave; | ||
171 | #endif | ||
172 | #ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE | ||
173 | extern const struct devfreq_governor devfreq_performance; | ||
174 | #endif | ||
175 | #ifdef CONFIG_DEVFREQ_GOV_USERSPACE | ||
176 | extern const struct devfreq_governor devfreq_userspace; | ||
177 | #endif | ||
178 | #ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND | ||
179 | extern const struct devfreq_governor devfreq_simple_ondemand; | ||
180 | /** | ||
181 | * struct devfreq_simple_ondemand_data - data passed as void *data to | ||
182 | * devfreq_add_device for the simple_ondemand governor | ||
183 | * @upthreshold If the load is over this value, the frequency jumps. | ||
184 | * Specify 0 to use the default. Valid value = 0 to 100. | ||
185 | * @downdifferential If the load is under upthreshold - downdifferential, | ||
186 | * the governor may consider slowing the frequency down. | ||
187 | * Specify 0 to use the default. Valid value = 0 to 100. | ||
188 | * downdifferential < upthreshold must hold. | ||
189 | * | ||
190 | * If the devfreq_simple_ondemand_data pointer fed to the governor is NULL, | ||
191 | * the governor uses the default values. | ||
192 | */ | ||
193 | struct devfreq_simple_ondemand_data { | ||
194 | unsigned int upthreshold; | ||
195 | unsigned int downdifferential; | ||
196 | }; | ||
197 | #endif | ||
198 | |||
199 | #else /* !CONFIG_PM_DEVFREQ */ | ||
200 | static inline struct devfreq *devfreq_add_device(struct device *dev, | ||
201 | struct devfreq_dev_profile *profile, | ||
202 | const struct devfreq_governor *governor, | ||
203 | void *data) | ||
204 | { | ||
205 | return NULL; | ||
206 | } | ||
207 | |||
208 | static inline int devfreq_remove_device(struct devfreq *devfreq) | ||
209 | { | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | static inline struct opp *devfreq_recommended_opp(struct device *dev, | ||
214 | unsigned long *freq) | ||
215 | { | ||
216 | return ERR_PTR(-EINVAL); | ||
217 | } | ||
218 | |||
219 | static inline int devfreq_register_opp_notifier(struct device *dev, | ||
220 | struct devfreq *devfreq) | ||
221 | { | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | static inline int devfreq_unregister_opp_notifier(struct device *dev, | ||
226 | struct devfreq *devfreq) | ||
227 | { | ||
228 | return -EINVAL; | ||
229 | } | ||
230 | |||
231 | #define devfreq_powersave NULL | ||
232 | #define devfreq_performance NULL | ||
233 | #define devfreq_userspace NULL | ||
234 | #define devfreq_simple_ondemand NULL | ||
235 | |||
236 | #endif /* CONFIG_PM_DEVFREQ */ | ||
237 | |||
238 | #endif /* __LINUX_DEVFREQ_H__ */ | ||
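To see how the pieces of this header fit together, here is a hedged sketch of a driver using the simple_ondemand governor; every my_* helper is hypothetical and the thresholds are illustrative:

#include <linux/devfreq.h>
#include <linux/err.h>

static int my_get_dev_status(struct device *dev,
                             struct devfreq_dev_status *stat)
{
        /* Both counters cover the interval since the previous call. */
        stat->total_time = my_total_us(dev);            /* hypothetical */
        stat->busy_time = my_busy_us(dev);              /* hypothetical */
        stat->current_frequency = my_cur_freq(dev);     /* hypothetical */
        return 0;
}

static struct devfreq_simple_ondemand_data my_od_data = {
        .upthreshold            = 90,   /* raise frequency above 90% load */
        .downdifferential       = 5,    /* consider lowering below 85% */
};

static struct devfreq_dev_profile my_profile = {
        .initial_freq   = 100000000,
        .polling_ms     = 100,
        .target         = my_target,    /* as in the userspace sketch above */
        .get_dev_status = my_get_dev_status,
};

/* In probe:
 *      df = devfreq_add_device(dev, &my_profile,
 *                              &devfreq_simple_ondemand, &my_od_data);
 */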
diff --git a/include/linux/device.h b/include/linux/device.h index 46751bdb71d..bdcf361ca93 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -638,6 +638,11 @@ static inline void set_dev_node(struct device *dev, int node) | |||
638 | } | 638 | } |
639 | #endif | 639 | #endif |
640 | 640 | ||
641 | static inline struct pm_subsys_data *dev_to_psd(struct device *dev) | ||
642 | { | ||
643 | return dev ? dev->power.subsys_data : NULL; | ||
644 | } | ||
645 | |||
641 | static inline unsigned int dev_get_uevent_suppress(const struct device *dev) | 646 | static inline unsigned int dev_get_uevent_suppress(const struct device *dev) |
642 | { | 647 | { |
643 | return dev->kobj.uevent_suppress; | 648 | return dev->kobj.uevent_suppress; |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 1effc8b56b4..aa56cf31f7f 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -49,6 +49,7 @@ extern int thaw_process(struct task_struct *p); | |||
49 | 49 | ||
50 | extern void refrigerator(void); | 50 | extern void refrigerator(void); |
51 | extern int freeze_processes(void); | 51 | extern int freeze_processes(void); |
52 | extern int freeze_kernel_threads(void); | ||
52 | extern void thaw_processes(void); | 53 | extern void thaw_processes(void); |
53 | 54 | ||
54 | static inline int try_to_freeze(void) | 55 | static inline int try_to_freeze(void) |
@@ -171,7 +172,8 @@ static inline void clear_freeze_flag(struct task_struct *p) {} | |||
171 | static inline int thaw_process(struct task_struct *p) { return 1; } | 172 | static inline int thaw_process(struct task_struct *p) { return 1; } |
172 | 173 | ||
173 | static inline void refrigerator(void) {} | 174 | static inline void refrigerator(void) {} |
174 | static inline int freeze_processes(void) { BUG(); return 0; } | 175 | static inline int freeze_processes(void) { return -ENOSYS; } |
176 | static inline int freeze_kernel_threads(void) { return -ENOSYS; } | ||
175 | static inline void thaw_processes(void) {} | 177 | static inline void thaw_processes(void) {} |
176 | 178 | ||
177 | static inline int try_to_freeze(void) { return 0; } | 179 | static inline int try_to_freeze(void) { return 0; } |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c8615cd0b2f..df1c836e694 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/if_link.h> | 31 | #include <linux/if_link.h> |
32 | 32 | ||
33 | #ifdef __KERNEL__ | 33 | #ifdef __KERNEL__ |
34 | #include <linux/pm_qos_params.h> | 34 | #include <linux/pm_qos.h> |
35 | #include <linux/timer.h> | 35 | #include <linux/timer.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/atomic.h> | 37 | #include <linux/atomic.h> |
@@ -969,7 +969,7 @@ struct net_device { | |||
969 | */ | 969 | */ |
970 | char name[IFNAMSIZ]; | 970 | char name[IFNAMSIZ]; |
971 | 971 | ||
972 | struct pm_qos_request_list pm_qos_req; | 972 | struct pm_qos_request pm_qos_req; |
973 | 973 | ||
974 | /* device name hash chain */ | 974 | /* device name hash chain */ |
975 | struct hlist_node name_hlist; | 975 | struct hlist_node name_hlist; |
diff --git a/include/linux/opp.h b/include/linux/opp.h index 7020e9736fc..87a9208f8ae 100644 --- a/include/linux/opp.h +++ b/include/linux/opp.h | |||
@@ -16,9 +16,14 @@ | |||
16 | 16 | ||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
19 | #include <linux/notifier.h> | ||
19 | 20 | ||
20 | struct opp; | 21 | struct opp; |
21 | 22 | ||
23 | enum opp_event { | ||
24 | OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, | ||
25 | }; | ||
26 | |||
22 | #if defined(CONFIG_PM_OPP) | 27 | #if defined(CONFIG_PM_OPP) |
23 | 28 | ||
24 | unsigned long opp_get_voltage(struct opp *opp); | 29 | unsigned long opp_get_voltage(struct opp *opp); |
@@ -40,6 +45,8 @@ int opp_enable(struct device *dev, unsigned long freq); | |||
40 | 45 | ||
41 | int opp_disable(struct device *dev, unsigned long freq); | 46 | int opp_disable(struct device *dev, unsigned long freq); |
42 | 47 | ||
48 | struct srcu_notifier_head *opp_get_notifier(struct device *dev); | ||
49 | |||
43 | #else | 50 | #else |
44 | static inline unsigned long opp_get_voltage(struct opp *opp) | 51 | static inline unsigned long opp_get_voltage(struct opp *opp) |
45 | { | 52 | { |
@@ -89,6 +96,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq) | |||
89 | { | 96 | { |
90 | return 0; | 97 | return 0; |
91 | } | 98 | } |
99 | |||
100 | static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev) | ||
101 | { | ||
102 | return ERR_PTR(-EINVAL); | ||
103 | } | ||
92 | #endif /* CONFIG_PM */ | 104 | #endif /* CONFIG_PM */ |
93 | 105 | ||
94 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) | 106 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) |
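The new opp_get_notifier() returns the SRCU notifier head on which OPP_EVENT_ADD/ENABLE/DISABLE are published; devfreq's OPP helpers are built on it. A sketch of a direct subscriber, with hypothetical my_* names:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/opp.h>

static int my_opp_event(struct notifier_block *nb, unsigned long event,
                        void *data)
{
        /* event is OPP_EVENT_ADD, OPP_EVENT_ENABLE or OPP_EVENT_DISABLE;
         * reevaluate the device's operating point here. */
        return NOTIFY_OK;
}

static struct notifier_block my_opp_nb = {
        .notifier_call = my_opp_event,
};

static int my_watch_opps(struct device *dev)
{
        struct srcu_notifier_head *nh = opp_get_notifier(dev);

        if (IS_ERR(nh))
                return PTR_ERR(nh);
        return srcu_notifier_chain_register(nh, &my_opp_nb);
}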
diff --git a/include/linux/pm.h b/include/linux/pm.h index f7c84c9abd3..f15acb64681 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -326,6 +326,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops; | |||
326 | * requested by a driver. | 326 | * requested by a driver. |
327 | */ | 327 | */ |
328 | 328 | ||
329 | #define PM_EVENT_INVALID (-1) | ||
329 | #define PM_EVENT_ON 0x0000 | 330 | #define PM_EVENT_ON 0x0000 |
330 | #define PM_EVENT_FREEZE 0x0001 | 331 | #define PM_EVENT_FREEZE 0x0001 |
331 | #define PM_EVENT_SUSPEND 0x0002 | 332 | #define PM_EVENT_SUSPEND 0x0002 |
@@ -346,6 +347,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops; | |||
346 | #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) | 347 | #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) |
347 | #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) | 348 | #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) |
348 | 349 | ||
350 | #define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) | ||
349 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | 351 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) |
350 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | 352 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) |
351 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) | 353 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) |
@@ -366,6 +368,8 @@ extern struct dev_pm_ops generic_subsys_pm_ops; | |||
366 | #define PMSG_AUTO_RESUME ((struct pm_message) \ | 368 | #define PMSG_AUTO_RESUME ((struct pm_message) \ |
367 | { .event = PM_EVENT_AUTO_RESUME, }) | 369 | { .event = PM_EVENT_AUTO_RESUME, }) |
368 | 370 | ||
371 | #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) | ||
372 | |||
369 | /** | 373 | /** |
370 | * Device run-time power management status. | 374 | * Device run-time power management status. |
371 | * | 375 | * |
@@ -421,6 +425,22 @@ enum rpm_request { | |||
421 | 425 | ||
422 | struct wakeup_source; | 426 | struct wakeup_source; |
423 | 427 | ||
428 | struct pm_domain_data { | ||
429 | struct list_head list_node; | ||
430 | struct device *dev; | ||
431 | }; | ||
432 | |||
433 | struct pm_subsys_data { | ||
434 | spinlock_t lock; | ||
435 | unsigned int refcount; | ||
436 | #ifdef CONFIG_PM_CLK | ||
437 | struct list_head clock_list; | ||
438 | #endif | ||
439 | #ifdef CONFIG_PM_GENERIC_DOMAINS | ||
440 | struct pm_domain_data *domain_data; | ||
441 | #endif | ||
442 | }; | ||
443 | |||
424 | struct dev_pm_info { | 444 | struct dev_pm_info { |
425 | pm_message_t power_state; | 445 | pm_message_t power_state; |
426 | unsigned int can_wakeup:1; | 446 | unsigned int can_wakeup:1; |
@@ -432,6 +452,7 @@ struct dev_pm_info { | |||
432 | struct list_head entry; | 452 | struct list_head entry; |
433 | struct completion completion; | 453 | struct completion completion; |
434 | struct wakeup_source *wakeup; | 454 | struct wakeup_source *wakeup; |
455 | bool wakeup_path:1; | ||
435 | #else | 456 | #else |
436 | unsigned int should_wakeup:1; | 457 | unsigned int should_wakeup:1; |
437 | #endif | 458 | #endif |
@@ -462,10 +483,13 @@ struct dev_pm_info { | |||
462 | unsigned long suspended_jiffies; | 483 | unsigned long suspended_jiffies; |
463 | unsigned long accounting_timestamp; | 484 | unsigned long accounting_timestamp; |
464 | #endif | 485 | #endif |
465 | void *subsys_data; /* Owned by the subsystem. */ | 486 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
487 | struct pm_qos_constraints *constraints; | ||
466 | }; | 488 | }; |
467 | 489 | ||
468 | extern void update_pm_runtime_accounting(struct device *dev); | 490 | extern void update_pm_runtime_accounting(struct device *dev); |
491 | extern int dev_pm_get_subsys_data(struct device *dev); | ||
492 | extern int dev_pm_put_subsys_data(struct device *dev); | ||
469 | 493 | ||
470 | /* | 494 | /* |
471 | * Power domains provide callbacks that are executed during system suspend, | 495 | * Power domains provide callbacks that are executed during system suspend, |
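subsys_data is now a typed, reference-counted structure, so a subsystem takes a reference before touching it and drops it when done. A minimal sketch, assuming the usual 0/-errno convention for dev_pm_get_subsys_data() and hypothetical my_* callers:

#include <linux/device.h>
#include <linux/pm.h>

static int my_subsys_attach(struct device *dev)
{
        int ret;

        ret = dev_pm_get_subsys_data(dev);      /* allocate on first use */
        if (ret < 0)
                return ret;

        /* dev_to_psd() (added to device.h above) is now non-NULL. */
        spin_lock_irq(&dev_to_psd(dev)->lock);
        /* ... set up clock_list / domain_data as appropriate ... */
        spin_unlock_irq(&dev_to_psd(dev)->lock);
        return 0;
}

static void my_subsys_detach(struct device *dev)
{
        dev_pm_put_subsys_data(dev);    /* freed when the refcount drops */
}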
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h new file mode 100644 index 00000000000..8348866e7b0 --- /dev/null +++ b/include/linux/pm_clock.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * pm_clock.h - Definitions and headers related to device clocks. | ||
3 | * | ||
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #ifndef _LINUX_PM_CLOCK_H | ||
10 | #define _LINUX_PM_CLOCK_H | ||
11 | |||
12 | #include <linux/device.h> | ||
13 | #include <linux/notifier.h> | ||
14 | |||
15 | struct pm_clk_notifier_block { | ||
16 | struct notifier_block nb; | ||
17 | struct dev_pm_domain *pm_domain; | ||
18 | char *con_ids[]; | ||
19 | }; | ||
20 | |||
21 | #ifdef CONFIG_PM_CLK | ||
22 | static inline bool pm_clk_no_clocks(struct device *dev) | ||
23 | { | ||
24 | return dev && dev->power.subsys_data | ||
25 | && list_empty(&dev->power.subsys_data->clock_list); | ||
26 | } | ||
27 | |||
28 | extern void pm_clk_init(struct device *dev); | ||
29 | extern int pm_clk_create(struct device *dev); | ||
30 | extern void pm_clk_destroy(struct device *dev); | ||
31 | extern int pm_clk_add(struct device *dev, const char *con_id); | ||
32 | extern void pm_clk_remove(struct device *dev, const char *con_id); | ||
33 | extern int pm_clk_suspend(struct device *dev); | ||
34 | extern int pm_clk_resume(struct device *dev); | ||
35 | #else | ||
36 | static inline bool pm_clk_no_clocks(struct device *dev) | ||
37 | { | ||
38 | return true; | ||
39 | } | ||
40 | static inline void pm_clk_init(struct device *dev) | ||
41 | { | ||
42 | } | ||
43 | static inline int pm_clk_create(struct device *dev) | ||
44 | { | ||
45 | return -EINVAL; | ||
46 | } | ||
47 | static inline void pm_clk_destroy(struct device *dev) | ||
48 | { | ||
49 | } | ||
50 | static inline int pm_clk_add(struct device *dev, const char *con_id) | ||
51 | { | ||
52 | return -EINVAL; | ||
53 | } | ||
54 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | ||
55 | { | ||
56 | } | ||
57 | #define pm_clk_suspend NULL | ||
58 | #define pm_clk_resume NULL | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_HAVE_CLK | ||
62 | extern void pm_clk_add_notifier(struct bus_type *bus, | ||
63 | struct pm_clk_notifier_block *clknb); | ||
64 | #else | ||
65 | static inline void pm_clk_add_notifier(struct bus_type *bus, | ||
66 | struct pm_clk_notifier_block *clknb) | ||
67 | { | ||
68 | } | ||
69 | #endif | ||
70 | |||
71 | #endif | ||
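Typical use of the relocated API: a bus or PM domain creates the per-device clock list at bind time and then reuses pm_clk_suspend()/pm_clk_resume() as its runtime PM callbacks. A sketch; the "fck" connection id and my_* names are illustrative:

#include <linux/pm_clock.h>

static int my_bus_bind_clocks(struct device *dev)
{
        int ret;

        ret = pm_clk_create(dev);       /* allocate the clock list */
        if (ret)
                return ret;

        ret = pm_clk_add(dev, "fck");   /* look up and track the clock */
        if (ret)
                pm_clk_destroy(dev);
        return ret;
}

/* The domain's runtime PM ops can then simply be:
 *      .runtime_suspend = pm_clk_suspend,
 *      .runtime_resume  = pm_clk_resume,
 */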
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f9ec1736a11..65633e5a2bc 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | enum gpd_status { | 14 | enum gpd_status { |
15 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 15 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
16 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ | ||
16 | GPD_STATE_BUSY, /* Something is happening to the PM domain */ | 17 | GPD_STATE_BUSY, /* Something is happening to the PM domain */ |
17 | GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ | 18 | GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ |
18 | GPD_STATE_POWER_OFF, /* PM domain is off */ | 19 | GPD_STATE_POWER_OFF, /* PM domain is off */ |
@@ -25,15 +26,14 @@ struct dev_power_governor { | |||
25 | struct generic_pm_domain { | 26 | struct generic_pm_domain { |
26 | struct dev_pm_domain domain; /* PM domain operations */ | 27 | struct dev_pm_domain domain; /* PM domain operations */ |
27 | struct list_head gpd_list_node; /* Node in the global PM domains list */ | 28 | struct list_head gpd_list_node; /* Node in the global PM domains list */ |
28 | struct list_head sd_node; /* Node in the parent's subdomain list */ | 29 | struct list_head master_links; /* Links with PM domain as a master */ |
29 | struct generic_pm_domain *parent; /* Parent PM domain */ | 30 | struct list_head slave_links; /* Links with PM domain as a slave */ |
30 | struct list_head sd_list; /* List of dubdomains */ | ||
31 | struct list_head dev_list; /* List of devices */ | 31 | struct list_head dev_list; /* List of devices */ |
32 | struct mutex lock; | 32 | struct mutex lock; |
33 | struct dev_power_governor *gov; | 33 | struct dev_power_governor *gov; |
34 | struct work_struct power_off_work; | 34 | struct work_struct power_off_work; |
35 | unsigned int in_progress; /* Number of devices being suspended now */ | 35 | unsigned int in_progress; /* Number of devices being suspended now */ |
36 | unsigned int sd_count; /* Number of subdomains with power "on" */ | 36 | atomic_t sd_count; /* Number of subdomains with power "on" */ |
37 | enum gpd_status status; /* Current state of the domain */ | 37 | enum gpd_status status; /* Current state of the domain */ |
38 | wait_queue_head_t status_wait_queue; | 38 | wait_queue_head_t status_wait_queue; |
39 | struct task_struct *poweroff_task; /* Powering off task */ | 39 | struct task_struct *poweroff_task; /* Powering off task */ |
@@ -42,6 +42,7 @@ struct generic_pm_domain { | |||
42 | unsigned int suspended_count; /* System suspend device counter */ | 42 | unsigned int suspended_count; /* System suspend device counter */ |
43 | unsigned int prepared_count; /* Suspend counter of prepared devices */ | 43 | unsigned int prepared_count; /* Suspend counter of prepared devices */ |
44 | bool suspend_power_off; /* Power status before system suspend */ | 44 | bool suspend_power_off; /* Power status before system suspend */ |
45 | bool dev_irq_safe; /* Device callbacks are IRQ-safe */ | ||
45 | int (*power_off)(struct generic_pm_domain *domain); | 46 | int (*power_off)(struct generic_pm_domain *domain); |
46 | int (*power_on)(struct generic_pm_domain *domain); | 47 | int (*power_on)(struct generic_pm_domain *domain); |
47 | int (*start_device)(struct device *dev); | 48 | int (*start_device)(struct device *dev); |
@@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | |||
54 | return container_of(pd, struct generic_pm_domain, domain); | 55 | return container_of(pd, struct generic_pm_domain, domain); |
55 | } | 56 | } |
56 | 57 | ||
57 | struct dev_list_entry { | 58 | struct gpd_link { |
58 | struct list_head node; | 59 | struct generic_pm_domain *master; |
59 | struct device *dev; | 60 | struct list_head master_node; |
61 | struct generic_pm_domain *slave; | ||
62 | struct list_head slave_node; | ||
63 | }; | ||
64 | |||
65 | struct generic_pm_domain_data { | ||
66 | struct pm_domain_data base; | ||
60 | bool need_restore; | 67 | bool need_restore; |
61 | }; | 68 | }; |
62 | 69 | ||
70 | static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) | ||
71 | { | ||
72 | return container_of(pdd, struct generic_pm_domain_data, base); | ||
73 | } | ||
74 | |||
63 | #ifdef CONFIG_PM_GENERIC_DOMAINS | 75 | #ifdef CONFIG_PM_GENERIC_DOMAINS |
64 | extern int pm_genpd_add_device(struct generic_pm_domain *genpd, | 76 | extern int pm_genpd_add_device(struct generic_pm_domain *genpd, |
65 | struct device *dev); | 77 | struct device *dev); |
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h new file mode 100644 index 00000000000..83b0ea302a8 --- /dev/null +++ b/include/linux/pm_qos.h | |||
@@ -0,0 +1,155 @@ | |||
1 | #ifndef _LINUX_PM_QOS_H | ||
2 | #define _LINUX_PM_QOS_H | ||
3 | /* interface for the pm_qos_power infrastructure of the linux kernel. | ||
4 | * | ||
5 | * Mark Gross <mgross@linux.intel.com> | ||
6 | */ | ||
7 | #include <linux/plist.h> | ||
8 | #include <linux/notifier.h> | ||
9 | #include <linux/miscdevice.h> | ||
10 | #include <linux/device.h> | ||
11 | |||
12 | #define PM_QOS_RESERVED 0 | ||
13 | #define PM_QOS_CPU_DMA_LATENCY 1 | ||
14 | #define PM_QOS_NETWORK_LATENCY 2 | ||
15 | #define PM_QOS_NETWORK_THROUGHPUT 3 | ||
16 | |||
17 | #define PM_QOS_NUM_CLASSES 4 | ||
18 | #define PM_QOS_DEFAULT_VALUE -1 | ||
19 | |||
20 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | ||
21 | #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | ||
22 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 | ||
23 | #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 | ||
24 | |||
25 | struct pm_qos_request { | ||
26 | struct plist_node node; | ||
27 | int pm_qos_class; | ||
28 | }; | ||
29 | |||
30 | struct dev_pm_qos_request { | ||
31 | struct plist_node node; | ||
32 | struct device *dev; | ||
33 | }; | ||
34 | |||
35 | enum pm_qos_type { | ||
36 | PM_QOS_UNITIALIZED, | ||
37 | PM_QOS_MAX, /* return the largest value */ | ||
38 | PM_QOS_MIN /* return the smallest value */ | ||
39 | }; | ||
40 | |||
41 | /* | ||
42 | * Note: The lockless read path depends on the CPU accessing | ||
43 | * target_value atomically. Atomic access is only guaranteed on all CPU | ||
44 | * types Linux supports for 32-bit quantities. | ||
45 | */ | ||
46 | struct pm_qos_constraints { | ||
47 | struct plist_head list; | ||
48 | s32 target_value; /* Do not change to 64 bit */ | ||
49 | s32 default_value; | ||
50 | enum pm_qos_type type; | ||
51 | struct blocking_notifier_head *notifiers; | ||
52 | }; | ||
53 | |||
54 | /* Action requested to pm_qos_update_target */ | ||
55 | enum pm_qos_req_action { | ||
56 | PM_QOS_ADD_REQ, /* Add a new request */ | ||
57 | PM_QOS_UPDATE_REQ, /* Update an existing request */ | ||
58 | PM_QOS_REMOVE_REQ /* Remove an existing request */ | ||
59 | }; | ||
60 | |||
61 | static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) | ||
62 | { | ||
63 | return req->dev != NULL; | ||
64 | } | ||
65 | |||
66 | #ifdef CONFIG_PM | ||
67 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | ||
68 | enum pm_qos_req_action action, int value); | ||
69 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, | ||
70 | s32 value); | ||
71 | void pm_qos_update_request(struct pm_qos_request *req, | ||
72 | s32 new_value); | ||
73 | void pm_qos_remove_request(struct pm_qos_request *req); | ||
74 | |||
75 | int pm_qos_request(int pm_qos_class); | ||
76 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); | ||
77 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); | ||
78 | int pm_qos_request_active(struct pm_qos_request *req); | ||
79 | s32 pm_qos_read_value(struct pm_qos_constraints *c); | ||
80 | |||
81 | s32 dev_pm_qos_read_value(struct device *dev); | ||
82 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | ||
83 | s32 value); | ||
84 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); | ||
85 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); | ||
86 | int dev_pm_qos_add_notifier(struct device *dev, | ||
87 | struct notifier_block *notifier); | ||
88 | int dev_pm_qos_remove_notifier(struct device *dev, | ||
89 | struct notifier_block *notifier); | ||
90 | int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); | ||
91 | int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); | ||
92 | void dev_pm_qos_constraints_init(struct device *dev); | ||
93 | void dev_pm_qos_constraints_destroy(struct device *dev); | ||
94 | #else | ||
95 | static inline int pm_qos_update_target(struct pm_qos_constraints *c, | ||
96 | struct plist_node *node, | ||
97 | enum pm_qos_req_action action, | ||
98 | int value) | ||
99 | { return 0; } | ||
100 | static inline void pm_qos_add_request(struct pm_qos_request *req, | ||
101 | int pm_qos_class, s32 value) | ||
102 | { return; } | ||
103 | static inline void pm_qos_update_request(struct pm_qos_request *req, | ||
104 | s32 new_value) | ||
105 | { return; } | ||
106 | static inline void pm_qos_remove_request(struct pm_qos_request *req) | ||
107 | { return; } | ||
108 | |||
109 | static inline int pm_qos_request(int pm_qos_class) | ||
110 | { return 0; } | ||
111 | static inline int pm_qos_add_notifier(int pm_qos_class, | ||
112 | struct notifier_block *notifier) | ||
113 | { return 0; } | ||
114 | static inline int pm_qos_remove_notifier(int pm_qos_class, | ||
115 | struct notifier_block *notifier) | ||
116 | { return 0; } | ||
117 | static inline int pm_qos_request_active(struct pm_qos_request *req) | ||
118 | { return 0; } | ||
119 | static inline s32 pm_qos_read_value(struct pm_qos_constraints *c) | ||
120 | { return 0; } | ||
121 | |||
122 | static inline s32 dev_pm_qos_read_value(struct device *dev) | ||
123 | { return 0; } | ||
124 | static inline int dev_pm_qos_add_request(struct device *dev, | ||
125 | struct dev_pm_qos_request *req, | ||
126 | s32 value) | ||
127 | { return 0; } | ||
128 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
129 | s32 new_value) | ||
130 | { return 0; } | ||
131 | static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | ||
132 | { return 0; } | ||
133 | static inline int dev_pm_qos_add_notifier(struct device *dev, | ||
134 | struct notifier_block *notifier) | ||
135 | { return 0; } | ||
136 | static inline int dev_pm_qos_remove_notifier(struct device *dev, | ||
137 | struct notifier_block *notifier) | ||
138 | { return 0; } | ||
139 | static inline int dev_pm_qos_add_global_notifier( | ||
140 | struct notifier_block *notifier) | ||
141 | { return 0; } | ||
142 | static inline int dev_pm_qos_remove_global_notifier( | ||
143 | struct notifier_block *notifier) | ||
144 | { return 0; } | ||
145 | static inline void dev_pm_qos_constraints_init(struct device *dev) | ||
146 | { | ||
147 | dev->power.power_state = PMSG_ON; | ||
148 | } | ||
149 | static inline void dev_pm_qos_constraints_destroy(struct device *dev) | ||
150 | { | ||
151 | dev->power.power_state = PMSG_INVALID; | ||
152 | } | ||
153 | #endif | ||
154 | |||
155 | #endif | ||
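The request API keeps its old entry points; only the request type changed from struct pm_qos_request_list to struct pm_qos_request, which is what the driver hunks earlier in this merge track. A sketch of the request lifecycle, with an illustrative 50 us latency bound and hypothetical my_* call sites:

#include <linux/pm_qos.h>

static struct pm_qos_request my_qos_req;

static void my_open(void)
{
        /* Cap CPU DMA latency at 50 us while the device is in use. */
        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
}

static void my_relax(void)
{
        pm_qos_update_request(&my_qos_req, 200);
}

static void my_close(void)
{
        pm_qos_remove_request(&my_qos_req);
}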
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h deleted file mode 100644 index a7d87f911ca..00000000000 --- a/include/linux/pm_qos_params.h +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | #ifndef _LINUX_PM_QOS_PARAMS_H | ||
2 | #define _LINUX_PM_QOS_PARAMS_H | ||
3 | /* interface for the pm_qos_power infrastructure of the linux kernel. | ||
4 | * | ||
5 | * Mark Gross <mgross@linux.intel.com> | ||
6 | */ | ||
7 | #include <linux/plist.h> | ||
8 | #include <linux/notifier.h> | ||
9 | #include <linux/miscdevice.h> | ||
10 | |||
11 | #define PM_QOS_RESERVED 0 | ||
12 | #define PM_QOS_CPU_DMA_LATENCY 1 | ||
13 | #define PM_QOS_NETWORK_LATENCY 2 | ||
14 | #define PM_QOS_NETWORK_THROUGHPUT 3 | ||
15 | |||
16 | #define PM_QOS_NUM_CLASSES 4 | ||
17 | #define PM_QOS_DEFAULT_VALUE -1 | ||
18 | |||
19 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | ||
20 | #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | ||
21 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 | ||
22 | |||
23 | struct pm_qos_request_list { | ||
24 | struct plist_node list; | ||
25 | int pm_qos_class; | ||
26 | }; | ||
27 | |||
28 | void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value); | ||
29 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, | ||
30 | s32 new_value); | ||
31 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); | ||
32 | |||
33 | int pm_qos_request(int pm_qos_class); | ||
34 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); | ||
35 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); | ||
36 | int pm_qos_request_active(struct pm_qos_request_list *req); | ||
37 | |||
38 | #endif | ||
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index daac05d751b..70b284024d9 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | |||
251 | __pm_runtime_use_autosuspend(dev, false); | 251 | __pm_runtime_use_autosuspend(dev, false); |
252 | } | 252 | } |
253 | 253 | ||
254 | struct pm_clk_notifier_block { | ||
255 | struct notifier_block nb; | ||
256 | struct dev_pm_domain *pm_domain; | ||
257 | char *con_ids[]; | ||
258 | }; | ||
259 | |||
260 | #ifdef CONFIG_PM_CLK | ||
261 | extern int pm_clk_init(struct device *dev); | ||
262 | extern void pm_clk_destroy(struct device *dev); | ||
263 | extern int pm_clk_add(struct device *dev, const char *con_id); | ||
264 | extern void pm_clk_remove(struct device *dev, const char *con_id); | ||
265 | extern int pm_clk_suspend(struct device *dev); | ||
266 | extern int pm_clk_resume(struct device *dev); | ||
267 | #else | ||
268 | static inline int pm_clk_init(struct device *dev) | ||
269 | { | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | static inline void pm_clk_destroy(struct device *dev) | ||
273 | { | ||
274 | } | ||
275 | static inline int pm_clk_add(struct device *dev, const char *con_id) | ||
276 | { | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | ||
280 | { | ||
281 | } | ||
282 | #define pm_clk_suspend NULL | ||
283 | #define pm_clk_resume NULL | ||
284 | #endif | ||
285 | |||
286 | #ifdef CONFIG_HAVE_CLK | ||
287 | extern void pm_clk_add_notifier(struct bus_type *bus, | ||
288 | struct pm_clk_notifier_block *clknb); | ||
289 | #else | ||
290 | static inline void pm_clk_add_notifier(struct bus_type *bus, | ||
291 | struct pm_clk_notifier_block *clknb) | ||
292 | { | ||
293 | } | ||
294 | #endif | ||
295 | |||
296 | #endif | 254 | #endif |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 6bbcef22e10..57a692432f8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -8,15 +8,18 @@ | |||
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <asm/errno.h> | 9 | #include <asm/errno.h> |
10 | 10 | ||
11 | #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) | 11 | #ifdef CONFIG_VT |
12 | extern void pm_set_vt_switch(int); | 12 | extern void pm_set_vt_switch(int); |
13 | extern int pm_prepare_console(void); | ||
14 | extern void pm_restore_console(void); | ||
15 | #else | 13 | #else |
16 | static inline void pm_set_vt_switch(int do_switch) | 14 | static inline void pm_set_vt_switch(int do_switch) |
17 | { | 15 | { |
18 | } | 16 | } |
17 | #endif | ||
19 | 18 | ||
19 | #ifdef CONFIG_VT_CONSOLE_SLEEP | ||
20 | extern int pm_prepare_console(void); | ||
21 | extern void pm_restore_console(void); | ||
22 | #else | ||
20 | static inline int pm_prepare_console(void) | 23 | static inline int pm_prepare_console(void) |
21 | { | 24 | { |
22 | return 0; | 25 | return 0; |
@@ -34,6 +37,58 @@ typedef int __bitwise suspend_state_t; | |||
34 | #define PM_SUSPEND_MEM ((__force suspend_state_t) 3) | 37 | #define PM_SUSPEND_MEM ((__force suspend_state_t) 3) |
35 | #define PM_SUSPEND_MAX ((__force suspend_state_t) 4) | 38 | #define PM_SUSPEND_MAX ((__force suspend_state_t) 4) |
36 | 39 | ||
40 | enum suspend_stat_step { | ||
41 | SUSPEND_FREEZE = 1, | ||
42 | SUSPEND_PREPARE, | ||
43 | SUSPEND_SUSPEND, | ||
44 | SUSPEND_SUSPEND_NOIRQ, | ||
45 | SUSPEND_RESUME_NOIRQ, | ||
46 | SUSPEND_RESUME | ||
47 | }; | ||
48 | |||
49 | struct suspend_stats { | ||
50 | int success; | ||
51 | int fail; | ||
52 | int failed_freeze; | ||
53 | int failed_prepare; | ||
54 | int failed_suspend; | ||
55 | int failed_suspend_noirq; | ||
56 | int failed_resume; | ||
57 | int failed_resume_noirq; | ||
58 | #define REC_FAILED_NUM 2 | ||
59 | int last_failed_dev; | ||
60 | char failed_devs[REC_FAILED_NUM][40]; | ||
61 | int last_failed_errno; | ||
62 | int errno[REC_FAILED_NUM]; | ||
63 | int last_failed_step; | ||
64 | enum suspend_stat_step failed_steps[REC_FAILED_NUM]; | ||
65 | }; | ||
66 | |||
67 | extern struct suspend_stats suspend_stats; | ||
68 | |||
69 | static inline void dpm_save_failed_dev(const char *name) | ||
70 | { | ||
71 | strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], | ||
72 | name, | ||
73 | sizeof(suspend_stats.failed_devs[0])); | ||
74 | suspend_stats.last_failed_dev++; | ||
75 | suspend_stats.last_failed_dev %= REC_FAILED_NUM; | ||
76 | } | ||
77 | |||
78 | static inline void dpm_save_failed_errno(int err) | ||
79 | { | ||
80 | suspend_stats.errno[suspend_stats.last_failed_errno] = err; | ||
81 | suspend_stats.last_failed_errno++; | ||
82 | suspend_stats.last_failed_errno %= REC_FAILED_NUM; | ||
83 | } | ||
84 | |||
85 | static inline void dpm_save_failed_step(enum suspend_stat_step step) | ||
86 | { | ||
87 | suspend_stats.failed_steps[suspend_stats.last_failed_step] = step; | ||
88 | suspend_stats.last_failed_step++; | ||
89 | suspend_stats.last_failed_step %= REC_FAILED_NUM; | ||
90 | } | ||
91 | |||
37 | /** | 92 | /** |
38 | * struct platform_suspend_ops - Callbacks for managing platform dependent | 93 | * struct platform_suspend_ops - Callbacks for managing platform dependent |
39 | * system sleep states. | 94 | * system sleep states. |
@@ -334,4 +389,38 @@ static inline void unlock_system_sleep(void) | |||
334 | } | 389 | } |
335 | #endif | 390 | #endif |
336 | 391 | ||
392 | #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS | ||
393 | /* | ||
394 | * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture | ||
395 | * to save/restore additional information to/from the array of page | ||
396 | * frame numbers in the hibernation image. For s390 this is used to | ||
397 | * save and restore the storage key for each page that is included | ||
398 | * in the hibernation image. | ||
399 | */ | ||
400 | unsigned long page_key_additional_pages(unsigned long pages); | ||
401 | int page_key_alloc(unsigned long pages); | ||
402 | void page_key_free(void); | ||
403 | void page_key_read(unsigned long *pfn); | ||
404 | void page_key_memorize(unsigned long *pfn); | ||
405 | void page_key_write(void *address); | ||
406 | |||
407 | #else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ | ||
408 | |||
409 | static inline unsigned long page_key_additional_pages(unsigned long pages) | ||
410 | { | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static inline int page_key_alloc(unsigned long pages) | ||
415 | { | ||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static inline void page_key_free(void) {} | ||
420 | static inline void page_key_read(unsigned long *pfn) {} | ||
421 | static inline void page_key_memorize(unsigned long *pfn) {} | ||
422 | static inline void page_key_write(void *address) {} | ||
423 | |||
424 | #endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ | ||
425 | |||
337 | #endif /* _LINUX_SUSPEND_H */ | 426 | #endif /* _LINUX_SUSPEND_H */ |
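The dpm_save_failed_*() helpers added above implement two-entry ring buffers: each index points at the slot the next failure will overwrite. A worked sequence, with invented device names:

	/* REC_FAILED_NUM == 2, so only the two most recent records survive */
	dpm_save_failed_dev("usb1");	/* failed_devs[0] = "usb1", last_failed_dev = 1 */
	dpm_save_failed_dev("mmc0");	/* failed_devs[1] = "mmc0", last_failed_dev = 0 */
	dpm_save_failed_dev("eth0");	/* overwrites "usb1",      last_failed_dev = 1 */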
diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 57e71fa33f7..54cb079b7bf 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
30 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
31 | #include <linux/bitops.h> | 31 | #include <linux/bitops.h> |
32 | #include <linux/pm_qos_params.h> | 32 | #include <linux/pm_qos.h> |
33 | 33 | ||
34 | #define snd_pcm_substream_chip(substream) ((substream)->private_data) | 34 | #define snd_pcm_substream_chip(substream) ((substream)->private_data) |
35 | #define snd_pcm_chip(pcm) ((pcm)->private_data) | 35 | #define snd_pcm_chip(pcm) ((pcm)->private_data) |
@@ -373,7 +373,7 @@ struct snd_pcm_substream { | |||
373 | int number; | 373 | int number; |
374 | char name[32]; /* substream name */ | 374 | char name[32]; /* substream name */ |
375 | int stream; /* stream (direction) */ | 375 | int stream; /* stream (direction) */ |
376 | struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */ | 376 | struct pm_qos_request latency_pm_qos_req; /* pm_qos request */ |
377 | size_t buffer_bytes_max; /* limit ring buffer size */ | 377 | size_t buffer_bytes_max; /* limit ring buffer size */ |
378 | struct snd_dma_buffer dma_buffer; | 378 | struct snd_dma_buffer dma_buffer; |
379 | unsigned int dma_buf_id; | 379 | unsigned int dma_buf_id; |
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h new file mode 100644 index 00000000000..d62c558bf64 --- /dev/null +++ b/include/trace/events/rpm.h | |||
@@ -0,0 +1,99 @@ | |||
1 | |||
2 | #undef TRACE_SYSTEM | ||
3 | #define TRACE_SYSTEM rpm | ||
4 | |||
5 | #if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ) | ||
6 | #define _TRACE_RUNTIME_POWER_H | ||
7 | |||
8 | #include <linux/ktime.h> | ||
9 | #include <linux/tracepoint.h> | ||
10 | #include <linux/device.h> | ||
11 | |||
12 | /* | ||
13 | * The rpm_internal events are used for tracing some important | ||
14 | * runtime pm internal functions. | ||
15 | */ | ||
16 | DECLARE_EVENT_CLASS(rpm_internal, | ||
17 | |||
18 | TP_PROTO(struct device *dev, int flags), | ||
19 | |||
20 | TP_ARGS(dev, flags), | ||
21 | |||
22 | TP_STRUCT__entry( | ||
23 | __string( name, dev_name(dev) ) | ||
24 | __field( int, flags ) | ||
25 | __field( int , usage_count ) | ||
26 | __field( int , disable_depth ) | ||
27 | __field( int , runtime_auto ) | ||
28 | __field( int , request_pending ) | ||
29 | __field( int , irq_safe ) | ||
30 | __field( int , child_count ) | ||
31 | ), | ||
32 | |||
33 | TP_fast_assign( | ||
34 | __assign_str(name, dev_name(dev)); | ||
35 | __entry->flags = flags; | ||
36 | __entry->usage_count = atomic_read( | ||
37 | &dev->power.usage_count); | ||
38 | __entry->disable_depth = dev->power.disable_depth; | ||
39 | __entry->runtime_auto = dev->power.runtime_auto; | ||
40 | __entry->request_pending = dev->power.request_pending; | ||
41 | __entry->irq_safe = dev->power.irq_safe; | ||
42 | __entry->child_count = atomic_read( | ||
43 | &dev->power.child_count); | ||
44 | ), | ||
45 | |||
46 | TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d" | ||
47 | " irq-%-1d child-%d", | ||
48 | __get_str(name), __entry->flags, | ||
49 | __entry->usage_count, | ||
50 | __entry->disable_depth, | ||
51 | __entry->runtime_auto, | ||
52 | __entry->request_pending, | ||
53 | __entry->irq_safe, | ||
54 | __entry->child_count | ||
55 | ) | ||
56 | ); | ||
57 | DEFINE_EVENT(rpm_internal, rpm_suspend, | ||
58 | |||
59 | TP_PROTO(struct device *dev, int flags), | ||
60 | |||
61 | TP_ARGS(dev, flags) | ||
62 | ); | ||
63 | DEFINE_EVENT(rpm_internal, rpm_resume, | ||
64 | |||
65 | TP_PROTO(struct device *dev, int flags), | ||
66 | |||
67 | TP_ARGS(dev, flags) | ||
68 | ); | ||
69 | DEFINE_EVENT(rpm_internal, rpm_idle, | ||
70 | |||
71 | TP_PROTO(struct device *dev, int flags), | ||
72 | |||
73 | TP_ARGS(dev, flags) | ||
74 | ); | ||
75 | |||
76 | TRACE_EVENT(rpm_return_int, | ||
77 | TP_PROTO(struct device *dev, unsigned long ip, int ret), | ||
78 | TP_ARGS(dev, ip, ret), | ||
79 | |||
80 | TP_STRUCT__entry( | ||
81 | __string( name, dev_name(dev)) | ||
82 | __field( unsigned long, ip ) | ||
83 | __field( int, ret ) | ||
84 | ), | ||
85 | |||
86 | TP_fast_assign( | ||
87 | __assign_str(name, dev_name(dev)); | ||
88 | __entry->ip = ip; | ||
89 | __entry->ret = ret; | ||
90 | ), | ||
91 | |||
92 | TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name), | ||
93 | __entry->ret) | ||
94 | ); | ||
95 | |||
96 | #endif /* _TRACE_RUNTIME_POWER_H */ | ||
97 | |||
98 | /* This part must be outside protection */ | ||
99 | #include <trace/define_trace.h> | ||
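A sketch of how the runtime PM core is expected to fire these tracepoints; the function below is a simplified stand-in for the real call sites (presumably in drivers/base/power/runtime.c), not code from this merge:

	#include <linux/kernel.h>	/* _THIS_IP_ */

	#define CREATE_TRACE_POINTS
	#include <trace/events/rpm.h>

	static int rpm_suspend(struct device *dev, int rpmflags)
	{
		int retval = 0;

		trace_rpm_suspend(dev, rpmflags);	/* snapshots usage_count etc. */
		/* ... the actual suspend work would run here ... */
		trace_rpm_return_int(dev, _THIS_IP_, retval);
		return retval;
	}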
diff --git a/kernel/Makefile b/kernel/Makefile index eca595e2fd5..2da48d3515e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ | |||
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ | 12 | notifier.o ksysfs.o sched_clock.o cred.o \ |
13 | async.o range.o | 13 | async.o range.o |
14 | obj-y += groups.o | 14 | obj-y += groups.o |
15 | 15 | ||
diff --git a/kernel/freezer.c b/kernel/freezer.c index 7b01de98bb6..66a594e8ad2 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p) | |||
67 | unsigned long flags; | 67 | unsigned long flags; |
68 | 68 | ||
69 | spin_lock_irqsave(&p->sighand->siglock, flags); | 69 | spin_lock_irqsave(&p->sighand->siglock, flags); |
70 | signal_wake_up(p, 0); | 70 | signal_wake_up(p, 1); |
71 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 71 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
72 | } | 72 | } |
73 | 73 | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 3744c594b19..cedd9982306 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -27,6 +27,7 @@ config HIBERNATION | |||
27 | select HIBERNATE_CALLBACKS | 27 | select HIBERNATE_CALLBACKS |
28 | select LZO_COMPRESS | 28 | select LZO_COMPRESS |
29 | select LZO_DECOMPRESS | 29 | select LZO_DECOMPRESS |
30 | select CRC32 | ||
30 | ---help--- | 31 | ---help--- |
31 | Enable the suspend to disk (STD) functionality, which is usually | 32 | Enable the suspend to disk (STD) functionality, which is usually |
32 | called "hibernation" in user interfaces. STD checkpoints the | 33 | called "hibernation" in user interfaces. STD checkpoints the |
@@ -65,6 +66,9 @@ config HIBERNATION | |||
65 | 66 | ||
66 | For more information take a look at <file:Documentation/power/swsusp.txt>. | 67 | For more information take a look at <file:Documentation/power/swsusp.txt>. |
67 | 68 | ||
69 | config ARCH_SAVE_PAGE_KEYS | ||
70 | bool | ||
71 | |||
68 | config PM_STD_PARTITION | 72 | config PM_STD_PARTITION |
69 | string "Default resume partition" | 73 | string "Default resume partition" |
70 | depends on HIBERNATION | 74 | depends on HIBERNATION |
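Note that ARCH_SAVE_PAGE_KEYS is a silent bool (no prompt string), so it can only be enabled by an architecture Kconfig via select; per the comment block added to suspend.h above, s390 is the intended user, selecting it to preserve storage keys across hibernation.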
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index c5ebc6a9064..07e0e28ffba 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | 1 | ||
2 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG | 2 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG |
3 | 3 | ||
4 | obj-$(CONFIG_PM) += main.o | 4 | obj-$(CONFIG_PM) += main.o qos.o |
5 | obj-$(CONFIG_PM_SLEEP) += console.o | 5 | obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o |
6 | obj-$(CONFIG_FREEZER) += process.o | 6 | obj-$(CONFIG_FREEZER) += process.o |
7 | obj-$(CONFIG_SUSPEND) += suspend.o | 7 | obj-$(CONFIG_SUSPEND) += suspend.o |
8 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | 8 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
diff --git a/kernel/power/console.c b/kernel/power/console.c index 218e5af9015..b1dc456474b 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/power/process.c - Functions for saving/restoring console. | 2 | * Functions for saving/restoring console. |
3 | * | 3 | * |
4 | * Originally from swsusp. | 4 | * Originally from swsusp. |
5 | */ | 5 | */ |
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include "power.h" | 11 | #include "power.h" |
12 | 12 | ||
13 | #if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) | ||
14 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) | 13 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) |
15 | 14 | ||
16 | static int orig_fgconsole, orig_kmsg; | 15 | static int orig_fgconsole, orig_kmsg; |
@@ -32,4 +31,3 @@ void pm_restore_console(void) | |||
32 | vt_kmsg_redirect(orig_kmsg); | 31 | vt_kmsg_redirect(orig_kmsg); |
33 | } | 32 | } |
34 | } | 33 | } |
35 | #endif | ||
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 8f7b1db1ece..1c53f7fad5f 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/reboot.h> | 14 | #include <linux/reboot.h> |
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/async.h> | ||
17 | #include <linux/kmod.h> | 18 | #include <linux/kmod.h> |
18 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
19 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
@@ -29,12 +30,14 @@ | |||
29 | #include "power.h" | 30 | #include "power.h" |
30 | 31 | ||
31 | 32 | ||
32 | static int nocompress = 0; | 33 | static int nocompress; |
33 | static int noresume = 0; | 34 | static int noresume; |
35 | static int resume_wait; | ||
36 | static int resume_delay; | ||
34 | static char resume_file[256] = CONFIG_PM_STD_PARTITION; | 37 | static char resume_file[256] = CONFIG_PM_STD_PARTITION; |
35 | dev_t swsusp_resume_device; | 38 | dev_t swsusp_resume_device; |
36 | sector_t swsusp_resume_block; | 39 | sector_t swsusp_resume_block; |
37 | int in_suspend __nosavedata = 0; | 40 | int in_suspend __nosavedata; |
38 | 41 | ||
39 | enum { | 42 | enum { |
40 | HIBERNATION_INVALID, | 43 | HIBERNATION_INVALID, |
@@ -334,13 +337,17 @@ int hibernation_snapshot(int platform_mode) | |||
334 | if (error) | 337 | if (error) |
335 | goto Close; | 338 | goto Close; |
336 | 339 | ||
337 | error = dpm_prepare(PMSG_FREEZE); | ||
338 | if (error) | ||
339 | goto Complete_devices; | ||
340 | |||
341 | /* Preallocate image memory before shutting down devices. */ | 340 | /* Preallocate image memory before shutting down devices. */ |
342 | error = hibernate_preallocate_memory(); | 341 | error = hibernate_preallocate_memory(); |
343 | if (error) | 342 | if (error) |
343 | goto Close; | ||
344 | |||
345 | error = freeze_kernel_threads(); | ||
346 | if (error) | ||
347 | goto Close; | ||
348 | |||
349 | error = dpm_prepare(PMSG_FREEZE); | ||
350 | if (error) | ||
344 | goto Complete_devices; | 351 | goto Complete_devices; |
345 | 352 | ||
346 | suspend_console(); | 353 | suspend_console(); |
@@ -463,7 +470,7 @@ static int resume_target_kernel(bool platform_mode) | |||
463 | * @platform_mode: If set, use platform driver to prepare for the transition. | 470 | * @platform_mode: If set, use platform driver to prepare for the transition. |
464 | * | 471 | * |
465 | * This routine must be called with pm_mutex held. If it is successful, control | 472 | * This routine must be called with pm_mutex held. If it is successful, control |
466 | * reappears in the restored target kernel in hibernation_snaphot(). | 473 | * reappears in the restored target kernel in hibernation_snapshot(). |
467 | */ | 474 | */ |
468 | int hibernation_restore(int platform_mode) | 475 | int hibernation_restore(int platform_mode) |
469 | { | 476 | { |
@@ -650,6 +657,9 @@ int hibernate(void) | |||
650 | flags |= SF_PLATFORM_MODE; | 657 | flags |= SF_PLATFORM_MODE; |
651 | if (nocompress) | 658 | if (nocompress) |
652 | flags |= SF_NOCOMPRESS_MODE; | 659 | flags |= SF_NOCOMPRESS_MODE; |
660 | else | ||
661 | flags |= SF_CRC32_MODE; | ||
662 | |||
653 | pr_debug("PM: writing image.\n"); | 663 | pr_debug("PM: writing image.\n"); |
654 | error = swsusp_write(flags); | 664 | error = swsusp_write(flags); |
655 | swsusp_free(); | 665 | swsusp_free(); |
@@ -724,6 +734,12 @@ static int software_resume(void) | |||
724 | 734 | ||
725 | pr_debug("PM: Checking hibernation image partition %s\n", resume_file); | 735 | pr_debug("PM: Checking hibernation image partition %s\n", resume_file); |
726 | 736 | ||
737 | if (resume_delay) { | ||
738 | printk(KERN_INFO "Waiting %dsec before reading resume device...\n", | ||
739 | resume_delay); | ||
740 | ssleep(resume_delay); | ||
741 | } | ||
742 | |||
727 | /* Check if the device is there */ | 743 | /* Check if the device is there */ |
728 | swsusp_resume_device = name_to_dev_t(resume_file); | 744 | swsusp_resume_device = name_to_dev_t(resume_file); |
729 | if (!swsusp_resume_device) { | 745 | if (!swsusp_resume_device) { |
@@ -732,6 +748,13 @@ static int software_resume(void) | |||
732 | * to wait for this to finish. | 748 | * to wait for this to finish. |
733 | */ | 749 | */ |
734 | wait_for_device_probe(); | 750 | wait_for_device_probe(); |
751 | |||
752 | if (resume_wait) { | ||
753 | while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0) | ||
754 | msleep(10); | ||
755 | async_synchronize_full(); | ||
756 | } | ||
757 | |||
735 | /* | 758 | /* |
736 | * We can't depend on SCSI devices being available after loading | 759 | * We can't depend on SCSI devices being available after loading |
737 | * one of their modules until scsi_complete_async_scans() is | 760 | * one of their modules until scsi_complete_async_scans() is |
@@ -1060,7 +1083,21 @@ static int __init noresume_setup(char *str) | |||
1060 | return 1; | 1083 | return 1; |
1061 | } | 1084 | } |
1062 | 1085 | ||
1086 | static int __init resumewait_setup(char *str) | ||
1087 | { | ||
1088 | resume_wait = 1; | ||
1089 | return 1; | ||
1090 | } | ||
1091 | |||
1092 | static int __init resumedelay_setup(char *str) | ||
1093 | { | ||
1094 | resume_delay = simple_strtoul(str, NULL, 0); | ||
1095 | return 1; | ||
1096 | } | ||
1097 | |||
1063 | __setup("noresume", noresume_setup); | 1098 | __setup("noresume", noresume_setup); |
1064 | __setup("resume_offset=", resume_offset_setup); | 1099 | __setup("resume_offset=", resume_offset_setup); |
1065 | __setup("resume=", resume_setup); | 1100 | __setup("resume=", resume_setup); |
1066 | __setup("hibernate=", hibernate_setup); | 1101 | __setup("hibernate=", hibernate_setup); |
1102 | __setup("resumewait", resumewait_setup); | ||
1103 | __setup("resumedelay=", resumedelay_setup); | ||
diff --git a/kernel/power/main.c b/kernel/power/main.c index 6c601f87196..a52e88425a3 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/resume-trace.h> | 13 | #include <linux/resume-trace.h> |
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/debugfs.h> | ||
16 | #include <linux/seq_file.h> | ||
15 | 17 | ||
16 | #include "power.h" | 18 | #include "power.h" |
17 | 19 | ||
@@ -131,6 +133,101 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
131 | power_attr(pm_test); | 133 | power_attr(pm_test); |
132 | #endif /* CONFIG_PM_DEBUG */ | 134 | #endif /* CONFIG_PM_DEBUG */ |
133 | 135 | ||
136 | #ifdef CONFIG_DEBUG_FS | ||
137 | static char *suspend_step_name(enum suspend_stat_step step) | ||
138 | { | ||
139 | switch (step) { | ||
140 | case SUSPEND_FREEZE: | ||
141 | return "freeze"; | ||
142 | case SUSPEND_PREPARE: | ||
143 | return "prepare"; | ||
144 | case SUSPEND_SUSPEND: | ||
145 | return "suspend"; | ||
146 | case SUSPEND_SUSPEND_NOIRQ: | ||
147 | return "suspend_noirq"; | ||
148 | case SUSPEND_RESUME_NOIRQ: | ||
149 | return "resume_noirq"; | ||
150 | case SUSPEND_RESUME: | ||
151 | return "resume"; | ||
152 | default: | ||
153 | return ""; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static int suspend_stats_show(struct seq_file *s, void *unused) | ||
158 | { | ||
159 | int i, index, last_dev, last_errno, last_step; | ||
160 | |||
161 | last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; | ||
162 | last_dev %= REC_FAILED_NUM; | ||
163 | last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; | ||
164 | last_errno %= REC_FAILED_NUM; | ||
165 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; | ||
166 | last_step %= REC_FAILED_NUM; | ||
167 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n" | ||
168 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n", | ||
169 | "success", suspend_stats.success, | ||
170 | "fail", suspend_stats.fail, | ||
171 | "failed_freeze", suspend_stats.failed_freeze, | ||
172 | "failed_prepare", suspend_stats.failed_prepare, | ||
173 | "failed_suspend", suspend_stats.failed_suspend, | ||
174 | "failed_suspend_noirq", | ||
175 | suspend_stats.failed_suspend_noirq, | ||
176 | "failed_resume", suspend_stats.failed_resume, | ||
177 | "failed_resume_noirq", | ||
178 | suspend_stats.failed_resume_noirq); | ||
179 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", | ||
180 | suspend_stats.failed_devs[last_dev]); | ||
181 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
182 | index = last_dev + REC_FAILED_NUM - i; | ||
183 | index %= REC_FAILED_NUM; | ||
184 | seq_printf(s, "\t\t\t%-s\n", | ||
185 | suspend_stats.failed_devs[index]); | ||
186 | } | ||
187 | seq_printf(s, " last_failed_errno:\t%-d\n", | ||
188 | suspend_stats.errno[last_errno]); | ||
189 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
190 | index = last_errno + REC_FAILED_NUM - i; | ||
191 | index %= REC_FAILED_NUM; | ||
192 | seq_printf(s, "\t\t\t%-d\n", | ||
193 | suspend_stats.errno[index]); | ||
194 | } | ||
195 | seq_printf(s, " last_failed_step:\t%-s\n", | ||
196 | suspend_step_name( | ||
197 | suspend_stats.failed_steps[last_step])); | ||
198 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
199 | index = last_step + REC_FAILED_NUM - i; | ||
200 | index %= REC_FAILED_NUM; | ||
201 | seq_printf(s, "\t\t\t%-s\n", | ||
202 | suspend_step_name( | ||
203 | suspend_stats.failed_steps[index])); | ||
204 | } | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int suspend_stats_open(struct inode *inode, struct file *file) | ||
210 | { | ||
211 | return single_open(file, suspend_stats_show, NULL); | ||
212 | } | ||
213 | |||
214 | static const struct file_operations suspend_stats_operations = { | ||
215 | .open = suspend_stats_open, | ||
216 | .read = seq_read, | ||
217 | .llseek = seq_lseek, | ||
218 | .release = single_release, | ||
219 | }; | ||
220 | |||
221 | static int __init pm_debugfs_init(void) | ||
222 | { | ||
223 | debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, | ||
224 | NULL, NULL, &suspend_stats_operations); | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | late_initcall(pm_debugfs_init); | ||
229 | #endif /* CONFIG_DEBUG_FS */ | ||
230 | |||
134 | #endif /* CONFIG_PM_SLEEP */ | 231 | #endif /* CONFIG_PM_SLEEP */ |
135 | 232 | ||
136 | struct kobject *power_kobj; | 233 | struct kobject *power_kobj; |
@@ -194,6 +291,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
194 | } | 291 | } |
195 | if (state < PM_SUSPEND_MAX && *s) | 292 | if (state < PM_SUSPEND_MAX && *s) |
196 | error = enter_state(state); | 293 | error = enter_state(state); |
294 | if (error) { | ||
295 | suspend_stats.fail++; | ||
296 | dpm_save_failed_errno(error); | ||
297 | } else | ||
298 | suspend_stats.success++; | ||
197 | #endif | 299 | #endif |
198 | 300 | ||
199 | Exit: | 301 | Exit: |
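For reference, the new debugfs file renders as below; the counts and names are invented, but the shape follows the seq_printf() calls above, with REC_FAILED_NUM (two) entries per failure ring, most recent first:

	# cat /sys/kernel/debug/suspend_stats
	success: 18
	fail: 2
	failed_freeze: 0
	failed_prepare: 0
	failed_suspend: 1
	failed_suspend_noirq: 0
	failed_resume: 1
	failed_resume_noirq: 0
	failures:
	  last_failed_dev:	mmc0
				usb1
	  last_failed_errno:	-16
				-5
	  last_failed_step:	suspend
				resume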
diff --git a/kernel/power/power.h b/kernel/power/power.h index 9a00a0a2628..23a2db1ec44 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -146,6 +146,7 @@ extern int swsusp_swap_in_use(void); | |||
146 | */ | 146 | */ |
147 | #define SF_PLATFORM_MODE 1 | 147 | #define SF_PLATFORM_MODE 1 |
148 | #define SF_NOCOMPRESS_MODE 2 | 148 | #define SF_NOCOMPRESS_MODE 2 |
149 | #define SF_CRC32_MODE 4 | ||
149 | 150 | ||
150 | /* kernel/power/hibernate.c */ | 151 | /* kernel/power/hibernate.c */ |
151 | extern int swsusp_check(void); | 152 | extern int swsusp_check(void); |
@@ -228,7 +229,8 @@ extern int pm_test_level; | |||
228 | #ifdef CONFIG_SUSPEND_FREEZER | 229 | #ifdef CONFIG_SUSPEND_FREEZER |
229 | static inline int suspend_freeze_processes(void) | 230 | static inline int suspend_freeze_processes(void) |
230 | { | 231 | { |
231 | return freeze_processes(); | 232 | int error = freeze_processes(); |
233 | return error ? : freeze_kernel_threads(); | ||
232 | } | 234 | } |
233 | 235 | ||
234 | static inline void suspend_thaw_processes(void) | 236 | static inline void suspend_thaw_processes(void) |
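The GNU "?:" shorthand in the new suspend_freeze_processes() expands to the obvious two-step sequence, i.e. kernel threads are only frozen once user space froze cleanly:

	static inline int suspend_freeze_processes(void)
	{
		int error = freeze_processes();		/* user space first */

		if (error)
			return error;
		return freeze_kernel_threads();		/* then kernel threads */
	}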
diff --git a/kernel/power/process.c b/kernel/power/process.c index 0cf3a27a6c9..addbbe5531b 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -135,7 +135,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
135 | } | 135 | } |
136 | 136 | ||
137 | /** | 137 | /** |
138 | * freeze_processes - tell processes to enter the refrigerator | 138 | * freeze_processes - Signal user space processes to enter the refrigerator. |
139 | */ | 139 | */ |
140 | int freeze_processes(void) | 140 | int freeze_processes(void) |
141 | { | 141 | { |
@@ -143,20 +143,30 @@ int freeze_processes(void) | |||
143 | 143 | ||
144 | printk("Freezing user space processes ... "); | 144 | printk("Freezing user space processes ... "); |
145 | error = try_to_freeze_tasks(true); | 145 | error = try_to_freeze_tasks(true); |
146 | if (error) | 146 | if (!error) { |
147 | goto Exit; | 147 | printk("done."); |
148 | printk("done.\n"); | 148 | oom_killer_disable(); |
149 | } | ||
150 | printk("\n"); | ||
151 | BUG_ON(in_atomic()); | ||
152 | |||
153 | return error; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. | ||
158 | */ | ||
159 | int freeze_kernel_threads(void) | ||
160 | { | ||
161 | int error; | ||
149 | 162 | ||
150 | printk("Freezing remaining freezable tasks ... "); | 163 | printk("Freezing remaining freezable tasks ... "); |
151 | error = try_to_freeze_tasks(false); | 164 | error = try_to_freeze_tasks(false); |
152 | if (error) | 165 | if (!error) |
153 | goto Exit; | 166 | printk("done."); |
154 | printk("done."); | ||
155 | 167 | ||
156 | oom_killer_disable(); | ||
157 | Exit: | ||
158 | BUG_ON(in_atomic()); | ||
159 | printk("\n"); | 168 | printk("\n"); |
169 | BUG_ON(in_atomic()); | ||
160 | 170 | ||
161 | return error; | 171 | return error; |
162 | } | 172 | } |
diff --git a/kernel/pm_qos_params.c b/kernel/power/qos.c index 37f05d0f079..1c1797dd1d1 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/power/qos.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | /*#define DEBUG*/ | 30 | /*#define DEBUG*/ |
31 | 31 | ||
32 | #include <linux/pm_qos_params.h> | 32 | #include <linux/pm_qos.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
@@ -45,62 +45,57 @@ | |||
45 | #include <linux/uaccess.h> | 45 | #include <linux/uaccess.h> |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * locking rule: all changes to requests or notifiers lists | 48 | * locking rule: all changes to constraints or notifiers lists |
49 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | 49 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock |
50 | * held, taken with _irqsave. One lock to rule them all | 50 | * held, taken with _irqsave. One lock to rule them all |
51 | */ | 51 | */ |
52 | enum pm_qos_type { | ||
53 | PM_QOS_MAX, /* return the largest value */ | ||
54 | PM_QOS_MIN /* return the smallest value */ | ||
55 | }; | ||
56 | |||
57 | /* | ||
58 | * Note: The lockless read path depends on the CPU accessing | ||
59 | * target_value atomically. Atomic access is only guaranteed on all CPU | ||
60 | * types linux supports for 32 bit quantites | ||
61 | */ | ||
62 | struct pm_qos_object { | 52 | struct pm_qos_object { |
63 | struct plist_head requests; | 53 | struct pm_qos_constraints *constraints; |
64 | struct blocking_notifier_head *notifiers; | ||
65 | struct miscdevice pm_qos_power_miscdev; | 54 | struct miscdevice pm_qos_power_miscdev; |
66 | char *name; | 55 | char *name; |
67 | s32 target_value; /* Do not change to 64 bit */ | ||
68 | s32 default_value; | ||
69 | enum pm_qos_type type; | ||
70 | }; | 56 | }; |
71 | 57 | ||
72 | static DEFINE_SPINLOCK(pm_qos_lock); | 58 | static DEFINE_SPINLOCK(pm_qos_lock); |
73 | 59 | ||
74 | static struct pm_qos_object null_pm_qos; | 60 | static struct pm_qos_object null_pm_qos; |
61 | |||
75 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); | 62 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); |
76 | static struct pm_qos_object cpu_dma_pm_qos = { | 63 | static struct pm_qos_constraints cpu_dma_constraints = { |
77 | .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests), | 64 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), |
78 | .notifiers = &cpu_dma_lat_notifier, | ||
79 | .name = "cpu_dma_latency", | ||
80 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 65 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
81 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 66 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
82 | .type = PM_QOS_MIN, | 67 | .type = PM_QOS_MIN, |
68 | .notifiers = &cpu_dma_lat_notifier, | ||
69 | }; | ||
70 | static struct pm_qos_object cpu_dma_pm_qos = { | ||
71 | .constraints = &cpu_dma_constraints, | ||
83 | }; | 72 | }; |
84 | 73 | ||
85 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); | 74 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); |
86 | static struct pm_qos_object network_lat_pm_qos = { | 75 | static struct pm_qos_constraints network_lat_constraints = { |
87 | .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests), | 76 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), |
88 | .notifiers = &network_lat_notifier, | ||
89 | .name = "network_latency", | ||
90 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 77 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
91 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 78 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
92 | .type = PM_QOS_MIN | 79 | .type = PM_QOS_MIN, |
80 | .notifiers = &network_lat_notifier, | ||
81 | }; | ||
82 | static struct pm_qos_object network_lat_pm_qos = { | ||
83 | .constraints = &network_lat_constraints, | ||
84 | .name = "network_latency", | ||
93 | }; | 85 | }; |
94 | 86 | ||
95 | 87 | ||
96 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); | 88 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); |
97 | static struct pm_qos_object network_throughput_pm_qos = { | 89 | static struct pm_qos_constraints network_tput_constraints = { |
98 | .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests), | 90 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), |
99 | .notifiers = &network_throughput_notifier, | ||
100 | .name = "network_throughput", | ||
101 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 91 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
102 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 92 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
103 | .type = PM_QOS_MAX, | 93 | .type = PM_QOS_MAX, |
94 | .notifiers = &network_throughput_notifier, | ||
95 | }; | ||
96 | static struct pm_qos_object network_throughput_pm_qos = { | ||
97 | .constraints = &network_tput_constraints, | ||
98 | .name = "network_throughput", | ||
104 | }; | 99 | }; |
105 | 100 | ||
106 | 101 | ||
@@ -127,17 +122,17 @@ static const struct file_operations pm_qos_power_fops = { | |||
127 | }; | 122 | }; |
128 | 123 | ||
129 | /* unlocked internal variant */ | 124 | /* unlocked internal variant */ |
130 | static inline int pm_qos_get_value(struct pm_qos_object *o) | 125 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) |
131 | { | 126 | { |
132 | if (plist_head_empty(&o->requests)) | 127 | if (plist_head_empty(&c->list)) |
133 | return o->default_value; | 128 | return c->default_value; |
134 | 129 | ||
135 | switch (o->type) { | 130 | switch (c->type) { |
136 | case PM_QOS_MIN: | 131 | case PM_QOS_MIN: |
137 | return plist_first(&o->requests)->prio; | 132 | return plist_first(&c->list)->prio; |
138 | 133 | ||
139 | case PM_QOS_MAX: | 134 | case PM_QOS_MAX: |
140 | return plist_last(&o->requests)->prio; | 135 | return plist_last(&c->list)->prio; |
141 | 136 | ||
142 | default: | 137 | default: |
143 | /* runtime check for not using enum */ | 138 | /* runtime check for not using enum */ |
@@ -145,69 +140,73 @@ static inline int pm_qos_get_value(struct pm_qos_object *o) | |||
145 | } | 140 | } |
146 | } | 141 | } |
147 | 142 | ||
148 | static inline s32 pm_qos_read_value(struct pm_qos_object *o) | 143 | s32 pm_qos_read_value(struct pm_qos_constraints *c) |
149 | { | 144 | { |
150 | return o->target_value; | 145 | return c->target_value; |
151 | } | 146 | } |
152 | 147 | ||
153 | static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value) | 148 | static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value) |
154 | { | 149 | { |
155 | o->target_value = value; | 150 | c->target_value = value; |
156 | } | 151 | } |
157 | 152 | ||
158 | static void update_target(struct pm_qos_object *o, struct plist_node *node, | 153 | /** |
159 | int del, int value) | 154 | * pm_qos_update_target - manages the constraints list and calls the notifiers |
155 | * if needed | ||
156 | * @c: constraints data struct | ||
157 | * @node: request to add to the list, to update or to remove | ||
158 | * @action: action to take on the constraints list | ||
159 | * @value: value of the request to add or update | ||
160 | * | ||
161 | * This function returns 1 if the aggregated constraint value has changed, 0 | ||
162 | * otherwise. | ||
163 | */ | ||
164 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | ||
165 | enum pm_qos_req_action action, int value) | ||
160 | { | 166 | { |
161 | unsigned long flags; | 167 | unsigned long flags; |
162 | int prev_value, curr_value; | 168 | int prev_value, curr_value, new_value; |
163 | 169 | ||
164 | spin_lock_irqsave(&pm_qos_lock, flags); | 170 | spin_lock_irqsave(&pm_qos_lock, flags); |
165 | prev_value = pm_qos_get_value(o); | 171 | prev_value = pm_qos_get_value(c); |
166 | /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ | 172 | if (value == PM_QOS_DEFAULT_VALUE) |
167 | if (value != PM_QOS_DEFAULT_VALUE) { | 173 | new_value = c->default_value; |
174 | else | ||
175 | new_value = value; | ||
176 | |||
177 | switch (action) { | ||
178 | case PM_QOS_REMOVE_REQ: | ||
179 | plist_del(node, &c->list); | ||
180 | break; | ||
181 | case PM_QOS_UPDATE_REQ: | ||
168 | /* | 182 | /* |
169 | * to change the list, we atomically remove, reinit | 183 | * to change the list, we atomically remove, reinit |
170 | * with new value and add, then see if the extremal | 184 | * with new value and add, then see if the extremal |
171 | * changed | 185 | * changed |
172 | */ | 186 | */ |
173 | plist_del(node, &o->requests); | 187 | plist_del(node, &c->list); |
174 | plist_node_init(node, value); | 188 | case PM_QOS_ADD_REQ: |
175 | plist_add(node, &o->requests); | 189 | plist_node_init(node, new_value); |
176 | } else if (del) { | 190 | plist_add(node, &c->list); |
177 | plist_del(node, &o->requests); | 191 | break; |
178 | } else { | 192 | default: |
179 | plist_add(node, &o->requests); | 193 | /* no action */ |
194 | ; | ||
180 | } | 195 | } |
181 | curr_value = pm_qos_get_value(o); | 196 | |
182 | pm_qos_set_value(o, curr_value); | 197 | curr_value = pm_qos_get_value(c); |
198 | pm_qos_set_value(c, curr_value); | ||
199 | |||
183 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 200 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
184 | 201 | ||
185 | if (prev_value != curr_value) | 202 | if (prev_value != curr_value) { |
186 | blocking_notifier_call_chain(o->notifiers, | 203 | blocking_notifier_call_chain(c->notifiers, |
187 | (unsigned long)curr_value, | 204 | (unsigned long)curr_value, |
188 | NULL); | 205 | NULL); |
189 | } | 206 | return 1; |
190 | 207 | } else { | |
191 | static int register_pm_qos_misc(struct pm_qos_object *qos) | 208 | return 0; |
192 | { | ||
193 | qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; | ||
194 | qos->pm_qos_power_miscdev.name = qos->name; | ||
195 | qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; | ||
196 | |||
197 | return misc_register(&qos->pm_qos_power_miscdev); | ||
198 | } | ||
199 | |||
200 | static int find_pm_qos_object_by_minor(int minor) | ||
201 | { | ||
202 | int pm_qos_class; | ||
203 | |||
204 | for (pm_qos_class = 0; | ||
205 | pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) { | ||
206 | if (minor == | ||
207 | pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor) | ||
208 | return pm_qos_class; | ||
209 | } | 209 | } |
210 | return -1; | ||
211 | } | 210 | } |
212 | 211 | ||
213 | /** | 212 | /** |
@@ -218,11 +217,11 @@ static int find_pm_qos_object_by_minor(int minor) | |||
218 | */ | 217 | */ |
219 | int pm_qos_request(int pm_qos_class) | 218 | int pm_qos_request(int pm_qos_class) |
220 | { | 219 | { |
221 | return pm_qos_read_value(pm_qos_array[pm_qos_class]); | 220 | return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints); |
222 | } | 221 | } |
223 | EXPORT_SYMBOL_GPL(pm_qos_request); | 222 | EXPORT_SYMBOL_GPL(pm_qos_request); |
224 | 223 | ||
225 | int pm_qos_request_active(struct pm_qos_request_list *req) | 224 | int pm_qos_request_active(struct pm_qos_request *req) |
226 | { | 225 | { |
227 | return req->pm_qos_class != 0; | 226 | return req->pm_qos_class != 0; |
228 | } | 227 | } |
@@ -230,40 +229,36 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active); | |||
230 | 229 | ||
231 | /** | 230 | /** |
232 | * pm_qos_add_request - inserts new qos request into the list | 231 | * pm_qos_add_request - inserts new qos request into the list |
233 | * @dep: pointer to a preallocated handle | 232 | * @req: pointer to a preallocated handle |
234 | * @pm_qos_class: identifies which list of qos request to use | 233 | * @pm_qos_class: identifies which list of qos request to use |
235 | * @value: defines the qos request | 234 | * @value: defines the qos request |
236 | * | 235 | * |
237 | * This function inserts a new entry in the pm_qos_class list of requested qos | 236 | * This function inserts a new entry in the pm_qos_class list of requested qos |
238 | * performance characteristics. It recomputes the aggregate QoS expectations | 237 | * performance characteristics. It recomputes the aggregate QoS expectations |
239 | * for the pm_qos_class of parameters and initializes the pm_qos_request_list | 238 | * for the pm_qos_class of parameters and initializes the pm_qos_request |
240 | * handle. Caller needs to save this handle for later use in updates and | 239 | * handle. Caller needs to save this handle for later use in updates and |
241 | * removal. | 240 | * removal. |
242 | */ | 241 | */ |
243 | 242 | ||
244 | void pm_qos_add_request(struct pm_qos_request_list *dep, | 243 | void pm_qos_add_request(struct pm_qos_request *req, |
245 | int pm_qos_class, s32 value) | 244 | int pm_qos_class, s32 value) |
246 | { | 245 | { |
247 | struct pm_qos_object *o = pm_qos_array[pm_qos_class]; | 246 | if (!req) /*guard against callers passing in null */ |
248 | int new_value; | 247 | return; |
249 | 248 | ||
250 | if (pm_qos_request_active(dep)) { | 249 | if (pm_qos_request_active(req)) { |
251 | WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); | 250 | WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); |
252 | return; | 251 | return; |
253 | } | 252 | } |
254 | if (value == PM_QOS_DEFAULT_VALUE) | 253 | req->pm_qos_class = pm_qos_class; |
255 | new_value = o->default_value; | 254 | pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints, |
256 | else | 255 | &req->node, PM_QOS_ADD_REQ, value); |
257 | new_value = value; | ||
258 | plist_node_init(&dep->list, new_value); | ||
259 | dep->pm_qos_class = pm_qos_class; | ||
260 | update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); | ||
261 | } | 256 | } |
262 | EXPORT_SYMBOL_GPL(pm_qos_add_request); | 257 | EXPORT_SYMBOL_GPL(pm_qos_add_request); |
263 | 258 | ||
264 | /** | 259 | /** |
265 | * pm_qos_update_request - modifies an existing qos request | 260 | * pm_qos_update_request - modifies an existing qos request |
266 | * @pm_qos_req : handle to list element holding a pm_qos request to use | 261 | * @req : handle to list element holding a pm_qos request to use |
267 | * @value: defines the qos request | 262 | * @value: defines the qos request |
268 | * | 263 | * |
269 | * Updates an existing qos request for the pm_qos_class of parameters along | 264 | * Updates an existing qos request for the pm_qos_class of parameters along |
@@ -271,56 +266,47 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request); | |||
271 | * | 266 | * |
272 | * Attempts are made to make this code callable on hot code paths. | 267 | * Attempts are made to make this code callable on hot code paths. |
273 | */ | 268 | */ |
274 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, | 269 | void pm_qos_update_request(struct pm_qos_request *req, |
275 | s32 new_value) | 270 | s32 new_value) |
276 | { | 271 | { |
277 | s32 temp; | 272 | if (!req) /*guard against callers passing in null */ |
278 | struct pm_qos_object *o; | ||
279 | |||
280 | if (!pm_qos_req) /*guard against callers passing in null */ | ||
281 | return; | 273 | return; |
282 | 274 | ||
283 | if (!pm_qos_request_active(pm_qos_req)) { | 275 | if (!pm_qos_request_active(req)) { |
284 | WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); | 276 | WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); |
285 | return; | 277 | return; |
286 | } | 278 | } |
287 | 279 | ||
288 | o = pm_qos_array[pm_qos_req->pm_qos_class]; | 280 | if (new_value != req->node.prio) |
289 | 281 | pm_qos_update_target( | |
290 | if (new_value == PM_QOS_DEFAULT_VALUE) | 282 | pm_qos_array[req->pm_qos_class]->constraints, |
291 | temp = o->default_value; | 283 | &req->node, PM_QOS_UPDATE_REQ, new_value); |
292 | else | ||
293 | temp = new_value; | ||
294 | |||
295 | if (temp != pm_qos_req->list.prio) | ||
296 | update_target(o, &pm_qos_req->list, 0, temp); | ||
297 | } | 284 | } |
298 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | 285 | EXPORT_SYMBOL_GPL(pm_qos_update_request); |
299 | 286 | ||
300 | /** | 287 | /** |
301 | * pm_qos_remove_request - modifies an existing qos request | 288 | * pm_qos_remove_request - modifies an existing qos request |
302 | * @pm_qos_req: handle to request list element | 289 | * @req: handle to request list element |
303 | * | 290 | * |
304 | * Will remove pm qos request from the list of requests and | 291 | * Will remove pm qos request from the list of constraints and |
305 | * recompute the current target value for the pm_qos_class. Call this | 292 | * recompute the current target value for the pm_qos_class. Call this |
306 | * on slow code paths. | 293 | * on slow code paths. |
307 | */ | 294 | */ |
308 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) | 295 | void pm_qos_remove_request(struct pm_qos_request *req) |
309 | { | 296 | { |
310 | struct pm_qos_object *o; | 297 | if (!req) /*guard against callers passing in null */ |
311 | |||
312 | if (pm_qos_req == NULL) | ||
313 | return; | 298 | return; |
314 | /* silent return to keep pcm code cleaner */ | 299 | /* silent return to keep pcm code cleaner */ |
315 | 300 | ||
316 | if (!pm_qos_request_active(pm_qos_req)) { | 301 | if (!pm_qos_request_active(req)) { |
317 | WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); | 302 | WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); |
318 | return; | 303 | return; |
319 | } | 304 | } |
320 | 305 | ||
321 | o = pm_qos_array[pm_qos_req->pm_qos_class]; | 306 | pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, |
322 | update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); | 307 | &req->node, PM_QOS_REMOVE_REQ, |
323 | memset(pm_qos_req, 0, sizeof(*pm_qos_req)); | 308 | PM_QOS_DEFAULT_VALUE); |
309 | memset(req, 0, sizeof(*req)); | ||
324 | } | 310 | } |
325 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); | 311 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); |
326 | 312 | ||
@@ -337,7 +323,8 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) | |||
337 | int retval; | 323 | int retval; |
338 | 324 | ||
339 | retval = blocking_notifier_chain_register( | 325 | retval = blocking_notifier_chain_register( |
340 | pm_qos_array[pm_qos_class]->notifiers, notifier); | 326 | pm_qos_array[pm_qos_class]->constraints->notifiers, |
327 | notifier); | ||
341 | 328 | ||
342 | return retval; | 329 | return retval; |
343 | } | 330 | } |
@@ -356,19 +343,43 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) | |||
356 | int retval; | 343 | int retval; |
357 | 344 | ||
358 | retval = blocking_notifier_chain_unregister( | 345 | retval = blocking_notifier_chain_unregister( |
359 | pm_qos_array[pm_qos_class]->notifiers, notifier); | 346 | pm_qos_array[pm_qos_class]->constraints->notifiers, |
347 | notifier); | ||
360 | 348 | ||
361 | return retval; | 349 | return retval; |
362 | } | 350 | } |
363 | EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); | 351 | EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); |
364 | 352 | ||
353 | /* User space interface to PM QoS classes via misc devices */ | ||
354 | static int register_pm_qos_misc(struct pm_qos_object *qos) | ||
355 | { | ||
356 | qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; | ||
357 | qos->pm_qos_power_miscdev.name = qos->name; | ||
358 | qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; | ||
359 | |||
360 | return misc_register(&qos->pm_qos_power_miscdev); | ||
361 | } | ||
362 | |||
363 | static int find_pm_qos_object_by_minor(int minor) | ||
364 | { | ||
365 | int pm_qos_class; | ||
366 | |||
367 | for (pm_qos_class = 0; | ||
368 | pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) { | ||
369 | if (minor == | ||
370 | pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor) | ||
371 | return pm_qos_class; | ||
372 | } | ||
373 | return -1; | ||
374 | } | ||
375 | |||
365 | static int pm_qos_power_open(struct inode *inode, struct file *filp) | 376 | static int pm_qos_power_open(struct inode *inode, struct file *filp) |
366 | { | 377 | { |
367 | long pm_qos_class; | 378 | long pm_qos_class; |
368 | 379 | ||
369 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); | 380 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); |
370 | if (pm_qos_class >= 0) { | 381 | if (pm_qos_class >= 0) { |
371 | struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL); | 382 | struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL); |
372 | if (!req) | 383 | if (!req) |
373 | return -ENOMEM; | 384 | return -ENOMEM; |
374 | 385 | ||
@@ -383,7 +394,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) | |||
383 | 394 | ||
384 | static int pm_qos_power_release(struct inode *inode, struct file *filp) | 395 | static int pm_qos_power_release(struct inode *inode, struct file *filp) |
385 | { | 396 | { |
386 | struct pm_qos_request_list *req; | 397 | struct pm_qos_request *req; |
387 | 398 | ||
388 | req = filp->private_data; | 399 | req = filp->private_data; |
389 | pm_qos_remove_request(req); | 400 | pm_qos_remove_request(req); |
@@ -398,17 +409,15 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, | |||
398 | { | 409 | { |
399 | s32 value; | 410 | s32 value; |
400 | unsigned long flags; | 411 | unsigned long flags; |
401 | struct pm_qos_object *o; | 412 | struct pm_qos_request *req = filp->private_data; |
402 | struct pm_qos_request_list *pm_qos_req = filp->private_data; | ||
403 | 413 | ||
404 | if (!pm_qos_req) | 414 | if (!req) |
405 | return -EINVAL; | 415 | return -EINVAL; |
406 | if (!pm_qos_request_active(pm_qos_req)) | 416 | if (!pm_qos_request_active(req)) |
407 | return -EINVAL; | 417 | return -EINVAL; |
408 | 418 | ||
409 | o = pm_qos_array[pm_qos_req->pm_qos_class]; | ||
410 | spin_lock_irqsave(&pm_qos_lock, flags); | 419 | spin_lock_irqsave(&pm_qos_lock, flags); |
411 | value = pm_qos_get_value(o); | 420 | value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints); |
412 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 421 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
413 | 422 | ||
414 | return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); | 423 | return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); |
@@ -418,7 +427,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
418 | size_t count, loff_t *f_pos) | 427 | size_t count, loff_t *f_pos) |
419 | { | 428 | { |
420 | s32 value; | 429 | s32 value; |
421 | struct pm_qos_request_list *pm_qos_req; | 430 | struct pm_qos_request *req; |
422 | 431 | ||
423 | if (count == sizeof(s32)) { | 432 | if (count == sizeof(s32)) { |
424 | if (copy_from_user(&value, buf, sizeof(s32))) | 433 | if (copy_from_user(&value, buf, sizeof(s32))) |
@@ -449,8 +458,8 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
449 | return -EINVAL; | 458 | return -EINVAL; |
450 | } | 459 | } |
451 | 460 | ||
452 | pm_qos_req = filp->private_data; | 461 | req = filp->private_data; |
453 | pm_qos_update_request(pm_qos_req, value); | 462 | pm_qos_update_request(req, value); |
454 | 463 | ||
455 | return count; | 464 | return count; |
456 | } | 465 | } |
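A minimal sketch of the reworked request lifecycle built on pm_qos_update_target(); the class and the values are illustrative:

	static struct pm_qos_request my_req;

	void enter_low_latency(void)
	{
		/* for a PM_QOS_MIN class the aggregate becomes the smallest request */
		pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 100);
	}

	void leave_low_latency(void)
	{
		/* unlinks the node, re-aggregates and notifies if the value changed */
		pm_qos_remove_request(&my_req);
	}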
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 06efa54f93d..cbe2c144139 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1339,6 +1339,9 @@ int hibernate_preallocate_memory(void) | |||
1339 | count += highmem; | 1339 | count += highmem; |
1340 | count -= totalreserve_pages; | 1340 | count -= totalreserve_pages; |
1341 | 1341 | ||
1342 | /* Add number of pages required for page keys (s390 only). */ | ||
1343 | size += page_key_additional_pages(saveable); | ||
1344 | |||
1342 | /* Compute the maximum number of saveable pages to leave in memory. */ | 1345 | /* Compute the maximum number of saveable pages to leave in memory. */ |
1343 | max_size = (count - (size + PAGES_FOR_IO)) / 2 | 1346 | max_size = (count - (size + PAGES_FOR_IO)) / 2 |
1344 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); | 1347 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); |
@@ -1662,6 +1665,8 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
1662 | buf[j] = memory_bm_next_pfn(bm); | 1665 | buf[j] = memory_bm_next_pfn(bm); |
1663 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1666 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
1664 | break; | 1667 | break; |
1668 | /* Save page key for data page (s390 only). */ | ||
1669 | page_key_read(buf + j); | ||
1665 | } | 1670 | } |
1666 | } | 1671 | } |
1667 | 1672 | ||
@@ -1821,6 +1826,9 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
1821 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1826 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
1822 | break; | 1827 | break; |
1823 | 1828 | ||
1829 | /* Extract and buffer page key for data page (s390 only). */ | ||
1830 | page_key_memorize(buf + j); | ||
1831 | |||
1824 | if (memory_bm_pfn_present(bm, buf[j])) | 1832 | if (memory_bm_pfn_present(bm, buf[j])) |
1825 | memory_bm_set_bit(bm, buf[j]); | 1833 | memory_bm_set_bit(bm, buf[j]); |
1826 | else | 1834 | else |
@@ -2223,6 +2231,11 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2223 | if (error) | 2231 | if (error) |
2224 | return error; | 2232 | return error; |
2225 | 2233 | ||
2234 | /* Allocate buffer for page keys. */ | ||
2235 | error = page_key_alloc(nr_copy_pages); | ||
2236 | if (error) | ||
2237 | return error; | ||
2238 | |||
2226 | } else if (handle->cur <= nr_meta_pages + 1) { | 2239 | } else if (handle->cur <= nr_meta_pages + 1) { |
2227 | error = unpack_orig_pfns(buffer, ©_bm); | 2240 | error = unpack_orig_pfns(buffer, ©_bm); |
2228 | if (error) | 2241 | if (error) |
@@ -2243,6 +2256,8 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2243 | } | 2256 | } |
2244 | } else { | 2257 | } else { |
2245 | copy_last_highmem_page(); | 2258 | copy_last_highmem_page(); |
2259 | /* Restore page key for data page (s390 only). */ | ||
2260 | page_key_write(handle->buffer); | ||
2246 | handle->buffer = get_buffer(&orig_bm, &ca); | 2261 | handle->buffer = get_buffer(&orig_bm, &ca); |
2247 | if (IS_ERR(handle->buffer)) | 2262 | if (IS_ERR(handle->buffer)) |
2248 | return PTR_ERR(handle->buffer); | 2263 | return PTR_ERR(handle->buffer); |
@@ -2264,6 +2279,9 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2264 | void snapshot_write_finalize(struct snapshot_handle *handle) | 2279 | void snapshot_write_finalize(struct snapshot_handle *handle) |
2265 | { | 2280 | { |
2266 | copy_last_highmem_page(); | 2281 | copy_last_highmem_page(); |
2282 | /* Restore page key for data page (s390 only). */ | ||
2283 | page_key_write(handle->buffer); | ||
2284 | page_key_free(); | ||
2267 | /* Free only if we have loaded the image entirely */ | 2285 | /* Free only if we have loaded the image entirely */ |
2268 | if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { | 2286 | if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { |
2269 | memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); | 2287 | memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); |
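Worth noting: on every architecture except s390 the page_key_*() calls sprinkled through this file compile down to the empty inline stubs added to suspend.h earlier, so the snapshot fast path is unchanged whenever CONFIG_ARCH_SAVE_PAGE_KEYS is off.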
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index b6b71ad2208..fdd4263b995 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -104,7 +104,10 @@ static int suspend_prepare(void) | |||
104 | goto Finish; | 104 | goto Finish; |
105 | 105 | ||
106 | error = suspend_freeze_processes(); | 106 | error = suspend_freeze_processes(); |
107 | if (!error) | 107 | if (error) { |
108 | suspend_stats.failed_freeze++; | ||
109 | dpm_save_failed_step(SUSPEND_FREEZE); | ||
110 | } else | ||
108 | return 0; | 111 | return 0; |
109 | 112 | ||
110 | suspend_thaw_processes(); | 113 | suspend_thaw_processes(); |
@@ -315,8 +318,16 @@ int enter_state(suspend_state_t state) | |||
315 | */ | 318 | */ |
316 | int pm_suspend(suspend_state_t state) | 319 | int pm_suspend(suspend_state_t state) |
317 | { | 320 | { |
318 | if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX) | 321 | int ret; |
319 | return enter_state(state); | 322 | if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) { |
323 | ret = enter_state(state); | ||
324 | if (ret) { | ||
325 | suspend_stats.fail++; | ||
326 | dpm_save_failed_errno(ret); | ||
327 | } else | ||
328 | suspend_stats.success++; | ||
329 | return ret; | ||
330 | } | ||
320 | return -EINVAL; | 331 | return -EINVAL; |
321 | } | 332 | } |
322 | EXPORT_SYMBOL(pm_suspend); | 333 | EXPORT_SYMBOL(pm_suspend); |
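The "<=" to "<" change here is the actual off-by-one fix: PM_SUSPEND_MAX is a sentinel one past the last real state, used only to size per-state tables, so passing it to enter_state() would index past the end of arrays such as:

	/* sketch: valid suspend_state_t values run from 0 to PM_SUSPEND_MAX - 1 */
	static const char * const pm_states[PM_SUSPEND_MAX];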
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c97c3a0eee..11a594c4ba2 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/lzo.h> | 28 | #include <linux/lzo.h> |
29 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
30 | #include <linux/cpumask.h> | ||
31 | #include <linux/atomic.h> | ||
32 | #include <linux/kthread.h> | ||
33 | #include <linux/crc32.h> | ||
30 | 34 | ||
31 | #include "power.h" | 35 | #include "power.h" |
32 | 36 | ||
@@ -43,8 +47,7 @@ | |||
43 | * allocated and populated one at a time, so we only need one memory | 47 | * allocated and populated one at a time, so we only need one memory |
44 | * page to set up the entire structure. | 48 | * page to set up the entire structure. |
45 | * | 49 | * |
46 | * During resume we also only need to use one swap_map_page structure | 50 | * During resume we pick up all swap_map_page structures into a list. |
47 | * at a time. | ||
48 | */ | 51 | */ |
49 | 52 | ||
50 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) | 53 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) |
@@ -54,6 +57,11 @@ struct swap_map_page { | |||
54 | sector_t next_swap; | 57 | sector_t next_swap; |
55 | }; | 58 | }; |
56 | 59 | ||
60 | struct swap_map_page_list { | ||
61 | struct swap_map_page *map; | ||
62 | struct swap_map_page_list *next; | ||
63 | }; | ||
64 | |||
57 | /** | 65 | /** |
58 | * The swap_map_handle structure is used for handling swap in | 66 | * The swap_map_handle structure is used for handling swap in |
59 | * a file-alike way | 67 | * a file-alike way |
@@ -61,13 +69,18 @@ struct swap_map_page { | |||
61 | 69 | ||
62 | struct swap_map_handle { | 70 | struct swap_map_handle { |
63 | struct swap_map_page *cur; | 71 | struct swap_map_page *cur; |
72 | struct swap_map_page_list *maps; | ||
64 | sector_t cur_swap; | 73 | sector_t cur_swap; |
65 | sector_t first_sector; | 74 | sector_t first_sector; |
66 | unsigned int k; | 75 | unsigned int k; |
76 | unsigned long nr_free_pages, written; | ||
77 | u32 crc32; | ||
67 | }; | 78 | }; |
68 | 79 | ||
69 | struct swsusp_header { | 80 | struct swsusp_header { |
70 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)]; | 81 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - |
82 | sizeof(u32)]; | ||
83 | u32 crc32; | ||
71 | sector_t image; | 84 | sector_t image; |
72 | unsigned int flags; /* Flags to pass to the "boot" kernel */ | 85 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
73 | char orig_sig[10]; | 86 | char orig_sig[10]; |
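Note how the new crc32 field is paid for by shrinking reserved[]: swsusp_header is an on-disk structure that must stay exactly one page, so every addition has to be subtracted from the padding by hand. An illustrative compile-time check (not part of the patch) would be:

        /* Hypothetical invariant check; the patch relies on manual bookkeeping
         * of the reserved[] size against the fields that follow it. */
        BUILD_BUG_ON(sizeof(struct swsusp_header) != PAGE_SIZE);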
@@ -199,6 +212,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
199 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); | 212 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); |
200 | swsusp_header->image = handle->first_sector; | 213 | swsusp_header->image = handle->first_sector; |
201 | swsusp_header->flags = flags; | 214 | swsusp_header->flags = flags; |
215 | if (flags & SF_CRC32_MODE) | ||
216 | swsusp_header->crc32 = handle->crc32; | ||
202 | error = hib_bio_write_page(swsusp_resume_block, | 217 | error = hib_bio_write_page(swsusp_resume_block, |
203 | swsusp_header, NULL); | 218 | swsusp_header, NULL); |
204 | } else { | 219 | } else { |
@@ -245,6 +260,7 @@ static int swsusp_swap_check(void) | |||
245 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) | 260 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) |
246 | { | 261 | { |
247 | void *src; | 262 | void *src; |
263 | int ret; | ||
248 | 264 | ||
249 | if (!offset) | 265 | if (!offset) |
250 | return -ENOSPC; | 266 | return -ENOSPC; |
@@ -254,9 +270,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain) | |||
254 | if (src) { | 270 | if (src) { |
255 | copy_page(src, buf); | 271 | copy_page(src, buf); |
256 | } else { | 272 | } else { |
257 | WARN_ON_ONCE(1); | 273 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ |
258 | bio_chain = NULL; /* Go synchronous */ | 274 | if (ret) |
259 | src = buf; | 275 | return ret; |
276 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | ||
277 | if (src) { | ||
278 | copy_page(src, buf); | ||
279 | } else { | ||
280 | WARN_ON_ONCE(1); | ||
281 | bio_chain = NULL; /* Go synchronous */ | ||
282 | src = buf; | ||
283 | } | ||
260 | } | 284 | } |
261 | } else { | 285 | } else { |
262 | src = buf; | 286 | src = buf; |
@@ -293,6 +317,8 @@ static int get_swap_writer(struct swap_map_handle *handle) | |||
293 | goto err_rel; | 317 | goto err_rel; |
294 | } | 318 | } |
295 | handle->k = 0; | 319 | handle->k = 0; |
320 | handle->nr_free_pages = nr_free_pages() >> 1; | ||
321 | handle->written = 0; | ||
296 | handle->first_sector = handle->cur_swap; | 322 | handle->first_sector = handle->cur_swap; |
297 | return 0; | 323 | return 0; |
298 | err_rel: | 324 | err_rel: |
@@ -316,20 +342,23 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, | |||
316 | return error; | 342 | return error; |
317 | handle->cur->entries[handle->k++] = offset; | 343 | handle->cur->entries[handle->k++] = offset; |
318 | if (handle->k >= MAP_PAGE_ENTRIES) { | 344 | if (handle->k >= MAP_PAGE_ENTRIES) { |
319 | error = hib_wait_on_bio_chain(bio_chain); | ||
320 | if (error) | ||
321 | goto out; | ||
322 | offset = alloc_swapdev_block(root_swap); | 345 | offset = alloc_swapdev_block(root_swap); |
323 | if (!offset) | 346 | if (!offset) |
324 | return -ENOSPC; | 347 | return -ENOSPC; |
325 | handle->cur->next_swap = offset; | 348 | handle->cur->next_swap = offset; |
326 | error = write_page(handle->cur, handle->cur_swap, NULL); | 349 | error = write_page(handle->cur, handle->cur_swap, bio_chain); |
327 | if (error) | 350 | if (error) |
328 | goto out; | 351 | goto out; |
329 | clear_page(handle->cur); | 352 | clear_page(handle->cur); |
330 | handle->cur_swap = offset; | 353 | handle->cur_swap = offset; |
331 | handle->k = 0; | 354 | handle->k = 0; |
332 | } | 355 | } |
356 | if (bio_chain && ++handle->written > handle->nr_free_pages) { | ||
357 | error = hib_wait_on_bio_chain(bio_chain); | ||
358 | if (error) | ||
359 | goto out; | ||
360 | handle->written = 0; | ||
361 | } | ||
333 | out: | 362 | out: |
334 | return error; | 363 | return error; |
335 | } | 364 | } |
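swap_write_page() now pushes the map pages through the caller's bio chain as well (write_page() gets bio_chain instead of NULL), and the per-map-page wait is replaced by a throttle: get_swap_writer() budgets half of the currently free pages (handle->nr_free_pages = nr_free_pages() >> 1), and once more than that many pages are in flight the chain is drained. The pattern, pulled out as a sketch:

        /* Illustrative throttle on asynchronous writes (pattern from the hunk
         * above): allow roughly half of the free pages to sit in the bio
         * chain, then block until the queued I/O completes. */
        static int throttle_writes(struct swap_map_handle *handle,
                                   struct bio **bio_chain)
        {
                int error = 0;

                if (bio_chain && ++handle->written > handle->nr_free_pages) {
                        error = hib_wait_on_bio_chain(bio_chain);
                        if (!error)
                                handle->written = 0;
                }
                return error;
        }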
@@ -372,6 +401,13 @@ static int swap_writer_finish(struct swap_map_handle *handle, | |||
372 | LZO_HEADER, PAGE_SIZE) | 401 | LZO_HEADER, PAGE_SIZE) |
373 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) | 402 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) |
374 | 403 | ||
404 | /* Maximum number of threads for compression/decompression. */ | ||
405 | #define LZO_THREADS 3 | ||
406 | |||
407 | /* Maximum number of pages for read buffering. */ | ||
408 | #define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) | ||
409 | |||
410 | |||
375 | /** | 411 | /** |
376 | * save_image - save the suspend image data | 412 | * save_image - save the suspend image data |
377 | */ | 413 | */ |
@@ -419,6 +455,92 @@ static int save_image(struct swap_map_handle *handle, | |||
419 | return ret; | 455 | return ret; |
420 | } | 456 | } |
421 | 457 | ||
458 | /** | ||
459 | * Structure used for CRC32. | ||
460 | */ | ||
461 | struct crc_data { | ||
462 | struct task_struct *thr; /* thread */ | ||
463 | atomic_t ready; /* ready to start flag */ | ||
464 | atomic_t stop; /* ready to stop flag */ | ||
465 | unsigned run_threads; /* nr current threads */ | ||
466 | wait_queue_head_t go; /* start crc update */ | ||
467 | wait_queue_head_t done; /* crc update done */ | ||
468 | u32 *crc32; /* points to handle's crc32 */ | ||
469 | size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ | ||
470 | unsigned char *unc[LZO_THREADS]; /* uncompressed data */ | ||
471 | }; | ||
472 | |||
473 | /** | ||
474 | * CRC32 update function that runs in its own thread. | ||
475 | */ | ||
476 | static int crc32_threadfn(void *data) | ||
477 | { | ||
478 | struct crc_data *d = data; | ||
479 | unsigned i; | ||
480 | |||
481 | while (1) { | ||
482 | wait_event(d->go, atomic_read(&d->ready) || | ||
483 | kthread_should_stop()); | ||
484 | if (kthread_should_stop()) { | ||
485 | d->thr = NULL; | ||
486 | atomic_set(&d->stop, 1); | ||
487 | wake_up(&d->done); | ||
488 | break; | ||
489 | } | ||
490 | atomic_set(&d->ready, 0); | ||
491 | |||
492 | for (i = 0; i < d->run_threads; i++) | ||
493 | *d->crc32 = crc32_le(*d->crc32, | ||
494 | d->unc[i], *d->unc_len[i]); | ||
495 | atomic_set(&d->stop, 1); | ||
496 | wake_up(&d->done); | ||
497 | } | ||
498 | return 0; | ||
499 | } | ||
500 | /** | ||
501 | * Structure used for LZO data compression. | ||
502 | */ | ||
503 | struct cmp_data { | ||
504 | struct task_struct *thr; /* thread */ | ||
505 | atomic_t ready; /* ready to start flag */ | ||
506 | atomic_t stop; /* ready to stop flag */ | ||
507 | int ret; /* return code */ | ||
508 | wait_queue_head_t go; /* start compression */ | ||
509 | wait_queue_head_t done; /* compression done */ | ||
510 | size_t unc_len; /* uncompressed length */ | ||
511 | size_t cmp_len; /* compressed length */ | ||
512 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | ||
513 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | ||
514 | unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ | ||
515 | }; | ||
516 | |||
517 | /** | ||
518 | * Compression function that runs in its own thread. | ||
519 | */ | ||
520 | static int lzo_compress_threadfn(void *data) | ||
521 | { | ||
522 | struct cmp_data *d = data; | ||
523 | |||
524 | while (1) { | ||
525 | wait_event(d->go, atomic_read(&d->ready) || | ||
526 | kthread_should_stop()); | ||
527 | if (kthread_should_stop()) { | ||
528 | d->thr = NULL; | ||
529 | d->ret = -1; | ||
530 | atomic_set(&d->stop, 1); | ||
531 | wake_up(&d->done); | ||
532 | break; | ||
533 | } | ||
534 | atomic_set(&d->ready, 0); | ||
535 | |||
536 | d->ret = lzo1x_1_compress(d->unc, d->unc_len, | ||
537 | d->cmp + LZO_HEADER, &d->cmp_len, | ||
538 | d->wrk); | ||
539 | atomic_set(&d->stop, 1); | ||
540 | wake_up(&d->done); | ||
541 | } | ||
542 | return 0; | ||
543 | } | ||
422 | 544 | ||
423 | /** | 545 | /** |
424 | * save_image_lzo - Save the suspend image data compressed with LZO. | 546 | * save_image_lzo - Save the suspend image data compressed with LZO. |
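crc32_threadfn() and lzo_compress_threadfn() share one handshake: the feeder fills a worker's buffer, sets its ready flag and wakes its go queue; the worker clears ready, does its work, sets stop and wakes done; the feeder then waits on done and re-arms stop. Reduced to a sketch from the feeder's side:

        /* Handshake skeleton (sketch); d is a crc_data or cmp_data. */
        atomic_set(&d->ready, 1);
        wake_up(&d->go);                        /* worker starts running */
        /* ... feeder does other work in parallel ... */
        wait_event(d->done, atomic_read(&d->stop));
        atomic_set(&d->stop, 0);                /* re-arm for the next round */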
@@ -437,42 +559,93 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
437 | struct bio *bio; | 559 | struct bio *bio; |
438 | struct timeval start; | 560 | struct timeval start; |
439 | struct timeval stop; | 561 | struct timeval stop; |
440 | size_t off, unc_len, cmp_len; | 562 | size_t off; |
441 | unsigned char *unc, *cmp, *wrk, *page; | 563 | unsigned thr, run_threads, nr_threads; |
564 | unsigned char *page = NULL; | ||
565 | struct cmp_data *data = NULL; | ||
566 | struct crc_data *crc = NULL; | ||
567 | |||
568 | /* | ||
569 | * We'll limit the number of threads for compression to limit memory | ||
570 | * footprint. | ||
571 | */ | ||
572 | nr_threads = num_online_cpus() - 1; | ||
573 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | ||
442 | 574 | ||
443 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 575 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
444 | if (!page) { | 576 | if (!page) { |
445 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 577 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
446 | return -ENOMEM; | 578 | ret = -ENOMEM; |
579 | goto out_clean; | ||
447 | } | 580 | } |
448 | 581 | ||
449 | wrk = vmalloc(LZO1X_1_MEM_COMPRESS); | 582 | data = vmalloc(sizeof(*data) * nr_threads); |
450 | if (!wrk) { | 583 | if (!data) { |
451 | printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); | 584 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); |
452 | free_page((unsigned long)page); | 585 | ret = -ENOMEM; |
453 | return -ENOMEM; | 586 | goto out_clean; |
454 | } | 587 | } |
588 | for (thr = 0; thr < nr_threads; thr++) | ||
589 | memset(&data[thr], 0, offsetof(struct cmp_data, go)); | ||
455 | 590 | ||
456 | unc = vmalloc(LZO_UNC_SIZE); | 591 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
457 | if (!unc) { | 592 | if (!crc) { |
458 | printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); | 593 | printk(KERN_ERR "PM: Failed to allocate crc\n"); |
459 | vfree(wrk); | 594 | ret = -ENOMEM; |
460 | free_page((unsigned long)page); | 595 | goto out_clean; |
461 | return -ENOMEM; | 596 | } |
597 | memset(crc, 0, offsetof(struct crc_data, go)); | ||
598 | |||
599 | /* | ||
600 | * Start the compression threads. | ||
601 | */ | ||
602 | for (thr = 0; thr < nr_threads; thr++) { | ||
603 | init_waitqueue_head(&data[thr].go); | ||
604 | init_waitqueue_head(&data[thr].done); | ||
605 | |||
606 | data[thr].thr = kthread_run(lzo_compress_threadfn, | ||
607 | &data[thr], | ||
608 | "image_compress/%u", thr); | ||
609 | if (IS_ERR(data[thr].thr)) { | ||
610 | data[thr].thr = NULL; | ||
611 | printk(KERN_ERR | ||
612 | "PM: Cannot start compression threads\n"); | ||
613 | ret = -ENOMEM; | ||
614 | goto out_clean; | ||
615 | } | ||
462 | } | 616 | } |
463 | 617 | ||
464 | cmp = vmalloc(LZO_CMP_SIZE); | 618 | /* |
465 | if (!cmp) { | 619 | * Adjust number of free pages after all allocations have been done. |
466 | printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); | 620 | * We don't want to run out of pages when writing. |
467 | vfree(unc); | 621 | */ |
468 | vfree(wrk); | 622 | handle->nr_free_pages = nr_free_pages() >> 1; |
469 | free_page((unsigned long)page); | 623 | |
470 | return -ENOMEM; | 624 | /* |
625 | * Start the CRC32 thread. | ||
626 | */ | ||
627 | init_waitqueue_head(&crc->go); | ||
628 | init_waitqueue_head(&crc->done); | ||
629 | |||
630 | handle->crc32 = 0; | ||
631 | crc->crc32 = &handle->crc32; | ||
632 | for (thr = 0; thr < nr_threads; thr++) { | ||
633 | crc->unc[thr] = data[thr].unc; | ||
634 | crc->unc_len[thr] = &data[thr].unc_len; | ||
635 | } | ||
636 | |||
637 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | ||
638 | if (IS_ERR(crc->thr)) { | ||
639 | crc->thr = NULL; | ||
640 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | ||
641 | ret = -ENOMEM; | ||
642 | goto out_clean; | ||
471 | } | 643 | } |
472 | 644 | ||
473 | printk(KERN_INFO | 645 | printk(KERN_INFO |
646 | "PM: Using %u thread(s) for compression.\n" | ||
474 | "PM: Compressing and saving image data (%u pages) ... ", | 647 | "PM: Compressing and saving image data (%u pages) ... ", |
475 | nr_to_write); | 648 | nr_threads, nr_to_write); |
476 | m = nr_to_write / 100; | 649 | m = nr_to_write / 100; |
477 | if (!m) | 650 | if (!m) |
478 | m = 1; | 651 | m = 1; |
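The worker count above leaves one CPU for the feeder/I-O path and is clamped to keep the statically sized buffers bounded (each cmp_data carries LZO_UNC_SIZE + LZO_CMP_SIZE plus the LZO workspace). A quick illustration of the arithmetic:

        /* nr_threads derivation as in the hunk above. */
        unsigned nr_threads = clamp_val(num_online_cpus() - 1, 1, LZO_THREADS);
        /* 1 CPU -> 1 thread, 2 CPUs -> 1, 4 CPUs -> 3, 8 CPUs -> 3 (LZO_THREADS) */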
@@ -480,55 +653,83 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
480 | bio = NULL; | 653 | bio = NULL; |
481 | do_gettimeofday(&start); | 654 | do_gettimeofday(&start); |
482 | for (;;) { | 655 | for (;;) { |
483 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { | 656 | for (thr = 0; thr < nr_threads; thr++) { |
484 | ret = snapshot_read_next(snapshot); | 657 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { |
485 | if (ret < 0) | 658 | ret = snapshot_read_next(snapshot); |
486 | goto out_finish; | 659 | if (ret < 0) |
487 | 660 | goto out_finish; | |
488 | if (!ret) | 661 | |
662 | if (!ret) | ||
663 | break; | ||
664 | |||
665 | memcpy(data[thr].unc + off, | ||
666 | data_of(*snapshot), PAGE_SIZE); | ||
667 | |||
668 | if (!(nr_pages % m)) | ||
669 | printk(KERN_CONT "\b\b\b\b%3d%%", | ||
670 | nr_pages / m); | ||
671 | nr_pages++; | ||
672 | } | ||
673 | if (!off) | ||
489 | break; | 674 | break; |
490 | 675 | ||
491 | memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); | 676 | data[thr].unc_len = off; |
492 | 677 | ||
493 | if (!(nr_pages % m)) | 678 | atomic_set(&data[thr].ready, 1); |
494 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); | 679 | wake_up(&data[thr].go); |
495 | nr_pages++; | ||
496 | } | 680 | } |
497 | 681 | ||
498 | if (!off) | 682 | if (!thr) |
499 | break; | 683 | break; |
500 | 684 | ||
501 | unc_len = off; | 685 | crc->run_threads = thr; |
502 | ret = lzo1x_1_compress(unc, unc_len, | 686 | atomic_set(&crc->ready, 1); |
503 | cmp + LZO_HEADER, &cmp_len, wrk); | 687 | wake_up(&crc->go); |
504 | if (ret < 0) { | ||
505 | printk(KERN_ERR "PM: LZO compression failed\n"); | ||
506 | break; | ||
507 | } | ||
508 | 688 | ||
509 | if (unlikely(!cmp_len || | 689 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
510 | cmp_len > lzo1x_worst_compress(unc_len))) { | 690 | wait_event(data[thr].done, |
511 | printk(KERN_ERR "PM: Invalid LZO compressed length\n"); | 691 | atomic_read(&data[thr].stop)); |
512 | ret = -1; | 692 | atomic_set(&data[thr].stop, 0); |
513 | break; | ||
514 | } | ||
515 | 693 | ||
516 | *(size_t *)cmp = cmp_len; | 694 | ret = data[thr].ret; |
517 | 695 | ||
518 | /* | 696 | if (ret < 0) { |
519 | * Given we are writing one page at a time to disk, we copy | 697 | printk(KERN_ERR "PM: LZO compression failed\n"); |
520 | * that much from the buffer, although the last bit will likely | 698 | goto out_finish; |
521 | * be smaller than full page. This is OK - we saved the length | 699 | } |
522 | * of the compressed data, so any garbage at the end will be | ||
523 | * discarded when we read it. | ||
524 | */ | ||
525 | for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { | ||
526 | memcpy(page, cmp + off, PAGE_SIZE); | ||
527 | 700 | ||
528 | ret = swap_write_page(handle, page, &bio); | 701 | if (unlikely(!data[thr].cmp_len || |
529 | if (ret) | 702 | data[thr].cmp_len > |
703 | lzo1x_worst_compress(data[thr].unc_len))) { | ||
704 | printk(KERN_ERR | ||
705 | "PM: Invalid LZO compressed length\n"); | ||
706 | ret = -1; | ||
530 | goto out_finish; | 707 | goto out_finish; |
708 | } | ||
709 | |||
710 | *(size_t *)data[thr].cmp = data[thr].cmp_len; | ||
711 | |||
712 | /* | ||
713 | * Given we are writing one page at a time to disk, we | ||
714 | * copy that much from the buffer, although the last | ||
715 | * bit will likely be smaller than full page. This is | ||
716 | * OK - we saved the length of the compressed data, so | ||
717 | * any garbage at the end will be discarded when we | ||
718 | * read it. | ||
719 | */ | ||
720 | for (off = 0; | ||
721 | off < LZO_HEADER + data[thr].cmp_len; | ||
722 | off += PAGE_SIZE) { | ||
723 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); | ||
724 | |||
725 | ret = swap_write_page(handle, page, &bio); | ||
726 | if (ret) | ||
727 | goto out_finish; | ||
728 | } | ||
531 | } | 729 | } |
730 | |||
731 | wait_event(crc->done, atomic_read(&crc->stop)); | ||
732 | atomic_set(&crc->stop, 0); | ||
532 | } | 733 | } |
533 | 734 | ||
534 | out_finish: | 735 | out_finish: |
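Each compressed chunk is written with a small length header: `*(size_t *)data[thr].cmp = data[thr].cmp_len` stores the compressed length in the first LZO_HEADER bytes, and the chunk is then copied out in whole pages, so the final page may carry garbage past the end of the stream. That is safe because the reader recovers the exact length first. The on-disk layout, as a sketch:

        /* Layout of one chunk (sketch; write_one_page() is a hypothetical
         * stand-in for the swap_write_page() loop in the hunk above). */
        *(size_t *)chunk = cmp_len;                     /* LZO_HEADER bytes */
        for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE)
                write_one_page(chunk + off);            /* tail padding ignored on read */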
@@ -536,16 +737,25 @@ out_finish: | |||
536 | do_gettimeofday(&stop); | 737 | do_gettimeofday(&stop); |
537 | if (!ret) | 738 | if (!ret) |
538 | ret = err2; | 739 | ret = err2; |
539 | if (!ret) | 740 | if (!ret) { |
540 | printk(KERN_CONT "\b\b\b\bdone\n"); | 741 | printk(KERN_CONT "\b\b\b\bdone\n"); |
541 | else | 742 | } else { |
542 | printk(KERN_CONT "\n"); | 743 | printk(KERN_CONT "\n"); |
744 | } | ||
543 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 745 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
544 | 746 | out_clean: | |
545 | vfree(cmp); | 747 | if (crc) { |
546 | vfree(unc); | 748 | if (crc->thr) |
547 | vfree(wrk); | 749 | kthread_stop(crc->thr); |
548 | free_page((unsigned long)page); | 750 | kfree(crc); |
751 | } | ||
752 | if (data) { | ||
753 | for (thr = 0; thr < nr_threads; thr++) | ||
754 | if (data[thr].thr) | ||
755 | kthread_stop(data[thr].thr); | ||
756 | vfree(data); | ||
757 | } | ||
758 | if (page) free_page((unsigned long)page); | ||
549 | 759 | ||
550 | return ret; | 760 | return ret; |
551 | } | 761 | } |
@@ -625,8 +835,15 @@ out_finish: | |||
625 | 835 | ||
626 | static void release_swap_reader(struct swap_map_handle *handle) | 836 | static void release_swap_reader(struct swap_map_handle *handle) |
627 | { | 837 | { |
628 | if (handle->cur) | 838 | struct swap_map_page_list *tmp; |
629 | free_page((unsigned long)handle->cur); | 839 | |
840 | while (handle->maps) { | ||
841 | if (handle->maps->map) | ||
842 | free_page((unsigned long)handle->maps->map); | ||
843 | tmp = handle->maps; | ||
844 | handle->maps = handle->maps->next; | ||
845 | kfree(tmp); | ||
846 | } | ||
630 | handle->cur = NULL; | 847 | handle->cur = NULL; |
631 | } | 848 | } |
632 | 849 | ||
@@ -634,22 +851,46 @@ static int get_swap_reader(struct swap_map_handle *handle, | |||
634 | unsigned int *flags_p) | 851 | unsigned int *flags_p) |
635 | { | 852 | { |
636 | int error; | 853 | int error; |
854 | struct swap_map_page_list *tmp, *last; | ||
855 | sector_t offset; | ||
637 | 856 | ||
638 | *flags_p = swsusp_header->flags; | 857 | *flags_p = swsusp_header->flags; |
639 | 858 | ||
640 | if (!swsusp_header->image) /* how can this happen? */ | 859 | if (!swsusp_header->image) /* how can this happen? */ |
641 | return -EINVAL; | 860 | return -EINVAL; |
642 | 861 | ||
643 | handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); | 862 | handle->cur = NULL; |
644 | if (!handle->cur) | 863 | last = handle->maps = NULL; |
645 | return -ENOMEM; | 864 | offset = swsusp_header->image; |
865 | while (offset) { | ||
866 | tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); | ||
867 | if (!tmp) { | ||
868 | release_swap_reader(handle); | ||
869 | return -ENOMEM; | ||
870 | } | ||
871 | memset(tmp, 0, sizeof(*tmp)); | ||
872 | if (!handle->maps) | ||
873 | handle->maps = tmp; | ||
874 | if (last) | ||
875 | last->next = tmp; | ||
876 | last = tmp; | ||
877 | |||
878 | tmp->map = (struct swap_map_page *) | ||
879 | __get_free_page(__GFP_WAIT | __GFP_HIGH); | ||
880 | if (!tmp->map) { | ||
881 | release_swap_reader(handle); | ||
882 | return -ENOMEM; | ||
883 | } | ||
646 | 884 | ||
647 | error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL); | 885 | error = hib_bio_read_page(offset, tmp->map, NULL); |
648 | if (error) { | 886 | if (error) { |
649 | release_swap_reader(handle); | 887 | release_swap_reader(handle); |
650 | return error; | 888 | return error; |
889 | } | ||
890 | offset = tmp->map->next_swap; | ||
651 | } | 891 | } |
652 | handle->k = 0; | 892 | handle->k = 0; |
893 | handle->cur = handle->maps->map; | ||
653 | return 0; | 894 | return 0; |
654 | } | 895 | } |
655 | 896 | ||
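get_swap_reader() now walks the whole next_swap chain at open time and keeps every map page on a singly linked list, instead of reading one map page at a time in the middle of the image read. The construction, as a sketch with hypothetical helpers standing in for the kmalloc/list wiring above:

        /* Sketch of the map-list build; new_node(), alloc_page_buf() and
         * append() are illustrative stand-ins, not kernel APIs. */
        for (offset = swsusp_header->image; offset; offset = tmp->map->next_swap) {
                tmp = new_node();                       /* zeroed swap_map_page_list */
                tmp->map = alloc_page_buf();            /* __get_free_page(...) */
                hib_bio_read_page(offset, tmp->map, NULL);
                append(&handle->maps, tmp);
        }
        handle->cur = handle->maps->map;

This lets swap_read_page() free each exhausted map page immediately and removes the synchronous map-page read from the data path.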
@@ -658,6 +899,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
658 | { | 899 | { |
659 | sector_t offset; | 900 | sector_t offset; |
660 | int error; | 901 | int error; |
902 | struct swap_map_page_list *tmp; | ||
661 | 903 | ||
662 | if (!handle->cur) | 904 | if (!handle->cur) |
663 | return -EINVAL; | 905 | return -EINVAL; |
@@ -668,13 +910,15 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
668 | if (error) | 910 | if (error) |
669 | return error; | 911 | return error; |
670 | if (++handle->k >= MAP_PAGE_ENTRIES) { | 912 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
671 | error = hib_wait_on_bio_chain(bio_chain); | ||
672 | handle->k = 0; | 913 | handle->k = 0; |
673 | offset = handle->cur->next_swap; | 914 | free_page((unsigned long)handle->maps->map); |
674 | if (!offset) | 915 | tmp = handle->maps; |
916 | handle->maps = handle->maps->next; | ||
917 | kfree(tmp); | ||
918 | if (!handle->maps) | ||
675 | release_swap_reader(handle); | 919 | release_swap_reader(handle); |
676 | else if (!error) | 920 | else |
677 | error = hib_bio_read_page(offset, handle->cur, NULL); | 921 | handle->cur = handle->maps->map; |
678 | } | 922 | } |
679 | return error; | 923 | return error; |
680 | } | 924 | } |
@@ -697,7 +941,7 @@ static int load_image(struct swap_map_handle *handle, | |||
697 | unsigned int nr_to_read) | 941 | unsigned int nr_to_read) |
698 | { | 942 | { |
699 | unsigned int m; | 943 | unsigned int m; |
700 | int error = 0; | 944 | int ret = 0; |
701 | struct timeval start; | 945 | struct timeval start; |
702 | struct timeval stop; | 946 | struct timeval stop; |
703 | struct bio *bio; | 947 | struct bio *bio; |
@@ -713,15 +957,15 @@ static int load_image(struct swap_map_handle *handle, | |||
713 | bio = NULL; | 957 | bio = NULL; |
714 | do_gettimeofday(&start); | 958 | do_gettimeofday(&start); |
715 | for ( ; ; ) { | 959 | for ( ; ; ) { |
716 | error = snapshot_write_next(snapshot); | 960 | ret = snapshot_write_next(snapshot); |
717 | if (error <= 0) | 961 | if (ret <= 0) |
718 | break; | 962 | break; |
719 | error = swap_read_page(handle, data_of(*snapshot), &bio); | 963 | ret = swap_read_page(handle, data_of(*snapshot), &bio); |
720 | if (error) | 964 | if (ret) |
721 | break; | 965 | break; |
722 | if (snapshot->sync_read) | 966 | if (snapshot->sync_read) |
723 | error = hib_wait_on_bio_chain(&bio); | 967 | ret = hib_wait_on_bio_chain(&bio); |
724 | if (error) | 968 | if (ret) |
725 | break; | 969 | break; |
726 | if (!(nr_pages % m)) | 970 | if (!(nr_pages % m)) |
727 | printk("\b\b\b\b%3d%%", nr_pages / m); | 971 | printk("\b\b\b\b%3d%%", nr_pages / m); |
@@ -729,17 +973,61 @@ static int load_image(struct swap_map_handle *handle, | |||
729 | } | 973 | } |
730 | err2 = hib_wait_on_bio_chain(&bio); | 974 | err2 = hib_wait_on_bio_chain(&bio); |
731 | do_gettimeofday(&stop); | 975 | do_gettimeofday(&stop); |
732 | if (!error) | 976 | if (!ret) |
733 | error = err2; | 977 | ret = err2; |
734 | if (!error) { | 978 | if (!ret) { |
735 | printk("\b\b\b\bdone\n"); | 979 | printk("\b\b\b\bdone\n"); |
736 | snapshot_write_finalize(snapshot); | 980 | snapshot_write_finalize(snapshot); |
737 | if (!snapshot_image_loaded(snapshot)) | 981 | if (!snapshot_image_loaded(snapshot)) |
738 | error = -ENODATA; | 982 | ret = -ENODATA; |
739 | } else | 983 | } else |
740 | printk("\n"); | 984 | printk("\n"); |
741 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 985 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
742 | return error; | 986 | return ret; |
987 | } | ||
988 | |||
989 | /** | ||
990 | * Structure used for LZO data decompression. | ||
991 | */ | ||
992 | struct dec_data { | ||
993 | struct task_struct *thr; /* thread */ | ||
994 | atomic_t ready; /* ready to start flag */ | ||
995 | atomic_t stop; /* ready to stop flag */ | ||
996 | int ret; /* return code */ | ||
997 | wait_queue_head_t go; /* start decompression */ | ||
998 | wait_queue_head_t done; /* decompression done */ | ||
999 | size_t unc_len; /* uncompressed length */ | ||
1000 | size_t cmp_len; /* compressed length */ | ||
1001 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | ||
1002 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | ||
1003 | }; | ||
1004 | |||
1005 | /** | ||
1006 | * Decompression function that runs in its own thread. | ||
1007 | */ | ||
1008 | static int lzo_decompress_threadfn(void *data) | ||
1009 | { | ||
1010 | struct dec_data *d = data; | ||
1011 | |||
1012 | while (1) { | ||
1013 | wait_event(d->go, atomic_read(&d->ready) || | ||
1014 | kthread_should_stop()); | ||
1015 | if (kthread_should_stop()) { | ||
1016 | d->thr = NULL; | ||
1017 | d->ret = -1; | ||
1018 | atomic_set(&d->stop, 1); | ||
1019 | wake_up(&d->done); | ||
1020 | break; | ||
1021 | } | ||
1022 | atomic_set(&d->ready, 0); | ||
1023 | |||
1024 | d->unc_len = LZO_UNC_SIZE; | ||
1025 | d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, | ||
1026 | d->unc, &d->unc_len); | ||
1027 | atomic_set(&d->stop, 1); | ||
1028 | wake_up(&d->done); | ||
1029 | } | ||
1030 | return 0; | ||
743 | } | 1031 | } |
744 | 1032 | ||
745 | /** | 1033 | /** |
@@ -753,50 +1041,120 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
753 | unsigned int nr_to_read) | 1041 | unsigned int nr_to_read) |
754 | { | 1042 | { |
755 | unsigned int m; | 1043 | unsigned int m; |
756 | int error = 0; | 1044 | int ret = 0; |
1045 | int eof = 0; | ||
757 | struct bio *bio; | 1046 | struct bio *bio; |
758 | struct timeval start; | 1047 | struct timeval start; |
759 | struct timeval stop; | 1048 | struct timeval stop; |
760 | unsigned nr_pages; | 1049 | unsigned nr_pages; |
761 | size_t i, off, unc_len, cmp_len; | 1050 | size_t off; |
762 | unsigned char *unc, *cmp, *page[LZO_CMP_PAGES]; | 1051 | unsigned i, thr, run_threads, nr_threads; |
763 | 1052 | unsigned ring = 0, pg = 0, ring_size = 0, | |
764 | for (i = 0; i < LZO_CMP_PAGES; i++) { | 1053 | have = 0, want, need, asked = 0; |
765 | page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 1054 | unsigned long read_pages; |
766 | if (!page[i]) { | 1055 | unsigned char **page = NULL; |
767 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 1056 | struct dec_data *data = NULL; |
1057 | struct crc_data *crc = NULL; | ||
1058 | |||
1059 | /* | ||
1060 | * We'll limit the number of threads for decompression to limit memory | ||
1061 | * footprint. | ||
1062 | */ | ||
1063 | nr_threads = num_online_cpus() - 1; | ||
1064 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | ||
1065 | |||
1066 | page = vmalloc(sizeof(*page) * LZO_READ_PAGES); | ||
1067 | if (!page) { | ||
1068 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | ||
1069 | ret = -ENOMEM; | ||
1070 | goto out_clean; | ||
1071 | } | ||
768 | 1072 | ||
769 | while (i) | 1073 | data = vmalloc(sizeof(*data) * nr_threads); |
770 | free_page((unsigned long)page[--i]); | 1074 | if (!data) { |
1075 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | ||
1076 | ret = -ENOMEM; | ||
1077 | goto out_clean; | ||
1078 | } | ||
1079 | for (thr = 0; thr < nr_threads; thr++) | ||
1080 | memset(&data[thr], 0, offsetof(struct dec_data, go)); | ||
771 | 1081 | ||
772 | return -ENOMEM; | 1082 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
1083 | if (!crc) { | ||
1084 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | ||
1085 | ret = -ENOMEM; | ||
1086 | goto out_clean; | ||
1087 | } | ||
1088 | memset(crc, 0, offsetof(struct crc_data, go)); | ||
1089 | |||
1090 | /* | ||
1091 | * Start the decompression threads. | ||
1092 | */ | ||
1093 | for (thr = 0; thr < nr_threads; thr++) { | ||
1094 | init_waitqueue_head(&data[thr].go); | ||
1095 | init_waitqueue_head(&data[thr].done); | ||
1096 | |||
1097 | data[thr].thr = kthread_run(lzo_decompress_threadfn, | ||
1098 | &data[thr], | ||
1099 | "image_decompress/%u", thr); | ||
1100 | if (IS_ERR(data[thr].thr)) { | ||
1101 | data[thr].thr = NULL; | ||
1102 | printk(KERN_ERR | ||
1103 | "PM: Cannot start decompression threads\n"); | ||
1104 | ret = -ENOMEM; | ||
1105 | goto out_clean; | ||
773 | } | 1106 | } |
774 | } | 1107 | } |
775 | 1108 | ||
776 | unc = vmalloc(LZO_UNC_SIZE); | 1109 | /* |
777 | if (!unc) { | 1110 | * Start the CRC32 thread. |
778 | printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); | 1111 | */ |
779 | 1112 | init_waitqueue_head(&crc->go); | |
780 | for (i = 0; i < LZO_CMP_PAGES; i++) | 1113 | init_waitqueue_head(&crc->done); |
781 | free_page((unsigned long)page[i]); | 1114 | |
782 | 1115 | handle->crc32 = 0; | |
783 | return -ENOMEM; | 1116 | crc->crc32 = &handle->crc32; |
1117 | for (thr = 0; thr < nr_threads; thr++) { | ||
1118 | crc->unc[thr] = data[thr].unc; | ||
1119 | crc->unc_len[thr] = &data[thr].unc_len; | ||
784 | } | 1120 | } |
785 | 1121 | ||
786 | cmp = vmalloc(LZO_CMP_SIZE); | 1122 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
787 | if (!cmp) { | 1123 | if (IS_ERR(crc->thr)) { |
788 | printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); | 1124 | crc->thr = NULL; |
1125 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | ||
1126 | ret = -ENOMEM; | ||
1127 | goto out_clean; | ||
1128 | } | ||
789 | 1129 | ||
790 | vfree(unc); | 1130 | /* |
791 | for (i = 0; i < LZO_CMP_PAGES; i++) | 1131 | * Adjust number of pages for read buffering, in case we are short. |
792 | free_page((unsigned long)page[i]); | 1132 | */ |
1133 | read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1; | ||
1134 | read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES); | ||
793 | 1135 | ||
794 | return -ENOMEM; | 1136 | for (i = 0; i < read_pages; i++) { |
1137 | page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? | ||
1138 | __GFP_WAIT | __GFP_HIGH : | ||
1139 | __GFP_WAIT); | ||
1140 | if (!page[i]) { | ||
1141 | if (i < LZO_CMP_PAGES) { | ||
1142 | ring_size = i; | ||
1143 | printk(KERN_ERR | ||
1144 | "PM: Failed to allocate LZO pages\n"); | ||
1145 | ret = -ENOMEM; | ||
1146 | goto out_clean; | ||
1147 | } else { | ||
1148 | break; | ||
1149 | } | ||
1150 | } | ||
795 | } | 1151 | } |
1152 | want = ring_size = i; | ||
796 | 1153 | ||
797 | printk(KERN_INFO | 1154 | printk(KERN_INFO |
1155 | "PM: Using %u thread(s) for decompression.\n" | ||
798 | "PM: Loading and decompressing image data (%u pages) ... ", | 1156 | "PM: Loading and decompressing image data (%u pages) ... ", |
799 | nr_to_read); | 1157 | nr_threads, nr_to_read); |
800 | m = nr_to_read / 100; | 1158 | m = nr_to_read / 100; |
801 | if (!m) | 1159 | if (!m) |
802 | m = 1; | 1160 | m = 1; |
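The decompression path replaces the fixed page[LZO_CMP_PAGES] array with a ring of up to LZO_READ_PAGES read-ahead pages. The bookkeeping in the loop below is easiest to read as a producer/consumer ring:

        /* Ring invariants (sketch): ring = producer index, pg = consumer
         * index, have = completed reads available, asked = reads in flight,
         * want = free slots left to fill; eof is 1 at end of data and 2 once
         * the last in-flight reads have completed. Indices wrap modulo
         * ring_size: */
        if (++ring >= ring_size)
                ring = 0;
        if (++pg >= ring_size)
                pg = 0;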
@@ -804,85 +1162,189 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
804 | bio = NULL; | 1162 | bio = NULL; |
805 | do_gettimeofday(&start); | 1163 | do_gettimeofday(&start); |
806 | 1164 | ||
807 | error = snapshot_write_next(snapshot); | 1165 | ret = snapshot_write_next(snapshot); |
808 | if (error <= 0) | 1166 | if (ret <= 0) |
809 | goto out_finish; | 1167 | goto out_finish; |
810 | 1168 | ||
811 | for (;;) { | 1169 | for (;;) {
812 | error = swap_read_page(handle, page[0], NULL); /* sync */ | 1170 | for (i = 0; !eof && i < want; i++) { |
813 | if (error) | 1171 | ret = swap_read_page(handle, page[ring], &bio); |
814 | break; | 1172 | if (ret) { |
815 | 1173 | /* | |
816 | cmp_len = *(size_t *)page[0]; | 1174 | * On real read error, finish. On end of data, |
817 | if (unlikely(!cmp_len || | 1175 | * set EOF flag and just exit the read loop. |
818 | cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { | 1176 | */ |
819 | printk(KERN_ERR "PM: Invalid LZO compressed length\n"); | 1177 | if (handle->cur && |
820 | error = -1; | 1178 | handle->cur->entries[handle->k]) { |
821 | break; | 1179 | goto out_finish; |
1180 | } else { | ||
1181 | eof = 1; | ||
1182 | break; | ||
1183 | } | ||
1184 | } | ||
1185 | if (++ring >= ring_size) | ||
1186 | ring = 0; | ||
822 | } | 1187 | } |
1188 | asked += i; | ||
1189 | want -= i; | ||
823 | 1190 | ||
824 | for (off = PAGE_SIZE, i = 1; | 1191 | /* |
825 | off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { | 1192 | * We are out of data, wait for some more. |
826 | error = swap_read_page(handle, page[i], &bio); | 1193 | */ |
827 | if (error) | 1194 | if (!have) { |
1195 | if (!asked) | ||
1196 | break; | ||
1197 | |||
1198 | ret = hib_wait_on_bio_chain(&bio); | ||
1199 | if (ret) | ||
828 | goto out_finish; | 1200 | goto out_finish; |
1201 | have += asked; | ||
1202 | asked = 0; | ||
1203 | if (eof) | ||
1204 | eof = 2; | ||
829 | } | 1205 | } |
830 | 1206 | ||
831 | error = hib_wait_on_bio_chain(&bio); /* need all data now */ | 1207 | if (crc->run_threads) { |
832 | if (error) | 1208 | wait_event(crc->done, atomic_read(&crc->stop)); |
833 | goto out_finish; | 1209 | atomic_set(&crc->stop, 0); |
834 | 1210 | crc->run_threads = 0; | |
835 | for (off = 0, i = 0; | ||
836 | off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { | ||
837 | memcpy(cmp + off, page[i], PAGE_SIZE); | ||
838 | } | 1211 | } |
839 | 1212 | ||
840 | unc_len = LZO_UNC_SIZE; | 1213 | for (thr = 0; have && thr < nr_threads; thr++) { |
841 | error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, | 1214 | data[thr].cmp_len = *(size_t *)page[pg]; |
842 | unc, &unc_len); | 1215 | if (unlikely(!data[thr].cmp_len || |
843 | if (error < 0) { | 1216 | data[thr].cmp_len > |
844 | printk(KERN_ERR "PM: LZO decompression failed\n"); | 1217 | lzo1x_worst_compress(LZO_UNC_SIZE))) { |
845 | break; | 1218 | printk(KERN_ERR |
1219 | "PM: Invalid LZO compressed length\n"); | ||
1220 | ret = -1; | ||
1221 | goto out_finish; | ||
1222 | } | ||
1223 | |||
1224 | need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER, | ||
1225 | PAGE_SIZE); | ||
1226 | if (need > have) { | ||
1227 | if (eof > 1) { | ||
1228 | ret = -1; | ||
1229 | goto out_finish; | ||
1230 | } | ||
1231 | break; | ||
1232 | } | ||
1233 | |||
1234 | for (off = 0; | ||
1235 | off < LZO_HEADER + data[thr].cmp_len; | ||
1236 | off += PAGE_SIZE) { | ||
1237 | memcpy(data[thr].cmp + off, | ||
1238 | page[pg], PAGE_SIZE); | ||
1239 | have--; | ||
1240 | want++; | ||
1241 | if (++pg >= ring_size) | ||
1242 | pg = 0; | ||
1243 | } | ||
1244 | |||
1245 | atomic_set(&data[thr].ready, 1); | ||
1246 | wake_up(&data[thr].go); | ||
846 | } | 1247 | } |
847 | 1248 | ||
848 | if (unlikely(!unc_len || | 1249 | /* |
849 | unc_len > LZO_UNC_SIZE || | 1250 | * Wait for more data while we are decompressing. |
850 | unc_len & (PAGE_SIZE - 1))) { | 1251 | */ |
851 | printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); | 1252 | if (have < LZO_CMP_PAGES && asked) { |
852 | error = -1; | 1253 | ret = hib_wait_on_bio_chain(&bio); |
853 | break; | 1254 | if (ret) |
1255 | goto out_finish; | ||
1256 | have += asked; | ||
1257 | asked = 0; | ||
1258 | if (eof) | ||
1259 | eof = 2; | ||
854 | } | 1260 | } |
855 | 1261 | ||
856 | for (off = 0; off < unc_len; off += PAGE_SIZE) { | 1262 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
857 | memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); | 1263 | wait_event(data[thr].done, |
1264 | atomic_read(&data[thr].stop)); | ||
1265 | atomic_set(&data[thr].stop, 0); | ||
1266 | |||
1267 | ret = data[thr].ret; | ||
858 | 1268 | ||
859 | if (!(nr_pages % m)) | 1269 | if (ret < 0) { |
860 | printk("\b\b\b\b%3d%%", nr_pages / m); | 1270 | printk(KERN_ERR |
861 | nr_pages++; | 1271 | "PM: LZO decompression failed\n"); |
1272 | goto out_finish; | ||
1273 | } | ||
862 | 1274 | ||
863 | error = snapshot_write_next(snapshot); | 1275 | if (unlikely(!data[thr].unc_len || |
864 | if (error <= 0) | 1276 | data[thr].unc_len > LZO_UNC_SIZE || |
1277 | data[thr].unc_len & (PAGE_SIZE - 1))) { | ||
1278 | printk(KERN_ERR | ||
1279 | "PM: Invalid LZO uncompressed length\n"); | ||
1280 | ret = -1; | ||
865 | goto out_finish; | 1281 | goto out_finish; |
1282 | } | ||
1283 | |||
1284 | for (off = 0; | ||
1285 | off < data[thr].unc_len; off += PAGE_SIZE) { | ||
1286 | memcpy(data_of(*snapshot), | ||
1287 | data[thr].unc + off, PAGE_SIZE); | ||
1288 | |||
1289 | if (!(nr_pages % m)) | ||
1290 | printk("\b\b\b\b%3d%%", nr_pages / m); | ||
1291 | nr_pages++; | ||
1292 | |||
1293 | ret = snapshot_write_next(snapshot); | ||
1294 | if (ret <= 0) { | ||
1295 | crc->run_threads = thr + 1; | ||
1296 | atomic_set(&crc->ready, 1); | ||
1297 | wake_up(&crc->go); | ||
1298 | goto out_finish; | ||
1299 | } | ||
1300 | } | ||
866 | } | 1301 | } |
1302 | |||
1303 | crc->run_threads = thr; | ||
1304 | atomic_set(&crc->ready, 1); | ||
1305 | wake_up(&crc->go); | ||
867 | } | 1306 | } |
868 | 1307 | ||
869 | out_finish: | 1308 | out_finish: |
1309 | if (crc->run_threads) { | ||
1310 | wait_event(crc->done, atomic_read(&crc->stop)); | ||
1311 | atomic_set(&crc->stop, 0); | ||
1312 | } | ||
870 | do_gettimeofday(&stop); | 1313 | do_gettimeofday(&stop); |
871 | if (!error) { | 1314 | if (!ret) { |
872 | printk("\b\b\b\bdone\n"); | 1315 | printk("\b\b\b\bdone\n"); |
873 | snapshot_write_finalize(snapshot); | 1316 | snapshot_write_finalize(snapshot); |
874 | if (!snapshot_image_loaded(snapshot)) | 1317 | if (!snapshot_image_loaded(snapshot)) |
875 | error = -ENODATA; | 1318 | ret = -ENODATA; |
1319 | if (!ret) { | ||
1320 | if (swsusp_header->flags & SF_CRC32_MODE) { | ||
1321 | if (handle->crc32 != swsusp_header->crc32) { | ||
1322 | printk(KERN_ERR | ||
1323 | "PM: Invalid image CRC32!\n"); | ||
1324 | ret = -ENODATA; | ||
1325 | } | ||
1326 | } | ||
1327 | } | ||
876 | } else | 1328 | } else |
877 | printk("\n"); | 1329 | printk("\n"); |
878 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 1330 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
879 | 1331 | out_clean: | |
880 | vfree(cmp); | 1332 | for (i = 0; i < ring_size; i++) |
881 | vfree(unc); | ||
882 | for (i = 0; i < LZO_CMP_PAGES; i++) | ||
883 | free_page((unsigned long)page[i]); | 1333 | free_page((unsigned long)page[i]); |
1334 | if (crc) { | ||
1335 | if (crc->thr) | ||
1336 | kthread_stop(crc->thr); | ||
1337 | kfree(crc); | ||
1338 | } | ||
1339 | if (data) { | ||
1340 | for (thr = 0; thr < nr_threads; thr++) | ||
1341 | if (data[thr].thr) | ||
1342 | kthread_stop(data[thr].thr); | ||
1343 | vfree(data); | ||
1344 | } | ||
1345 | if (page) vfree(page); | ||
884 | 1346 | ||
885 | return error; | 1347 | return ret; |
886 | } | 1348 | } |
887 | 1349 | ||
888 | /** | 1350 | /** |
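The CRC on this side is the same fold over the uncompressed chunks as on the save side, so a mismatch against swsusp_header->crc32 (checked when SF_CRC32_MODE is set) means the image was corrupted somewhere between write and read. The accumulation, in isolation:

        /* Running checksum over the uncompressed buffers (crc32_le() is the
         * <linux/crc32.h> helper used by crc32_threadfn() above). */
        u32 crc = 0;
        for (i = 0; i < run_threads; i++)
                crc = crc32_le(crc, data[i].unc, data[i].unc_len);
        /* On resume, crc != swsusp_header->crc32 ends the load with -ENODATA. */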
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 761c510a06c..f49405f842f 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -53,6 +53,9 @@ endif | |||
53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
55 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o | 55 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o |
56 | ifeq ($(CONFIG_PM_RUNTIME),y) | ||
57 | obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o | ||
58 | endif | ||
56 | ifeq ($(CONFIG_TRACING),y) | 59 | ifeq ($(CONFIG_TRACING),y) |
57 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o | 60 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o |
58 | endif | 61 | endif |
diff --git a/kernel/trace/rpm-traces.c b/kernel/trace/rpm-traces.c new file mode 100644 index 00000000000..4b3b5eaf94d --- /dev/null +++ b/kernel/trace/rpm-traces.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Power trace points | ||
3 | * | ||
4 | * Copyright (C) 2009 Ming Lei <ming.lei@canonical.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/workqueue.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/usb.h> | ||
13 | |||
14 | #define CREATE_TRACE_POINTS | ||
15 | #include <trace/events/rpm.h> | ||
16 | |||
17 | EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_return_int); | ||
18 | EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_idle); | ||
19 | EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_suspend); | ||
20 | EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_resume); | ||
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d4ee6d234a7..d999bf3b84e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/if_arp.h> | 19 | #include <linux/if_arp.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/bitmap.h> | 21 | #include <linux/bitmap.h> |
22 | #include <linux/pm_qos_params.h> | 22 | #include <linux/pm_qos.h> |
23 | #include <linux/inetdevice.h> | 23 | #include <linux/inetdevice.h> |
24 | #include <net/net_namespace.h> | 24 | #include <net/net_namespace.h> |
25 | #include <net/cfg80211.h> | 25 | #include <net/cfg80211.h> |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0e5d8daba1e..ba2da11a997 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/if_arp.h> | 17 | #include <linux/if_arp.h> |
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/pm_qos_params.h> | 20 | #include <linux/pm_qos.h> |
21 | #include <linux/crc32.h> | 21 | #include <linux/crc32.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <net/mac80211.h> | 23 | #include <net/mac80211.h> |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 397343a5927..83a0b050b37 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
17 | #include <linux/pm_qos_params.h> | 17 | #include <linux/pm_qos.h> |
18 | #include <net/sch_generic.h> | 18 | #include <net/sch_generic.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 1c6be91dfb9..c74e228731e 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/file.h> | 23 | #include <linux/file.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/time.h> | 25 | #include <linux/time.h> |
26 | #include <linux/pm_qos_params.h> | 26 | #include <linux/pm_qos.h> |
27 | #include <linux/uio.h> | 27 | #include <linux/uio.h> |
28 | #include <linux/dma-mapping.h> | 28 | #include <linux/dma-mapping.h> |
29 | #include <sound/core.h> | 29 | #include <sound/core.h> |
diff --git a/sound/usb/card.c b/sound/usb/card.c index d8f2bf40145..3068f043099 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -631,7 +631,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) | |||
631 | if (chip == (void *)-1L) | 631 | if (chip == (void *)-1L) |
632 | return 0; | 632 | return 0; |
633 | 633 | ||
634 | if (!(message.event & PM_EVENT_AUTO)) { | 634 | if (!PMSG_IS_AUTO(message)) { |
635 | snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); | 635 | snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); |
636 | if (!chip->num_suspended_intf++) { | 636 | if (!chip->num_suspended_intf++) { |
637 | list_for_each(p, &chip->pcm_list) { | 637 | list_for_each(p, &chip->pcm_list) { |
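The mac80211, ALSA and USB-audio hunks are mechanical fallout from the PM core rework: linux/pm_qos_params.h becomes linux/pm_qos.h, and open-coded tests of message.event adopt the PMSG_IS_AUTO() helper. In this kernel the helper lives in <linux/pm.h> and is presumably defined along these lines:

        /* Presumed definition of the helper adopted above (from <linux/pm.h>). */
        #define PMSG_IS_AUTO(msg)       (((msg).event & PM_EVENT_AUTO) != 0)

        /* so the suspend callback distinguishes runtime autosuspend from
         * system sleep like this: */
        if (!PMSG_IS_AUTO(message))
                snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);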