diff options
author | Rafael J. Wysocki <rjw@sisk.pl> | 2011-10-07 17:17:02 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2011-10-07 17:17:02 -0400 |
commit | c28b56b1d46b1bbb1be33c8f2632a88b0de1ef68 (patch) | |
tree | a7caddb9f58c968f6e77f36d2d398ec06983509e /drivers/base | |
parent | d727b60659a1173eb4142a5fc521ce67c28b34e1 (diff) | |
parent | cd0ea672f58d5cfdea271c45cec0c897f2b792aa (diff) |
Merge branch 'pm-domains' into pm-for-linus
* pm-domains:
PM / Domains: Split device PM domain data into base and need_restore
ARM: mach-shmobile: sh7372 sleep warning fixes
ARM: mach-shmobile: sh7372 A3SM support
ARM: mach-shmobile: sh7372 generic suspend/resume support
PM / Domains: Preliminary support for devices with power.irq_safe set
PM: Move clock-related definitions and headers to separate file
PM / Domains: Use power.subsys_data to reduce overhead
PM: Reference counting of power.subsys_data
PM: Introduce struct pm_subsys_data
ARM / shmobile: Make A3RV be a subdomain of A4LC on SH7372
PM / Domains: Rename argument of pm_genpd_add_subdomain()
PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
PM / Domains: Allow generic PM domains to have multiple masters
PM / Domains: Add "wait for parent" status for generic PM domains
PM / Domains: Make pm_genpd_poweron() always survive parent removal
PM / Domains: Do not take parent locks to modify subdomain counters
PM / Domains: Implement subdomain counters as atomic fields
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/power/Makefile | 2 | ||||
-rw-r--r-- | drivers/base/power/clock_ops.c | 123 | ||||
-rw-r--r-- | drivers/base/power/common.c | 86 | ||||
-rw-r--r-- | drivers/base/power/domain.c | 348 |
4 files changed, 347 insertions, 212 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2639ae79a372..6488ce12f586 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o | 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o |
2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o |
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index b97294e2d95b..b876e60a53ef 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -10,18 +10,13 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
13 | #include <linux/pm_runtime.h> | 13 | #include <linux/pm_clock.h> |
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | 17 | ||
18 | #ifdef CONFIG_PM | 18 | #ifdef CONFIG_PM |
19 | 19 | ||
20 | struct pm_clk_data { | ||
21 | struct list_head clock_list; | ||
22 | spinlock_t lock; | ||
23 | }; | ||
24 | |||
25 | enum pce_status { | 20 | enum pce_status { |
26 | PCE_STATUS_NONE = 0, | 21 | PCE_STATUS_NONE = 0, |
27 | PCE_STATUS_ACQUIRED, | 22 | PCE_STATUS_ACQUIRED, |
@@ -36,11 +31,6 @@ struct pm_clock_entry { | |||
36 | enum pce_status status; | 31 | enum pce_status status; |
37 | }; | 32 | }; |
38 | 33 | ||
39 | static struct pm_clk_data *__to_pcd(struct device *dev) | ||
40 | { | ||
41 | return dev ? dev->power.subsys_data : NULL; | ||
42 | } | ||
43 | |||
44 | /** | 34 | /** |
45 | * pm_clk_acquire - Acquire a device clock. | 35 | * pm_clk_acquire - Acquire a device clock. |
46 | * @dev: Device whose clock is to be acquired. | 36 | * @dev: Device whose clock is to be acquired. |
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | |||
67 | */ | 57 | */ |
68 | int pm_clk_add(struct device *dev, const char *con_id) | 58 | int pm_clk_add(struct device *dev, const char *con_id) |
69 | { | 59 | { |
70 | struct pm_clk_data *pcd = __to_pcd(dev); | 60 | struct pm_subsys_data *psd = dev_to_psd(dev); |
71 | struct pm_clock_entry *ce; | 61 | struct pm_clock_entry *ce; |
72 | 62 | ||
73 | if (!pcd) | 63 | if (!psd) |
74 | return -EINVAL; | 64 | return -EINVAL; |
75 | 65 | ||
76 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | 66 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); |
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
91 | 81 | ||
92 | pm_clk_acquire(dev, ce); | 82 | pm_clk_acquire(dev, ce); |
93 | 83 | ||
94 | spin_lock_irq(&pcd->lock); | 84 | spin_lock_irq(&psd->lock); |
95 | list_add_tail(&ce->node, &pcd->clock_list); | 85 | list_add_tail(&ce->node, &psd->clock_list); |
96 | spin_unlock_irq(&pcd->lock); | 86 | spin_unlock_irq(&psd->lock); |
97 | return 0; | 87 | return 0; |
98 | } | 88 | } |
99 | 89 | ||
@@ -130,15 +120,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
130 | */ | 120 | */ |
131 | void pm_clk_remove(struct device *dev, const char *con_id) | 121 | void pm_clk_remove(struct device *dev, const char *con_id) |
132 | { | 122 | { |
133 | struct pm_clk_data *pcd = __to_pcd(dev); | 123 | struct pm_subsys_data *psd = dev_to_psd(dev); |
134 | struct pm_clock_entry *ce; | 124 | struct pm_clock_entry *ce; |
135 | 125 | ||
136 | if (!pcd) | 126 | if (!psd) |
137 | return; | 127 | return; |
138 | 128 | ||
139 | spin_lock_irq(&pcd->lock); | 129 | spin_lock_irq(&psd->lock); |
140 | 130 | ||
141 | list_for_each_entry(ce, &pcd->clock_list, node) { | 131 | list_for_each_entry(ce, &psd->clock_list, node) { |
142 | if (!con_id && !ce->con_id) | 132 | if (!con_id && !ce->con_id) |
143 | goto remove; | 133 | goto remove; |
144 | else if (!con_id || !ce->con_id) | 134 | else if (!con_id || !ce->con_id) |
@@ -147,12 +137,12 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
147 | goto remove; | 137 | goto remove; |
148 | } | 138 | } |
149 | 139 | ||
150 | spin_unlock_irq(&pcd->lock); | 140 | spin_unlock_irq(&psd->lock); |
151 | return; | 141 | return; |
152 | 142 | ||
153 | remove: | 143 | remove: |
154 | list_del(&ce->node); | 144 | list_del(&ce->node); |
155 | spin_unlock_irq(&pcd->lock); | 145 | spin_unlock_irq(&psd->lock); |
156 | 146 | ||
157 | __pm_clk_remove(ce); | 147 | __pm_clk_remove(ce); |
158 | } | 148 | } |
@@ -161,23 +151,27 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
161 | * pm_clk_init - Initialize a device's list of power management clocks. | 151 | * pm_clk_init - Initialize a device's list of power management clocks. |
162 | * @dev: Device to initialize the list of PM clocks for. | 152 | * @dev: Device to initialize the list of PM clocks for. |
163 | * | 153 | * |
164 | * Allocate a struct pm_clk_data object, initialize its lock member and | 154 | * Initialize the lock and clock_list members of the device's pm_subsys_data |
165 | * make the @dev's power.subsys_data field point to it. | 155 | * object. |
166 | */ | 156 | */ |
167 | int pm_clk_init(struct device *dev) | 157 | void pm_clk_init(struct device *dev) |
168 | { | 158 | { |
169 | struct pm_clk_data *pcd; | 159 | struct pm_subsys_data *psd = dev_to_psd(dev); |
170 | 160 | if (psd) | |
171 | pcd = kzalloc(sizeof(*pcd), GFP_KERNEL); | 161 | INIT_LIST_HEAD(&psd->clock_list); |
172 | if (!pcd) { | 162 | } |
173 | dev_err(dev, "Not enough memory for PM clock data.\n"); | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | 163 | ||
177 | INIT_LIST_HEAD(&pcd->clock_list); | 164 | /** |
178 | spin_lock_init(&pcd->lock); | 165 | * pm_clk_create - Create and initialize a device's list of PM clocks. |
179 | dev->power.subsys_data = pcd; | 166 | * @dev: Device to create and initialize the list of PM clocks for. |
180 | return 0; | 167 | * |
168 | * Allocate a struct pm_subsys_data object, initialize its lock and clock_list | ||
169 | * members and make the @dev's power.subsys_data field point to it. | ||
170 | */ | ||
171 | int pm_clk_create(struct device *dev) | ||
172 | { | ||
173 | int ret = dev_pm_get_subsys_data(dev); | ||
174 | return ret < 0 ? ret : 0; | ||
181 | } | 175 | } |
182 | 176 | ||
183 | /** | 177 | /** |
@@ -185,29 +179,28 @@ int pm_clk_init(struct device *dev) | |||
185 | * @dev: Device to destroy the list of PM clocks for. | 179 | * @dev: Device to destroy the list of PM clocks for. |
186 | * | 180 | * |
187 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | 181 | * Clear the @dev's power.subsys_data field, remove the list of clock entries |
188 | * from the struct pm_clk_data object pointed to by it before and free | 182 | * from the struct pm_subsys_data object pointed to by it before and free |
189 | * that object. | 183 | * that object. |
190 | */ | 184 | */ |
191 | void pm_clk_destroy(struct device *dev) | 185 | void pm_clk_destroy(struct device *dev) |
192 | { | 186 | { |
193 | struct pm_clk_data *pcd = __to_pcd(dev); | 187 | struct pm_subsys_data *psd = dev_to_psd(dev); |
194 | struct pm_clock_entry *ce, *c; | 188 | struct pm_clock_entry *ce, *c; |
195 | struct list_head list; | 189 | struct list_head list; |
196 | 190 | ||
197 | if (!pcd) | 191 | if (!psd) |
198 | return; | 192 | return; |
199 | 193 | ||
200 | dev->power.subsys_data = NULL; | ||
201 | INIT_LIST_HEAD(&list); | 194 | INIT_LIST_HEAD(&list); |
202 | 195 | ||
203 | spin_lock_irq(&pcd->lock); | 196 | spin_lock_irq(&psd->lock); |
204 | 197 | ||
205 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) | 198 | list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) |
206 | list_move(&ce->node, &list); | 199 | list_move(&ce->node, &list); |
207 | 200 | ||
208 | spin_unlock_irq(&pcd->lock); | 201 | spin_unlock_irq(&psd->lock); |
209 | 202 | ||
210 | kfree(pcd); | 203 | dev_pm_put_subsys_data(dev); |
211 | 204 | ||
212 | list_for_each_entry_safe_reverse(ce, c, &list, node) { | 205 | list_for_each_entry_safe_reverse(ce, c, &list, node) { |
213 | list_del(&ce->node); | 206 | list_del(&ce->node); |
@@ -225,25 +218,25 @@ void pm_clk_destroy(struct device *dev) | |||
225 | */ | 218 | */ |
226 | int pm_clk_suspend(struct device *dev) | 219 | int pm_clk_suspend(struct device *dev) |
227 | { | 220 | { |
228 | struct pm_clk_data *pcd = __to_pcd(dev); | 221 | struct pm_subsys_data *psd = dev_to_psd(dev); |
229 | struct pm_clock_entry *ce; | 222 | struct pm_clock_entry *ce; |
230 | unsigned long flags; | 223 | unsigned long flags; |
231 | 224 | ||
232 | dev_dbg(dev, "%s()\n", __func__); | 225 | dev_dbg(dev, "%s()\n", __func__); |
233 | 226 | ||
234 | if (!pcd) | 227 | if (!psd) |
235 | return 0; | 228 | return 0; |
236 | 229 | ||
237 | spin_lock_irqsave(&pcd->lock, flags); | 230 | spin_lock_irqsave(&psd->lock, flags); |
238 | 231 | ||
239 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { | 232 | list_for_each_entry_reverse(ce, &psd->clock_list, node) { |
240 | if (ce->status < PCE_STATUS_ERROR) { | 233 | if (ce->status < PCE_STATUS_ERROR) { |
241 | clk_disable(ce->clk); | 234 | clk_disable(ce->clk); |
242 | ce->status = PCE_STATUS_ACQUIRED; | 235 | ce->status = PCE_STATUS_ACQUIRED; |
243 | } | 236 | } |
244 | } | 237 | } |
245 | 238 | ||
246 | spin_unlock_irqrestore(&pcd->lock, flags); | 239 | spin_unlock_irqrestore(&psd->lock, flags); |
247 | 240 | ||
248 | return 0; | 241 | return 0; |
249 | } | 242 | } |
@@ -254,25 +247,25 @@ int pm_clk_suspend(struct device *dev) | |||
254 | */ | 247 | */ |
255 | int pm_clk_resume(struct device *dev) | 248 | int pm_clk_resume(struct device *dev) |
256 | { | 249 | { |
257 | struct pm_clk_data *pcd = __to_pcd(dev); | 250 | struct pm_subsys_data *psd = dev_to_psd(dev); |
258 | struct pm_clock_entry *ce; | 251 | struct pm_clock_entry *ce; |
259 | unsigned long flags; | 252 | unsigned long flags; |
260 | 253 | ||
261 | dev_dbg(dev, "%s()\n", __func__); | 254 | dev_dbg(dev, "%s()\n", __func__); |
262 | 255 | ||
263 | if (!pcd) | 256 | if (!psd) |
264 | return 0; | 257 | return 0; |
265 | 258 | ||
266 | spin_lock_irqsave(&pcd->lock, flags); | 259 | spin_lock_irqsave(&psd->lock, flags); |
267 | 260 | ||
268 | list_for_each_entry(ce, &pcd->clock_list, node) { | 261 | list_for_each_entry(ce, &psd->clock_list, node) { |
269 | if (ce->status < PCE_STATUS_ERROR) { | 262 | if (ce->status < PCE_STATUS_ERROR) { |
270 | clk_enable(ce->clk); | 263 | clk_enable(ce->clk); |
271 | ce->status = PCE_STATUS_ENABLED; | 264 | ce->status = PCE_STATUS_ENABLED; |
272 | } | 265 | } |
273 | } | 266 | } |
274 | 267 | ||
275 | spin_unlock_irqrestore(&pcd->lock, flags); | 268 | spin_unlock_irqrestore(&psd->lock, flags); |
276 | 269 | ||
277 | return 0; | 270 | return 0; |
278 | } | 271 | } |
@@ -310,7 +303,7 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
310 | if (dev->pm_domain) | 303 | if (dev->pm_domain) |
311 | break; | 304 | break; |
312 | 305 | ||
313 | error = pm_clk_init(dev); | 306 | error = pm_clk_create(dev); |
314 | if (error) | 307 | if (error) |
315 | break; | 308 | break; |
316 | 309 | ||
@@ -345,22 +338,22 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
345 | */ | 338 | */ |
346 | int pm_clk_suspend(struct device *dev) | 339 | int pm_clk_suspend(struct device *dev) |
347 | { | 340 | { |
348 | struct pm_clk_data *pcd = __to_pcd(dev); | 341 | struct pm_subsys_data *psd = dev_to_psd(dev); |
349 | struct pm_clock_entry *ce; | 342 | struct pm_clock_entry *ce; |
350 | unsigned long flags; | 343 | unsigned long flags; |
351 | 344 | ||
352 | dev_dbg(dev, "%s()\n", __func__); | 345 | dev_dbg(dev, "%s()\n", __func__); |
353 | 346 | ||
354 | /* If there is no driver, the clocks are already disabled. */ | 347 | /* If there is no driver, the clocks are already disabled. */ |
355 | if (!pcd || !dev->driver) | 348 | if (!psd || !dev->driver) |
356 | return 0; | 349 | return 0; |
357 | 350 | ||
358 | spin_lock_irqsave(&pcd->lock, flags); | 351 | spin_lock_irqsave(&psd->lock, flags); |
359 | 352 | ||
360 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) | 353 | list_for_each_entry_reverse(ce, &psd->clock_list, node) |
361 | clk_disable(ce->clk); | 354 | clk_disable(ce->clk); |
362 | 355 | ||
363 | spin_unlock_irqrestore(&pcd->lock, flags); | 356 | spin_unlock_irqrestore(&psd->lock, flags); |
364 | 357 | ||
365 | return 0; | 358 | return 0; |
366 | } | 359 | } |
@@ -371,22 +364,22 @@ int pm_clk_suspend(struct device *dev) | |||
371 | */ | 364 | */ |
372 | int pm_clk_resume(struct device *dev) | 365 | int pm_clk_resume(struct device *dev) |
373 | { | 366 | { |
374 | struct pm_clk_data *pcd = __to_pcd(dev); | 367 | struct pm_subsys_data *psd = dev_to_psd(dev); |
375 | struct pm_clock_entry *ce; | 368 | struct pm_clock_entry *ce; |
376 | unsigned long flags; | 369 | unsigned long flags; |
377 | 370 | ||
378 | dev_dbg(dev, "%s()\n", __func__); | 371 | dev_dbg(dev, "%s()\n", __func__); |
379 | 372 | ||
380 | /* If there is no driver, the clocks should remain disabled. */ | 373 | /* If there is no driver, the clocks should remain disabled. */ |
381 | if (!pcd || !dev->driver) | 374 | if (!psd || !dev->driver) |
382 | return 0; | 375 | return 0; |
383 | 376 | ||
384 | spin_lock_irqsave(&pcd->lock, flags); | 377 | spin_lock_irqsave(&psd->lock, flags); |
385 | 378 | ||
386 | list_for_each_entry(ce, &pcd->clock_list, node) | 379 | list_for_each_entry(ce, &psd->clock_list, node) |
387 | clk_enable(ce->clk); | 380 | clk_enable(ce->clk); |
388 | 381 | ||
389 | spin_unlock_irqrestore(&pcd->lock, flags); | 382 | spin_unlock_irqrestore(&psd->lock, flags); |
390 | 383 | ||
391 | return 0; | 384 | return 0; |
392 | } | 385 | } |
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c new file mode 100644 index 000000000000..29820c396182 --- /dev/null +++ b/drivers/base/power/common.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * drivers/base/power/common.c - Common device power management code. | ||
3 | * | ||
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/pm_clock.h> | ||
14 | |||
15 | /** | ||
16 | * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. | ||
17 | * @dev: Device to handle. | ||
18 | * | ||
19 | * If power.subsys_data is NULL, point it to a new object, otherwise increment | ||
20 | * its reference counter. Return 1 if a new object has been created, otherwise | ||
21 | * return 0 or error code. | ||
22 | */ | ||
23 | int dev_pm_get_subsys_data(struct device *dev) | ||
24 | { | ||
25 | struct pm_subsys_data *psd; | ||
26 | int ret = 0; | ||
27 | |||
28 | psd = kzalloc(sizeof(*psd), GFP_KERNEL); | ||
29 | if (!psd) | ||
30 | return -ENOMEM; | ||
31 | |||
32 | spin_lock_irq(&dev->power.lock); | ||
33 | |||
34 | if (dev->power.subsys_data) { | ||
35 | dev->power.subsys_data->refcount++; | ||
36 | } else { | ||
37 | spin_lock_init(&psd->lock); | ||
38 | psd->refcount = 1; | ||
39 | dev->power.subsys_data = psd; | ||
40 | pm_clk_init(dev); | ||
41 | psd = NULL; | ||
42 | ret = 1; | ||
43 | } | ||
44 | |||
45 | spin_unlock_irq(&dev->power.lock); | ||
46 | |||
47 | /* kfree() verifies that its argument is nonzero. */ | ||
48 | kfree(psd); | ||
49 | |||
50 | return ret; | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | ||
53 | |||
54 | /** | ||
55 | * dev_pm_put_subsys_data - Drop reference to power.subsys_data. | ||
56 | * @dev: Device to handle. | ||
57 | * | ||
58 | * If the reference counter of power.subsys_data is zero after dropping the | ||
59 | * reference, power.subsys_data is removed. Return 1 if that happens or 0 | ||
60 | * otherwise. | ||
61 | */ | ||
62 | int dev_pm_put_subsys_data(struct device *dev) | ||
63 | { | ||
64 | struct pm_subsys_data *psd; | ||
65 | int ret = 0; | ||
66 | |||
67 | spin_lock_irq(&dev->power.lock); | ||
68 | |||
69 | psd = dev_to_psd(dev); | ||
70 | if (!psd) { | ||
71 | ret = -EINVAL; | ||
72 | goto out; | ||
73 | } | ||
74 | |||
75 | if (--psd->refcount == 0) { | ||
76 | dev->power.subsys_data = NULL; | ||
77 | kfree(psd); | ||
78 | ret = 1; | ||
79 | } | ||
80 | |||
81 | out: | ||
82 | spin_unlock_irq(&dev->power.lock); | ||
83 | |||
84 | return ret; | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); | ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1c374579407c..22fe029ca212 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) | |||
29 | return pd_to_genpd(dev->pm_domain); | 29 | return pd_to_genpd(dev->pm_domain); |
30 | } | 30 | } |
31 | 31 | ||
32 | static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) | 32 | static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) |
33 | { | 33 | { |
34 | if (!WARN_ON(genpd->sd_count == 0)) | 34 | bool ret = false; |
35 | genpd->sd_count--; | 35 | |
36 | if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) | ||
37 | ret = !!atomic_dec_and_test(&genpd->sd_count); | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) | ||
43 | { | ||
44 | atomic_inc(&genpd->sd_count); | ||
45 | smp_mb__after_atomic_inc(); | ||
36 | } | 46 | } |
37 | 47 | ||
38 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) | 48 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) |
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd) | |||
71 | } | 81 | } |
72 | 82 | ||
73 | /** | 83 | /** |
74 | * pm_genpd_poweron - Restore power to a given PM domain and its parents. | 84 | * __pm_genpd_poweron - Restore power to a given PM domain and its masters. |
75 | * @genpd: PM domain to power up. | 85 | * @genpd: PM domain to power up. |
76 | * | 86 | * |
77 | * Restore power to @genpd and all of its parents so that it is possible to | 87 | * Restore power to @genpd and all of its masters so that it is possible to |
78 | * resume a device belonging to it. | 88 | * resume a device belonging to it. |
79 | */ | 89 | */ |
80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | 90 | int __pm_genpd_poweron(struct generic_pm_domain *genpd) |
91 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
81 | { | 92 | { |
82 | struct generic_pm_domain *parent = genpd->parent; | 93 | struct gpd_link *link; |
94 | DEFINE_WAIT(wait); | ||
83 | int ret = 0; | 95 | int ret = 0; |
84 | 96 | ||
85 | start: | 97 | /* If the domain's master is being waited for, we have to wait too. */ |
86 | if (parent) { | 98 | for (;;) { |
87 | genpd_acquire_lock(parent); | 99 | prepare_to_wait(&genpd->status_wait_queue, &wait, |
88 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | 100 | TASK_UNINTERRUPTIBLE); |
89 | } else { | 101 | if (genpd->status != GPD_STATE_WAIT_MASTER) |
102 | break; | ||
103 | mutex_unlock(&genpd->lock); | ||
104 | |||
105 | schedule(); | ||
106 | |||
90 | mutex_lock(&genpd->lock); | 107 | mutex_lock(&genpd->lock); |
91 | } | 108 | } |
109 | finish_wait(&genpd->status_wait_queue, &wait); | ||
92 | 110 | ||
93 | if (genpd->status == GPD_STATE_ACTIVE | 111 | if (genpd->status == GPD_STATE_ACTIVE |
94 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) | 112 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) |
95 | goto out; | 113 | return 0; |
96 | 114 | ||
97 | if (genpd->status != GPD_STATE_POWER_OFF) { | 115 | if (genpd->status != GPD_STATE_POWER_OFF) { |
98 | genpd_set_active(genpd); | 116 | genpd_set_active(genpd); |
99 | goto out; | 117 | return 0; |
100 | } | 118 | } |
101 | 119 | ||
102 | if (parent && parent->status != GPD_STATE_ACTIVE) { | 120 | /* |
121 | * The list is guaranteed not to change while the loop below is being | ||
122 | * executed, unless one of the masters' .power_on() callbacks fiddles | ||
123 | * with it. | ||
124 | */ | ||
125 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | ||
126 | genpd_sd_counter_inc(link->master); | ||
127 | genpd->status = GPD_STATE_WAIT_MASTER; | ||
128 | |||
103 | mutex_unlock(&genpd->lock); | 129 | mutex_unlock(&genpd->lock); |
104 | genpd_release_lock(parent); | ||
105 | 130 | ||
106 | ret = pm_genpd_poweron(parent); | 131 | ret = pm_genpd_poweron(link->master); |
107 | if (ret) | ||
108 | return ret; | ||
109 | 132 | ||
110 | goto start; | 133 | mutex_lock(&genpd->lock); |
134 | |||
135 | /* | ||
136 | * The "wait for parent" status is guaranteed not to change | ||
137 | * while the master is powering on. | ||
138 | */ | ||
139 | genpd->status = GPD_STATE_POWER_OFF; | ||
140 | wake_up_all(&genpd->status_wait_queue); | ||
141 | if (ret) { | ||
142 | genpd_sd_counter_dec(link->master); | ||
143 | goto err; | ||
144 | } | ||
111 | } | 145 | } |
112 | 146 | ||
113 | if (genpd->power_on) { | 147 | if (genpd->power_on) { |
114 | ret = genpd->power_on(genpd); | 148 | ret = genpd->power_on(genpd); |
115 | if (ret) | 149 | if (ret) |
116 | goto out; | 150 | goto err; |
117 | } | 151 | } |
118 | 152 | ||
119 | genpd_set_active(genpd); | 153 | genpd_set_active(genpd); |
120 | if (parent) | ||
121 | parent->sd_count++; | ||
122 | 154 | ||
123 | out: | 155 | return 0; |
124 | mutex_unlock(&genpd->lock); | 156 | |
125 | if (parent) | 157 | err: |
126 | genpd_release_lock(parent); | 158 | list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) |
159 | genpd_sd_counter_dec(link->master); | ||
127 | 160 | ||
128 | return ret; | 161 | return ret; |
129 | } | 162 | } |
130 | 163 | ||
164 | /** | ||
165 | * pm_genpd_poweron - Restore power to a given PM domain and its masters. | ||
166 | * @genpd: PM domain to power up. | ||
167 | */ | ||
168 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | ||
169 | { | ||
170 | int ret; | ||
171 | |||
172 | mutex_lock(&genpd->lock); | ||
173 | ret = __pm_genpd_poweron(genpd); | ||
174 | mutex_unlock(&genpd->lock); | ||
175 | return ret; | ||
176 | } | ||
177 | |||
131 | #endif /* CONFIG_PM */ | 178 | #endif /* CONFIG_PM */ |
132 | 179 | ||
133 | #ifdef CONFIG_PM_RUNTIME | 180 | #ifdef CONFIG_PM_RUNTIME |
134 | 181 | ||
135 | /** | 182 | /** |
136 | * __pm_genpd_save_device - Save the pre-suspend state of a device. | 183 | * __pm_genpd_save_device - Save the pre-suspend state of a device. |
137 | * @dle: Device list entry of the device to save the state of. | 184 | * @pdd: Domain data of the device to save the state of. |
138 | * @genpd: PM domain the device belongs to. | 185 | * @genpd: PM domain the device belongs to. |
139 | */ | 186 | */ |
140 | static int __pm_genpd_save_device(struct dev_list_entry *dle, | 187 | static int __pm_genpd_save_device(struct pm_domain_data *pdd, |
141 | struct generic_pm_domain *genpd) | 188 | struct generic_pm_domain *genpd) |
142 | __releases(&genpd->lock) __acquires(&genpd->lock) | 189 | __releases(&genpd->lock) __acquires(&genpd->lock) |
143 | { | 190 | { |
144 | struct device *dev = dle->dev; | 191 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
192 | struct device *dev = pdd->dev; | ||
145 | struct device_driver *drv = dev->driver; | 193 | struct device_driver *drv = dev->driver; |
146 | int ret = 0; | 194 | int ret = 0; |
147 | 195 | ||
148 | if (dle->need_restore) | 196 | if (gpd_data->need_restore) |
149 | return 0; | 197 | return 0; |
150 | 198 | ||
151 | mutex_unlock(&genpd->lock); | 199 | mutex_unlock(&genpd->lock); |
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle, | |||
163 | mutex_lock(&genpd->lock); | 211 | mutex_lock(&genpd->lock); |
164 | 212 | ||
165 | if (!ret) | 213 | if (!ret) |
166 | dle->need_restore = true; | 214 | gpd_data->need_restore = true; |
167 | 215 | ||
168 | return ret; | 216 | return ret; |
169 | } | 217 | } |
170 | 218 | ||
171 | /** | 219 | /** |
172 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. | 220 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. |
173 | * @dle: Device list entry of the device to restore the state of. | 221 | * @pdd: Domain data of the device to restore the state of. |
174 | * @genpd: PM domain the device belongs to. | 222 | * @genpd: PM domain the device belongs to. |
175 | */ | 223 | */ |
176 | static void __pm_genpd_restore_device(struct dev_list_entry *dle, | 224 | static void __pm_genpd_restore_device(struct pm_domain_data *pdd, |
177 | struct generic_pm_domain *genpd) | 225 | struct generic_pm_domain *genpd) |
178 | __releases(&genpd->lock) __acquires(&genpd->lock) | 226 | __releases(&genpd->lock) __acquires(&genpd->lock) |
179 | { | 227 | { |
180 | struct device *dev = dle->dev; | 228 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
229 | struct device *dev = pdd->dev; | ||
181 | struct device_driver *drv = dev->driver; | 230 | struct device_driver *drv = dev->driver; |
182 | 231 | ||
183 | if (!dle->need_restore) | 232 | if (!gpd_data->need_restore) |
184 | return; | 233 | return; |
185 | 234 | ||
186 | mutex_unlock(&genpd->lock); | 235 | mutex_unlock(&genpd->lock); |
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, | |||
197 | 246 | ||
198 | mutex_lock(&genpd->lock); | 247 | mutex_lock(&genpd->lock); |
199 | 248 | ||
200 | dle->need_restore = false; | 249 | gpd_data->need_restore = false; |
201 | } | 250 | } |
202 | 251 | ||
203 | /** | 252 | /** |
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle, | |||
211 | */ | 260 | */ |
212 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) | 261 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) |
213 | { | 262 | { |
214 | return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | 263 | return genpd->status == GPD_STATE_WAIT_MASTER |
264 | || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | ||
215 | } | 265 | } |
216 | 266 | ||
217 | /** | 267 | /** |
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | |||
238 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | 288 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) |
239 | __releases(&genpd->lock) __acquires(&genpd->lock) | 289 | __releases(&genpd->lock) __acquires(&genpd->lock) |
240 | { | 290 | { |
241 | struct generic_pm_domain *parent; | 291 | struct pm_domain_data *pdd; |
242 | struct dev_list_entry *dle; | 292 | struct gpd_link *link; |
243 | unsigned int not_suspended; | 293 | unsigned int not_suspended; |
244 | int ret = 0; | 294 | int ret = 0; |
245 | 295 | ||
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
247 | /* | 297 | /* |
248 | * Do not try to power off the domain in the following situations: | 298 | * Do not try to power off the domain in the following situations: |
249 | * (1) The domain is already in the "power off" state. | 299 | * (1) The domain is already in the "power off" state. |
250 | * (2) System suspend is in progress. | 300 | * (2) The domain is waiting for its master to power up. |
251 | * (3) One of the domain's devices is being resumed right now. | 301 | * (3) One of the domain's devices is being resumed right now. |
302 | * (4) System suspend is in progress. | ||
252 | */ | 303 | */ |
253 | if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 | 304 | if (genpd->status == GPD_STATE_POWER_OFF |
254 | || genpd->resume_count > 0) | 305 | || genpd->status == GPD_STATE_WAIT_MASTER |
306 | || genpd->resume_count > 0 || genpd->prepared_count > 0) | ||
255 | return 0; | 307 | return 0; |
256 | 308 | ||
257 | if (genpd->sd_count > 0) | 309 | if (atomic_read(&genpd->sd_count) > 0) |
258 | return -EBUSY; | 310 | return -EBUSY; |
259 | 311 | ||
260 | not_suspended = 0; | 312 | not_suspended = 0; |
261 | list_for_each_entry(dle, &genpd->dev_list, node) | 313 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
262 | if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) | 314 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
315 | || pdd->dev->power.irq_safe)) | ||
263 | not_suspended++; | 316 | not_suspended++; |
264 | 317 | ||
265 | if (not_suspended > genpd->in_progress) | 318 | if (not_suspended > genpd->in_progress) |
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
282 | genpd->status = GPD_STATE_BUSY; | 335 | genpd->status = GPD_STATE_BUSY; |
283 | genpd->poweroff_task = current; | 336 | genpd->poweroff_task = current; |
284 | 337 | ||
285 | list_for_each_entry_reverse(dle, &genpd->dev_list, node) { | 338 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { |
286 | ret = __pm_genpd_save_device(dle, genpd); | 339 | ret = atomic_read(&genpd->sd_count) == 0 ? |
340 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; | ||
341 | |||
342 | if (genpd_abort_poweroff(genpd)) | ||
343 | goto out; | ||
344 | |||
287 | if (ret) { | 345 | if (ret) { |
288 | genpd_set_active(genpd); | 346 | genpd_set_active(genpd); |
289 | goto out; | 347 | goto out; |
290 | } | 348 | } |
291 | 349 | ||
292 | if (genpd_abort_poweroff(genpd)) | ||
293 | goto out; | ||
294 | |||
295 | if (genpd->status == GPD_STATE_REPEAT) { | 350 | if (genpd->status == GPD_STATE_REPEAT) { |
296 | genpd->poweroff_task = NULL; | 351 | genpd->poweroff_task = NULL; |
297 | goto start; | 352 | goto start; |
298 | } | 353 | } |
299 | } | 354 | } |
300 | 355 | ||
301 | parent = genpd->parent; | 356 | if (genpd->power_off) { |
302 | if (parent) { | 357 | if (atomic_read(&genpd->sd_count) > 0) { |
303 | mutex_unlock(&genpd->lock); | 358 | ret = -EBUSY; |
304 | |||
305 | genpd_acquire_lock(parent); | ||
306 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
307 | |||
308 | if (genpd_abort_poweroff(genpd)) { | ||
309 | genpd_release_lock(parent); | ||
310 | goto out; | 359 | goto out; |
311 | } | 360 | } |
312 | } | ||
313 | 361 | ||
314 | if (genpd->power_off) { | 362 | /* |
363 | * If sd_count > 0 at this point, one of the subdomains hasn't | ||
364 | * managed to call pm_genpd_poweron() for the master yet after | ||
365 | * incrementing it. In that case pm_genpd_poweron() will wait | ||
366 | * for us to drop the lock, so we can call .power_off() and let | ||
367 | * the pm_genpd_poweron() restore power for us (this shouldn't | ||
368 | * happen very often). | ||
369 | */ | ||
315 | ret = genpd->power_off(genpd); | 370 | ret = genpd->power_off(genpd); |
316 | if (ret == -EBUSY) { | 371 | if (ret == -EBUSY) { |
317 | genpd_set_active(genpd); | 372 | genpd_set_active(genpd); |
318 | if (parent) | ||
319 | genpd_release_lock(parent); | ||
320 | |||
321 | goto out; | 373 | goto out; |
322 | } | 374 | } |
323 | } | 375 | } |
324 | 376 | ||
325 | genpd->status = GPD_STATE_POWER_OFF; | 377 | genpd->status = GPD_STATE_POWER_OFF; |
326 | 378 | ||
327 | if (parent) { | 379 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
328 | genpd_sd_counter_dec(parent); | 380 | genpd_sd_counter_dec(link->master); |
329 | if (parent->sd_count == 0) | 381 | genpd_queue_power_off_work(link->master); |
330 | genpd_queue_power_off_work(parent); | ||
331 | |||
332 | genpd_release_lock(parent); | ||
333 | } | 382 | } |
334 | 383 | ||
335 | out: | 384 | out: |
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
371 | if (IS_ERR(genpd)) | 420 | if (IS_ERR(genpd)) |
372 | return -EINVAL; | 421 | return -EINVAL; |
373 | 422 | ||
423 | might_sleep_if(!genpd->dev_irq_safe); | ||
424 | |||
374 | if (genpd->stop_device) { | 425 | if (genpd->stop_device) { |
375 | int ret = genpd->stop_device(dev); | 426 | int ret = genpd->stop_device(dev); |
376 | if (ret) | 427 | if (ret) |
377 | return ret; | 428 | return ret; |
378 | } | 429 | } |
379 | 430 | ||
431 | /* | ||
432 | * If power.irq_safe is set, this routine will be run with interrupts | ||
433 | * off, so it can't use mutexes. | ||
434 | */ | ||
435 | if (dev->power.irq_safe) | ||
436 | return 0; | ||
437 | |||
380 | mutex_lock(&genpd->lock); | 438 | mutex_lock(&genpd->lock); |
381 | genpd->in_progress++; | 439 | genpd->in_progress++; |
382 | pm_genpd_poweroff(genpd); | 440 | pm_genpd_poweroff(genpd); |
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
387 | } | 445 | } |
388 | 446 | ||
389 | /** | 447 | /** |
390 | * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
391 | * @dev: Device to resume. | ||
392 | * @genpd: PM domain the device belongs to. | ||
393 | */ | ||
394 | static void __pm_genpd_runtime_resume(struct device *dev, | ||
395 | struct generic_pm_domain *genpd) | ||
396 | { | ||
397 | struct dev_list_entry *dle; | ||
398 | |||
399 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
400 | if (dle->dev == dev) { | ||
401 | __pm_genpd_restore_device(dle, genpd); | ||
402 | break; | ||
403 | } | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | 448 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. |
409 | * @dev: Device to resume. | 449 | * @dev: Device to resume. |
410 | * | 450 | * |
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
424 | if (IS_ERR(genpd)) | 464 | if (IS_ERR(genpd)) |
425 | return -EINVAL; | 465 | return -EINVAL; |
426 | 466 | ||
427 | ret = pm_genpd_poweron(genpd); | 467 | might_sleep_if(!genpd->dev_irq_safe); |
428 | if (ret) | 468 | |
429 | return ret; | 469 | /* If power.irq_safe, the PM domain is never powered off. */ |
470 | if (dev->power.irq_safe) | ||
471 | goto out; | ||
430 | 472 | ||
431 | mutex_lock(&genpd->lock); | 473 | mutex_lock(&genpd->lock); |
474 | ret = __pm_genpd_poweron(genpd); | ||
475 | if (ret) { | ||
476 | mutex_unlock(&genpd->lock); | ||
477 | return ret; | ||
478 | } | ||
432 | genpd->status = GPD_STATE_BUSY; | 479 | genpd->status = GPD_STATE_BUSY; |
433 | genpd->resume_count++; | 480 | genpd->resume_count++; |
434 | for (;;) { | 481 | for (;;) { |
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
448 | mutex_lock(&genpd->lock); | 495 | mutex_lock(&genpd->lock); |
449 | } | 496 | } |
450 | finish_wait(&genpd->status_wait_queue, &wait); | 497 | finish_wait(&genpd->status_wait_queue, &wait); |
451 | __pm_genpd_runtime_resume(dev, genpd); | 498 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); |
452 | genpd->resume_count--; | 499 | genpd->resume_count--; |
453 | genpd_set_active(genpd); | 500 | genpd_set_active(genpd); |
454 | wake_up_all(&genpd->status_wait_queue); | 501 | wake_up_all(&genpd->status_wait_queue); |
455 | mutex_unlock(&genpd->lock); | 502 | mutex_unlock(&genpd->lock); |
456 | 503 | ||
504 | out: | ||
457 | if (genpd->start_device) | 505 | if (genpd->start_device) |
458 | genpd->start_device(dev); | 506 | genpd->start_device(dev); |
459 | 507 | ||
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void) | |||
478 | #else | 526 | #else |
479 | 527 | ||
480 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 528 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
481 | static inline void __pm_genpd_runtime_resume(struct device *dev, | ||
482 | struct generic_pm_domain *genpd) {} | ||
483 | 529 | ||
484 | #define pm_genpd_runtime_suspend NULL | 530 | #define pm_genpd_runtime_suspend NULL |
485 | #define pm_genpd_runtime_resume NULL | 531 | #define pm_genpd_runtime_resume NULL |
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
489 | #ifdef CONFIG_PM_SLEEP | 535 | #ifdef CONFIG_PM_SLEEP |
490 | 536 | ||
491 | /** | 537 | /** |
492 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. | 538 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. |
493 | * @genpd: PM domain to power off, if possible. | 539 | * @genpd: PM domain to power off, if possible. |
494 | * | 540 | * |
495 | * Check if the given PM domain can be powered off (during system suspend or | 541 | * Check if the given PM domain can be powered off (during system suspend or |
496 | * hibernation) and do that if so. Also, in that case propagate to its parent. | 542 | * hibernation) and do that if so. Also, in that case propagate to its masters. |
497 | * | 543 | * |
498 | * This function is only called in "noirq" stages of system power transitions, | 544 | * This function is only called in "noirq" stages of system power transitions, |
499 | * so it need not acquire locks (all of the "noirq" callbacks are executed | 545 | * so it need not acquire locks (all of the "noirq" callbacks are executed |
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
501 | */ | 547 | */ |
502 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | 548 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) |
503 | { | 549 | { |
504 | struct generic_pm_domain *parent = genpd->parent; | 550 | struct gpd_link *link; |
505 | 551 | ||
506 | if (genpd->status == GPD_STATE_POWER_OFF) | 552 | if (genpd->status == GPD_STATE_POWER_OFF) |
507 | return; | 553 | return; |
508 | 554 | ||
509 | if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) | 555 | if (genpd->suspended_count != genpd->device_count |
556 | || atomic_read(&genpd->sd_count) > 0) | ||
510 | return; | 557 | return; |
511 | 558 | ||
512 | if (genpd->power_off) | 559 | if (genpd->power_off) |
513 | genpd->power_off(genpd); | 560 | genpd->power_off(genpd); |
514 | 561 | ||
515 | genpd->status = GPD_STATE_POWER_OFF; | 562 | genpd->status = GPD_STATE_POWER_OFF; |
516 | if (parent) { | 563 | |
517 | genpd_sd_counter_dec(parent); | 564 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
518 | pm_genpd_sync_poweroff(parent); | 565 | genpd_sd_counter_dec(link->master); |
566 | pm_genpd_sync_poweroff(link->master); | ||
519 | } | 567 | } |
520 | } | 568 | } |
521 | 569 | ||
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev) | |||
1034 | */ | 1082 | */ |
1035 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | 1083 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) |
1036 | { | 1084 | { |
1037 | struct dev_list_entry *dle; | 1085 | struct generic_pm_domain_data *gpd_data; |
1086 | struct pm_domain_data *pdd; | ||
1038 | int ret = 0; | 1087 | int ret = 0; |
1039 | 1088 | ||
1040 | dev_dbg(dev, "%s()\n", __func__); | 1089 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1054 | goto out; | 1103 | goto out; |
1055 | } | 1104 | } |
1056 | 1105 | ||
1057 | list_for_each_entry(dle, &genpd->dev_list, node) | 1106 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
1058 | if (dle->dev == dev) { | 1107 | if (pdd->dev == dev) { |
1059 | ret = -EINVAL; | 1108 | ret = -EINVAL; |
1060 | goto out; | 1109 | goto out; |
1061 | } | 1110 | } |
1062 | 1111 | ||
1063 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); | 1112 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
1064 | if (!dle) { | 1113 | if (!gpd_data) { |
1065 | ret = -ENOMEM; | 1114 | ret = -ENOMEM; |
1066 | goto out; | 1115 | goto out; |
1067 | } | 1116 | } |
1068 | 1117 | ||
1069 | dle->dev = dev; | ||
1070 | dle->need_restore = false; | ||
1071 | list_add_tail(&dle->node, &genpd->dev_list); | ||
1072 | genpd->device_count++; | 1118 | genpd->device_count++; |
1073 | 1119 | ||
1074 | spin_lock_irq(&dev->power.lock); | ||
1075 | dev->pm_domain = &genpd->domain; | 1120 | dev->pm_domain = &genpd->domain; |
1076 | spin_unlock_irq(&dev->power.lock); | 1121 | dev_pm_get_subsys_data(dev); |
1122 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1123 | gpd_data->base.dev = dev; | ||
1124 | gpd_data->need_restore = false; | ||
1125 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | ||
1077 | 1126 | ||
1078 | out: | 1127 | out: |
1079 | genpd_release_lock(genpd); | 1128 | genpd_release_lock(genpd); |
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1089 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1138 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
1090 | struct device *dev) | 1139 | struct device *dev) |
1091 | { | 1140 | { |
1092 | struct dev_list_entry *dle; | 1141 | struct pm_domain_data *pdd; |
1093 | int ret = -EINVAL; | 1142 | int ret = -EINVAL; |
1094 | 1143 | ||
1095 | dev_dbg(dev, "%s()\n", __func__); | 1144 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1104 | goto out; | 1153 | goto out; |
1105 | } | 1154 | } |
1106 | 1155 | ||
1107 | list_for_each_entry(dle, &genpd->dev_list, node) { | 1156 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
1108 | if (dle->dev != dev) | 1157 | if (pdd->dev != dev) |
1109 | continue; | 1158 | continue; |
1110 | 1159 | ||
1111 | spin_lock_irq(&dev->power.lock); | 1160 | list_del_init(&pdd->list_node); |
1161 | pdd->dev = NULL; | ||
1162 | dev_pm_put_subsys_data(dev); | ||
1112 | dev->pm_domain = NULL; | 1163 | dev->pm_domain = NULL; |
1113 | spin_unlock_irq(&dev->power.lock); | 1164 | kfree(to_gpd_data(pdd)); |
1114 | 1165 | ||
1115 | genpd->device_count--; | 1166 | genpd->device_count--; |
1116 | list_del(&dle->node); | ||
1117 | kfree(dle); | ||
1118 | 1167 | ||
1119 | ret = 0; | 1168 | ret = 0; |
1120 | break; | 1169 | break; |
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1129 | /** | 1178 | /** |
1130 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1179 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1131 | * @genpd: Master PM domain to add the subdomain to. | 1180 | * @genpd: Master PM domain to add the subdomain to. |
1132 | * @new_subdomain: Subdomain to be added. | 1181 | * @subdomain: Subdomain to be added. |
1133 | */ | 1182 | */ |
1134 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1183 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
1135 | struct generic_pm_domain *new_subdomain) | 1184 | struct generic_pm_domain *subdomain) |
1136 | { | 1185 | { |
1137 | struct generic_pm_domain *subdomain; | 1186 | struct gpd_link *link; |
1138 | int ret = 0; | 1187 | int ret = 0; |
1139 | 1188 | ||
1140 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) | 1189 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1141 | return -EINVAL; | 1190 | return -EINVAL; |
1142 | 1191 | ||
1143 | start: | 1192 | start: |
1144 | genpd_acquire_lock(genpd); | 1193 | genpd_acquire_lock(genpd); |
1145 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); | 1194 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
1146 | 1195 | ||
1147 | if (new_subdomain->status != GPD_STATE_POWER_OFF | 1196 | if (subdomain->status != GPD_STATE_POWER_OFF |
1148 | && new_subdomain->status != GPD_STATE_ACTIVE) { | 1197 | && subdomain->status != GPD_STATE_ACTIVE) { |
1149 | mutex_unlock(&new_subdomain->lock); | 1198 | mutex_unlock(&subdomain->lock); |
1150 | genpd_release_lock(genpd); | 1199 | genpd_release_lock(genpd); |
1151 | goto start; | 1200 | goto start; |
1152 | } | 1201 | } |
1153 | 1202 | ||
1154 | if (genpd->status == GPD_STATE_POWER_OFF | 1203 | if (genpd->status == GPD_STATE_POWER_OFF |
1155 | && new_subdomain->status != GPD_STATE_POWER_OFF) { | 1204 | && subdomain->status != GPD_STATE_POWER_OFF) { |
1156 | ret = -EINVAL; | 1205 | ret = -EINVAL; |
1157 | goto out; | 1206 | goto out; |
1158 | } | 1207 | } |
1159 | 1208 | ||
1160 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1209 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
1161 | if (subdomain == new_subdomain) { | 1210 | if (link->slave == subdomain && link->master == genpd) { |
1162 | ret = -EINVAL; | 1211 | ret = -EINVAL; |
1163 | goto out; | 1212 | goto out; |
1164 | } | 1213 | } |
1165 | } | 1214 | } |
1166 | 1215 | ||
1167 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); | 1216 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
1168 | new_subdomain->parent = genpd; | 1217 | if (!link) { |
1218 | ret = -ENOMEM; | ||
1219 | goto out; | ||
1220 | } | ||
1221 | link->master = genpd; | ||
1222 | list_add_tail(&link->master_node, &genpd->master_links); | ||
1223 | link->slave = subdomain; | ||
1224 | list_add_tail(&link->slave_node, &subdomain->slave_links); | ||
1169 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1225 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1170 | genpd->sd_count++; | 1226 | genpd_sd_counter_inc(genpd); |
1171 | 1227 | ||
1172 | out: | 1228 | out: |
1173 | mutex_unlock(&new_subdomain->lock); | 1229 | mutex_unlock(&subdomain->lock); |
1174 | genpd_release_lock(genpd); | 1230 | genpd_release_lock(genpd); |
1175 | 1231 | ||
1176 | return ret; | 1232 | return ret; |
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
1179 | /** | 1235 | /** |
1180 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1236 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
1181 | * @genpd: Master PM domain to remove the subdomain from. | 1237 | * @genpd: Master PM domain to remove the subdomain from. |
1182 | * @target: Subdomain to be removed. | 1238 | * @subdomain: Subdomain to be removed. |
1183 | */ | 1239 | */ |
1184 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1240 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
1185 | struct generic_pm_domain *target) | 1241 | struct generic_pm_domain *subdomain) |
1186 | { | 1242 | { |
1187 | struct generic_pm_domain *subdomain; | 1243 | struct gpd_link *link; |
1188 | int ret = -EINVAL; | 1244 | int ret = -EINVAL; |
1189 | 1245 | ||
1190 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) | 1246 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1191 | return -EINVAL; | 1247 | return -EINVAL; |
1192 | 1248 | ||
1193 | start: | 1249 | start: |
1194 | genpd_acquire_lock(genpd); | 1250 | genpd_acquire_lock(genpd); |
1195 | 1251 | ||
1196 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1252 | list_for_each_entry(link, &genpd->master_links, master_node) { |
1197 | if (subdomain != target) | 1253 | if (link->slave != subdomain) |
1198 | continue; | 1254 | continue; |
1199 | 1255 | ||
1200 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1256 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
1206 | goto start; | 1262 | goto start; |
1207 | } | 1263 | } |
1208 | 1264 | ||
1209 | list_del(&subdomain->sd_node); | 1265 | list_del(&link->master_node); |
1210 | subdomain->parent = NULL; | 1266 | list_del(&link->slave_node); |
1267 | kfree(link); | ||
1211 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1268 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1212 | genpd_sd_counter_dec(genpd); | 1269 | genpd_sd_counter_dec(genpd); |
1213 | 1270 | ||
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1234 | if (IS_ERR_OR_NULL(genpd)) | 1291 | if (IS_ERR_OR_NULL(genpd)) |
1235 | return; | 1292 | return; |
1236 | 1293 | ||
1237 | INIT_LIST_HEAD(&genpd->sd_node); | 1294 | INIT_LIST_HEAD(&genpd->master_links); |
1238 | genpd->parent = NULL; | 1295 | INIT_LIST_HEAD(&genpd->slave_links); |
1239 | INIT_LIST_HEAD(&genpd->dev_list); | 1296 | INIT_LIST_HEAD(&genpd->dev_list); |
1240 | INIT_LIST_HEAD(&genpd->sd_list); | ||
1241 | mutex_init(&genpd->lock); | 1297 | mutex_init(&genpd->lock); |
1242 | genpd->gov = gov; | 1298 | genpd->gov = gov; |
1243 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1299 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
1244 | genpd->in_progress = 0; | 1300 | genpd->in_progress = 0; |
1245 | genpd->sd_count = 0; | 1301 | atomic_set(&genpd->sd_count, 0); |
1246 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1302 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
1247 | init_waitqueue_head(&genpd->status_wait_queue); | 1303 | init_waitqueue_head(&genpd->status_wait_queue); |
1248 | genpd->poweroff_task = NULL; | 1304 | genpd->poweroff_task = NULL; |