diff options
author | Rafael J. Wysocki <rjw@sisk.pl> | 2012-09-24 15:41:31 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2012-09-24 15:41:31 -0400 |
commit | b9142167a2bb979b58b98ffcd928a311b55cbd9f (patch) | |
tree | ed6f8d78257469d8caea08da8ce737bfbaaaff5a /drivers/base | |
parent | 057d51a1268fe4be039db8ff0791fcfcb63a4f1b (diff) | |
parent | 8376869e51f5094e87229aa6200c43ada85c9aaf (diff) |
Merge branch 'pm-qos'
* pm-qos:
Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/power/qos.c | 67 |
1 file changed, 26 insertions, 41 deletions
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 968a77145e81..74a67e0019a2 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -24,32 +24,26 @@ | |||
24 | * . a system-wide notification callback using the dev_pm_qos_*_global_notifier | 24 | * . a system-wide notification callback using the dev_pm_qos_*_global_notifier |
25 | * API. The notification chain data is stored in a static variable. | 25 | * API. The notification chain data is stored in a static variable. |
26 | * | 26 | * |
27 | * Notes about the per-device constraint data struct allocation: | 27 | * Note about the per-device constraint data struct allocation: |
28 | * . The per-device constraints data struct ptr is stored into the device | 28 | * . The per-device constraints data struct ptr is tored into the device |
29 | * dev_pm_info. | 29 | * dev_pm_info. |
30 | * . To minimize the data usage by the per-device constraints, the data struct | 30 | * . To minimize the data usage by the per-device constraints, the data struct |
31 | * is only allocated at the first call to dev_pm_qos_add_request. | 31 | * is only allocated at the first call to dev_pm_qos_add_request. |
32 | * . The data is later free'd when the device is removed from the system. | 32 | * . The data is later free'd when the device is removed from the system. |
33 | * | 33 | * . A global mutex protects the constraints users from the data being |
34 | * Notes about locking: | 34 | * allocated and free'd. |
35 | * . The dev->power.lock lock protects the constraints list | ||
36 | * (dev->power.constraints) allocation and free, as triggered by the | ||
37 | * driver core code at device insertion and removal, | ||
38 | * . A global lock dev_pm_qos_lock protects the constraints list entries | ||
39 | * from any modification and the notifiers registration and unregistration. | ||
40 | * . For both locks a spinlock is needed since this code can be called from | ||
41 | * interrupt context or spinlock protected context. | ||
42 | */ | 35 | */ |
43 | 36 | ||
44 | #include <linux/pm_qos.h> | 37 | #include <linux/pm_qos.h> |
45 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
46 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
47 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/mutex.h> | ||
48 | #include <linux/export.h> | 42 | #include <linux/export.h> |
49 | 43 | ||
50 | #include "power.h" | 44 | #include "power.h" |
51 | 45 | ||
52 | static DEFINE_SPINLOCK(dev_pm_qos_lock); | 46 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
53 | 47 | ||
54 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 48 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
55 | 49 | ||
@@ -116,19 +110,18 @@ static int apply_constraint(struct dev_pm_qos_request *req, | |||
116 | * @dev: device to allocate data for | 110 | * @dev: device to allocate data for |
117 | * | 111 | * |
118 | * Called at the first call to add_request, for constraint data allocation | 112 | * Called at the first call to add_request, for constraint data allocation |
119 | * Must be called with the dev_pm_qos_lock lock held | 113 | * Must be called with the dev_pm_qos_mtx mutex held |
120 | */ | 114 | */ |
121 | static int dev_pm_qos_constraints_allocate(struct device *dev) | 115 | static int dev_pm_qos_constraints_allocate(struct device *dev) |
122 | { | 116 | { |
123 | struct pm_qos_constraints *c; | 117 | struct pm_qos_constraints *c; |
124 | struct blocking_notifier_head *n; | 118 | struct blocking_notifier_head *n; |
125 | unsigned long flags; | ||
126 | 119 | ||
127 | c = kzalloc(sizeof(*c), GFP_ATOMIC); | 120 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
128 | if (!c) | 121 | if (!c) |
129 | return -ENOMEM; | 122 | return -ENOMEM; |
130 | 123 | ||
131 | n = kzalloc(sizeof(*n), GFP_ATOMIC); | 124 | n = kzalloc(sizeof(*n), GFP_KERNEL); |
132 | if (!n) { | 125 | if (!n) { |
133 | kfree(c); | 126 | kfree(c); |
134 | return -ENOMEM; | 127 | return -ENOMEM; |
@@ -141,9 +134,9 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) | |||
141 | c->type = PM_QOS_MIN; | 134 | c->type = PM_QOS_MIN; |
142 | c->notifiers = n; | 135 | c->notifiers = n; |
143 | 136 | ||
144 | spin_lock_irqsave(&dev->power.lock, flags); | 137 | spin_lock_irq(&dev->power.lock); |
145 | dev->power.constraints = c; | 138 | dev->power.constraints = c; |
146 | spin_unlock_irqrestore(&dev->power.lock, flags); | 139 | spin_unlock_irq(&dev->power.lock); |
147 | 140 | ||
148 | return 0; | 141 | return 0; |
149 | } | 142 | } |
@@ -157,12 +150,10 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) | |||
157 | */ | 150 | */ |
158 | void dev_pm_qos_constraints_init(struct device *dev) | 151 | void dev_pm_qos_constraints_init(struct device *dev) |
159 | { | 152 | { |
160 | unsigned long flags; | 153 | mutex_lock(&dev_pm_qos_mtx); |
161 | |||
162 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | ||
163 | dev->power.constraints = NULL; | 154 | dev->power.constraints = NULL; |
164 | dev->power.power_state = PMSG_ON; | 155 | dev->power.power_state = PMSG_ON; |
165 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 156 | mutex_unlock(&dev_pm_qos_mtx); |
166 | } | 157 | } |
167 | 158 | ||
168 | /** | 159 | /** |
@@ -175,7 +166,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
175 | { | 166 | { |
176 | struct dev_pm_qos_request *req, *tmp; | 167 | struct dev_pm_qos_request *req, *tmp; |
177 | struct pm_qos_constraints *c; | 168 | struct pm_qos_constraints *c; |
178 | unsigned long flags; | ||
179 | 169 | ||
180 | /* | 170 | /* |
181 | * If the device's PM QoS resume latency limit has been exposed to user | 171 | * If the device's PM QoS resume latency limit has been exposed to user |
@@ -183,7 +173,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
183 | */ | 173 | */ |
184 | dev_pm_qos_hide_latency_limit(dev); | 174 | dev_pm_qos_hide_latency_limit(dev); |
185 | 175 | ||
186 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 176 | mutex_lock(&dev_pm_qos_mtx); |
187 | 177 | ||
188 | dev->power.power_state = PMSG_INVALID; | 178 | dev->power.power_state = PMSG_INVALID; |
189 | c = dev->power.constraints; | 179 | c = dev->power.constraints; |
@@ -208,7 +198,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
208 | kfree(c); | 198 | kfree(c); |
209 | 199 | ||
210 | out: | 200 | out: |
211 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 201 | mutex_unlock(&dev_pm_qos_mtx); |
212 | } | 202 | } |
213 | 203 | ||
214 | /** | 204 | /** |
@@ -233,7 +223,6 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
233 | s32 value) | 223 | s32 value) |
234 | { | 224 | { |
235 | int ret = 0; | 225 | int ret = 0; |
236 | unsigned long flags; | ||
237 | 226 | ||
238 | if (!dev || !req) /*guard against callers passing in null */ | 227 | if (!dev || !req) /*guard against callers passing in null */ |
239 | return -EINVAL; | 228 | return -EINVAL; |
@@ -244,7 +233,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
244 | 233 | ||
245 | req->dev = dev; | 234 | req->dev = dev; |
246 | 235 | ||
247 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 236 | mutex_lock(&dev_pm_qos_mtx); |
248 | 237 | ||
249 | if (!dev->power.constraints) { | 238 | if (!dev->power.constraints) { |
250 | if (dev->power.power_state.event == PM_EVENT_INVALID) { | 239 | if (dev->power.power_state.event == PM_EVENT_INVALID) { |
@@ -266,7 +255,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
266 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); | 255 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); |
267 | 256 | ||
268 | out: | 257 | out: |
269 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 258 | mutex_unlock(&dev_pm_qos_mtx); |
270 | 259 | ||
271 | return ret; | 260 | return ret; |
272 | } | 261 | } |
@@ -291,7 +280,6 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |||
291 | s32 new_value) | 280 | s32 new_value) |
292 | { | 281 | { |
293 | int ret = 0; | 282 | int ret = 0; |
294 | unsigned long flags; | ||
295 | 283 | ||
296 | if (!req) /*guard against callers passing in null */ | 284 | if (!req) /*guard against callers passing in null */ |
297 | return -EINVAL; | 285 | return -EINVAL; |
@@ -300,7 +288,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |||
300 | "%s() called for unknown object\n", __func__)) | 288 | "%s() called for unknown object\n", __func__)) |
301 | return -EINVAL; | 289 | return -EINVAL; |
302 | 290 | ||
303 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 291 | mutex_lock(&dev_pm_qos_mtx); |
304 | 292 | ||
305 | if (req->dev->power.constraints) { | 293 | if (req->dev->power.constraints) { |
306 | if (new_value != req->node.prio) | 294 | if (new_value != req->node.prio) |
@@ -311,7 +299,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |||
311 | ret = -ENODEV; | 299 | ret = -ENODEV; |
312 | } | 300 | } |
313 | 301 | ||
314 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 302 | mutex_unlock(&dev_pm_qos_mtx); |
315 | return ret; | 303 | return ret; |
316 | } | 304 | } |
317 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | 305 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); |
@@ -331,7 +319,6 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | |||
331 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | 319 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) |
332 | { | 320 | { |
333 | int ret = 0; | 321 | int ret = 0; |
334 | unsigned long flags; | ||
335 | 322 | ||
336 | if (!req) /*guard against callers passing in null */ | 323 | if (!req) /*guard against callers passing in null */ |
337 | return -EINVAL; | 324 | return -EINVAL; |
@@ -340,7 +327,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | |||
340 | "%s() called for unknown object\n", __func__)) | 327 | "%s() called for unknown object\n", __func__)) |
341 | return -EINVAL; | 328 | return -EINVAL; |
342 | 329 | ||
343 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 330 | mutex_lock(&dev_pm_qos_mtx); |
344 | 331 | ||
345 | if (req->dev->power.constraints) { | 332 | if (req->dev->power.constraints) { |
346 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | 333 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, |
@@ -351,7 +338,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | |||
351 | ret = -ENODEV; | 338 | ret = -ENODEV; |
352 | } | 339 | } |
353 | 340 | ||
354 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 341 | mutex_unlock(&dev_pm_qos_mtx); |
355 | return ret; | 342 | return ret; |
356 | } | 343 | } |
357 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); | 344 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); |
@@ -372,9 +359,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); | |||
372 | int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | 359 | int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) |
373 | { | 360 | { |
374 | int ret = 0; | 361 | int ret = 0; |
375 | unsigned long flags; | ||
376 | 362 | ||
377 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 363 | mutex_lock(&dev_pm_qos_mtx); |
378 | 364 | ||
379 | if (!dev->power.constraints) | 365 | if (!dev->power.constraints) |
380 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? | 366 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? |
@@ -384,7 +370,7 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | |||
384 | ret = blocking_notifier_chain_register( | 370 | ret = blocking_notifier_chain_register( |
385 | dev->power.constraints->notifiers, notifier); | 371 | dev->power.constraints->notifiers, notifier); |
386 | 372 | ||
387 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 373 | mutex_unlock(&dev_pm_qos_mtx); |
388 | return ret; | 374 | return ret; |
389 | } | 375 | } |
390 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); | 376 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); |
@@ -403,9 +389,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, | |||
403 | struct notifier_block *notifier) | 389 | struct notifier_block *notifier) |
404 | { | 390 | { |
405 | int retval = 0; | 391 | int retval = 0; |
406 | unsigned long flags; | ||
407 | 392 | ||
408 | spin_lock_irqsave(&dev_pm_qos_lock, flags); | 393 | mutex_lock(&dev_pm_qos_mtx); |
409 | 394 | ||
410 | /* Silently return if the constraints object is not present. */ | 395 | /* Silently return if the constraints object is not present. */ |
411 | if (dev->power.constraints) | 396 | if (dev->power.constraints) |
@@ -413,7 +398,7 @@ int dev_pm_qos_remove_notifier(struct device *dev, | |||
413 | dev->power.constraints->notifiers, | 398 | dev->power.constraints->notifiers, |
414 | notifier); | 399 | notifier); |
415 | 400 | ||
416 | spin_unlock_irqrestore(&dev_pm_qos_lock, flags); | 401 | mutex_unlock(&dev_pm_qos_mtx); |
417 | return retval; | 402 | return retval; |
418 | } | 403 | } |
419 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); | 404 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); |