author      Jean Pihet <jean.pihet@newoldbits.com>    2012-09-19 08:17:20 -0400
committer   Rafael J. Wysocki <rjw@sisk.pl>           2012-09-19 08:17:20 -0400
commit      fc2fb3a075c206927d3fbad251dae82ba82ccf2d (patch)
tree        12306571287f05058831654bbe516e54e0e76546
parent      c6a57bfffea5b673e5b4f9aeff85a00607e59077 (diff)
PM QoS: Use spinlock in the per-device PM QoS constraints code
The per-device PM QoS locking requires a spinlock. The reasons are:
- alignment with the PM QoS core code, which is used by the per-device
PM QoS code for the constraints lists management. The PM QoS core code
uses spinlocks to protect the constraints lists,
- some drivers need to use the per-device PM QoS functionality from
interrupt context or spinlock protected context.
An example of such a driver is the OMAP HSI (high-speed synchronous serial
interface) driver, which needs to control the IP block idle state from
interrupt context, depending on the FIFO empty state.
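
To make that use case concrete, here is a minimal sketch of such a driver
(illustrative only, not part of the patch; the function and variable names
are hypothetical). The request would be added with dev_pm_qos_add_request()
at probe time; the interrupt handler then updates the constraint, a call
that would be invalid under the previous mutex-based locking:

#include <linux/interrupt.h>
#include <linux/pm_qos.h>

/* Hypothetical request, registered against the HSI device at probe
 * time via dev_pm_qos_add_request(). */
static struct dev_pm_qos_request hsi_qos_req;

static irqreturn_t hsi_fifo_irq(int irq, void *dev_id)
{
        /*
         * FIFO empty: relax the latency constraint so the IP block may
         * enter idle (PM_QOS_DEFAULT_VALUE removes this request's effect).
         * This runs in hard interrupt context, which is why the
         * per-device PM QoS calls must not take a mutex.
         */
        dev_pm_qos_update_request(&hsi_qos_req, PM_QOS_DEFAULT_VALUE);

        return IRQ_HANDLED;
}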
Reported-by: Djamil Elaidi <d-elaidi@ti.com>
Signed-off-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
-rw-r--r--      drivers/base/power/qos.c      67
1 files changed, 41 insertions, 26 deletions
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 74a67e0019a2..968a77145e81 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -24,26 +24,32 @@
  * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
  * API. The notification chain data is stored in a static variable.
  *
- * Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * Notes about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored into the device
  * dev_pm_info.
  * . To minimize the data usage by the per-device constraints, the data struct
  * is only allocated at the first call to dev_pm_qos_add_request.
  * . The data is later free'd when the device is removed from the system.
- * . A global mutex protects the constraints users from the data being
- * allocated and free'd.
+ *
+ * Notes about locking:
+ * . The dev->power.lock lock protects the constraints list
+ * (dev->power.constraints) allocation and free, as triggered by the
+ * driver core code at device insertion and removal,
+ * . A global lock dev_pm_qos_lock protects the constraints list entries
+ * from any modification and the notifiers registration and unregistration.
+ * . For both locks a spinlock is needed since this code can be called from
+ * interrupt context or spinlock protected context.
  */
 
 #include <linux/pm_qos.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-#include <linux/mutex.h>
 #include <linux/export.h>
 
 #include "power.h"
 
-static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_SPINLOCK(dev_pm_qos_lock);
 
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 
@@ -110,18 +116,19 @@ static int apply_constraint(struct dev_pm_qos_request *req,
  * @dev: device to allocate data for
  *
  * Called at the first call to add_request, for constraint data allocation
- * Must be called with the dev_pm_qos_mtx mutex held
+ * Must be called with the dev_pm_qos_lock lock held
  */
 static int dev_pm_qos_constraints_allocate(struct device *dev)
 {
        struct pm_qos_constraints *c;
        struct blocking_notifier_head *n;
+       unsigned long flags;
 
-       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
                return -ENOMEM;
 
-       n = kzalloc(sizeof(*n), GFP_KERNEL);
+       n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n) {
                kfree(c);
                return -ENOMEM;
@@ -134,9 +141,9 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        c->type = PM_QOS_MIN;
        c->notifiers = n;
 
-       spin_lock_irq(&dev->power.lock);
+       spin_lock_irqsave(&dev->power.lock, flags);
        dev->power.constraints = c;
-       spin_unlock_irq(&dev->power.lock);
+       spin_unlock_irqrestore(&dev->power.lock, flags);
 
        return 0;
 }
@@ -150,10 +157,12 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
  */
 void dev_pm_qos_constraints_init(struct device *dev)
 {
-       mutex_lock(&dev_pm_qos_mtx);
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
        dev->power.constraints = NULL;
        dev->power.power_state = PMSG_ON;
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 }
 
 /**
@@ -166,6 +175,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 {
        struct dev_pm_qos_request *req, *tmp;
        struct pm_qos_constraints *c;
+       unsigned long flags;
 
        /*
         * If the device's PM QoS resume latency limit has been exposed to user
@@ -173,7 +183,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
         */
        dev_pm_qos_hide_latency_limit(dev);
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        dev->power.power_state = PMSG_INVALID;
        c = dev->power.constraints;
@@ -198,7 +208,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        kfree(c);
 
  out:
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 }
 
 /**
@@ -223,6 +233,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           s32 value)
 {
        int ret = 0;
+       unsigned long flags;
 
        if (!dev || !req) /*guard against callers passing in null */
                return -EINVAL;
@@ -233,7 +244,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 
        req->dev = dev;
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        if (!dev->power.constraints) {
                if (dev->power.power_state.event == PM_EVENT_INVALID) {
@@ -255,7 +266,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
        ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 
  out:
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
 
        return ret;
 }
@@ -280,6 +291,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                              s32 new_value)
 {
        int ret = 0;
+       unsigned long flags;
 
        if (!req) /*guard against callers passing in null */
                return -EINVAL;
@@ -288,7 +300,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        if (req->dev->power.constraints) {
                if (new_value != req->node.prio)
@@ -299,7 +311,7 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                ret = -ENODEV;
        }
 
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
@@ -319,6 +331,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
        int ret = 0;
+       unsigned long flags;
 
        if (!req) /*guard against callers passing in null */
                return -EINVAL;
@@ -327,7 +340,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        if (req->dev->power.constraints) {
                ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
@@ -338,7 +351,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
                ret = -ENODEV;
        }
 
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
@@ -359,8 +372,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 {
        int ret = 0;
+       unsigned long flags;
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        if (!dev->power.constraints)
                ret = dev->power.power_state.event != PM_EVENT_INVALID ?
@@ -370,7 +384,7 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
                ret = blocking_notifier_chain_register(
                        dev->power.constraints->notifiers, notifier);
 
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
@@ -389,8 +403,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier)
 {
        int retval = 0;
+       unsigned long flags;
 
-       mutex_lock(&dev_pm_qos_mtx);
+       spin_lock_irqsave(&dev_pm_qos_lock, flags);
 
        /* Silently return if the constraints object is not present. */
        if (dev->power.constraints)
@@ -398,7 +413,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
                        dev->power.constraints->notifiers,
                        notifier);
 
-       mutex_unlock(&dev_pm_qos_mtx);
+       spin_unlock_irqrestore(&dev_pm_qos_lock, flags);
        return retval;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
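
Two details of the conversion are worth noting. The irqsave/irqrestore lock
variants replace spin_lock_irq() because these paths may now be entered with
interrupts already disabled, and the saved flags restore the caller's
interrupt state on unlock. The kzalloc() calls likewise switch from
GFP_KERNEL to GFP_ATOMIC because dev_pm_qos_constraints_allocate() is called
with dev_pm_qos_lock held, where a sleeping allocation is not allowed. A
minimal sketch of the combined pattern (illustrative only, not from the
patch; all names are hypothetical):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static void *example_data;

/* May be called from process, softirq or hard IRQ context. */
static int example_attach(void)
{
        unsigned long flags;
        void *d;

        /* GFP_ATOMIC: a GFP_KERNEL allocation may sleep, which would be
         * a bug in IRQ context or under a held spinlock. */
        d = kzalloc(64, GFP_ATOMIC);
        if (!d)
                return -ENOMEM;

        /* irqsave/irqrestore preserve the caller's interrupt state, so
         * this is safe even if interrupts were already disabled. */
        spin_lock_irqsave(&example_lock, flags);
        example_data = d;
        spin_unlock_irqrestore(&example_lock, flags);

        return 0;
}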