author     Juan Gutierrez <jgutierrez@ti.com>       2011-09-06 02:30:16 -0400
committer  Ohad Ben-Cohen <ohad@wizery.com>         2011-09-21 12:45:32 -0400
commit     93b465c2e186d96fb90012ba0f9372eb9952e732 (patch)
tree       3781c5443068f2fc79c2bb70c8793075b608d1f0 /drivers/hwspinlock
parent     c3c1250e93a7ab1327a9fc49d2a22405672f4204 (diff)
hwspinlock/core: use a mutex to protect the radix tree
Since we're using non-atomic radix tree allocations, we
should be protecting the tree using a mutex and not a
spinlock.
Non-atomic allocations and process-context locking are good enough,
as the tree is manipulated only when locks are
registered/unregistered/requested/freed.
The locks themselves are still protected by spinlocks of course,
and mutexes are not involved in the locking/unlocking paths.
Cc: <stable@kernel.org>
Signed-off-by: Juan Gutierrez <jgutierrez@ti.com>
[ohad@wizery.com: rewrite the commit log, #include mutex.h, add minor commentary]
[ohad@wizery.com: update register/unregister parts in hwspinlock.txt]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
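
For context, a minimal sketch of the locking scheme this patch describes (not part of the patch; the example_* names are made up for illustration): a mutex serializes the process-context manipulation of the radix tree, which may allocate and therefore sleep, while each lock's own spinlock still guards the atomic lock/unlock fast path.

/*
 * Illustrative sketch only -- not part of the patch. The example_* names
 * are hypothetical; they mirror the scheme described above.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include <linux/errno.h>

struct example_hwspinlock {
	spinlock_t lock;	/* guards the lock/unlock fast path */
	unsigned int id;
};

/* radix tree of registered locks; insertion may allocate with GFP_KERNEL */
static RADIX_TREE(example_tree, GFP_KERNEL);

/* protects the tree: a mutex, because tree insertions can sleep */
static DEFINE_MUTEX(example_tree_lock);

/* registration/unregistration/request/free run in process context */
static int example_register(struct example_hwspinlock *hwlock)
{
	int ret;

	spin_lock_init(&hwlock->lock);

	mutex_lock(&example_tree_lock);
	ret = radix_tree_insert(&example_tree, hwlock->id, hwlock);
	mutex_unlock(&example_tree_lock);

	return ret;
}

/* the fast path takes only the per-lock spinlock, never the mutex */
static int example_trylock(struct example_hwspinlock *hwlock,
			   unsigned long *flags)
{
	if (!spin_trylock_irqsave(&hwlock->lock, *flags))
		return -EBUSY;

	/* ... attempt to take the hardware lock here ... */
	return 0;
}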
Diffstat (limited to 'drivers/hwspinlock')
-rw-r--r--   drivers/hwspinlock/hwspinlock_core.c   45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 4eb85b4a320e..0d20b82df0a7 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 
 	spin_lock_init(&hwlock->lock);
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
 	if (ret == -EEXIST)
@@ -295,7 +297,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 	WARN_ON(tmp != hwlock);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
@@ -307,8 +309,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -317,7 +318,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	struct hwspinlock *hwlock = NULL;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -333,7 +334,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	}
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
@@ -402,9 +403,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -413,7 +412,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -433,7 +432,7 @@ struct hwspinlock *hwspin_lock_request(void)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -447,9 +446,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -458,7 +455,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -484,7 +481,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -497,9 +494,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -513,7 +508,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is used */
 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -540,7 +535,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 	module_put(hwlock->dev->driver->owner);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);