Diffstat (limited to 'drivers/hwspinlock')
 drivers/hwspinlock/hwspinlock_core.c | 45 ++++++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 43a62714b4f..12f7c8300c7 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
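
Why a mutex: hwspinlock_tree is declared with GFP_KERNEL, so radix_tree_insert() may allocate memory and sleep, and sleeping with a spinlock held is a bug. A minimal sketch of the broken pattern this hunk removes (the lock and tree names are illustrative, not from this file):

	static DEFINE_SPINLOCK(tree_lock);	/* illustrative only */

	spin_lock(&tree_lock);			/* preemption now disabled */
	/* BUG: inserting into a GFP_KERNEL radix tree may allocate
	 * and sleep, which is illegal under a spinlock */
	ret = radix_tree_insert(&some_tree, id, item);
	spin_unlock(&tree_lock);

The alternative that keeps a spinlock is bracketing the insert with radix_tree_preload()/radix_tree_preload_end(); since every caller of these paths may already sleep, a mutex is the simpler fix.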
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 
 	spin_lock_init(&hwlock->lock);
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
 	if (ret)
@@ -293,7 +295,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 	WARN_ON(tmp != hwlock);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
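
After this change hwspin_lock_register() is a sleeping call. A hedged sketch of a caller, assuming a platform driver whose probe() has already built a struct hwspinlock (the setup helper is hypothetical and driver-specific):

	static int my_hwspinlock_probe(struct platform_device *pdev)
	{
		struct hwspinlock *hwlock = my_setup_hwlock(pdev); /* hypothetical helper */
		int ret;

		/* probe() runs in process context, so sleeping here is fine */
		ret = hwspin_lock_register(hwlock);
		if (ret)
			dev_err(&pdev->dev, "hwspinlock register failed: %d\n", ret);
		return ret;
	}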
@@ -305,8 +307,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -315,7 +316,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	struct hwspinlock *hwlock = NULL;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,7 +332,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	}
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
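
The teardown path mirrors registration, again from process context. A sketch, assuming the driver remembers the id it registered (the variable is hypothetical):

	/* in the driver's remove() path */
	struct hwspinlock *hwlock;

	hwlock = hwspin_lock_unregister(my_lock_id);	/* hypothetical id */
	if (!hwlock)
		return -EBUSY;	/* lock is still marked used in the tree */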
@@ -400,9 +401,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -411,7 +410,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +430,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
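
For consumers the only visible change is that requesting and freeing a lock may now sleep; taking the lock itself remains atomic. A hedged usage sketch (the 10 ms timeout is arbitrary):

	struct hwspinlock *hwlock;
	int id, ret;

	hwlock = hwspin_lock_request();		/* may sleep */
	if (!hwlock)
		return -EBUSY;

	id = hwspin_lock_get_id(hwlock);
	/* ... hand 'id' to the remote core by some other channel ... */

	ret = hwspin_lock_timeout(hwlock, 10);	/* wait up to 10 ms */
	if (ret == 0) {
		/* ... section protected against the remote core ... */
		hwspin_unlock(hwlock);
	}

	hwspin_lock_free(hwlock);		/* may sleep as well now */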
@@ -445,9 +444,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -456,7 +453,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -482,7 +479,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
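
Board code reserving a well-known id gets the same sleeping semantics. A sketch, with id 0 as a placeholder convention:

	struct hwspinlock *hwlock;

	hwlock = hwspin_lock_request_specific(0);	/* process context only */
	if (!hwlock)
		pr_err("hwspinlock 0 missing or already taken\n");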
@@ -495,9 +492,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -511,7 +506,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is used */
 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -538,7 +533,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 	module_put(hwlock->owner);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);