diff options
-rw-r--r-- | Documentation/hwspinlock.txt | 18 | ||||
-rw-r--r-- | drivers/hwspinlock/hwspinlock_core.c | 45 |
2 files changed, 27 insertions, 36 deletions
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt index bbaa4649b637..9171f9120143 100644 --- a/Documentation/hwspinlock.txt +++ b/Documentation/hwspinlock.txt | |||
@@ -39,23 +39,20 @@ independent, drivers. | |||
39 | in case an unused hwspinlock isn't available. Users of this | 39 | in case an unused hwspinlock isn't available. Users of this |
40 | API will usually want to communicate the lock's id to the remote core | 40 | API will usually want to communicate the lock's id to the remote core |
41 | before it can be used to achieve synchronization. | 41 | before it can be used to achieve synchronization. |
42 | Can be called from an atomic context (this function will not sleep) but | 42 | Should be called from a process context (might sleep). |
43 | not from within interrupt context. | ||
44 | 43 | ||
45 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); | 44 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); |
46 | - assign a specific hwspinlock id and return its address, or NULL | 45 | - assign a specific hwspinlock id and return its address, or NULL |
47 | if that hwspinlock is already in use. Usually board code will | 46 | if that hwspinlock is already in use. Usually board code will |
48 | be calling this function in order to reserve specific hwspinlock | 47 | be calling this function in order to reserve specific hwspinlock |
49 | ids for predefined purposes. | 48 | ids for predefined purposes. |
50 | Can be called from an atomic context (this function will not sleep) but | 49 | Should be called from a process context (might sleep). |
51 | not from within interrupt context. | ||
52 | 50 | ||
53 | int hwspin_lock_free(struct hwspinlock *hwlock); | 51 | int hwspin_lock_free(struct hwspinlock *hwlock); |
54 | - free a previously-assigned hwspinlock; returns 0 on success, or an | 52 | - free a previously-assigned hwspinlock; returns 0 on success, or an |
55 | appropriate error code on failure (e.g. -EINVAL if the hwspinlock | 53 | appropriate error code on failure (e.g. -EINVAL if the hwspinlock |
56 | is already free). | 54 | is already free). |
57 | Can be called from an atomic context (this function will not sleep) but | 55 | Should be called from a process context (might sleep). |
58 | not from within interrupt context. | ||
59 | 56 | ||
60 | int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout); | 57 | int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout); |
61 | - lock a previously-assigned hwspinlock with a timeout limit (specified in | 58 | - lock a previously-assigned hwspinlock with a timeout limit (specified in |
@@ -232,15 +229,14 @@ int hwspinlock_example2(void) | |||
232 | 229 | ||
233 | int hwspin_lock_register(struct hwspinlock *hwlock); | 230 | int hwspin_lock_register(struct hwspinlock *hwlock); |
234 | - to be called from the underlying platform-specific implementation, in | 231 | - to be called from the underlying platform-specific implementation, in |
235 | order to register a new hwspinlock instance. Can be called from an atomic | 232 | order to register a new hwspinlock instance. Should be called from |
236 | context (this function will not sleep) but not from within interrupt | 233 | a process context (this function might sleep). |
237 | context. Returns 0 on success, or appropriate error code on failure. | 234 | Returns 0 on success, or appropriate error code on failure. |
238 | 235 | ||
239 | struct hwspinlock *hwspin_lock_unregister(unsigned int id); | 236 | struct hwspinlock *hwspin_lock_unregister(unsigned int id); |
240 | - to be called from the underlying vendor-specific implementation, in order | 237 | - to be called from the underlying vendor-specific implementation, in order |
241 | to unregister an existing (and unused) hwspinlock instance. | 238 | to unregister an existing (and unused) hwspinlock instance. |
242 | Can be called from an atomic context (will not sleep) but not from | 239 | Should be called from a process context (this function might sleep). |
243 | within interrupt context. | ||
244 | Returns the address of hwspinlock on success, or NULL on error (e.g. | 240 | Returns the address of hwspinlock on success, or NULL on error (e.g. |
245 | if the hwspinlock is still in use). | 241 | if the hwspinlock is still in use). |
246 | 242 | ||
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index 4eb85b4a320e..0d20b82df0a7 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/radix-tree.h> | 26 | #include <linux/radix-tree.h> |
27 | #include <linux/hwspinlock.h> | 27 | #include <linux/hwspinlock.h> |
28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <linux/mutex.h> | ||
29 | 30 | ||
30 | #include "hwspinlock_internal.h" | 31 | #include "hwspinlock_internal.h" |
31 | 32 | ||
@@ -52,10 +53,12 @@ | |||
52 | static RADIX_TREE(hwspinlock_tree, GFP_KERNEL); | 53 | static RADIX_TREE(hwspinlock_tree, GFP_KERNEL); |
53 | 54 | ||
54 | /* | 55 | /* |
55 | * Synchronization of access to the tree is achieved using this spinlock, | 56 | * Synchronization of access to the tree is achieved using this mutex, |
56 | * as the radix-tree API requires that users provide all synchronisation. | 57 | * as the radix-tree API requires that users provide all synchronisation. |
58 | * A mutex is needed because we're using non-atomic radix tree allocations. | ||
57 | */ | 59 | */ |
58 | static DEFINE_SPINLOCK(hwspinlock_tree_lock); | 60 | static DEFINE_MUTEX(hwspinlock_tree_lock); |
61 | |||
59 | 62 | ||
60 | /** | 63 | /** |
61 | * __hwspin_trylock() - attempt to lock a specific hwspinlock | 64 | * __hwspin_trylock() - attempt to lock a specific hwspinlock |
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock); | |||
261 | * This function should be called from the underlying platform-specific | 264 | * This function should be called from the underlying platform-specific |
262 | * implementation, to register a new hwspinlock instance. | 265 | * implementation, to register a new hwspinlock instance. |
263 | * | 266 | * |
264 | * Can be called from an atomic context (will not sleep) but not from | 267 | * Should be called from a process context (might sleep) |
265 | * within interrupt context. | ||
266 | * | 268 | * |
267 | * Returns 0 on success, or an appropriate error code on failure | 269 | * Returns 0 on success, or an appropriate error code on failure |
268 | */ | 270 | */ |
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock) | |||
279 | 281 | ||
280 | spin_lock_init(&hwlock->lock); | 282 | spin_lock_init(&hwlock->lock); |
281 | 283 | ||
282 | spin_lock(&hwspinlock_tree_lock); | 284 | mutex_lock(&hwspinlock_tree_lock); |
283 | 285 | ||
284 | ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock); | 286 | ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock); |
285 | if (ret == -EEXIST) | 287 | if (ret == -EEXIST) |
@@ -295,7 +297,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock) | |||
295 | WARN_ON(tmp != hwlock); | 297 | WARN_ON(tmp != hwlock); |
296 | 298 | ||
297 | out: | 299 | out: |
298 | spin_unlock(&hwspinlock_tree_lock); | 300 | mutex_unlock(&hwspinlock_tree_lock); |
299 | return ret; | 301 | return ret; |
300 | } | 302 | } |
301 | EXPORT_SYMBOL_GPL(hwspin_lock_register); | 303 | EXPORT_SYMBOL_GPL(hwspin_lock_register); |
@@ -307,8 +309,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register); | |||
307 | * This function should be called from the underlying platform-specific | 309 | * This function should be called from the underlying platform-specific |
308 | * implementation, to unregister an existing (and unused) hwspinlock. | 310 | * implementation, to unregister an existing (and unused) hwspinlock. |
309 | * | 311 | * |
310 | * Can be called from an atomic context (will not sleep) but not from | 312 | * Should be called from a process context (might sleep) |
311 | * within interrupt context. | ||
312 | * | 313 | * |
313 | * Returns the address of hwspinlock @id on success, or NULL on failure | 314 | * Returns the address of hwspinlock @id on success, or NULL on failure |
314 | */ | 315 | */ |
@@ -317,7 +318,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id) | |||
317 | struct hwspinlock *hwlock = NULL; | 318 | struct hwspinlock *hwlock = NULL; |
318 | int ret; | 319 | int ret; |
319 | 320 | ||
320 | spin_lock(&hwspinlock_tree_lock); | 321 | mutex_lock(&hwspinlock_tree_lock); |
321 | 322 | ||
322 | /* make sure the hwspinlock is not in use (tag is set) */ | 323 | /* make sure the hwspinlock is not in use (tag is set) */ |
323 | ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); | 324 | ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); |
@@ -333,7 +334,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id) | |||
333 | } | 334 | } |
334 | 335 | ||
335 | out: | 336 | out: |
336 | spin_unlock(&hwspinlock_tree_lock); | 337 | mutex_unlock(&hwspinlock_tree_lock); |
337 | return hwlock; | 338 | return hwlock; |
338 | } | 339 | } |
339 | EXPORT_SYMBOL_GPL(hwspin_lock_unregister); | 340 | EXPORT_SYMBOL_GPL(hwspin_lock_unregister); |
@@ -402,9 +403,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id); | |||
402 | * to the remote core before it can be used for synchronization (to get the | 403 | * to the remote core before it can be used for synchronization (to get the |
403 | * id of a given hwlock, use hwspin_lock_get_id()). | 404 | * id of a given hwlock, use hwspin_lock_get_id()). |
404 | * | 405 | * |
405 | * Can be called from an atomic context (will not sleep) but not from | 406 | * Should be called from a process context (might sleep) |
406 | * within interrupt context (simply because there is no use case for | ||
407 | * that yet). | ||
408 | * | 407 | * |
409 | * Returns the address of the assigned hwspinlock, or NULL on error | 408 | * Returns the address of the assigned hwspinlock, or NULL on error |
410 | */ | 409 | */ |
@@ -413,7 +412,7 @@ struct hwspinlock *hwspin_lock_request(void) | |||
413 | struct hwspinlock *hwlock; | 412 | struct hwspinlock *hwlock; |
414 | int ret; | 413 | int ret; |
415 | 414 | ||
416 | spin_lock(&hwspinlock_tree_lock); | 415 | mutex_lock(&hwspinlock_tree_lock); |
417 | 416 | ||
418 | /* look for an unused lock */ | 417 | /* look for an unused lock */ |
419 | ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, | 418 | ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, |
@@ -433,7 +432,7 @@ struct hwspinlock *hwspin_lock_request(void) | |||
433 | hwlock = NULL; | 432 | hwlock = NULL; |
434 | 433 | ||
435 | out: | 434 | out: |
436 | spin_unlock(&hwspinlock_tree_lock); | 435 | mutex_unlock(&hwspinlock_tree_lock); |
437 | return hwlock; | 436 | return hwlock; |
438 | } | 437 | } |
439 | EXPORT_SYMBOL_GPL(hwspin_lock_request); | 438 | EXPORT_SYMBOL_GPL(hwspin_lock_request); |
@@ -447,9 +446,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request); | |||
447 | * Usually early board code will be calling this function in order to | 446 | * Usually early board code will be calling this function in order to |
448 | * reserve specific hwspinlock ids for predefined purposes. | 447 | * reserve specific hwspinlock ids for predefined purposes. |
449 | * | 448 | * |
450 | * Can be called from an atomic context (will not sleep) but not from | 449 | * Should be called from a process context (might sleep) |
451 | * within interrupt context (simply because there is no use case for | ||
452 | * that yet). | ||
453 | * | 450 | * |
454 | * Returns the address of the assigned hwspinlock, or NULL on error | 451 | * Returns the address of the assigned hwspinlock, or NULL on error |
455 | */ | 452 | */ |
@@ -458,7 +455,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id) | |||
458 | struct hwspinlock *hwlock; | 455 | struct hwspinlock *hwlock; |
459 | int ret; | 456 | int ret; |
460 | 457 | ||
461 | spin_lock(&hwspinlock_tree_lock); | 458 | mutex_lock(&hwspinlock_tree_lock); |
462 | 459 | ||
463 | /* make sure this hwspinlock exists */ | 460 | /* make sure this hwspinlock exists */ |
464 | hwlock = radix_tree_lookup(&hwspinlock_tree, id); | 461 | hwlock = radix_tree_lookup(&hwspinlock_tree, id); |
@@ -484,7 +481,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id) | |||
484 | hwlock = NULL; | 481 | hwlock = NULL; |
485 | 482 | ||
486 | out: | 483 | out: |
487 | spin_unlock(&hwspinlock_tree_lock); | 484 | mutex_unlock(&hwspinlock_tree_lock); |
488 | return hwlock; | 485 | return hwlock; |
489 | } | 486 | } |
490 | EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); | 487 | EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); |
@@ -497,9 +494,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); | |||
497 | * Should only be called with an @hwlock that was retrieved from | 494 | * Should only be called with an @hwlock that was retrieved from |
498 | * an earlier call to omap_hwspin_lock_request{_specific}. | 495 | * an earlier call to omap_hwspin_lock_request{_specific}. |
499 | * | 496 | * |
500 | * Can be called from an atomic context (will not sleep) but not from | 497 | * Should be called from a process context (might sleep) |
501 | * within interrupt context (simply because there is no use case for | ||
502 | * that yet). | ||
503 | * | 498 | * |
504 | * Returns 0 on success, or an appropriate error code on failure | 499 | * Returns 0 on success, or an appropriate error code on failure |
505 | */ | 500 | */ |
@@ -513,7 +508,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock) | |||
513 | return -EINVAL; | 508 | return -EINVAL; |
514 | } | 509 | } |
515 | 510 | ||
516 | spin_lock(&hwspinlock_tree_lock); | 511 | mutex_lock(&hwspinlock_tree_lock); |
517 | 512 | ||
518 | /* make sure the hwspinlock is used */ | 513 | /* make sure the hwspinlock is used */ |
519 | ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id, | 514 | ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id, |
@@ -540,7 +535,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock) | |||
540 | module_put(hwlock->dev->driver->owner); | 535 | module_put(hwlock->dev->driver->owner); |
541 | 536 | ||
542 | out: | 537 | out: |
543 | spin_unlock(&hwspinlock_tree_lock); | 538 | mutex_unlock(&hwspinlock_tree_lock); |
544 | return ret; | 539 | return ret; |
545 | } | 540 | } |
546 | EXPORT_SYMBOL_GPL(hwspin_lock_free); | 541 | EXPORT_SYMBOL_GPL(hwspin_lock_free); |