Diffstat (limited to 'drivers/hwspinlock/hwspinlock_core.c')
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c  | 204
1 file changed, 127 insertions(+), 77 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 43a62714b4fb..61c9cf15fa52 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
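
The spinlock-to-mutex switch follows from the radix tree's allocation
behaviour: the tree is declared with GFP_KERNEL, so radix_tree_insert() may
sleep while allocating tree nodes, and sleeping is illegal under a spinlock.
A minimal sketch of the resulting pattern, condensed from
hwspin_lock_register_single() below:

	mutex_lock(&hwspinlock_tree_lock);
	/* may sleep: node allocation inside the tree uses GFP_KERNEL */
	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	mutex_unlock(&hwspinlock_tree_lock);
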
@@ -114,7 +117,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 		return -EBUSY;
 
 	/* try to take the hwspinlock device */
-	ret = hwlock->ops->trylock(hwlock);
+	ret = hwlock->bank->ops->trylock(hwlock);
 
 	/* if hwlock is already taken, undo spin_trylock_* and exit */
 	if (!ret) {
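
Hardware access is now routed through the per-bank ops table
(hwlock->bank->ops) rather than per-lock function pointers. For illustration,
a hypothetical vendor trylock op, assuming hardware where each lock is backed
by a register that reads 0 exactly when the lock was free and has just been
granted to the reader; the name my_hwspinlock_trylock and the use of the
lock's private data are assumptions, not part of this patch:

	#include <linux/io.h>

	/* return 1 on success (lock taken), 0 if it is held elsewhere */
	static int my_hwspinlock_trylock(struct hwspinlock *lock)
	{
		void __iomem *lock_addr = lock->priv;	/* assumed: set at probe */

		return readl(lock_addr) == 0;
	}
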
@@ -196,8 +199,8 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
 		 * Allow platform-specific relax handlers to prevent
 		 * hogging the interconnect (no sleeping, though)
 		 */
-		if (hwlock->ops->relax)
-			hwlock->ops->relax(hwlock);
+		if (hwlock->bank->ops->relax)
+			hwlock->bank->ops->relax(hwlock);
 	}
 
 	return ret;
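
The relax op stays optional and is still invoked in atomic context, so it may
busy-wait but must never sleep. A hypothetical back-off handler (the delay
value is illustrative):

	#include <linux/delay.h>

	static void my_hwspinlock_relax(struct hwspinlock *lock)
	{
		ndelay(50);	/* briefly keep retries off the interconnect */
	}
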
@@ -242,7 +245,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	 */
 	mb();
 
-	hwlock->ops->unlock(hwlock);
+	hwlock->bank->ops->unlock(hwlock);
 
 	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
 	if (mode == HWLOCK_IRQSTATE)
@@ -254,68 +257,37 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 }
 EXPORT_SYMBOL_GPL(__hwspin_unlock);
 
-/**
- * hwspin_lock_register() - register a new hw spinlock
- * @hwlock: hwspinlock to register.
- *
- * This function should be called from the underlying platform-specific
- * implementation, to register a new hwspinlock instance.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns 0 on success, or an appropriate error code on failure
- */
-int hwspin_lock_register(struct hwspinlock *hwlock)
+static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
 {
 	struct hwspinlock *tmp;
 	int ret;
 
-	if (!hwlock || !hwlock->ops ||
-		!hwlock->ops->trylock || !hwlock->ops->unlock) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	spin_lock_init(&hwlock->lock);
-
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
-	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
-	if (ret)
+	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
+	if (ret) {
+		if (ret == -EEXIST)
+			pr_err("hwspinlock id %d already exists!\n", id);
 		goto out;
+	}
 
 	/* mark this hwspinlock as available */
-	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
-							HWSPINLOCK_UNUSED);
+	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
 
 	/* self-sanity check which should never fail */
 	WARN_ON(tmp != hwlock);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
-	return ret;
+	mutex_unlock(&hwspinlock_tree_lock);
+	return 0;
 }
-EXPORT_SYMBOL_GPL(hwspin_lock_register);
 
-/**
- * hwspin_lock_unregister() - unregister an hw spinlock
- * @id: index of the specific hwspinlock to unregister
- *
- * This function should be called from the underlying platform-specific
- * implementation, to unregister an existing (and unused) hwspinlock.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns the address of hwspinlock @id on success, or NULL on failure
- */
-struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
 {
 	struct hwspinlock *hwlock = NULL;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,9 +303,91 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	}
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @dev: the backing device
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+		const struct hwspinlock_ops *ops, int base_id, int num_locks)
+{
+	struct hwspinlock *hwlock;
+	int ret = 0, i;
+
+	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
+							!ops->unlock) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	bank->dev = dev;
+	bank->ops = ops;
+	bank->base_id = base_id;
+	bank->num_locks = num_locks;
+
+	for (i = 0; i < num_locks; i++) {
+		hwlock = &bank->lock[i];
+
+		spin_lock_init(&hwlock->lock);
+		hwlock->bank = bank;
+
+		ret = hwspin_lock_register_single(hwlock, i);
+		if (ret)
+			goto reg_failed;
+	}
+
+	return 0;
+
+reg_failed:
+	while (--i >= 0)
+		hwspin_lock_unregister_single(i);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_unregister(struct hwspinlock_device *bank)
+{
+	struct hwspinlock *hwlock, *tmp;
+	int i;
+
+	for (i = 0; i < bank->num_locks; i++) {
+		hwlock = &bank->lock[i];
+
+		tmp = hwspin_lock_unregister_single(bank->base_id + i);
+		if (!tmp)
+			return -EBUSY;
+
+		/* self-sanity check that should never fail */
+		WARN_ON(tmp != hwlock);
+	}
+
+	return 0;
+}
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
 
 /**
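
With the bank API in place, a platform-specific driver registers every lock
it provides in a single call, typically from its probe routine. A minimal
sketch, assuming a 32-lock bank, the hypothetical ops above, and that struct
hwspinlock_device ends in a flexible array member lock[] as the bank->lock[i]
accesses imply; my_driver_probe, my_hwspinlock_unlock and the sizing
arithmetic are illustrative:

	static const struct hwspinlock_ops my_hwspinlock_ops = {
		.trylock	= my_hwspinlock_trylock,
		.unlock		= my_hwspinlock_unlock,	/* assumed counterpart */
		.relax		= my_hwspinlock_relax,
	};

	static int my_driver_probe(struct platform_device *pdev)
	{
		struct hwspinlock_device *bank;
		const int num_locks = 32;	/* illustrative */
		int ret;

		bank = kzalloc(sizeof(*bank) +
				num_locks * sizeof(struct hwspinlock), GFP_KERNEL);
		if (!bank)
			return -ENOMEM;

		platform_set_drvdata(pdev, bank);

		/* ... ioremap registers, point each bank->lock[i].priv at one ... */

		ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
				0, num_locks);
		if (ret)
			kfree(bank);
		return ret;
	}
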
@@ -348,24 +402,25 @@ EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
  */
 static int __hwspin_lock_request(struct hwspinlock *hwlock)
 {
+	struct device *dev = hwlock->bank->dev;
 	struct hwspinlock *tmp;
 	int ret;
 
 	/* prevent underlying implementation from being removed */
-	if (!try_module_get(hwlock->owner)) {
-		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+	if (!try_module_get(dev->driver->owner)) {
+		dev_err(dev, "%s: can't get owner\n", __func__);
 		return -EINVAL;
 	}
 
 	/* notify PM core that power is now needed */
-	ret = pm_runtime_get_sync(hwlock->dev);
+	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
-		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+		dev_err(dev, "%s: can't power on device\n", __func__);
 		return ret;
 	}
 
 	/* mark hwspinlock as used, should not fail */
-	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
 							HWSPINLOCK_UNUSED);
 
 	/* self-sanity check that should never fail */
@@ -387,7 +442,7 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	return hwlock->id;
+	return hwlock_to_id(hwlock);
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
 
@@ -400,9 +455,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -411,7 +464,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +484,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
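
A typical dynamic-allocation flow pairs hwspin_lock_request() with
hwspin_lock_get_id() so the id can be published to the remote core; how the
id travels (send_id_to_remote() here) is platform-specific and purely
hypothetical:

	struct hwspinlock *hwlock;
	int id;

	hwlock = hwspin_lock_request();	/* may sleep */
	if (!hwlock)
		return -EBUSY;

	id = hwspin_lock_get_id(hwlock);
	send_id_to_remote(id);	/* hypothetical: e.g. over a mailbox/rpmsg */
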
@@ -445,9 +498,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -456,7 +507,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -466,7 +517,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	}
 
 	/* sanity check (this shouldn't happen) */
-	WARN_ON(hwlock->id != id);
+	WARN_ON(hwlock_to_id(hwlock) != id);
 
 	/* make sure this hwspinlock is unused */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -482,7 +533,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
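
Board code that must hand out pre-agreed ids uses the _specific variant. A
minimal sketch (the id value 0 and its purpose are illustrative):

	/* early board code: pin down hwspinlock 0 for shared-memory setup */
	struct hwspinlock *hwlock = hwspin_lock_request_specific(0);

	if (!hwlock)
		pr_err("hwspinlock 0 is taken or not registered\n");
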
@@ -495,14 +546,13 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
 int hwspin_lock_free(struct hwspinlock *hwlock)
 {
+	struct device *dev = hwlock->bank->dev;
 	struct hwspinlock *tmp;
 	int ret;
 
@@ -511,34 +561,34 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is used */
-	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
 							HWSPINLOCK_UNUSED);
 	if (ret == 1) {
-		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+		dev_err(dev, "%s: hwlock is already free\n", __func__);
 		dump_stack();
 		ret = -EINVAL;
 		goto out;
 	}
 
 	/* notify the underlying device that power is not needed */
-	ret = pm_runtime_put(hwlock->dev);
+	ret = pm_runtime_put(dev);
 	if (ret < 0)
 		goto out;
 
 	/* mark this hwspinlock as available */
-	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
 							HWSPINLOCK_UNUSED);
 
 	/* sanity check (this shouldn't happen) */
 	WARN_ON(tmp != hwlock);
 
-	module_put(hwlock->owner);
+	module_put(dev->driver->owner);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);
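
Putting the client-facing API together, a minimal lock/unlock cycle around a
shared resource; the 100 ms timeout and the protected section are
illustrative (hwspin_lock_timeout_irqsave() and hwspin_unlock_irqrestore()
are the static inline wrappers from <linux/hwspinlock.h> around the
__hwspin_* helpers above):

	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	/* spin for up to 100ms, with local interrupts saved and disabled */
	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
	if (ret)
		goto free;

	/* ... touch the state shared with the remote core ... */

	hwspin_unlock_irqrestore(hwlock, &flags);
free:
	hwspin_lock_free(hwlock);
	return ret;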