author     Ohad Ben-Cohen <ohad@wizery.com>  2011-09-06 08:39:21 -0400
committer  Ohad Ben-Cohen <ohad@wizery.com>  2011-09-21 12:45:34 -0400
commit     300bab9770e2bd10262bcc78e7249fdce2c74b38 (patch)
tree       5c23d7dce82b96fa177ea7c854de7f4b36992c80 /drivers/hwspinlock/hwspinlock_core.c
parent     c536abfdf5227987b8a72ff955b64e62fd58fe91 (diff)
hwspinlock/core: register a bank of hwspinlocks in a single API call
Hardware Spinlock devices usually contain numerous locks (known devices today support between 32 and 256 locks).

Originally, the hwspinlock core required drivers to register (and later, when needed, unregister) each lock separately. That worked, but required hwspinlock drivers to do a bit of extra work when they were probed/removed.

This patch changes hwspin_lock_{un}register() to allow a bank of hwspinlocks to be {un}registered in a single invocation. A new 'struct hwspinlock_device', which contains an array of 'struct hwspinlock's, is now passed to the core upon registration (and instead of wrapping each struct hwspinlock, a priv member has been added so drivers can piggyback their private data on each hwspinlock).

While at it, several per-lock members were moved to be per-device:

1. struct device *dev
2. struct hwspinlock_ops *ops

In addition, now that the array of locks is handled by the core, there is no reason to maintain a per-lock 'int id' member: the id of a lock simply equals its index in the bank's array plus the bank's base_id. Remove this per-lock id member too, and instead derive the id with simple pointer arithmetic.

As a result of this change, hwspinlock drivers are now simpler and smaller (about 20% less code), and the memory footprint of the hwspinlock framework is reduced.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
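For context, a driver probe path built on the reworked API might look like the sketch below. This is illustrative only and not part of the patch: the "foo" device, its register layout, NUM_FOO_LOCKS and the lock read/write semantics are invented, while hwspin_lock_register(), struct hwspinlock_device, the ops table and the per-lock priv member are the interfaces introduced or reworked here.

/*
 * Illustrative sketch only -- not part of this patch. The "foo" device,
 * its register layout and NUM_FOO_LOCKS are made up for demonstration.
 */
#include <linux/hwspinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/io.h>

#include "hwspinlock_internal.h"        /* struct hwspinlock_device */

#define NUM_FOO_LOCKS        32

static int foo_hwspinlock_trylock(struct hwspinlock *lock)
{
        void __iomem *lock_addr = lock->priv;

        /* assumed hardware semantics: reading 0 means we now own the lock */
        return readl(lock_addr) == 0;
}

static void foo_hwspinlock_unlock(struct hwspinlock *lock)
{
        void __iomem *lock_addr = lock->priv;

        writel(0, lock_addr);
}

static const struct hwspinlock_ops foo_hwspinlock_ops = {
        .trylock        = foo_hwspinlock_trylock,
        .unlock         = foo_hwspinlock_unlock,
};

static int foo_hwspinlock_probe(struct platform_device *pdev)
{
        struct hwspinlock_device *bank;
        struct resource *res;
        void __iomem *io_base;
        int i, ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        io_base = ioremap(res->start, resource_size(res));
        if (!io_base)
                return -ENOMEM;

        /* one allocation covers the bank plus its trailing array of locks */
        bank = kzalloc(sizeof(*bank) +
                        NUM_FOO_LOCKS * sizeof(struct hwspinlock), GFP_KERNEL);
        if (!bank) {
                iounmap(io_base);
                return -ENOMEM;
        }
        platform_set_drvdata(pdev, bank);

        /* piggyback each lock's register address via the new priv member */
        for (i = 0; i < NUM_FOO_LOCKS; i++)
                bank->lock[i].priv = io_base + i * sizeof(u32);

        /* the core runtime-PMs the backing device while locks are requested */
        pm_runtime_enable(&pdev->dev);

        /* a single call now registers the whole bank (base_id 0 here) */
        ret = hwspin_lock_register(bank, &pdev->dev, &foo_hwspinlock_ops,
                                        0, NUM_FOO_LOCKS);
        if (ret) {
                pm_runtime_disable(&pdev->dev);
                kfree(bank);
                iounmap(io_base);
        }
        return ret;
}

The matching remove path would similarly shrink to a single hwspin_lock_unregister(bank) call, followed by pm_runtime_disable(), iounmap() and kfree().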
Diffstat (limited to 'drivers/hwspinlock/hwspinlock_core.c')
-rw-r--r--   drivers/hwspinlock/hwspinlock_core.c | 165
1 file changed, 109 insertions(+), 56 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 0d20b82df0a7..61c9cf15fa52 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -117,7 +117,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
                 return -EBUSY;
 
         /* try to take the hwspinlock device */
-        ret = hwlock->ops->trylock(hwlock);
+        ret = hwlock->bank->ops->trylock(hwlock);
 
         /* if hwlock is already taken, undo spin_trylock_* and exit */
         if (!ret) {
@@ -199,8 +199,8 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                  * Allow platform-specific relax handlers to prevent
                  * hogging the interconnect (no sleeping, though)
                  */
-                if (hwlock->ops->relax)
-                        hwlock->ops->relax(hwlock);
+                if (hwlock->bank->ops->relax)
+                        hwlock->bank->ops->relax(hwlock);
         }
 
         return ret;
@@ -245,7 +245,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
          */
         mb();
 
-        hwlock->ops->unlock(hwlock);
+        hwlock->bank->ops->unlock(hwlock);
 
         /* Undo the spin_trylock{_irq, _irqsave} called while locking */
         if (mode == HWLOCK_IRQSTATE)
@@ -257,63 +257,32 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 }
 EXPORT_SYMBOL_GPL(__hwspin_unlock);
 
-/**
- * hwspin_lock_register() - register a new hw spinlock
- * @hwlock: hwspinlock to register.
- *
- * This function should be called from the underlying platform-specific
- * implementation, to register a new hwspinlock instance.
- *
- * Should be called from a process context (might sleep)
- *
- * Returns 0 on success, or an appropriate error code on failure
- */
-int hwspin_lock_register(struct hwspinlock *hwlock)
+static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
 {
         struct hwspinlock *tmp;
         int ret;
 
-        if (!hwlock || !hwlock->ops ||
-                !hwlock->ops->trylock || !hwlock->ops->unlock) {
-                pr_err("invalid parameters\n");
-                return -EINVAL;
-        }
-
-        spin_lock_init(&hwlock->lock);
-
         mutex_lock(&hwspinlock_tree_lock);
 
-        ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
-        if (ret == -EEXIST)
-                pr_err("hwspinlock id %d already exists!\n", hwlock->id);
-        if (ret)
-                goto out;
+        ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
+        if (ret) {
+                if (ret == -EEXIST)
+                        pr_err("hwspinlock id %d already exists!\n", id);
+                goto out;
+        }
 
         /* mark this hwspinlock as available */
-        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
-                                                        HWSPINLOCK_UNUSED);
+        tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
 
         /* self-sanity check which should never fail */
         WARN_ON(tmp != hwlock);
 
 out:
         mutex_unlock(&hwspinlock_tree_lock);
-        return ret;
+        return 0;
 }
-EXPORT_SYMBOL_GPL(hwspin_lock_register);
 
-/**
- * hwspin_lock_unregister() - unregister an hw spinlock
- * @id: index of the specific hwspinlock to unregister
- *
- * This function should be called from the underlying platform-specific
- * implementation, to unregister an existing (and unused) hwspinlock.
- *
- * Should be called from a process context (might sleep)
- *
- * Returns the address of hwspinlock @id on success, or NULL on failure
- */
-struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
 {
         struct hwspinlock *hwlock = NULL;
         int ret;
@@ -337,6 +306,88 @@ out:
         mutex_unlock(&hwspinlock_tree_lock);
         return hwlock;
 }
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @dev: the backing device
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+                const struct hwspinlock_ops *ops, int base_id, int num_locks)
+{
+        struct hwspinlock *hwlock;
+        int ret = 0, i;
+
+        if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
+                                                        !ops->unlock) {
+                pr_err("invalid parameters\n");
+                return -EINVAL;
+        }
+
+        bank->dev = dev;
+        bank->ops = ops;
+        bank->base_id = base_id;
+        bank->num_locks = num_locks;
+
+        for (i = 0; i < num_locks; i++) {
+                hwlock = &bank->lock[i];
+
+                spin_lock_init(&hwlock->lock);
+                hwlock->bank = bank;
+
+                ret = hwspin_lock_register_single(hwlock, i);
+                if (ret)
+                        goto reg_failed;
+        }
+
+        return 0;
+
+reg_failed:
+        while (--i >= 0)
+                hwspin_lock_unregister_single(i);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_unregister(struct hwspinlock_device *bank)
+{
+        struct hwspinlock *hwlock, *tmp;
+        int i;
+
+        for (i = 0; i < bank->num_locks; i++) {
+                hwlock = &bank->lock[i];
+
+                tmp = hwspin_lock_unregister_single(bank->base_id + i);
+                if (!tmp)
+                        return -EBUSY;
+
+                /* self-sanity check that should never fail */
+                WARN_ON(tmp != hwlock);
+        }
+
+        return 0;
+}
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
 
 /**
@@ -351,24 +402,25 @@ EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
  */
 static int __hwspin_lock_request(struct hwspinlock *hwlock)
 {
+        struct device *dev = hwlock->bank->dev;
         struct hwspinlock *tmp;
         int ret;
 
         /* prevent underlying implementation from being removed */
-        if (!try_module_get(hwlock->dev->driver->owner)) {
-                dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+        if (!try_module_get(dev->driver->owner)) {
+                dev_err(dev, "%s: can't get owner\n", __func__);
                 return -EINVAL;
         }
 
         /* notify PM core that power is now needed */
-        ret = pm_runtime_get_sync(hwlock->dev);
+        ret = pm_runtime_get_sync(dev);
         if (ret < 0) {
-                dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+                dev_err(dev, "%s: can't power on device\n", __func__);
                 return ret;
         }
 
         /* mark hwspinlock as used, should not fail */
-        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                         HWSPINLOCK_UNUSED);
 
         /* self-sanity check that should never fail */
@@ -390,7 +442,7 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
                 return -EINVAL;
         }
 
-        return hwlock->id;
+        return hwlock_to_id(hwlock);
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
 
@@ -465,7 +517,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
         }
 
         /* sanity check (this shouldn't happen) */
-        WARN_ON(hwlock->id != id);
+        WARN_ON(hwlock_to_id(hwlock) != id);
 
         /* make sure this hwspinlock is unused */
         ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -500,6 +552,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  */
 int hwspin_lock_free(struct hwspinlock *hwlock)
 {
+        struct device *dev = hwlock->bank->dev;
         struct hwspinlock *tmp;
         int ret;
 
@@ -511,28 +564,28 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
         mutex_lock(&hwspinlock_tree_lock);
 
         /* make sure the hwspinlock is used */
-        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                         HWSPINLOCK_UNUSED);
         if (ret == 1) {
-                dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+                dev_err(dev, "%s: hwlock is already free\n", __func__);
                 dump_stack();
                 ret = -EINVAL;
                 goto out;
         }
 
         /* notify the underlying device that power is not needed */
-        ret = pm_runtime_put(hwlock->dev);
+        ret = pm_runtime_put(dev);
         if (ret < 0)
                 goto out;
 
         /* mark this hwspinlock as available */
-        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                         HWSPINLOCK_UNUSED);
 
         /* sanity check (this shouldn't happen) */
         WARN_ON(tmp != hwlock);
 
-        module_put(hwlock->dev->driver->owner);
+        module_put(dev->driver->owner);
 
 out:
         mutex_unlock(&hwspinlock_tree_lock);
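A note on hwlock_to_id(), which the new code above relies on: it is defined in hwspinlock_internal.h rather than in this file, and is just the pointer arithmetic described in the commit message, along the lines of:

/* sketch of the helper (the real definition lives in hwspinlock_internal.h) */
static inline int hwlock_to_id(struct hwspinlock *hwlock)
{
        /* a lock's id is its index within its bank, offset by the bank's base_id */
        int local_id = hwlock - &hwlock->bank->lock[0];

        return hwlock->bank->base_id + local_id;
}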