author	Eric Sandeen <sandeen@sandeen.net>	2007-10-11 03:42:10 -0400
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 00:47:01 -0500
commit	ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703 (patch)
tree	1dfedaeec9702659950c2ee19dccee4ea076c3ed /fs
parent	703e1f0fd2edc2978bde3b4536e78b577318c090 (diff)
[XFS] Unwrap mru_lock.
Un-obfuscate mru_lock, remove mutex_lock->spin_lock macros, call
spin_lock directly, remove extraneous cookie holdover from old xfs
code.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
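For context, the removed wrappers were thin compatibility macros over the
generic Linux spinlock API. A minimal sketch of what they presumably looked
like follows; the macro names come from the diff, but the definitions are a
reconstruction, not verbatim from the old XFS headers:

/*
 * Reconstructed sketch of the removed XFS wrapper macros.  The second
 * argument to mutex_spinunlock() was a "cookie" carried over from the
 * Irix code, where the lock primitive returned a saved interrupt
 * level; on Linux the value was never used, which is the extraneous
 * holdover this patch drops (note every removed call site passes 0).
 */
#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); (void)(s); } while (0)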
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/xfs_mru_cache.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index e0b358c1c533..dc64630e870e 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -245,7 +245,7 @@ _xfs_mru_cache_clear_reap_list(
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +259,7 @@ _xfs_mru_cache_clear_reap_list(
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +280,7 @@ _xfs_mru_cache_reap(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +294,7 @@ _xfs_mru_cache_reap(
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -398,17 +398,17 @@ xfs_mru_cache_flush(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +454,13 @@ xfs_mru_cache_insert(
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +483,14 @@ xfs_mru_cache_remove(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
@@ -540,14 +540,14 @@ xfs_mru_cache_lookup(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
 	}
 	else
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +571,10 @@ xfs_mru_cache_peek(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -588,5 +588,5 @@ void
 xfs_mru_cache_done(
 	xfs_mru_cache_t	*mru)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
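One pattern worth noting in the hunks above: xfs_mru_cache_lookup() and
xfs_mru_cache_peek() only drop mru->lock on a miss. On a hit they return
with the lock still held, and the caller releases it with
xfs_mru_cache_done(). A hedged caller-side sketch of that pairing; the
function names come from the patch, but the surrounding caller code is
illustrative only:

/* Illustrative caller; mru and key are assumed to already exist. */
void *value = xfs_mru_cache_lookup(mru, key);
if (value) {
	/* mru->lock is still held here, pinning the element. */
	/* ... use value ... */
	xfs_mru_cache_done(mru);	/* drops mru->lock */
}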