author     David Teigland <teigland@redhat.com>    2009-01-07 17:50:41 -0500
committer  David Teigland <teigland@redhat.com>    2009-01-08 16:12:39 -0500
commit     c7be761a8163d2f1ac0b606c21e4316b7abc5af7
tree       1687373b56379c1c310f43b1c05ac486c67dec48  /fs/dlm/debug_fs.c
parent     892c4467e335e9050c95e0d8409c136c4dadaca2
dlm: change rsbtbl rwlock to spinlock
The rwlock is almost always used in write mode, so there's no reason not to use
a spinlock instead.
Signed-off-by: David Teigland <teigland@redhat.com>
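For context, the spin_lock()/spin_unlock() calls introduced below only compile because the bucket lock field itself is converted from an rwlock to a spinlock elsewhere in this patch; the diffstat below is filtered to fs/dlm/debug_fs.c and does not show that part. A minimal sketch of the assumed declaration change, roughly how struct dlm_rsbtable is laid out in fs/dlm/dlm_internal.h:

/*
 * Sketch only -- assumed shape of the rsb hash-table bucket in
 * fs/dlm/dlm_internal.h; not part of the debug_fs.c diff shown here.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct dlm_rsbtable {
	struct list_head	list;	/* active rsbs, linked via res_hashchain */
	struct list_head	toss;	/* unused rsbs kept around for reuse */
	spinlock_t		lock;	/* was rwlock_t lock; now taken with spin_lock() */
};

With the field a plain spinlock, the read_lock()/read_unlock() pairs in these debugfs iterators become spin_lock()/spin_unlock(), matching the write-mode callers that the commit message says dominate.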
Diffstat (limited to 'fs/dlm/debug_fs.c')
-rw-r--r--   fs/dlm/debug_fs.c   24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index bc4af3ef65a3..1d1d27442235 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -416,7 +416,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 	if (seq->op == &format3_seq_ops)
 		ri->format = 3;
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 		list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
 				    res_hashchain) {
@@ -424,12 +424,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 				dlm_hold_rsb(r);
 				ri->rsb = r;
 				ri->bucket = bucket;
-				read_unlock(&ls->ls_rsbtbl[bucket].lock);
+				spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 				return ri;
 			}
 		}
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 
 	/*
 	 * move to the first rsb in the next non-empty bucket
@@ -447,18 +447,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 			return NULL;
 		}
 
-		read_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
 		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
 					     struct dlm_rsb, res_hashchain);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			read_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
 
@@ -477,7 +477,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 	 * move to the next rsb in the same bucket
 	 */
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	rp = ri->rsb;
 	next = rp->res_hashchain.next;
 
@@ -485,12 +485,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 		r = list_entry(next, struct dlm_rsb, res_hashchain);
 		dlm_hold_rsb(r);
 		ri->rsb = r;
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 		dlm_put_rsb(rp);
 		++*pos;
 		return ri;
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	dlm_put_rsb(rp);
 
 	/*
@@ -509,18 +509,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 			return NULL;
 		}
 
-		read_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
 		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
 					     struct dlm_rsb, res_hashchain);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			read_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
 
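One point worth noting about the conversion: on both sides of the change the iterator holds the bucket lock only long enough to find an rsb and pin it, so the lock is never held while seq_file formats output. A condensed restatement of that pattern, paraphrased from table_seq_start() above (not verbatim kernel code; dlm_hold_rsb() and dlm_put_rsb() are taken to acquire and release a reference on the rsb):

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
		r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
				     struct dlm_rsb, res_hashchain);
		dlm_hold_rsb(r);	/* pin the rsb so it outlives the lock */
		ri->rsb = r;
		ri->bucket = bucket;
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
		return ri;		/* reference dropped later via dlm_put_rsb() */
	}
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);

Because each critical section is this short and never sleeps, losing the reader/writer distinction presumably costs the debugfs path very little, while the dominant write-mode callers get a cheaper lock.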