aboutsummaryrefslogtreecommitdiffstats
path: root/fs/dlm/lockspace.c
diff options
context:
space:
mode:
authorDavid Teigland <teigland@redhat.com>2012-05-10 11:18:07 -0400
committerDavid Teigland <teigland@redhat.com>2012-07-16 15:16:19 -0400
commitc04fecb4d9f7753e0cbff7edd03ec68f8721cdce (patch)
treeecd82017d49c7bb03b96a8ad1eb4e9a5bb84409a /fs/dlm/lockspace.c
parentecc728467fb0c3e350b57fc66ed7585c15be50f5 (diff)
dlm: use rsbtbl as resource directory
Remove the dir hash table (dirtbl), and use the rsb hash table (rsbtbl) as the resource directory. It has always been an unnecessary duplication of information. This improves efficiency by using a single rsbtbl lookup in many cases where both rsbtbl and dirtbl lookups were needed previously. This eliminates the need to handle cases of rsbtbl and dirtbl being out of sync. In many cases there will be memory savings because the dir hash table no longer exists. Signed-off-by: David Teigland <teigland@redhat.com>
Diffstat (limited to 'fs/dlm/lockspace.c')
-rw-r--r--fs/dlm/lockspace.c23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index ca506abbdd3b..065bb75ed609 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -509,17 +509,6 @@ static int new_lockspace(const char *name, const char *cluster,
509 idr_init(&ls->ls_lkbidr); 509 idr_init(&ls->ls_lkbidr);
510 spin_lock_init(&ls->ls_lkbidr_spin); 510 spin_lock_init(&ls->ls_lkbidr_spin);
511 511
512 size = dlm_config.ci_dirtbl_size;
513 ls->ls_dirtbl_size = size;
514
515 ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
516 if (!ls->ls_dirtbl)
517 goto out_lkbfree;
518 for (i = 0; i < size; i++) {
519 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
520 spin_lock_init(&ls->ls_dirtbl[i].lock);
521 }
522
523 INIT_LIST_HEAD(&ls->ls_waiters); 512 INIT_LIST_HEAD(&ls->ls_waiters);
524 mutex_init(&ls->ls_waiters_mutex); 513 mutex_init(&ls->ls_waiters_mutex);
525 INIT_LIST_HEAD(&ls->ls_orphans); 514 INIT_LIST_HEAD(&ls->ls_orphans);
@@ -567,7 +556,7 @@ static int new_lockspace(const char *name, const char *cluster,
567 556
568 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS); 557 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
569 if (!ls->ls_recover_buf) 558 if (!ls->ls_recover_buf)
570 goto out_dirfree; 559 goto out_lkbfree;
571 560
572 ls->ls_slot = 0; 561 ls->ls_slot = 0;
573 ls->ls_num_slots = 0; 562 ls->ls_num_slots = 0;
@@ -648,8 +637,6 @@ static int new_lockspace(const char *name, const char *cluster,
648 list_del(&ls->ls_list); 637 list_del(&ls->ls_list);
649 spin_unlock(&lslist_lock); 638 spin_unlock(&lslist_lock);
650 kfree(ls->ls_recover_buf); 639 kfree(ls->ls_recover_buf);
651 out_dirfree:
652 vfree(ls->ls_dirtbl);
653 out_lkbfree: 640 out_lkbfree:
654 idr_destroy(&ls->ls_lkbidr); 641 idr_destroy(&ls->ls_lkbidr);
655 vfree(ls->ls_rsbtbl); 642 vfree(ls->ls_rsbtbl);
@@ -779,13 +766,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
779 kfree(ls->ls_recover_buf); 766 kfree(ls->ls_recover_buf);
780 767
781 /* 768 /*
782 * Free direntry structs.
783 */
784
785 dlm_dir_clear(ls);
786 vfree(ls->ls_dirtbl);
787
788 /*
789 * Free all lkb's in idr 769 * Free all lkb's in idr
790 */ 770 */
791 771
@@ -826,7 +806,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
826 806
827 dlm_purge_requestqueue(ls); 807 dlm_purge_requestqueue(ls);
828 kfree(ls->ls_recover_args); 808 kfree(ls->ls_recover_args);
829 dlm_clear_free_entries(ls);
830 dlm_clear_members(ls); 809 dlm_clear_members(ls);
831 dlm_clear_members_gone(ls); 810 dlm_clear_members_gone(ls);
832 kfree(ls->ls_node_array); 811 kfree(ls->ls_node_array);