path: root/fs/dlm/lockspace.c
Diffstat (limited to 'fs/dlm/lockspace.c')
-rw-r--r--	fs/dlm/lockspace.c	60
1 files changed, 34 insertions, 26 deletions
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index ca506abbdd3b..2e99fb0c9737 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -506,20 +506,18 @@ static int new_lockspace(const char *name, const char *cluster,
 		spin_lock_init(&ls->ls_rsbtbl[i].lock);
 	}
 
-	idr_init(&ls->ls_lkbidr);
-	spin_lock_init(&ls->ls_lkbidr_spin);
-
-	size = dlm_config.ci_dirtbl_size;
-	ls->ls_dirtbl_size = size;
+	spin_lock_init(&ls->ls_remove_spin);
 
-	ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
-	if (!ls->ls_dirtbl)
-		goto out_lkbfree;
-	for (i = 0; i < size; i++) {
-		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
-		spin_lock_init(&ls->ls_dirtbl[i].lock);
+	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+		ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
+						 GFP_KERNEL);
+		if (!ls->ls_remove_names[i])
+			goto out_rsbtbl;
 	}
 
+	idr_init(&ls->ls_lkbidr);
+	spin_lock_init(&ls->ls_lkbidr_spin);
+
 	INIT_LIST_HEAD(&ls->ls_waiters);
 	mutex_init(&ls->ls_waiters_mutex);
 	INIT_LIST_HEAD(&ls->ls_orphans);
@@ -567,7 +565,7 @@ static int new_lockspace(const char *name, const char *cluster,
 
 	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
 	if (!ls->ls_recover_buf)
-		goto out_dirfree;
+		goto out_lkbidr;
 
 	ls->ls_slot = 0;
 	ls->ls_num_slots = 0;
@@ -576,14 +574,14 @@ static int new_lockspace(const char *name, const char *cluster,
 
 	INIT_LIST_HEAD(&ls->ls_recover_list);
 	spin_lock_init(&ls->ls_recover_list_lock);
+	idr_init(&ls->ls_recover_idr);
+	spin_lock_init(&ls->ls_recover_idr_lock);
 	ls->ls_recover_list_count = 0;
 	ls->ls_local_handle = ls;
 	init_waitqueue_head(&ls->ls_wait_general);
 	INIT_LIST_HEAD(&ls->ls_root_list);
 	init_rwsem(&ls->ls_root_sem);
 
-	down_write(&ls->ls_in_recovery);
-
 	spin_lock(&lslist_lock);
 	ls->ls_create_count = 1;
 	list_add(&ls->ls_list, &lslist);
@@ -597,13 +595,24 @@ static int new_lockspace(const char *name, const char *cluster,
 		}
 	}
 
-	/* needs to find ls in lslist */
+	init_waitqueue_head(&ls->ls_recover_lock_wait);
+
+	/*
+	 * Once started, dlm_recoverd first looks for ls in lslist, then
+	 * initializes ls_in_recovery as locked in "down" mode. We need
+	 * to wait for the wakeup from dlm_recoverd because in_recovery
+	 * has to start out in down mode.
+	 */
+
 	error = dlm_recoverd_start(ls);
 	if (error) {
 		log_error(ls, "can't start dlm_recoverd %d", error);
 		goto out_callback;
 	}
 
+	wait_event(ls->ls_recover_lock_wait,
+		   test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
+
 	ls->ls_kobj.kset = dlm_kset;
 	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
 				     "%s", ls->ls_name);
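
The comment block added in the hunk above describes a startup handshake: once started, dlm_recoverd takes ls_in_recovery in down mode and then lets the creator know, and new_lockspace() blocks on ls_recover_lock_wait until the LSFL_RECOVER_LOCK bit confirms that has happened. The following is a minimal user-space sketch of that handshake pattern, assuming only what the hunk shows; it uses a pthread condition variable and a plain flag in place of wait_event()/wake_up() and the lockspace flag bit, and the names in it are illustrative rather than taken from fs/dlm.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recover_lock_wait = PTHREAD_COND_INITIALIZER;
static bool recover_lock_taken;		/* stands in for LSFL_RECOVER_LOCK */

/* stands in for dlm_recoverd: take the recovery lock, then wake the creator */
static void *recoverd(void *arg)
{
	pthread_mutex_lock(&lock);
	recover_lock_taken = true;	/* "ls_in_recovery locked in down mode" */
	pthread_cond_broadcast(&recover_lock_wait);
	pthread_mutex_unlock(&lock);
	/* ... recovery work would continue here ... */
	return NULL;
}

/* stands in for the tail of new_lockspace(): start the thread, then wait */
int main(void)
{
	pthread_t thr;

	if (pthread_create(&thr, NULL, recoverd, NULL) != 0)
		return 1;

	/* analogue of wait_event(ls_recover_lock_wait, test_bit(...)) */
	pthread_mutex_lock(&lock);
	while (!recover_lock_taken)
		pthread_cond_wait(&recover_lock_wait, &lock);
	pthread_mutex_unlock(&lock);

	printf("recovery lock is held on the creator's behalf; continue setup\n");
	pthread_join(thr, NULL);
	return 0;
}

The ordering is the whole point: the creator must not proceed until the recovery lock is known to be held, which is why the old unconditional down_write() in new_lockspace() could be removed in the earlier hunk.
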
@@ -647,11 +656,15 @@ static int new_lockspace(const char *name, const char *cluster,
 	spin_lock(&lslist_lock);
 	list_del(&ls->ls_list);
 	spin_unlock(&lslist_lock);
+	idr_destroy(&ls->ls_recover_idr);
 	kfree(ls->ls_recover_buf);
- out_dirfree:
-	vfree(ls->ls_dirtbl);
- out_lkbfree:
+ out_lkbidr:
 	idr_destroy(&ls->ls_lkbidr);
+	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+		if (ls->ls_remove_names[i])
+			kfree(ls->ls_remove_names[i]);
+	}
+ out_rsbtbl:
 	vfree(ls->ls_rsbtbl);
  out_lsfree:
 	if (do_unreg)
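
With the directory table gone, the error labels are renamed to match what remains to be undone: out_dirfree becomes out_lkbidr, and a new out_rsbtbl label is the target when one of the ls_remove_names allocations in the first hunk fails, freeing the rsb table that was set up earlier. The labels run in reverse order of setup, each one releasing only what was established before the jump to it. Below is a minimal user-space sketch of that unwind-in-reverse pattern; NBUFS, BUFLEN, the struct, and the function names are illustrative, not taken from the DLM code.

#include <stdlib.h>
#include <stdio.h>

#define NBUFS  4	/* illustrative; plays the role of DLM_REMOVE_NAMES_MAX */
#define BUFLEN 65	/* illustrative; plays the role of DLM_RESNAME_MAXLEN+1 */

struct space {
	void *table;
	char *names[NBUFS];
};

/* set up in order: table first, then the name buffers;
 * on failure, unwind in reverse order, freeing only what was allocated */
static int space_setup(struct space *sp)
{
	int i;

	sp->table = calloc(1024, 1);
	if (!sp->table)
		goto out;

	for (i = 0; i < NBUFS; i++) {
		sp->names[i] = calloc(1, BUFLEN);
		if (!sp->names[i])
			goto out_names;
	}
	return 0;

 out_names:
	while (--i >= 0)
		free(sp->names[i]);
	free(sp->table);
 out:
	return -1;
}

static void space_teardown(struct space *sp)
{
	int i;

	for (i = 0; i < NBUFS; i++)
		free(sp->names[i]);	/* free(NULL) is a no-op */
	free(sp->table);
}

int main(void)
{
	struct space sp;

	if (space_setup(&sp) != 0) {
		fprintf(stderr, "setup failed\n");
		return 1;
	}
	space_teardown(&sp);
	return 0;
}

The release path in the last hunk of this patch does the unconditional variant of the same cleanup, relying on kfree(NULL) being a no-op.
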
@@ -779,13 +792,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	kfree(ls->ls_recover_buf);
 
 	/*
-	 * Free direntry structs.
-	 */
-
-	dlm_dir_clear(ls);
-	vfree(ls->ls_dirtbl);
-
-	/*
 	 * Free all lkb's in idr
 	 */
 
@@ -813,6 +819,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 
 	vfree(ls->ls_rsbtbl);
 
+	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
+		kfree(ls->ls_remove_names[i]);
+
 	while (!list_empty(&ls->ls_new_rsb)) {
 		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
 				       res_hashchain);
@@ -826,7 +835,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 
 	dlm_purge_requestqueue(ls);
 	kfree(ls->ls_recover_args);
-	dlm_clear_free_entries(ls);
 	dlm_clear_members(ls);
 	dlm_clear_members_gone(ls);
 	kfree(ls->ls_node_array);