about summary refs log tree commit diff stats
path: root/fs/dlm
diff options
context:
space:
mode:
authorDavid Teigland <teigland@redhat.com>2011-07-06 18:00:54 -0400
committerDavid Teigland <teigland@redhat.com>2011-07-11 09:43:45 -0400
commit3d6aa675fff9eee5a6339d67b355b63a6d69565f (patch)
treef401792f4e92f2473d361bfb185c517838ab2032 /fs/dlm
parenta22ca4806822154c163c6f220f4c2a05adf96fc7 (diff)
dlm: keep lkbs in idr
This is simpler and quicker than the hash table, and avoids needing to search the hash list for every new lkid to check if it's used.

Signed-off-by: David Teigland <teigland@redhat.com>
Diffstat (limited to 'fs/dlm')
-rw-r--r--fs/dlm/config.c7
-rw-r--r--fs/dlm/config.h1
-rw-r--r--fs/dlm/dlm_internal.h14
-rw-r--r--fs/dlm/lock.c69
-rw-r--r--fs/dlm/lockspace.c116
5 files changed, 82 insertions(+), 125 deletions(-)
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index ad3b5a8535d0..4e20f9317156 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -94,7 +94,6 @@ struct dlm_cluster {
94 unsigned int cl_tcp_port; 94 unsigned int cl_tcp_port;
95 unsigned int cl_buffer_size; 95 unsigned int cl_buffer_size;
96 unsigned int cl_rsbtbl_size; 96 unsigned int cl_rsbtbl_size;
97 unsigned int cl_lkbtbl_size;
98 unsigned int cl_dirtbl_size; 97 unsigned int cl_dirtbl_size;
99 unsigned int cl_recover_timer; 98 unsigned int cl_recover_timer;
100 unsigned int cl_toss_secs; 99 unsigned int cl_toss_secs;
@@ -109,7 +108,6 @@ enum {
109 CLUSTER_ATTR_TCP_PORT = 0, 108 CLUSTER_ATTR_TCP_PORT = 0,
110 CLUSTER_ATTR_BUFFER_SIZE, 109 CLUSTER_ATTR_BUFFER_SIZE,
111 CLUSTER_ATTR_RSBTBL_SIZE, 110 CLUSTER_ATTR_RSBTBL_SIZE,
112 CLUSTER_ATTR_LKBTBL_SIZE,
113 CLUSTER_ATTR_DIRTBL_SIZE, 111 CLUSTER_ATTR_DIRTBL_SIZE,
114 CLUSTER_ATTR_RECOVER_TIMER, 112 CLUSTER_ATTR_RECOVER_TIMER,
115 CLUSTER_ATTR_TOSS_SECS, 113 CLUSTER_ATTR_TOSS_SECS,
@@ -162,7 +160,6 @@ __CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
162CLUSTER_ATTR(tcp_port, 1); 160CLUSTER_ATTR(tcp_port, 1);
163CLUSTER_ATTR(buffer_size, 1); 161CLUSTER_ATTR(buffer_size, 1);
164CLUSTER_ATTR(rsbtbl_size, 1); 162CLUSTER_ATTR(rsbtbl_size, 1);
165CLUSTER_ATTR(lkbtbl_size, 1);
166CLUSTER_ATTR(dirtbl_size, 1); 163CLUSTER_ATTR(dirtbl_size, 1);
167CLUSTER_ATTR(recover_timer, 1); 164CLUSTER_ATTR(recover_timer, 1);
168CLUSTER_ATTR(toss_secs, 1); 165CLUSTER_ATTR(toss_secs, 1);
@@ -176,7 +173,6 @@ static struct configfs_attribute *cluster_attrs[] = {
176 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, 173 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
177 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr, 174 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
178 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr, 175 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
179 [CLUSTER_ATTR_LKBTBL_SIZE] = &cluster_attr_lkbtbl_size.attr,
180 [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr, 176 [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr,
181 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr, 177 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
182 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr, 178 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
@@ -446,7 +442,6 @@ static struct config_group *make_cluster(struct config_group *g,
446 cl->cl_tcp_port = dlm_config.ci_tcp_port; 442 cl->cl_tcp_port = dlm_config.ci_tcp_port;
447 cl->cl_buffer_size = dlm_config.ci_buffer_size; 443 cl->cl_buffer_size = dlm_config.ci_buffer_size;
448 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size; 444 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
449 cl->cl_lkbtbl_size = dlm_config.ci_lkbtbl_size;
450 cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size; 445 cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size;
451 cl->cl_recover_timer = dlm_config.ci_recover_timer; 446 cl->cl_recover_timer = dlm_config.ci_recover_timer;
452 cl->cl_toss_secs = dlm_config.ci_toss_secs; 447 cl->cl_toss_secs = dlm_config.ci_toss_secs;
@@ -1038,7 +1033,6 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
1038#define DEFAULT_TCP_PORT 21064 1033#define DEFAULT_TCP_PORT 21064
1039#define DEFAULT_BUFFER_SIZE 4096 1034#define DEFAULT_BUFFER_SIZE 4096
1040#define DEFAULT_RSBTBL_SIZE 1024 1035#define DEFAULT_RSBTBL_SIZE 1024
1041#define DEFAULT_LKBTBL_SIZE 1024
1042#define DEFAULT_DIRTBL_SIZE 1024 1036#define DEFAULT_DIRTBL_SIZE 1024
1043#define DEFAULT_RECOVER_TIMER 5 1037#define DEFAULT_RECOVER_TIMER 5
1044#define DEFAULT_TOSS_SECS 10 1038#define DEFAULT_TOSS_SECS 10
@@ -1052,7 +1046,6 @@ struct dlm_config_info dlm_config = {
1052 .ci_tcp_port = DEFAULT_TCP_PORT, 1046 .ci_tcp_port = DEFAULT_TCP_PORT,
1053 .ci_buffer_size = DEFAULT_BUFFER_SIZE, 1047 .ci_buffer_size = DEFAULT_BUFFER_SIZE,
1054 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE, 1048 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
1055 .ci_lkbtbl_size = DEFAULT_LKBTBL_SIZE,
1056 .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE, 1049 .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE,
1057 .ci_recover_timer = DEFAULT_RECOVER_TIMER, 1050 .ci_recover_timer = DEFAULT_RECOVER_TIMER,
1058 .ci_toss_secs = DEFAULT_TOSS_SECS, 1051 .ci_toss_secs = DEFAULT_TOSS_SECS,
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index dd0ce24d5a80..260574463d29 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -20,7 +20,6 @@ struct dlm_config_info {
20 int ci_tcp_port; 20 int ci_tcp_port;
21 int ci_buffer_size; 21 int ci_buffer_size;
22 int ci_rsbtbl_size; 22 int ci_rsbtbl_size;
23 int ci_lkbtbl_size;
24 int ci_dirtbl_size; 23 int ci_dirtbl_size;
25 int ci_recover_timer; 24 int ci_recover_timer;
26 int ci_toss_secs; 25 int ci_toss_secs;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 0262451eb9c6..23a234bddc60 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -37,6 +37,7 @@
37#include <linux/jhash.h> 37#include <linux/jhash.h>
38#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/idr.h>
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41 42
42#include <linux/dlm.h> 43#include <linux/dlm.h>
@@ -52,7 +53,6 @@ struct dlm_ls;
52struct dlm_lkb; 53struct dlm_lkb;
53struct dlm_rsb; 54struct dlm_rsb;
54struct dlm_member; 55struct dlm_member;
55struct dlm_lkbtable;
56struct dlm_rsbtable; 56struct dlm_rsbtable;
57struct dlm_dirtable; 57struct dlm_dirtable;
58struct dlm_direntry; 58struct dlm_direntry;
@@ -108,11 +108,6 @@ struct dlm_rsbtable {
108 spinlock_t lock; 108 spinlock_t lock;
109}; 109};
110 110
111struct dlm_lkbtable {
112 struct list_head list;
113 rwlock_t lock;
114 uint16_t counter;
115};
116 111
117/* 112/*
118 * Lockspace member (per node in a ls) 113 * Lockspace member (per node in a ls)
@@ -248,7 +243,6 @@ struct dlm_lkb {
248 int8_t lkb_wait_count; 243 int8_t lkb_wait_count;
249 int lkb_wait_nodeid; /* for debugging */ 244 int lkb_wait_nodeid; /* for debugging */
250 245
251 struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
252 struct list_head lkb_statequeue; /* rsb g/c/w list */ 246 struct list_head lkb_statequeue; /* rsb g/c/w list */
253 struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */ 247 struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */
254 struct list_head lkb_wait_reply; /* waiting for remote reply */ 248 struct list_head lkb_wait_reply; /* waiting for remote reply */
@@ -465,12 +459,12 @@ struct dlm_ls {
465 unsigned long ls_scan_time; 459 unsigned long ls_scan_time;
466 struct kobject ls_kobj; 460 struct kobject ls_kobj;
467 461
462 struct idr ls_lkbidr;
463 spinlock_t ls_lkbidr_spin;
464
468 struct dlm_rsbtable *ls_rsbtbl; 465 struct dlm_rsbtable *ls_rsbtbl;
469 uint32_t ls_rsbtbl_size; 466 uint32_t ls_rsbtbl_size;
470 467
471 struct dlm_lkbtable *ls_lkbtbl;
472 uint32_t ls_lkbtbl_size;
473
474 struct dlm_dirtable *ls_dirtbl; 468 struct dlm_dirtable *ls_dirtbl;
475 uint32_t ls_dirtbl_size; 469 uint32_t ls_dirtbl_size;
476 470
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 3c723489079a..784cde417ced 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -580,9 +580,8 @@ static void detach_lkb(struct dlm_lkb *lkb)
580 580
581static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) 581static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
582{ 582{
583 struct dlm_lkb *lkb, *tmp; 583 struct dlm_lkb *lkb;
584 uint32_t lkid = 0; 584 int rv, id;
585 uint16_t bucket;
586 585
587 lkb = dlm_allocate_lkb(ls); 586 lkb = dlm_allocate_lkb(ls);
588 if (!lkb) 587 if (!lkb)
@@ -596,58 +595,38 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
596 INIT_LIST_HEAD(&lkb->lkb_time_list); 595 INIT_LIST_HEAD(&lkb->lkb_time_list);
597 INIT_LIST_HEAD(&lkb->lkb_astqueue); 596 INIT_LIST_HEAD(&lkb->lkb_astqueue);
598 597
599 get_random_bytes(&bucket, sizeof(bucket)); 598 retry:
600 bucket &= (ls->ls_lkbtbl_size - 1); 599 rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
601 600 if (!rv)
602 write_lock(&ls->ls_lkbtbl[bucket].lock); 601 return -ENOMEM;
603 602
604 /* counter can roll over so we must verify lkid is not in use */ 603 spin_lock(&ls->ls_lkbidr_spin);
604 rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
605 if (!rv)
606 lkb->lkb_id = id;
607 spin_unlock(&ls->ls_lkbidr_spin);
605 608
606 while (lkid == 0) { 609 if (rv == -EAGAIN)
607 lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++; 610 goto retry;
608 611
609 list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list, 612 if (rv < 0) {
610 lkb_idtbl_list) { 613 log_error(ls, "create_lkb idr error %d", rv);
611 if (tmp->lkb_id != lkid) 614 return rv;
612 continue;
613 lkid = 0;
614 break;
615 }
616 } 615 }
617 616
618 lkb->lkb_id = lkid;
619 list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
620 write_unlock(&ls->ls_lkbtbl[bucket].lock);
621
622 *lkb_ret = lkb; 617 *lkb_ret = lkb;
623 return 0; 618 return 0;
624} 619}
625 620
626static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
627{
628 struct dlm_lkb *lkb;
629 uint16_t bucket = (lkid >> 16);
630
631 list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
632 if (lkb->lkb_id == lkid)
633 return lkb;
634 }
635 return NULL;
636}
637
638static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) 621static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
639{ 622{
640 struct dlm_lkb *lkb; 623 struct dlm_lkb *lkb;
641 uint16_t bucket = (lkid >> 16);
642
643 if (bucket >= ls->ls_lkbtbl_size)
644 return -EBADSLT;
645 624
646 read_lock(&ls->ls_lkbtbl[bucket].lock); 625 spin_lock(&ls->ls_lkbidr_spin);
647 lkb = __find_lkb(ls, lkid); 626 lkb = idr_find(&ls->ls_lkbidr, lkid);
648 if (lkb) 627 if (lkb)
649 kref_get(&lkb->lkb_ref); 628 kref_get(&lkb->lkb_ref);
650 read_unlock(&ls->ls_lkbtbl[bucket].lock); 629 spin_unlock(&ls->ls_lkbidr_spin);
651 630
652 *lkb_ret = lkb; 631 *lkb_ret = lkb;
653 return lkb ? 0 : -ENOENT; 632 return lkb ? 0 : -ENOENT;
@@ -668,12 +647,12 @@ static void kill_lkb(struct kref *kref)
668 647
669static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) 648static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
670{ 649{
671 uint16_t bucket = (lkb->lkb_id >> 16); 650 uint32_t lkid = lkb->lkb_id;
672 651
673 write_lock(&ls->ls_lkbtbl[bucket].lock); 652 spin_lock(&ls->ls_lkbidr_spin);
674 if (kref_put(&lkb->lkb_ref, kill_lkb)) { 653 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
675 list_del(&lkb->lkb_idtbl_list); 654 idr_remove(&ls->ls_lkbidr, lkid);
676 write_unlock(&ls->ls_lkbtbl[bucket].lock); 655 spin_unlock(&ls->ls_lkbidr_spin);
677 656
678 detach_lkb(lkb); 657 detach_lkb(lkb);
679 658
@@ -683,7 +662,7 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
683 dlm_free_lkb(lkb); 662 dlm_free_lkb(lkb);
684 return 1; 663 return 1;
685 } else { 664 } else {
686 write_unlock(&ls->ls_lkbtbl[bucket].lock); 665 spin_unlock(&ls->ls_lkbidr_spin);
687 return 0; 666 return 0;
688 } 667 }
689} 668}
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 493d1e7161a4..871fe6deb5fa 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -472,17 +472,8 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
472 spin_lock_init(&ls->ls_rsbtbl[i].lock); 472 spin_lock_init(&ls->ls_rsbtbl[i].lock);
473 } 473 }
474 474
475 size = dlm_config.ci_lkbtbl_size; 475 idr_init(&ls->ls_lkbidr);
476 ls->ls_lkbtbl_size = size; 476 spin_lock_init(&ls->ls_lkbidr_spin);
477
478 ls->ls_lkbtbl = vmalloc(sizeof(struct dlm_lkbtable) * size);
479 if (!ls->ls_lkbtbl)
480 goto out_rsbfree;
481 for (i = 0; i < size; i++) {
482 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
483 rwlock_init(&ls->ls_lkbtbl[i].lock);
484 ls->ls_lkbtbl[i].counter = 1;
485 }
486 477
487 size = dlm_config.ci_dirtbl_size; 478 size = dlm_config.ci_dirtbl_size;
488 ls->ls_dirtbl_size = size; 479 ls->ls_dirtbl_size = size;
@@ -605,8 +596,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
605 out_dirfree: 596 out_dirfree:
606 vfree(ls->ls_dirtbl); 597 vfree(ls->ls_dirtbl);
607 out_lkbfree: 598 out_lkbfree:
608 vfree(ls->ls_lkbtbl); 599 idr_destroy(&ls->ls_lkbidr);
609 out_rsbfree:
610 vfree(ls->ls_rsbtbl); 600 vfree(ls->ls_rsbtbl);
611 out_lsfree: 601 out_lsfree:
612 if (do_unreg) 602 if (do_unreg)
@@ -641,50 +631,66 @@ int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
641 return error; 631 return error;
642} 632}
643 633
644/* Return 1 if the lockspace still has active remote locks, 634static int lkb_idr_is_local(int id, void *p, void *data)
645 * 2 if the lockspace still has active local locks. 635{
646 */ 636 struct dlm_lkb *lkb = p;
647static int lockspace_busy(struct dlm_ls *ls) 637
648{ 638 if (!lkb->lkb_nodeid)
649 int i, lkb_found = 0; 639 return 1;
650 struct dlm_lkb *lkb; 640 return 0;
651 641}
652 /* NOTE: We check the lockidtbl here rather than the resource table. 642
653 This is because there may be LKBs queued as ASTs that have been 643static int lkb_idr_is_any(int id, void *p, void *data)
654 unlinked from their RSBs and are pending deletion once the AST has 644{
655 been delivered */ 645 return 1;
656 646}
657 for (i = 0; i < ls->ls_lkbtbl_size; i++) { 647
658 read_lock(&ls->ls_lkbtbl[i].lock); 648static int lkb_idr_free(int id, void *p, void *data)
659 if (!list_empty(&ls->ls_lkbtbl[i].list)) { 649{
660 lkb_found = 1; 650 struct dlm_lkb *lkb = p;
661 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list, 651
662 lkb_idtbl_list) { 652 dlm_del_ast(lkb);
663 if (!lkb->lkb_nodeid) { 653
664 read_unlock(&ls->ls_lkbtbl[i].lock); 654 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
665 return 2; 655 dlm_free_lvb(lkb->lkb_lvbptr);
666 } 656
667 } 657 dlm_free_lkb(lkb);
668 } 658 return 0;
669 read_unlock(&ls->ls_lkbtbl[i].lock); 659}
660
661/* NOTE: We check the lkbidr here rather than the resource table.
662 This is because there may be LKBs queued as ASTs that have been unlinked
663 from their RSBs and are pending deletion once the AST has been delivered */
664
665static int lockspace_busy(struct dlm_ls *ls, int force)
666{
667 int rv;
668
669 spin_lock(&ls->ls_lkbidr_spin);
670 if (force == 0) {
671 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
672 } else if (force == 1) {
673 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
674 } else {
675 rv = 0;
670 } 676 }
671 return lkb_found; 677 spin_unlock(&ls->ls_lkbidr_spin);
678 return rv;
672} 679}
673 680
674static int release_lockspace(struct dlm_ls *ls, int force) 681static int release_lockspace(struct dlm_ls *ls, int force)
675{ 682{
676 struct dlm_lkb *lkb;
677 struct dlm_rsb *rsb; 683 struct dlm_rsb *rsb;
678 struct list_head *head; 684 struct list_head *head;
679 int i, busy, rv; 685 int i, busy, rv;
680 686
681 busy = lockspace_busy(ls); 687 busy = lockspace_busy(ls, force);
682 688
683 spin_lock(&lslist_lock); 689 spin_lock(&lslist_lock);
684 if (ls->ls_create_count == 1) { 690 if (ls->ls_create_count == 1) {
685 if (busy > force) 691 if (busy) {
686 rv = -EBUSY; 692 rv = -EBUSY;
687 else { 693 } else {
688 /* remove_lockspace takes ls off lslist */ 694 /* remove_lockspace takes ls off lslist */
689 ls->ls_create_count = 0; 695 ls->ls_create_count = 0;
690 rv = 0; 696 rv = 0;
@@ -724,29 +730,15 @@ static int release_lockspace(struct dlm_ls *ls, int force)
724 vfree(ls->ls_dirtbl); 730 vfree(ls->ls_dirtbl);
725 731
726 /* 732 /*
727 * Free all lkb's on lkbtbl[] lists. 733 * Free all lkb's in idr
728 */ 734 */
729 735
730 for (i = 0; i < ls->ls_lkbtbl_size; i++) { 736 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
731 head = &ls->ls_lkbtbl[i].list; 737 idr_remove_all(&ls->ls_lkbidr);
732 while (!list_empty(head)) { 738 idr_destroy(&ls->ls_lkbidr);
733 lkb = list_entry(head->next, struct dlm_lkb,
734 lkb_idtbl_list);
735
736 list_del(&lkb->lkb_idtbl_list);
737
738 dlm_del_ast(lkb);
739 739
740 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
741 dlm_free_lvb(lkb->lkb_lvbptr);
742
743 dlm_free_lkb(lkb);
744 }
745 }
746 dlm_astd_resume(); 740 dlm_astd_resume();
747 741
748 vfree(ls->ls_lkbtbl);
749
750 /* 742 /*
751 * Free all rsb's on rsbtbl[] lists 743 * Free all rsb's on rsbtbl[] lists
752 */ 744 */