-rw-r--r--  fs/dlm/Kconfig        |  2
-rw-r--r--  fs/dlm/dlm_internal.h |  1
-rw-r--r--  fs/dlm/lock.c         | 16
-rw-r--r--  fs/dlm/lowcomms.c     |  5
-rw-r--r--  fs/dlm/recover.c      | 37
5 files changed, 47 insertions(+), 14 deletions(-)
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 1897eb1b4b6a..e4242c3f8486 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,6 +1,6 @@
 menuconfig DLM
 	tristate "Distributed Lock Manager (DLM)"
-	depends on EXPERIMENTAL && INET
+	depends on INET
 	depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
 	select IP_SCTP
 	help
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 871c1abf6029..77c0f70f8fe8 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -337,6 +337,7 @@ enum rsb_flags {
 	RSB_NEW_MASTER2,
 	RSB_RECOVER_CONVERT,
 	RSB_RECOVER_GRANT,
+	RSB_RECOVER_LVB_INVAL,
 };
 
 static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
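The new RSB_RECOVER_LVB_INVAL bit is manipulated through the same rsb_flag helpers as the other rsb_flags values. A minimal sketch of the accessors used by the recover.c changes below, assuming they follow the usual set_bit/test_bit pattern over r->res_flags (the bodies are not part of this patch):

	static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
	{
		__clear_bit(flag, &r->res_flags);
	}

	static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
	{
		return test_bit(flag, &r->res_flags);
	}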
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index b56950758188..a579f30f237d 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -5393,6 +5393,13 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
 		if ((lkb->lkb_nodeid == nodeid_gone) ||
 		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
 
+			/* tell recover_lvb to invalidate the lvb
+			   because a node holding EX/PW failed */
+			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
+			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
+				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
+			}
+
 			del_lkb(r, lkb);
 
 			/* this put should free the lkb */
@@ -6025,15 +6032,18 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 	return error;
 }
 
-/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
-   Regardless of what rsb queue the lock is on, it's removed and freed. */
+/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
+   granted.  Regardless of what rsb queue the lock is on, it's removed and
+   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
+   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
 
 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 {
 	struct dlm_args args;
 	int error;
 
-	set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
+	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
+			lkb->lkb_ua, &args);
 
 	error = unlock_lock(ls, lkb, &args);
 	if (error == -DLM_EUNLOCK)
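The rule that both the purge_dead_list() hunk and the IVVALBLK comment above describe reduces to one check on the purged or force-unlocked lkb. A standalone illustration follows; the helper name is made up for the sketch and is not a function added by this patch:

	/* illustrative only: should removing this lkb invalidate the
	   resource's lvb?  mirrors the checks added in purge_dead_list()
	   and the DLM_LKF_IVVALBLK rule described above */
	static bool removing_lkb_invalidates_lvb(const struct dlm_lkb *lkb)
	{
		/* only locks that use a value block are relevant */
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return false;

		/* only a PW or EX holder could have written the lvb */
		return lkb->lkb_grmode >= DLM_LOCK_PW;
	}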
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 331ea4f94efd..dd87a31bcc21 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1385,7 +1385,6 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	struct connection *con;
 	struct writequeue_entry *e;
 	int offset = 0;
-	int users = 0;
 
 	con = nodeid2con(nodeid, allocation);
 	if (!con)
@@ -1399,7 +1398,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	} else {
 		offset = e->end;
 		e->end += len;
-		users = e->users++;
+		e->users++;
 	}
 	spin_unlock(&con->writequeue_lock);
 
@@ -1414,7 +1413,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	spin_lock(&con->writequeue_lock);
 	offset = e->end;
 	e->end += len;
-	users = e->users++;
+	e->users++;
 	list_add_tail(&e->list, &con->writequeue);
 	spin_unlock(&con->writequeue_lock);
 	goto got_one;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 4a7a76e42fc3..aedea28a86a1 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -717,8 +717,14 @@ void dlm_recovered_lock(struct dlm_rsb *r)
  * the VALNOTVALID flag if necessary, and determining the correct lvb contents
  * based on the lvb's of the locks held on the rsb.
  *
- * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
- * was already set prior to recovery, it's not cleared, regardless of locks.
+ * RSB_VALNOTVALID is set in two cases:
+ *
+ * 1. we are master, but not new, and we purged an EX/PW lock held by a
+ * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
+ *
+ * 2. we are a new master, and there are only NL/CR locks left.
+ * (We could probably improve this by only invalidating in this way when
+ * the previous master left uncleanly.  VMS docs mention that.)
  *
  * The LVB contents are only considered for changing when this is a new master
  * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
@@ -734,6 +740,19 @@ static void recover_lvb(struct dlm_rsb *r)
 	int big_lock_exists = 0;
 	int lvblen = r->res_ls->ls_lvblen;
 
+	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
+		/* case 1 above */
+		rsb_set_flag(r, RSB_VALNOTVALID);
+		return;
+	}
+
+	if (!rsb_flag(r, RSB_NEW_MASTER2))
+		return;
+
+	/* we are the new master, so figure out if VALNOTVALID should
+	   be set, and set the rsb lvb from the best lkb available. */
+
 	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
 			continue;
@@ -772,13 +791,10 @@ static void recover_lvb(struct dlm_rsb *r)
 	if (!lock_lvb_exists)
 		goto out;
 
+	/* lvb is invalidated if only NL/CR locks remain */
 	if (!big_lock_exists)
 		rsb_set_flag(r, RSB_VALNOTVALID);
 
-	/* don't mess with the lvb unless we're the new master */
-	if (!rsb_flag(r, RSB_NEW_MASTER2))
-		goto out;
-
 	if (!r->res_lvbptr) {
 		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
 		if (!r->res_lvbptr)
@@ -852,12 +868,19 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
 		if (is_master(r)) {
 			if (rsb_flag(r, RSB_RECOVER_CONVERT))
 				recover_conversion(r);
+
+			/* recover lvb before granting locks so the updated
+			   lvb/VALNOTVALID is presented in the completion */
+			recover_lvb(r);
+
 			if (rsb_flag(r, RSB_NEW_MASTER2))
 				recover_grant(r);
-			recover_lvb(r);
 			count++;
+		} else {
+			rsb_clear_flag(r, RSB_VALNOTVALID);
 		}
 		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
+		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
 		rsb_clear_flag(r, RSB_NEW_MASTER2);
 		unlock_rsb(r);
 	}
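The rewritten comment above enumerates when RSB_VALNOTVALID is set, and the reordered dlm_recover_rsbs() loop now runs recover_lvb() before recover_grant(). A condensed outline of the resulting recover_lvb() control flow; the name recover_lvb_outline and the omission of the lvb-copying body are for illustration only:

	/* condensed outline of the reworked recover_lvb(); the real
	   function also rebuilds r->res_lvbptr from the highest-mode
	   lkb that carries an lvb (omitted here) */
	static void recover_lvb_outline(struct dlm_rsb *r)
	{
		/* case 1: old master purged an EX/PW lock from a failed node */
		if (!rsb_flag(r, RSB_NEW_MASTER2) &&
		    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		/* lvb contents are only reconsidered on a new master */
		if (!rsb_flag(r, RSB_NEW_MASTER2))
			return;

		/* case 2: new master and only NL/CR locks remain ->
		   set RSB_VALNOTVALID; otherwise copy the best lkb lvb
		   into the rsb */
	}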