Diffstat (limited to 'fs/ocfs2/dlm/dlmmaster.c')
 -rw-r--r--  fs/ocfs2/dlm/dlmmaster.c  175
 1 file changed, 88 insertions, 87 deletions
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e9..005261c333b0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
 	return NULL;
 }
 
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
-				     struct dlm_lock_resource *res,
-				     int new_lockres,
-				     const char *file,
-				     int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+				struct dlm_lock_resource *res, int bit)
+{
+	assert_spin_locked(&res->spinlock);
+
+	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+	     res->lockname.name, bit, __builtin_return_address(0));
+
+	set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res, int bit)
+{
+	assert_spin_locked(&res->spinlock);
+
+	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+	     res->lockname.name, bit, __builtin_return_address(0));
+
+	clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+				   struct dlm_lock_resource *res)
 {
-	if (!new_lockres)
-		assert_spin_locked(&res->spinlock);
+	assert_spin_locked(&res->spinlock);
 
-	if (!test_bit(dlm->node_num, res->refmap)) {
-		BUG_ON(res->inflight_locks != 0);
-		dlm_lockres_set_refmap_bit(dlm->node_num, res);
-	}
 	res->inflight_locks++;
-	mlog(0, "%s:%.*s: inflight++: now %u\n",
-	     dlm->name, res->lockname.len, res->lockname.name,
-	     res->inflight_locks);
+
+	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+	     res->lockname.len, res->lockname.name, res->inflight_locks,
+	     __builtin_return_address(0));
 }
 
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
-				     struct dlm_lock_resource *res,
-				     const char *file,
-				     int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+				   struct dlm_lock_resource *res)
 {
 	assert_spin_locked(&res->spinlock);
 
 	BUG_ON(res->inflight_locks == 0);
+
 	res->inflight_locks--;
-	mlog(0, "%s:%.*s: inflight--: now %u\n",
-	     dlm->name, res->lockname.len, res->lockname.name,
-	     res->inflight_locks);
-	if (res->inflight_locks == 0)
-		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+	     res->lockname.len, res->lockname.name, res->inflight_locks,
+	     __builtin_return_address(0));
+
 	wake_up(&res->wq);
 }
 
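The hunk above replaces the file/line-threading debug wrappers with plain helpers: the dlm context comes first, the node bit last, the resource spinlock is asserted unconditionally, and the caller is identified in the log via %ps and __builtin_return_address(0) rather than explicit file/line arguments. A minimal sketch of a call site under the new signatures (the variables dlm, res, and node here are illustrative, not from this patch):

	/* Illustrative only: mark, then clear, node `node` in the refmap
	 * of `res`. Both helpers assert res->spinlock, so hold it. */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(dlm, res, node);	/* was (node, res) */
	dlm_lockres_clear_refmap_bit(dlm, res, node);	/* was (node, res) */
	spin_unlock(&res->spinlock);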
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 	unsigned int hash;
 	int tries = 0;
 	int bit, wait_on_recovery = 0;
-	int drop_inflight_if_nonlocal = 0;
 
 	BUG_ON(!lockid);
 
@@ -709,36 +723,33 @@ lookup:
 	spin_lock(&dlm->spinlock);
 	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
 	if (tmpres) {
-		int dropping_ref = 0;
-
 		spin_unlock(&dlm->spinlock);
-
 		spin_lock(&tmpres->spinlock);
-		/* We wait for the other thread that is mastering the resource */
+		/* Wait on the thread that is mastering the resource */
 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 			__dlm_wait_on_lockres(tmpres);
 			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+			spin_unlock(&tmpres->spinlock);
+			dlm_lockres_put(tmpres);
+			tmpres = NULL;
+			goto lookup;
 		}
 
-		if (tmpres->owner == dlm->node_num) {
-			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
-			dlm_lockres_grab_inflight_ref(dlm, tmpres);
-		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
-			dropping_ref = 1;
-		spin_unlock(&tmpres->spinlock);
-
-		/* wait until done messaging the master, drop our ref to allow
-		 * the lockres to be purged, start over. */
-		if (dropping_ref) {
-			spin_lock(&tmpres->spinlock);
-			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+		/* Wait on the resource purge to complete before continuing */
+		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+			BUG_ON(tmpres->owner == dlm->node_num);
+			__dlm_wait_on_lockres_flags(tmpres,
+						    DLM_LOCK_RES_DROPPING_REF);
 			spin_unlock(&tmpres->spinlock);
 			dlm_lockres_put(tmpres);
 			tmpres = NULL;
 			goto lookup;
 		}
 
-		mlog(0, "found in hash!\n");
+		/* Grab inflight ref to pin the resource */
+		dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+		spin_unlock(&tmpres->spinlock);
 		if (res)
 			dlm_lockres_put(res);
 		res = tmpres;
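The reworked lookup path restarts from the top (goto lookup) after waiting out either an in-progress mastery or an in-progress purge, and only pins the resource once neither condition holds, all under tmpres->spinlock. A condensed sketch of that control flow (comments stand in for the unlock/put/restart steps shown above):

	spin_lock(&tmpres->spinlock);
	if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		__dlm_wait_on_lockres(tmpres);
		/* unlock, put the ref, start the lookup over */
	} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
		__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
		/* unlock, put the ref, start the lookup over */
	} else {
		/* neither mastering nor purging: pin the resource */
		dlm_lockres_grab_inflight_ref(dlm, tmpres);
	}
	spin_unlock(&tmpres->spinlock);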
@@ -829,8 +840,8 @@ lookup:
 	 * but they might own this lockres. wait on them. */
 	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 	if (bit < O2NM_MAX_NODES) {
-		mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-		     "recover before lock mastery can begin\n",
+		mlog(0, "%s: res %.*s, At least one node (%d) "
+		     "to recover before lock mastery can begin\n",
 		     dlm->name, namelen, (char *)lockid, bit);
 		wait_on_recovery = 1;
 	}
@@ -843,12 +854,11 @@ lookup:
 
 	/* finally add the lockres to its hash bucket */
 	__dlm_insert_lockres(dlm, res);
-	/* since this lockres is new it doesn't not require the spinlock */
-	dlm_lockres_grab_inflight_ref_new(dlm, res);
 
-	/* if this node does not become the master make sure to drop
-	 * this inflight reference below */
-	drop_inflight_if_nonlocal = 1;
+	/* Grab inflight ref to pin the resource */
+	spin_lock(&res->spinlock);
+	dlm_lockres_grab_inflight_ref(dlm, res);
+	spin_unlock(&res->spinlock);
 
 	/* get an extra ref on the mle in case this is a BLOCK
 	 * if so, the creator of the BLOCK may try to put the last
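With the lockless _new variant gone, dlm_lockres_grab_inflight_ref() enforces one rule everywhere: res->spinlock must be held. Pinning a freshly inserted lockres therefore takes the lock explicitly; a recap sketch of the old versus new pattern from the hunk above:

	/* Before: the _new variant skipped the spinlock assert.
	 *   dlm_lockres_grab_inflight_ref_new(dlm, res);
	 * After: one helper, one rule -- always hold res->spinlock. */
	__dlm_insert_lockres(dlm, res);
	spin_lock(&res->spinlock);
	dlm_lockres_grab_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);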
@@ -864,8 +874,8 @@ redo_request:
 	 * dlm spinlock would be detectable be a change on the mle,
 	 * so we only need to clear out the recovery map once. */
 	if (dlm_is_recovery_lock(lockid, namelen)) {
-		mlog(ML_NOTICE, "%s: recovery map is not empty, but "
-		     "must master $RECOVERY lock now\n", dlm->name);
+		mlog(0, "%s: Recovery map is not empty, but must "
+		     "master $RECOVERY lock now\n", dlm->name);
 		if (!dlm_pre_master_reco_lockres(dlm, res))
 			wait_on_recovery = 0;
 		else {
@@ -883,8 +893,8 @@ redo_request:
 		spin_lock(&dlm->spinlock);
 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 		if (bit < O2NM_MAX_NODES) {
-			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-			     "recover before lock mastery can begin\n",
+			mlog(0, "%s: res %.*s, At least one node (%d) "
+			     "to recover before lock mastery can begin\n",
 			     dlm->name, namelen, (char *)lockid, bit);
 			wait_on_recovery = 1;
 		} else
@@ -913,8 +923,8 @@ redo_request:
 			 * yet, keep going until it does. this is how the
 			 * master will know that asserts are needed back to
 			 * the lower nodes. */
-			mlog(0, "%s:%.*s: requests only up to %u but master "
-			     "is %u, keep going\n", dlm->name, namelen,
+			mlog(0, "%s: res %.*s, Requests only up to %u but "
+			     "master is %u, keep going\n", dlm->name, namelen,
 			     lockid, nodenum, mle->master);
 		}
 	}
@@ -924,13 +934,12 @@ wait:
 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 	if (ret < 0) {
 		wait_on_recovery = 1;
-		mlog(0, "%s:%.*s: node map changed, redo the "
-		     "master request now, blocked=%d\n",
-		     dlm->name, res->lockname.len,
+		mlog(0, "%s: res %.*s, Node map changed, redo the master "
+		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 		     res->lockname.name, blocked);
 		if (++tries > 20) {
-			mlog(ML_ERROR, "%s:%.*s: spinning on "
-			     "dlm_wait_for_lock_mastery, blocked=%d\n",
+			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+			     "dlm_wait_for_lock_mastery, blocked = %d\n",
 			     dlm->name, res->lockname.len,
 			     res->lockname.name, blocked);
 			dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
 		goto redo_request;
 	}
 
-	mlog(0, "lockres mastered by %u\n", res->owner);
+	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+	     res->lockname.name, res->owner);
 	/* make sure we never continue without this */
 	BUG_ON(res->owner == O2NM_MAX_NODES);
 
@@ -952,8 +962,6 @@ wait:
 
 wake_waiters:
 	spin_lock(&res->spinlock);
-	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
-		dlm_lockres_drop_inflight_ref(dlm, res);
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
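Because dlm_get_lock_resource() now takes the inflight reference unconditionally, for local and remote masters alike, the drop_inflight_if_nonlocal special case above disappears; whoever finishes with the resource releases the pin instead. A hedged sketch of the matching release (caller and context are illustrative):

	/* Illustrative: release the pin taken by dlm_get_lock_resource().
	 * dlm_lockres_drop_inflight_ref() asserts res->spinlock, BUGs on
	 * underflow, and wakes res->wq. */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);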
@@ -1426,9 +1434,7 @@ way_up_top:
 	}
 
 	if (res->owner == dlm->node_num) {
-		mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-		     dlm->name, namelen, name, request->node_idx);
-		dlm_lockres_set_refmap_bit(request->node_idx, res);
+		dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
 		spin_unlock(&res->spinlock);
 		response = DLM_MASTER_RESP_YES;
 		if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
 			 * go back and clean the mles on any
 			 * other nodes */
 			dispatch_assert = 1;
-			dlm_lockres_set_refmap_bit(request->node_idx, res);
-			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-			     dlm->name, namelen, name,
-			     request->node_idx);
+			dlm_lockres_set_refmap_bit(dlm, res,
+						   request->node_idx);
 		} else
 			response = DLM_MASTER_RESP_NO;
 	} else {
@@ -1702,7 +1706,7 @@ again:
 			     "lockres, set the bit in the refmap\n",
 			     namelen, lockname, to);
 			spin_lock(&res->spinlock);
-			dlm_lockres_set_refmap_bit(to, res);
+			dlm_lockres_set_refmap_bit(dlm, res, to);
 			spin_unlock(&res->spinlock);
 		}
 	}
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	namelen = res->lockname.len;
 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
 
-	mlog(0, "%s:%.*s: sending deref to %d\n",
-	     dlm->name, namelen, lockname, res->owner);
 	memset(&deref, 0, sizeof(deref));
 	deref.node_idx = dlm->node_num;
 	deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
 				 &deref, sizeof(deref), res->owner, &r);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
-		     res->owner);
+		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+		     dlm->name, namelen, lockname, ret, res->owner);
 	else if (r < 0) {
 		/* BAD. other node says I did not have a ref. */
-		mlog(ML_ERROR,"while dropping ref on %s:%.*s "
-		     "(master=%u) got %d.\n", dlm->name, namelen,
-		     lockname, res->owner, r);
+		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+		     dlm->name, namelen, lockname, res->owner, r);
 		dlm_print_one_lock_resource(res);
 		BUG();
 	}
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
 	else {
 		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
 		if (test_bit(node, res->refmap)) {
-			dlm_lockres_clear_refmap_bit(node, res);
+			dlm_lockres_clear_refmap_bit(dlm, res, node);
 			cleared = 1;
 		}
 	}
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
 	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
 	if (test_bit(node, res->refmap)) {
 		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
-		dlm_lockres_clear_refmap_bit(node, res);
+		dlm_lockres_clear_refmap_bit(dlm, res, node);
 		cleared = 1;
 	}
 	spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
 				BUG_ON(!list_empty(&lock->bast_list));
 				BUG_ON(lock->ast_pending);
 				BUG_ON(lock->bast_pending);
-				dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+				dlm_lockres_clear_refmap_bit(dlm, res,
+							     lock->ml.node);
 				list_del_init(&lock->list);
 				dlm_lock_put(lock);
 				/* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
 			mlog(0, "%s:%.*s: node %u had a ref to this "
 			     "migrating lockres, clearing\n", dlm->name,
 			     res->lockname.len, res->lockname.name, bit);
-			dlm_lockres_clear_refmap_bit(bit, res);
+			dlm_lockres_clear_refmap_bit(dlm, res, bit);
 		}
 		bit++;
 	}
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
 					 &migrate, sizeof(migrate), nodenum,
 					 &status);
 		if (ret < 0) {
-			mlog(ML_ERROR, "Error %d when sending message %u (key "
-			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
-			     dlm->key, nodenum);
+			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+			     "MIGRATE_REQUEST to node %u\n", dlm->name,
+			     migrate.namelen, migrate.name, ret, nodenum);
 			if (!dlm_is_host_down(ret)) {
 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
 				BUG();
@@ -2937,7 +2938,7 @@
 			     dlm->name, res->lockname.len, res->lockname.name,
 			     nodenum);
 			spin_lock(&res->spinlock);
-			dlm_lockres_set_refmap_bit(nodenum, res);
+			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
 			spin_unlock(&res->spinlock);
 		}
 	}
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 	 * mastery reference here since old_master will briefly have
 	 * a reference after the migration completes */
 	spin_lock(&res->spinlock);
-	dlm_lockres_set_refmap_bit(old_master, res);
+	dlm_lockres_set_refmap_bit(dlm, res, old_master);
 	spin_unlock(&res->spinlock);
 
 	mlog(0, "now time to do a migrate request to other nodes\n");
