author	Sunil Mushran <sunil.mushran@oracle.com>	2011-07-24 13:23:54 -0400
committer	Sunil Mushran <sunil.mushran@oracle.com>	2011-07-24 13:23:54 -0400
commit	8decab3c8dadcdf4f54ffb30df6e6f67b398b6e0 (patch)
tree	c1221be95579ae14f55774d13d48d9d938152071 /fs
parent	1dfecf810e0eacb35987905082f23e5c2cd26e91 (diff)
ocfs2/dlm: Clean up messages in o2dlm
o2dlm messages needed a facelift.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
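For context (an illustration, not part of the original commit message): the hunks below converge on one message shape, "<domain>: res <lockres>, <event>". A minimal userspace sketch of that format follows; the domain and lock names are made up, and the "%.*s" form mirrors how o2dlm prints lock resource names, which carry an explicit length and no NUL terminator.

/*
 * Illustration only (userspace, not kernel code): the message shape the
 * patch moves o2dlm toward -- "<domain>: res <name>, <event>".  The domain
 * and lock names here are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	const char *dlm_name = "webcluster";	/* hypothetical domain */
	const char lockname[8] = "M000abc1";	/* not NUL-terminated */
	int namelen = (int)sizeof(lockname);
	int ret = -112, owner = 3;

	/* Old style: no domain or resource context up front. */
	fprintf(stderr, "Error %d when sending message to node %u\n", ret, owner);

	/* New style: domain first, then the resource, then the event. */
	fprintf(stderr, "%s: res %.*s, Error %d send DEREF to node %u\n",
		dlm_name, namelen, lockname, ret, owner);
	return 0;
}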
Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c    18
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c      22
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c    46
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c  53
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c     2
5 files changed, 71 insertions, 70 deletions
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf9..ce225fb0cea 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -539,17 +539,17 @@ again:
 
 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
 {
-	int node = -1;
+	int node = -1, num = 0;
 
 	assert_spin_locked(&dlm->spinlock);
 
-	printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+	printk("( ");
 	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
 		printk("%d ", node);
+		++num;
 	}
-	printk("\n");
+	printk(") %u nodes\n", num);
 }
 
 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +566,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
 
 	node = exit_msg->node_idx;
 
-	printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
 	spin_lock(&dlm->spinlock);
 	clear_bit(node, dlm->domain_map);
 	clear_bit(node, dlm->exit_domain_map);
+	printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
 	__dlm_print_nodes(dlm);
 
 	/* notify anything attached to the heartbeat events */
@@ -755,6 +754,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
 	dlm_mark_domain_leaving(dlm);
 	dlm_leave_domain(dlm);
+	printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
 	dlm_force_free_mles(dlm);
 	dlm_complete_dlm_shutdown(dlm);
 }
@@ -970,7 +970,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
 	clear_bit(assert->node_idx, dlm->exit_domain_map);
 	__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
 
-	printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+	printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
 	       assert->node_idx, dlm->name);
 	__dlm_print_nodes(dlm);
 
@@ -1701,8 +1701,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
 bail:
 	spin_lock(&dlm->spinlock);
 	__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
-	if (!status)
+	if (!status) {
+		printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
 		__dlm_print_nodes(dlm);
+	}
 	spin_unlock(&dlm->spinlock);
 
 	if (ctxt) {
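As an aside (illustration, not part of the patch): with the dlmdomain.c changes above, the caller prints the event prefix and __dlm_print_nodes() appends the node list and count, so a successful join logs a single line such as "o2dlm: Joining domain webcluster ( 0 1 3 ) 3 nodes". A rough userspace sketch of that output, using a plain byte array in place of the kernel's domain_map bitmap and a made-up domain name:

/*
 * Illustration only (userspace, not kernel code): the single-line join
 * message produced after this patch.  Domain name and node numbers are
 * hypothetical.
 */
#include <stdio.h>

#define O2NM_MAX_NODES 255

int main(void)
{
	unsigned char domain_map[O2NM_MAX_NODES] = {0};
	const char *dlm_name = "webcluster";	/* hypothetical domain */
	unsigned int num = 0;
	int node;

	domain_map[0] = domain_map[1] = domain_map[3] = 1;	/* nodes 0, 1, 3 up */

	/* dlm_try_to_join_domain()'s half of the line. */
	printf("o2dlm: Joining domain %s ", dlm_name);

	/* __dlm_print_nodes()'s half: "( 0 1 3 ) 3 nodes". */
	printf("( ");
	for (node = 0; node < O2NM_MAX_NODES; node++) {
		if (!domain_map[node])
			continue;
		printf("%d ", node);
		++num;
	}
	printf(") %u nodes\n", num);
	return 0;
}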
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..c7f3e22bda1 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -319,27 +319,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
 	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
 				    sizeof(create), res->owner, &status);
 	if (tmpret >= 0) {
-		// successfully sent and received
-		ret = status;  // this is already a dlm_status
+		ret = status;
 		if (ret == DLM_REJECTED) {
-			mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
-			     "no longer owned by %u. that node is coming back "
-			     "up currently.\n", dlm->name, create.namelen,
+			mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+			     "owned by node %u. That node is coming back up "
+			     "currently.\n", dlm->name, create.namelen,
 			     create.name, res->owner);
 			dlm_print_one_lock_resource(res);
 			BUG();
 		}
 	} else {
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
-		     res->owner);
-		if (dlm_is_host_down(tmpret)) {
+		mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+		     "node %u\n", dlm->name, create.namelen, create.name,
+		     tmpret, res->owner);
+		if (dlm_is_host_down(tmpret))
 			ret = DLM_RECOVERING;
-			mlog(0, "node %u died so returning DLM_RECOVERING "
-			     "from lock message!\n", res->owner);
-		} else {
+		else
 			ret = dlm_err_to_dlm_status(tmpret);
-		}
 	}
 
 	return ret;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e..9f3b093b629 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -829,8 +829,8 @@ lookup:
 	 * but they might own this lockres. wait on them. */
 	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 	if (bit < O2NM_MAX_NODES) {
-		mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-		     "recover before lock mastery can begin\n",
+		mlog(0, "%s: res %.*s, At least one node (%d) "
+		     "to recover before lock mastery can begin\n",
 		     dlm->name, namelen, (char *)lockid, bit);
 		wait_on_recovery = 1;
 	}
@@ -864,8 +864,8 @@ redo_request:
 	 * dlm spinlock would be detectable be a change on the mle,
 	 * so we only need to clear out the recovery map once. */
 	if (dlm_is_recovery_lock(lockid, namelen)) {
-		mlog(ML_NOTICE, "%s: recovery map is not empty, but "
-		     "must master $RECOVERY lock now\n", dlm->name);
+		mlog(0, "%s: Recovery map is not empty, but must "
+		     "master $RECOVERY lock now\n", dlm->name);
 		if (!dlm_pre_master_reco_lockres(dlm, res))
 			wait_on_recovery = 0;
 		else {
@@ -883,8 +883,8 @@ redo_request:
 		spin_lock(&dlm->spinlock);
 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 		if (bit < O2NM_MAX_NODES) {
-			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-			     "recover before lock mastery can begin\n",
+			mlog(0, "%s: res %.*s, At least one node (%d) "
+			     "to recover before lock mastery can begin\n",
 			     dlm->name, namelen, (char *)lockid, bit);
 			wait_on_recovery = 1;
 		} else
@@ -913,8 +913,8 @@ redo_request:
 			 * yet, keep going until it does. this is how the
 			 * master will know that asserts are needed back to
 			 * the lower nodes. */
-			mlog(0, "%s:%.*s: requests only up to %u but master "
-			     "is %u, keep going\n", dlm->name, namelen,
+			mlog(0, "%s: res %.*s, Requests only up to %u but "
+			     "master is %u, keep going\n", dlm->name, namelen,
 			     lockid, nodenum, mle->master);
 		}
 	}
@@ -924,13 +924,12 @@ wait:
 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 	if (ret < 0) {
 		wait_on_recovery = 1;
-		mlog(0, "%s:%.*s: node map changed, redo the "
-		     "master request now, blocked=%d\n",
-		     dlm->name, res->lockname.len,
+		mlog(0, "%s: res %.*s, Node map changed, redo the master "
+		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 		     res->lockname.name, blocked);
 		if (++tries > 20) {
-			mlog(ML_ERROR, "%s:%.*s: spinning on "
-			     "dlm_wait_for_lock_mastery, blocked=%d\n",
+			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+			     "dlm_wait_for_lock_mastery, blocked = %d\n",
 			     dlm->name, res->lockname.len,
 			     res->lockname.name, blocked);
 			dlm_print_one_lock_resource(res);
@@ -940,7 +939,8 @@ wait:
 		goto redo_request;
 	}
 
-	mlog(0, "lockres mastered by %u\n", res->owner);
+	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+	     res->lockname.name, res->owner);
 	/* make sure we never continue without this */
 	BUG_ON(res->owner == O2NM_MAX_NODES);
 
@@ -2187,8 +2187,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	namelen = res->lockname.len;
 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
 
-	mlog(0, "%s:%.*s: sending deref to %d\n",
-	     dlm->name, namelen, lockname, res->owner);
 	memset(&deref, 0, sizeof(deref));
 	deref.node_idx = dlm->node_num;
 	deref.namelen = namelen;
@@ -2197,14 +2195,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
 				 &deref, sizeof(deref), res->owner, &r);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
-		     res->owner);
+		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+		     dlm->name, namelen, lockname, ret, res->owner);
 	else if (r < 0) {
 		/* BAD. other node says I did not have a ref. */
-		mlog(ML_ERROR,"while dropping ref on %s:%.*s "
-		     "(master=%u) got %d.\n", dlm->name, namelen,
-		     lockname, res->owner, r);
+		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+		     dlm->name, namelen, lockname, res->owner, r);
 		dlm_print_one_lock_resource(res);
 		BUG();
 	}
@@ -2916,9 +2912,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
 					 &migrate, sizeof(migrate), nodenum,
 					 &status);
 		if (ret < 0) {
-			mlog(ML_ERROR, "Error %d when sending message %u (key "
-			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
-			     dlm->key, nodenum);
+			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+			     "MIGRATE_REQUEST to node %u\n", dlm->name,
+			     migrate.namelen, migrate.name, ret, nodenum);
 			if (!dlm_is_host_down(ret)) {
 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
 				BUG();
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a2..a3c312c43b9 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -430,6 +430,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm->spinlock);
 	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+	       dlm->name, dlm->reco.dead_node);
 	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
 	spin_unlock(&dlm->spinlock);
 }
@@ -440,9 +442,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
 	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
 	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
 	spin_unlock(&dlm->spinlock);
+	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
 	wake_up(&dlm->reco.event);
 }
 
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+	       "dead node %u in domain %s\n", dlm->reco.new_master,
+	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+	       dlm->reco.dead_node, dlm->name);
+}
+
 static int dlm_do_recovery(struct dlm_ctxt *dlm)
 {
 	int status = 0;
@@ -505,9 +516,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
 		}
 		mlog(0, "another node will master this recovery session.\n");
 	}
-	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
-	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
-	     dlm->node_num, dlm->reco.dead_node);
+
+	dlm_print_recovery_master(dlm);
 
 	/* it is safe to start everything back up here
 	 * because all of the dead node's lock resources
@@ -518,15 +528,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
 	return 0;
 
 master_here:
-	mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
-	     "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
-	     dlm->node_num, dlm->reco.dead_node, dlm->name);
+	dlm_print_recovery_master(dlm);
 
 	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
 	if (status < 0) {
 		/* we should never hit this anymore */
-		mlog(ML_ERROR, "error %d remastering locks for node %u, "
-		     "retrying.\n", status, dlm->reco.dead_node);
+		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
 		/* yield a bit to allow any final network messages
 		 * to get handled on remaining nodes */
 		msleep(100);
@@ -567,7 +575,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
 		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
 		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
 
-		mlog(0, "requesting lock info from node %u\n",
+		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
 		     ndata->node_num);
 
 		if (ndata->node_num == dlm->node_num) {
@@ -640,7 +648,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
 		spin_unlock(&dlm_reco_state_lock);
 	}
 
-	mlog(0, "done requesting all lock info\n");
+	mlog(0, "%s: Done requesting all lock info\n", dlm->name);
 
 	/* nodes should be sending reco data now
 	 * just need to wait */
@@ -802,10 +810,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
 
 	/* negative status is handled by caller */
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key "
-		     "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
-		     dlm->key, request_from);
-
+		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+		     "to recover dead node %u\n", dlm->name, ret,
+		     request_from, dead_node);
 	// return from here, then
 	// sleep until all received or error
 	return ret;
@@ -956,9 +963,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
 	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
 				 sizeof(done_msg), send_to, &tmpret);
 	if (ret < 0) {
-		mlog(ML_ERROR, "Error %d when sending message %u (key "
-		     "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
-		     dlm->key, send_to);
+		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+		     "to recover dead node %u\n", dlm->name, ret, send_to,
+		     dead_node);
 		if (!dlm_is_host_down(ret)) {
 			BUG();
 		}
@@ -1127,9 +1134,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
 	if (ret < 0) {
 		/* XXX: negative status is not handled.
 		 * this will end up killing this node. */
-		mlog(ML_ERROR, "Error %d when sending message %u (key "
-		     "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
-		     dlm->key, send_to);
+		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+		     "node %u (%s)\n", dlm->name, mres->lockname_len,
+		     mres->lockname, ret, send_to,
+		     (orig_flags & DLM_MRES_MIGRATION ?
+		      "migration" : "recovery"));
 	} else {
 		/* might get an -ENOMEM back here */
 		ret = status;
@@ -2324,9 +2333,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 			dlm_revalidate_lvb(dlm, res, dead_node);
 			if (res->owner == dead_node) {
 				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
-					mlog(ML_NOTICE, "Ignore %.*s for "
+					mlog(ML_NOTICE, "%s: res %.*s, Skip "
 					     "recovery as it is being freed\n",
-					     res->lockname.len,
+					     dlm->name, res->lockname.len,
 					     res->lockname.name);
 				} else
 					dlm_move_lockres_to_recovery_list(dlm,
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c47..46faac20f16 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -185,8 +185,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
 		/* clear our bit from the master's refmap, ignore errors */
 		ret = dlm_drop_lockres_ref(dlm, res);
 		if (ret < 0) {
-			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
-			     res->lockname.len, res->lockname.name, ret);
 			if (!dlm_is_host_down(ret))
 				BUG();
 		}