author     Wengang Wang <wen.gang.wang@oracle.com>	2010-03-30 00:09:22 -0400
committer  Joel Becker <joel.becker@oracle.com>	2010-05-05 21:18:08 -0400
commit     a5196ec5ef80309fd390191c548ee1f2e8a327ee (patch)
tree       5856168bc2708230107940ec73a6752fabd8fd7b /fs
parent     83f92318fa33cc084e14e64dc903e605f75884c1 (diff)
ocfs2: print node # when tcp fails
Print the node number of a peer node if sending it a message failed.
Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>
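
Most hunks below make the same substitution: where a failed o2net send was only reported through mlog_errno(), the error path now logs the o2net message type, the domain key, and the number of the peer node the send was addressed to. A representative instance of the new call, taken from the dlmast.c hunk below (the other sites differ only in the message constant and the variable holding the target node):

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
		     lock->ml.node);

With this, a send failure in the log identifies both the message that was lost and the node it was meant for, rather than just the errno value.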
Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmast.c      |  4
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c  |  4
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c   | 19
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c     |  4
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c   | 12
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 27
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c   |  3
7 files changed, 51 insertions, 22 deletions
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index dccc439fa087..390a887c4df3 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -453,7 +453,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
 				     lock->ml.node, &status);
 	if (ret < 0)
-		mlog_errno(ret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+		     lock->ml.node);
 	else {
 		if (status == DLM_RECOVERING) {
 			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index f283bce776b4..3028d05fc4e9 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -391,7 +391,9 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
 		} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
 			dlm_error(ret);
 	} else {
-		mlog_errno(tmpret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
+		     res->owner);
 		if (dlm_is_host_down(tmpret)) {
 			/* instead of logging the same network error over
 			 * and over, sleep here and wait for the heartbeat
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 988c9055fd4e..eb50be0288f2 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -565,7 +565,9 @@ static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
 	status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
 				    &leave_msg, sizeof(leave_msg), node,
 				    NULL);
-
+	if (status < 0)
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node);
 	mlog(0, "status return %d from o2net_send_message\n", status);
 
 	return status;
@@ -962,7 +964,9 @@ static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
 				    &cancel_msg, sizeof(cancel_msg), node,
 				    NULL);
 	if (status < 0) {
-		mlog_errno(status);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+		     node);
 		goto bail;
 	}
 
@@ -1029,10 +1033,11 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
 	byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
 
 	status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
-				    sizeof(join_msg), node,
-				    &join_resp);
+				    sizeof(join_msg), node, &join_resp);
 	if (status < 0 && status != -ENOPROTOOPT) {
-		mlog_errno(status);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+		     node);
 		goto bail;
 	}
 	dlm_query_join_wire_to_packet(join_resp, &packet);
@@ -1103,7 +1108,9 @@ static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
 				    &assert_msg, sizeof(assert_msg), node,
 				    NULL);
 	if (status < 0)
-		mlog_errno(status);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+		     node);
 
 	return status;
 }
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 733337772671..f1fba2a6a8fe 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -329,7 +329,9 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
 			BUG();
 		}
 	} else {
-		mlog_errno(tmpret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
+		     res->owner);
 		if (dlm_is_host_down(tmpret)) {
 			ret = DLM_RECOVERING;
 			mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a659606dcb95..3114de2e74c7 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1666,7 +1666,9 @@ again:
 		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
 					    &assert, sizeof(assert), to, &r);
 		if (tmpret < 0) {
-			mlog(0, "assert_master returned %d!\n", tmpret);
+			mlog(ML_ERROR, "Error %d when sending message %u (key "
+			     "0x%x) to node %u\n", tmpret,
+			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
 			if (!dlm_is_host_down(tmpret)) {
 				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
 				BUG();
@@ -2207,7 +2209,9 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
 				 &deref, sizeof(deref), res->owner, &r);
 	if (ret < 0)
-		mlog_errno(ret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
+		     res->owner);
 	else if (r < 0) {
 		/* BAD. other node says I did not have a ref. */
 		mlog(ML_ERROR,"while dropping ref on %s:%.*s "
@@ -2977,7 +2981,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
 					 &migrate, sizeof(migrate), nodenum,
 					 &status);
 		if (ret < 0) {
-			mlog(0, "migrate_request returned %d!\n", ret);
+			mlog(ML_ERROR, "Error %d when sending message %u (key "
+			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
+			     dlm->key, nodenum);
 			if (!dlm_is_host_down(ret)) {
 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
 				BUG();
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b4f99de2caf3..f8b75ce4be70 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -803,7 +803,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
 
 	/* negative status is handled by caller */
 	if (ret < 0)
-		mlog_errno(ret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key "
+		     "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
+		     dlm->key, request_from);
 
 	// return from here, then
 	// sleep until all received or error
@@ -955,10 +957,10 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
 	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
 				 sizeof(done_msg), send_to, &tmpret);
 	if (ret < 0) {
+		mlog(ML_ERROR, "Error %d when sending message %u (key "
+		     "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
+		     dlm->key, send_to);
 		if (!dlm_is_host_down(ret)) {
-			mlog_errno(ret);
-			mlog(ML_ERROR, "%s: unknown error sending data-done "
-			     "to %u\n", dlm->name, send_to);
 			BUG();
 		}
 	} else
@@ -1126,7 +1128,9 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
 	if (ret < 0) {
 		/* XXX: negative status is not handled.
 		 * this will end up killing this node. */
-		mlog_errno(ret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key "
+		     "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
+		     dlm->key, send_to);
 	} else {
 		/* might get an -ENOMEM back here */
 		ret = status;
@@ -1642,7 +1646,9 @@ int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 				 &req, sizeof(req), nodenum, &status);
 	/* XXX: negative status not handled properly here. */
 	if (ret < 0)
-		mlog_errno(ret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key "
+		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
+		     dlm->key, nodenum);
 	else {
 		BUG_ON(status < 0);
 		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
@@ -2640,7 +2646,7 @@ retry:
 		if (dlm_is_host_down(ret)) {
 			/* node is down. not involved in recovery
 			 * so just keep going */
-			mlog(0, "%s: node %u was down when sending "
+			mlog(ML_NOTICE, "%s: node %u was down when sending "
 			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
 			ret = 0;
 		}
@@ -2660,11 +2666,12 @@ retry:
 		}
 		if (ret < 0) {
 			struct dlm_lock_resource *res;
+
 			/* this is now a serious problem, possibly ENOMEM
 			 * in the network stack. must retry */
 			mlog_errno(ret);
 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
-			     " returned %d\n", dlm->name, nodenum, ret);
+			     "returned %d\n", dlm->name, nodenum, ret);
 			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
 						 DLM_RECOVERY_LOCK_NAME_LEN);
 			if (res) {
@@ -2789,7 +2796,9 @@ stage2:
 		if (ret >= 0)
 			ret = status;
 		if (ret < 0) {
-			mlog_errno(ret);
+			mlog(ML_ERROR, "Error %d when sending message %u (key "
+			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
+			     dlm->key, nodenum);
 			if (dlm_is_host_down(ret)) {
 				/* this has no effect on this recovery
 				 * session, so set the status to zero to
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 49e29ecd0201..2c1f306f8fa5 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -355,7 +355,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
 			mlog(0, "master was in-progress. retry\n");
 		ret = status;
 	} else {
-		mlog_errno(tmpret);
+		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+		     "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
 		if (dlm_is_host_down(tmpret)) {
 			/* NOTE: this seems strange, but it is what we want.
 			 * when the master goes down during a cancel or