author      Sunil Mushran <sunil.mushran@oracle.com>    2010-11-19 18:06:49 -0500
committer   Joel Becker <joel.becker@oracle.com>        2010-12-16 03:46:05 -0500
commit      8e17d16f401f7c60908726e070bfa5cbdf31e2f3 (patch)
tree        7d328ec4cdd8fd3e4073eb854c262f10a53eb514 /fs/ocfs2
parent      50308d813bf26500fed671882469939fd19403a3 (diff)
ocfs2/dlm: Cleanup mlogs in dlmthread.c, dlmast.c and dlmdomain.c
Add the domain name and the resource name to the mlogs.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>
Diffstat (limited to 'fs/ocfs2')
-rw-r--r--   fs/ocfs2/dlm/dlmast.c     |  76
-rw-r--r--   fs/ocfs2/dlm/dlmdomain.c  |   2
-rw-r--r--   fs/ocfs2/dlm/dlmthread.c  | 132
3 files changed, 120 insertions, 90 deletions
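
All of the reworked messages share one prefix: the domain name, the resource name printed length-limited with %.*s, and the lock identified by the node and sequence parts of its cookie (the dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq() helpers used throughout the diff). A minimal user-space sketch of that format, with plain printf() standing in for mlog() and made-up domain, lock-name, and cookie values, reads:

#include <stdio.h>

/*
 * Illustration only: mimics the "<domain>: res <name>, lock <node>:<seq>"
 * prefix used by the reworked mlogs. The domain name, lock name and cookie
 * values below are invented for the example; in the kernel the node/seq
 * pair is extracted from the lock cookie by dlm_get_lock_cookie_node() and
 * dlm_get_lock_cookie_seq().
 */
int main(void)
{
        const char *dlm_name = "8E17D16F401F7C60";      /* hypothetical domain name */
        const char lockname[] = "M000000000000000a";    /* resource name, printed via %.*s */
        int len = sizeof(lockname) - 1;                 /* names are length-counted */
        unsigned int node = 2;                          /* node part of the lock cookie */
        unsigned long long seq = 12345;                 /* sequence part of the lock cookie */

        printf("%s: res %.*s, lock %u:%llu, AST getting flushed\n",
               dlm_name, len, lockname, node, seq);
        return 0;
}

The diff below applies that pattern to dlmast.c, dlmdomain.c and dlmthread.c.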
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f44999156839..3a3ed4bb794b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-        mlog_entry_void();
+        struct dlm_lock_resource *res;
 
         BUG_ON(!dlm);
         BUG_ON(!lock);
 
+        res = lock->lockres;
+
         assert_spin_locked(&dlm->ast_lock);
+
         if (!list_empty(&lock->ast_list)) {
-                mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n",
+                mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+                     "AST list not empty, pending %d, newlevel %d\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
                      lock->ast_pending, lock->ml.type);
                 BUG();
         }
         if (lock->ast_pending)
-                mlog(0, "lock has an ast getting flushed right now\n");
+                mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
         /* putting lock on list, add a ref */
         dlm_lock_get(lock);
@@ -110,9 +120,10 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 
         /* check to see if this ast obsoletes the bast */
         if (dlm_should_cancel_bast(dlm, lock)) {
-                struct dlm_lock_resource *res = lock->lockres;
-                mlog(0, "%s: cancelling bast for %.*s\n",
-                     dlm->name, res->lockname.len, res->lockname.name);
+                mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
                 lock->bast_pending = 0;
                 list_del_init(&lock->bast_list);
                 lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-        mlog_entry_void();
-
         BUG_ON(!dlm);
         BUG_ON(!lock);
 
@@ -147,15 +156,21 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 
 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-        mlog_entry_void();
+        struct dlm_lock_resource *res;
 
         BUG_ON(!dlm);
         BUG_ON(!lock);
+
         assert_spin_locked(&dlm->ast_lock);
 
+        res = lock->lockres;
+
         BUG_ON(!list_empty(&lock->bast_list));
         if (lock->bast_pending)
-                mlog(0, "lock has a bast getting flushed right now\n");
+                mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
         /* putting lock on list, add a ref */
         dlm_lock_get(lock);
@@ -167,8 +182,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-        mlog_entry_void();
-
         BUG_ON(!dlm);
         BUG_ON(!lock);
 
@@ -213,7 +226,10 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         dlm_astlockfunc_t *fn;
         struct dlm_lockstatus *lksb;
 
-        mlog_entry_void();
+        mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+             res->lockname.len, res->lockname.name,
+             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
         lksb = lock->lksb;
         fn = lock->ast;
@@ -231,7 +247,10 @@ int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         struct dlm_lockstatus *lksb;
         int lksbflags;
 
-        mlog_entry_void();
+        mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+             res->lockname.len, res->lockname.name,
+             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
         lksb = lock->lksb;
         BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 {
         dlm_bastlockfunc_t *fn = lock->bast;
 
-        mlog_entry_void();
         BUG_ON(lock->ml.node != dlm->node_num);
 
+        mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+             dlm->name, res->lockname.len, res->lockname.name,
+             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+             blocked_type);
+
         (*fn)(lock->astdata, blocked_type);
 }
 
@@ -332,7 +356,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
         /* cannot get a proxy ast message if this node owns it */
         BUG_ON(res->owner == dlm->node_num);
 
-        mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+        mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+             res->lockname.name);
 
         spin_lock(&res->spinlock);
         if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@ do_ast:
         if (past->type == DLM_AST) {
                 /* do not alter lock refcount. switching lists. */
                 list_move_tail(&lock->list, &res->granted);
-                mlog(0, "ast: Adding to granted list... type=%d, "
-                     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+                mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+                     lock->ml.type, lock->ml.convert_type);
+
                 if (lock->ml.convert_type != LKM_IVMODE) {
                         lock->ml.type = lock->ml.convert_type;
                         lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         size_t veclen = 1;
         int status;
 
-        mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
-                   res->lockname.len, res->lockname.name, lock->ml.node,
-                   msg_type, blocked_type);
+        mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+             res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+             blocked_type);
 
         memset(&past, 0, sizeof(struct dlm_proxy_ast));
         past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         vec[0].iov_len = sizeof(struct dlm_proxy_ast);
         vec[0].iov_base = &past;
         if (flags & DLM_LKSB_GET_LVB) {
-                mlog(0, "returning requested LVB data\n");
                 be32_add_cpu(&past.flags, LKM_GET_LVB);
                 vec[1].iov_len = DLM_LVB_LEN;
                 vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
                                      lock->ml.node, &status);
         if (ret < 0)
-                mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+                mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+                     dlm->name, res->lockname.len, res->lockname.name, ret,
                      lock->ml.node);
         else {
                 if (status == DLM_RECOVERING) {
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa96cfe5..fcc40c33489d 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@ redo_bucket:
                 }
                 cond_resched_lock(&dlm->spinlock);
                 num += n;
-                mlog(0, "%s: touched %d lockreses in bucket %d "
-                     "(tot=%d)\n", dlm->name, n, i, num);
         }
         spin_unlock(&dlm->spinlock);
         wake_up(&dlm->dlm_thread_wq);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf33d9b..1d6d1d22c471 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res)
 {
-        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
         assert_spin_locked(&dlm->spinlock);
         assert_spin_locked(&res->spinlock);
 
         if (__dlm_lockres_unused(res)){
                 if (list_empty(&res->purge)) {
-                        mlog(0, "putting lockres %.*s:%p onto purge list\n",
-                             res->lockname.len, res->lockname.name, res);
+                        mlog(0, "%s: Adding res %.*s to purge list\n",
+                             dlm->name, res->lockname.len, res->lockname.name);
 
                         res->last_used = jiffies;
                         dlm_lockres_get(res);
@@ -138,8 +136,8 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                         dlm->purge_count++;
                 }
         } else if (!list_empty(&res->purge)) {
-                mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
-                     res->lockname.len, res->lockname.name, res, res->owner);
+                mlog(0, "%s: Removing res %.*s from purge list\n",
+                     dlm->name, res->lockname.len, res->lockname.name);
 
                 list_del_init(&res->purge);
                 dlm_lockres_put(res);
@@ -150,7 +148,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res)
 {
-        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
         spin_lock(&dlm->spinlock);
         spin_lock(&res->spinlock);
 
@@ -171,9 +168,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
 
         master = (res->owner == dlm->node_num);
 
-
-        mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
-             res->lockname.name, master);
+        mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+             res->lockname.len, res->lockname.name, master);
 
         if (!master) {
                 res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                 /* clear our bit from the master's refmap, ignore errors */
                 ret = dlm_drop_lockres_ref(dlm, res);
                 if (ret < 0) {
-                        mlog_errno(ret);
+                        mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+                             res->lockname.len, res->lockname.name, ret);
                         if (!dlm_is_host_down(ret))
                                 BUG();
                 }
-                mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
-                     dlm->name, res->lockname.len, res->lockname.name, ret);
                 spin_lock(&dlm->spinlock);
                 spin_lock(&res->spinlock);
         }
 
         if (!list_empty(&res->purge)) {
-                mlog(0, "removing lockres %.*s:%p from purgelist, "
-                     "master = %d\n", res->lockname.len, res->lockname.name,
-                     res, master);
+                mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+                     dlm->name, res->lockname.len, res->lockname.name, master);
                 list_del_init(&res->purge);
                 dlm_lockres_put(res);
                 dlm->purge_count--;
         }
 
         if (!__dlm_lockres_unused(res)) {
-                mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+                mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
                      dlm->name, res->lockname.len, res->lockname.name);
                 __dlm_print_one_lock_resource(res);
                 BUG();
@@ -266,10 +260,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                 unused = __dlm_lockres_unused(lockres);
                 if (!unused ||
                     (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-                        mlog(0, "lockres %s:%.*s: is in use or "
-                             "being remastered, used %d, state %d\n",
-                             dlm->name, lockres->lockname.len,
-                             lockres->lockname.name, !unused, lockres->state);
+                        mlog(0, "%s: res %.*s is in use or being remastered, "
+                             "used %d, state %d\n", dlm->name,
+                             lockres->lockname.len, lockres->lockname.name,
+                             !unused, lockres->state);
                         list_move_tail(&dlm->purge_list, &lockres->purge);
                         spin_unlock(&lockres->spinlock);
                         continue;
@@ -296,15 +290,12 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
         struct list_head *head;
         int can_grant = 1;
 
-        //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
-        //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-        //mlog(0, "shuffle res %.*s\n", res->lockname.len,
-        //        res->lockname.name);
-
-        /* because this function is called with the lockres
+        /*
+         * Because this function is called with the lockres
          * spinlock, and because we know that it is not migrating/
          * recovering/in-progress, it is fine to reserve asts and
-         * basts right before queueing them all throughout */
+         * basts right before queueing them all throughout
+         */
         assert_spin_locked(&dlm->ast_lock);
         assert_spin_locked(&res->spinlock);
         BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 converting:
         if (list_empty(&res->converting))
                 goto blocked;
-        mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
-             res->lockname.name);
+        mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+             res->lockname.len, res->lockname.name);
 
         target = list_entry(res->converting.next, struct dlm_lock, list);
         if (target->ml.convert_type == LKM_IVMODE) {
-                mlog(ML_ERROR, "%.*s: converting a lock with no "
-                     "convert_type!\n", res->lockname.len, res->lockname.name);
+                mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+                     dlm->name, res->lockname.len, res->lockname.name);
                 BUG();
         }
         head = &res->granted;
@@ -365,9 +356,12 @@ converting:
                 spin_lock(&target->spinlock);
                 BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-                mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-                     "granting: %d, node: %u\n", res->lockname.len,
-                     res->lockname.name, target->ml.type,
+                mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+                     "%d => %d, node %u\n", dlm->name, res->lockname.len,
+                     res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+                     target->ml.type,
                      target->ml.convert_type, target->ml.node);
 
                 target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@ blocked:
                 spin_lock(&target->spinlock);
                 BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-                mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-                     "node: %u\n", res->lockname.len, res->lockname.name,
+                mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+                     "node %u\n", dlm->name, res->lockname.len,
+                     res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
                      target->ml.type, target->ml.node);
 
-                // target->ml.type is already correct
+                /* target->ml.type is already correct */
                 list_move_tail(&target->list, &res->granted);
 
                 BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@ leave:
 /* must have NO locks when calling this with res !=NULL * */
 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-        mlog_entry("dlm=%p, res=%p\n", dlm, res);
         if (res) {
                 spin_lock(&dlm->spinlock);
                 spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@ void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 
 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-        mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
         assert_spin_locked(&dlm->spinlock);
         assert_spin_locked(&res->spinlock);
 
@@ -484,13 +478,16 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
                         res->state |= DLM_LOCK_RES_DIRTY;
                 }
         }
+
+        mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+             res->lockname.name);
 }
 
 
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
-        mlog(0, "starting dlm thread...\n");
+        mlog(0, "Starting dlm_thread...\n");
 
         dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
         if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@ int dlm_launch_thread(struct dlm_ctxt *dlm)
 void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
         if (dlm->dlm_thread_task) {
-                mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+                mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
                 kthread_stop(dlm->dlm_thread_task);
                 dlm->dlm_thread_task = NULL;
         }
@@ -536,7 +533,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
                 /* get an extra ref on lock */
                 dlm_lock_get(lock);
                 res = lock->lockres;
-                mlog(0, "delivering an ast for this lockres\n");
+                mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+                     "node %u\n", dlm->name, res->lockname.len,
+                     res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+                     lock->ml.type, lock->ml.node);
 
                 BUG_ON(!lock->ast_pending);
 
@@ -557,9 +559,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
                 /* possible that another ast was queued while
                  * we were delivering the last one */
                 if (!list_empty(&lock->ast_list)) {
-                        mlog(0, "aha another ast got queued while "
-                             "we were finishing the last one. will "
-                             "keep the ast_pending flag set.\n");
+                        mlog(0, "%s: res %.*s, AST queued while flushing last "
+                             "one\n", dlm->name, res->lockname.len,
+                             res->lockname.name);
                 } else
                         lock->ast_pending = 0;
 
@@ -590,8 +592,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
                 dlm_lock_put(lock);
                 spin_unlock(&dlm->ast_lock);
 
-                mlog(0, "delivering a bast for this lockres "
-                     "(blocked = %d\n", hi);
+                mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+                     "blocked %d, node %u\n",
+                     dlm->name, res->lockname.len, res->lockname.name,
+                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+                     hi, lock->ml.node);
 
                 if (lock->ml.node != dlm->node_num) {
                         ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
                 /* possible that another bast was queued while
                  * we were delivering the last one */
                 if (!list_empty(&lock->bast_list)) {
-                        mlog(0, "aha another bast got queued while "
-                             "we were finishing the last one. will "
-                             "keep the bast_pending flag set.\n");
+                        mlog(0, "%s: res %.*s, BAST queued while flushing last "
+                             "one\n", dlm->name, res->lockname.len,
+                             res->lockname.name);
                 } else
                         lock->bast_pending = 0;
 
@@ -675,11 +681,12 @@ static int dlm_thread(void *data)
                         spin_lock(&res->spinlock);
                         if (res->owner != dlm->node_num) {
                                 __dlm_print_one_lock_resource(res);
-                                mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
-                                     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
-                                     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
-                                     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
-                                     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+                                mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+                                     " dirty %d\n", dlm->name,
+                                     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+                                     !!(res->state & DLM_LOCK_RES_MIGRATING),
+                                     !!(res->state & DLM_LOCK_RES_RECOVERING),
+                                     !!(res->state & DLM_LOCK_RES_DIRTY));
                         }
                         BUG_ON(res->owner != dlm->node_num);
 
@@ -693,8 +700,8 @@ static int dlm_thread(void *data)
                                 res->state &= ~DLM_LOCK_RES_DIRTY;
                                 spin_unlock(&res->spinlock);
                                 spin_unlock(&dlm->ast_lock);
-                                mlog(0, "delaying list shuffling for in-"
-                                     "progress lockres %.*s, state=%d\n",
+                                mlog(0, "%s: res %.*s, inprogress, delay list "
+                                     "shuffle, state %d\n", dlm->name,
                                      res->lockname.len, res->lockname.name,
                                      res->state);
                                 delay = 1;
@@ -706,10 +713,6 @@ static int dlm_thread(void *data)
                          * spinlock and do NOT have the dlm lock.
                          * safe to reserve/queue asts and run the lists. */
 
-                        mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
-                             "res=%.*s\n", dlm->name,
-                             res->lockname.len, res->lockname.name);
-
                        /* called while holding lockres lock */
                         dlm_shuffle_lists(dlm, res);
                         res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@ in_progress:
                         /* unlikely, but we may need to give time to
                          * other tasks */
                         if (!--n) {
-                                mlog(0, "throttling dlm_thread\n");
+                                mlog(0, "%s: Throttling dlm thread\n",
+                                     dlm->name);
                                 break;
                         }
                 }