author     Mark Fasheh <mark.fasheh@oracle.com>    2006-09-08 14:39:27 -0400
committer  Mark Fasheh <mark.fasheh@oracle.com>    2006-09-24 16:50:42 -0400
commit     ea5b3a187e2724fa9d08b2fbd3898c149ed95c6b
tree       42748cd0189e3b41147ccbe114d02633dc8d0d1e /fs/ocfs2/dlm/userdlm.c
parent     3384f3df5ed939a25135e1b2734fb7cdee1720a8
ocfs2: Update dlmfs for new dlmlock() API
We just need to add a namelen field to the user_lock_res structure, and
update a few debug prints. Instead of updating all debug prints, I took the
opportunity to remove a few that are likely unnecessary these days.
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Diffstat (limited to 'fs/ocfs2/dlm/userdlm.c')
 -rw-r--r--  fs/ocfs2/dlm/userdlm.c | 81
 1 file changed, 30 insertions(+), 51 deletions(-)
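The change is mechanical: every debug print that used to pass a plain "%s"/l_name pair now passes "%.*s" with the new l_namelen field in front of l_name, and the dlmlock() calls gain l_namelen as an explicit argument right after the name. Below is a rough userspace sketch of the "%.*s" idiom the patch leans on; plain printf() stands in for the kernel's mlog(), and the made-up struct fake_lock_res stands in for struct user_lock_res. It is an illustration, not the kernel code itself.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct user_lock_res, keeping only the two
 * fields the debug prints care about. l_name is a raw byte buffer, so
 * its length has to travel with it instead of a terminating NUL. */
struct fake_lock_res {
	char l_name[32];
	int  l_namelen;
};

/* Mirrors the idea of user_dlm_lock_res_init(): copy the raw name bytes
 * and record the length rather than relying on NUL termination. */
static void fake_res_init(struct fake_lock_res *res, const char *name, int len)
{
	memcpy(res->l_name, name, len);
	res->l_namelen = len;
}

int main(void)
{
	struct fake_lock_res res;

	fake_res_init(&res, "example-lockname", 16);

	/* "%.*s" prints at most l_namelen bytes of l_name; this is the
	 * (length, pointer) pair every converted mlog() now passes. */
	printf("AST fired for lockres %.*s\n", res.l_namelen, res.l_name);
	return 0;
}

With that in mind, the hunks below are easy to scan: each "%s" format becomes "%.*s", each printed l_name gains a preceding l_namelen, user_dlm_lock_res_init() records dentry->d_name.len into the new field, and a handful of low-value mlog(0, ...) lines are dropped outright.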
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index e641b084b343..eead48bbfac6 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -102,10 +102,10 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
 	spin_unlock(&lockres->l_lock);
 }
 
 #define user_log_dlm_error(_func, _stat, _lockres) do {	\
 	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "	\
-		"resource %s: %s\n", dlm_errname(_stat), _func,	\
-		_lockres->l_name, dlm_errmsg(_stat));		\
+		"resource %.*s: %s\n", dlm_errname(_stat), _func,	\
+		_lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \
 } while (0)
 
 /* WARNING: This function lives in a world where the only three lock
@@ -127,21 +127,22 @@ static void user_ast(void *opaque)
 	struct user_lock_res *lockres = opaque;
 	struct dlm_lockstatus *lksb;
 
-	mlog(0, "AST fired for lockres %s\n", lockres->l_name);
+	mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
+	     lockres->l_name);
 
 	spin_lock(&lockres->l_lock);
 
 	lksb = &(lockres->l_lksb);
 	if (lksb->status != DLM_NORMAL) {
-		mlog(ML_ERROR, "lksb status value of %u on lockres %s\n",
-		     lksb->status, lockres->l_name);
+		mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
+		     lksb->status, lockres->l_namelen, lockres->l_name);
 		spin_unlock(&lockres->l_lock);
 		return;
 	}
 
 	mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
-			"Lockres %s, requested ivmode. flags 0x%x\n",
-			lockres->l_name, lockres->l_flags);
+			"Lockres %.*s, requested ivmode. flags 0x%x\n",
+			lockres->l_namelen, lockres->l_name, lockres->l_flags);
 
 	/* we're downconverting. */
 	if (lockres->l_requested < lockres->l_level) {
@@ -213,8 +214,8 @@ static void user_bast(void *opaque, int level)
 {
 	struct user_lock_res *lockres = opaque;
 
-	mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n",
-	     lockres->l_name, level);
+	mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
+	     lockres->l_namelen, lockres->l_name, level);
 
 	spin_lock(&lockres->l_lock);
 	lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -231,7 +232,8 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
 {
 	struct user_lock_res *lockres = opaque;
 
-	mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name);
+	mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
+	     lockres->l_name);
 
 	if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
 		mlog(ML_ERROR, "Dlm returns status %d\n", status);
@@ -244,8 +246,6 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
 	    && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
 		lockres->l_level = LKM_IVMODE;
 	} else if (status == DLM_CANCELGRANT) {
-		mlog(0, "Lock %s, cancel fails, flags 0x%x\n",
-		     lockres->l_name, lockres->l_flags);
 		/* We tried to cancel a convert request, but it was
 		 * already granted. Don't clear the busy flag - the
 		 * ast should've done this already. */
@@ -255,8 +255,6 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
 	} else {
 		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
 		/* Cancel succeeded, we want to re-queue */
-		mlog(0, "Lock %s, cancel succeeds, flags 0x%x\n",
-		     lockres->l_name, lockres->l_flags);
 		lockres->l_requested = LKM_IVMODE; /* cancel an
 						    * upconvert
 						    * request. */
@@ -287,13 +285,14 @@ static void user_dlm_unblock_lock(void *opaque)
 	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
-	mlog(0, "processing lockres %s\n", lockres->l_name);
+	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
+	     lockres->l_name);
 
 	spin_lock(&lockres->l_lock);
 
 	mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
-			"Lockres %s, flags 0x%x\n",
-			lockres->l_name, lockres->l_flags);
+			"Lockres %.*s, flags 0x%x\n",
+			lockres->l_namelen, lockres->l_name, lockres->l_flags);
 
 	/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
 	 * set, we want user_ast clear it. */
@@ -305,22 +304,16 @@ static void user_dlm_unblock_lock(void *opaque)
 	 * flag, and finally we might get another bast which re-queues
 	 * us before our ast for the downconvert is called. */
 	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
-		mlog(0, "Lockres %s, flags 0x%x: queued but not blocking\n",
-		     lockres->l_name, lockres->l_flags);
 		spin_unlock(&lockres->l_lock);
 		goto drop_ref;
 	}
 
 	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
-		mlog(0, "lock is in teardown so we do nothing\n");
 		spin_unlock(&lockres->l_lock);
 		goto drop_ref;
 	}
 
 	if (lockres->l_flags & USER_LOCK_BUSY) {
-		mlog(0, "Cancel lock %s, flags 0x%x\n",
-		     lockres->l_name, lockres->l_flags);
-
 		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
 			spin_unlock(&lockres->l_lock);
 			goto drop_ref;
@@ -372,6 +365,7 @@ static void user_dlm_unblock_lock(void *opaque)
 			 &lockres->l_lksb,
 			 LKM_CONVERT|LKM_VALBLK,
 			 lockres->l_name,
+			 lockres->l_namelen,
 			 user_ast,
 			 lockres,
 			 user_bast);
@@ -420,16 +414,16 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
 
 	if (level != LKM_EXMODE &&
 	    level != LKM_PRMODE) {
-		mlog(ML_ERROR, "lockres %s: invalid request!\n",
-		     lockres->l_name);
+		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
+		     lockres->l_namelen, lockres->l_name);
 		status = -EINVAL;
 		goto bail;
 	}
 
-	mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n",
-	     lockres->l_name,
+	mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
+	     lockres->l_namelen, lockres->l_name,
 	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
 	     lkm_flags);
 
 again:
 	if (signal_pending(current)) {
@@ -474,15 +468,13 @@ again:
 		BUG_ON(level == LKM_IVMODE);
 		BUG_ON(level == LKM_NLMODE);
 
-		mlog(0, "lock %s, get lock from %d to level = %d\n",
-		     lockres->l_name, lockres->l_level, level);
-
 		/* call dlm_lock to upgrade lock now */
 		status = dlmlock(dlm,
 				 level,
 				 &lockres->l_lksb,
 				 local_flags,
 				 lockres->l_name,
+				 lockres->l_namelen,
 				 user_ast,
 				 lockres,
 				 user_bast);
@@ -498,9 +490,6 @@ again:
 			goto bail;
 		}
 
-		mlog(0, "lock %s, successfull return from dlmlock\n",
-		     lockres->l_name);
-
 		user_wait_on_busy_lock(lockres);
 		goto again;
 	}
@@ -508,9 +497,6 @@ again:
 	user_dlm_inc_holders(lockres, level);
 	spin_unlock(&lockres->l_lock);
 
-	mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name,
-	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");
-
 	status = 0;
 bail:
 	return status;
@@ -538,13 +524,11 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres,
 {
 	if (level != LKM_EXMODE &&
 	    level != LKM_PRMODE) {
-		mlog(ML_ERROR, "lockres %s: invalid request!\n", lockres->l_name);
+		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
+		     lockres->l_namelen, lockres->l_name);
 		return;
 	}
 
-	mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name,
-	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");
-
 	spin_lock(&lockres->l_lock);
 	user_dlm_dec_holders(lockres, level);
 	__user_dlm_cond_queue_lockres(lockres);
@@ -602,6 +586,7 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
 	memcpy(lockres->l_name,
 	       dentry->d_name.name,
 	       dentry->d_name.len);
+	lockres->l_namelen = dentry->d_name.len;
 }
 
 int user_dlm_destroy_lock(struct user_lock_res *lockres)
@@ -609,11 +594,10 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
 	int status = -EBUSY;
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
-	mlog(0, "asked to destroy %s\n", lockres->l_name);
+	mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
 
 	spin_lock(&lockres->l_lock);
 	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
-		mlog(0, "Lock is already torn down\n");
 		spin_unlock(&lockres->l_lock);
 		return 0;
 	}
@@ -623,8 +607,6 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
 	while (lockres->l_flags & USER_LOCK_BUSY) {
 		spin_unlock(&lockres->l_lock);
 
-		mlog(0, "lock %s is busy\n", lockres->l_name);
-
 		user_wait_on_busy_lock(lockres);
 
 		spin_lock(&lockres->l_lock);
@@ -632,14 +614,12 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
 
 	if (lockres->l_ro_holders || lockres->l_ex_holders) {
 		spin_unlock(&lockres->l_lock);
-		mlog(0, "lock %s has holders\n", lockres->l_name);
 		goto bail;
 	}
 
 	status = 0;
 	if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
 		spin_unlock(&lockres->l_lock);
-		mlog(0, "lock %s is not attached\n", lockres->l_name);
 		goto bail;
 	}
 
@@ -647,7 +627,6 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
 	lockres->l_flags |= USER_LOCK_BUSY;
 	spin_unlock(&lockres->l_lock);
 
-	mlog(0, "unlocking lockres %s\n", lockres->l_name);
 	status = dlmunlock(dlm,
 			   &lockres->l_lksb,
 			   LKM_VALBLK,