Diffstat (limited to 'fs/gfs2/glock.c')
| -rw-r--r-- | fs/gfs2/glock.c | 619 |
1 file changed, 373 insertions, 246 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 12accb08fe02..1815429a2978 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
| @@ -23,6 +23,10 @@ | |||
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 24 | #include <linux/rwsem.h> | 24 | #include <linux/rwsem.h> |
| 25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
| 26 | #include <linux/seq_file.h> | ||
| 27 | #include <linux/debugfs.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/kallsyms.h> | ||
| 26 | 30 | ||
| 27 | #include "gfs2.h" | 31 | #include "gfs2.h" |
| 28 | #include "incore.h" | 32 | #include "incore.h" |
| @@ -40,20 +44,30 @@ struct gfs2_gl_hash_bucket { | |||
| 40 | struct hlist_head hb_list; | 44 | struct hlist_head hb_list; |
| 41 | }; | 45 | }; |
| 42 | 46 | ||
| 47 | struct glock_iter { | ||
| 48 | int hash; /* hash bucket index */ | ||
| 49 | struct gfs2_sbd *sdp; /* incore superblock */ | ||
| 50 | struct gfs2_glock *gl; /* current glock struct */ | ||
| 51 | struct hlist_head *hb_list; /* current hash bucket ptr */ | ||
| 52 | struct seq_file *seq; /* sequence file for debugfs */ | ||
| 53 | char string[512]; /* scratch space */ | ||
| 54 | }; | ||
| 55 | |||
| 43 | typedef void (*glock_examiner) (struct gfs2_glock * gl); | 56 | typedef void (*glock_examiner) (struct gfs2_glock * gl); |
| 44 | 57 | ||
| 45 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); | 58 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); |
| 46 | static int dump_glock(struct gfs2_glock *gl); | 59 | static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); |
| 47 | static int dump_inode(struct gfs2_inode *ip); | 60 | static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); |
| 48 | static void gfs2_glock_xmote_th(struct gfs2_holder *gh); | ||
| 49 | static void gfs2_glock_drop_th(struct gfs2_glock *gl); | 61 | static void gfs2_glock_drop_th(struct gfs2_glock *gl); |
| 50 | static DECLARE_RWSEM(gfs2_umount_flush_sem); | 62 | static DECLARE_RWSEM(gfs2_umount_flush_sem); |
| 63 | static struct dentry *gfs2_root; | ||
| 51 | 64 | ||
| 52 | #define GFS2_GL_HASH_SHIFT 15 | 65 | #define GFS2_GL_HASH_SHIFT 15 |
| 53 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) | 66 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) |
| 54 | #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) | 67 | #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) |
| 55 | 68 | ||
| 56 | static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE]; | 69 | static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE]; |
| 70 | static struct dentry *gfs2_root; | ||
| 57 | 71 | ||
| 58 | /* | 72 | /* |
| 59 | * Despite what you might think, the numbers below are not arbitrary :-) | 73 | * Despite what you might think, the numbers below are not arbitrary :-) |
| @@ -202,7 +216,6 @@ int gfs2_glock_put(struct gfs2_glock *gl) | |||
| 202 | gfs2_assert(sdp, list_empty(&gl->gl_reclaim)); | 216 | gfs2_assert(sdp, list_empty(&gl->gl_reclaim)); |
| 203 | gfs2_assert(sdp, list_empty(&gl->gl_holders)); | 217 | gfs2_assert(sdp, list_empty(&gl->gl_holders)); |
| 204 | gfs2_assert(sdp, list_empty(&gl->gl_waiters1)); | 218 | gfs2_assert(sdp, list_empty(&gl->gl_waiters1)); |
| 205 | gfs2_assert(sdp, list_empty(&gl->gl_waiters2)); | ||
| 206 | gfs2_assert(sdp, list_empty(&gl->gl_waiters3)); | 219 | gfs2_assert(sdp, list_empty(&gl->gl_waiters3)); |
| 207 | glock_free(gl); | 220 | glock_free(gl); |
| 208 | rv = 1; | 221 | rv = 1; |
| @@ -303,7 +316,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, | |||
| 303 | atomic_set(&gl->gl_ref, 1); | 316 | atomic_set(&gl->gl_ref, 1); |
| 304 | gl->gl_state = LM_ST_UNLOCKED; | 317 | gl->gl_state = LM_ST_UNLOCKED; |
| 305 | gl->gl_hash = hash; | 318 | gl->gl_hash = hash; |
| 306 | gl->gl_owner = NULL; | 319 | gl->gl_owner_pid = 0; |
| 307 | gl->gl_ip = 0; | 320 | gl->gl_ip = 0; |
| 308 | gl->gl_ops = glops; | 321 | gl->gl_ops = glops; |
| 309 | gl->gl_req_gh = NULL; | 322 | gl->gl_req_gh = NULL; |
| @@ -367,7 +380,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, | |||
| 367 | INIT_LIST_HEAD(&gh->gh_list); | 380 | INIT_LIST_HEAD(&gh->gh_list); |
| 368 | gh->gh_gl = gl; | 381 | gh->gh_gl = gl; |
| 369 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | 382 | gh->gh_ip = (unsigned long)__builtin_return_address(0); |
| 370 | gh->gh_owner = current; | 383 | gh->gh_owner_pid = current->pid; |
| 371 | gh->gh_state = state; | 384 | gh->gh_state = state; |
| 372 | gh->gh_flags = flags; | 385 | gh->gh_flags = flags; |
| 373 | gh->gh_error = 0; | 386 | gh->gh_error = 0; |
| @@ -389,7 +402,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder * | |||
| 389 | { | 402 | { |
| 390 | gh->gh_state = state; | 403 | gh->gh_state = state; |
| 391 | gh->gh_flags = flags; | 404 | gh->gh_flags = flags; |
| 392 | gh->gh_iflags &= 1 << HIF_ALLOCED; | 405 | gh->gh_iflags = 0; |
| 393 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | 406 | gh->gh_ip = (unsigned long)__builtin_return_address(0); |
| 394 | } | 407 | } |
| 395 | 408 | ||
| @@ -406,54 +419,8 @@ void gfs2_holder_uninit(struct gfs2_holder *gh) | |||
| 406 | gh->gh_ip = 0; | 419 | gh->gh_ip = 0; |
| 407 | } | 420 | } |
| 408 | 421 | ||
| 409 | /** | 422 | static void gfs2_holder_wake(struct gfs2_holder *gh) |
| 410 | * gfs2_holder_get - get a struct gfs2_holder structure | ||
| 411 | * @gl: the glock | ||
| 412 | * @state: the state we're requesting | ||
| 413 | * @flags: the modifier flags | ||
| 414 | * @gfp_flags: | ||
| 415 | * | ||
| 416 | * Figure out how big an impact this function has. Either: | ||
| 417 | * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd | ||
| 418 | * 2) Leave it like it is | ||
| 419 | * | ||
| 420 | * Returns: the holder structure, NULL on ENOMEM | ||
| 421 | */ | ||
| 422 | |||
| 423 | static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, | ||
| 424 | unsigned int state, | ||
| 425 | int flags, gfp_t gfp_flags) | ||
| 426 | { | ||
| 427 | struct gfs2_holder *gh; | ||
| 428 | |||
| 429 | gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags); | ||
| 430 | if (!gh) | ||
| 431 | return NULL; | ||
| 432 | |||
| 433 | gfs2_holder_init(gl, state, flags, gh); | ||
| 434 | set_bit(HIF_ALLOCED, &gh->gh_iflags); | ||
| 435 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | ||
| 436 | return gh; | ||
| 437 | } | ||
| 438 | |||
| 439 | /** | ||
| 440 | * gfs2_holder_put - get rid of a struct gfs2_holder structure | ||
| 441 | * @gh: the holder structure | ||
| 442 | * | ||
| 443 | */ | ||
| 444 | |||
| 445 | static void gfs2_holder_put(struct gfs2_holder *gh) | ||
| 446 | { | 423 | { |
| 447 | gfs2_holder_uninit(gh); | ||
| 448 | kfree(gh); | ||
| 449 | } | ||
| 450 | |||
| 451 | static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh) | ||
| 452 | { | ||
| 453 | if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) { | ||
| 454 | gfs2_holder_put(gh); | ||
| 455 | return; | ||
| 456 | } | ||
| 457 | clear_bit(HIF_WAIT, &gh->gh_iflags); | 424 | clear_bit(HIF_WAIT, &gh->gh_iflags); |
| 458 | smp_mb(); | 425 | smp_mb(); |
| 459 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); | 426 | wake_up_bit(&gh->gh_iflags, HIF_WAIT); |
| @@ -519,7 +486,7 @@ static int rq_promote(struct gfs2_holder *gh) | |||
| 519 | gfs2_reclaim_glock(sdp); | 486 | gfs2_reclaim_glock(sdp); |
| 520 | } | 487 | } |
| 521 | 488 | ||
| 522 | gfs2_glock_xmote_th(gh); | 489 | gfs2_glock_xmote_th(gh->gh_gl, gh); |
| 523 | spin_lock(&gl->gl_spin); | 490 | spin_lock(&gl->gl_spin); |
| 524 | } | 491 | } |
| 525 | return 1; | 492 | return 1; |
| @@ -542,7 +509,7 @@ static int rq_promote(struct gfs2_holder *gh) | |||
| 542 | gh->gh_error = 0; | 509 | gh->gh_error = 0; |
| 543 | set_bit(HIF_HOLDER, &gh->gh_iflags); | 510 | set_bit(HIF_HOLDER, &gh->gh_iflags); |
| 544 | 511 | ||
| 545 | gfs2_holder_dispose_or_wake(gh); | 512 | gfs2_holder_wake(gh); |
| 546 | 513 | ||
| 547 | return 0; | 514 | return 0; |
| 548 | } | 515 | } |
| @@ -554,32 +521,24 @@ static int rq_promote(struct gfs2_holder *gh) | |||
| 554 | * Returns: 1 if the queue is blocked | 521 | * Returns: 1 if the queue is blocked |
| 555 | */ | 522 | */ |
| 556 | 523 | ||
| 557 | static int rq_demote(struct gfs2_holder *gh) | 524 | static int rq_demote(struct gfs2_glock *gl) |
| 558 | { | 525 | { |
| 559 | struct gfs2_glock *gl = gh->gh_gl; | ||
| 560 | |||
| 561 | if (!list_empty(&gl->gl_holders)) | 526 | if (!list_empty(&gl->gl_holders)) |
| 562 | return 1; | 527 | return 1; |
| 563 | 528 | ||
| 564 | if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) { | 529 | if (gl->gl_state == gl->gl_demote_state || |
| 565 | list_del_init(&gh->gh_list); | 530 | gl->gl_state == LM_ST_UNLOCKED) { |
| 566 | gh->gh_error = 0; | 531 | clear_bit(GLF_DEMOTE, &gl->gl_flags); |
| 567 | spin_unlock(&gl->gl_spin); | 532 | return 0; |
| 568 | gfs2_holder_dispose_or_wake(gh); | ||
| 569 | spin_lock(&gl->gl_spin); | ||
| 570 | } else { | ||
| 571 | gl->gl_req_gh = gh; | ||
| 572 | set_bit(GLF_LOCK, &gl->gl_flags); | ||
| 573 | spin_unlock(&gl->gl_spin); | ||
| 574 | |||
| 575 | if (gh->gh_state == LM_ST_UNLOCKED || | ||
| 576 | gl->gl_state != LM_ST_EXCLUSIVE) | ||
| 577 | gfs2_glock_drop_th(gl); | ||
| 578 | else | ||
| 579 | gfs2_glock_xmote_th(gh); | ||
| 580 | |||
| 581 | spin_lock(&gl->gl_spin); | ||
| 582 | } | 533 | } |
| 534 | set_bit(GLF_LOCK, &gl->gl_flags); | ||
| 535 | spin_unlock(&gl->gl_spin); | ||
| 536 | if (gl->gl_demote_state == LM_ST_UNLOCKED || | ||
| 537 | gl->gl_state != LM_ST_EXCLUSIVE) | ||
| 538 | gfs2_glock_drop_th(gl); | ||
| 539 | else | ||
| 540 | gfs2_glock_xmote_th(gl, NULL); | ||
| 541 | spin_lock(&gl->gl_spin); | ||
| 583 | 542 | ||
| 584 | return 0; | 543 | return 0; |
| 585 | } | 544 | } |
| @@ -607,16 +566,8 @@ static void run_queue(struct gfs2_glock *gl) | |||
| 607 | else | 566 | else |
| 608 | gfs2_assert_warn(gl->gl_sbd, 0); | 567 | gfs2_assert_warn(gl->gl_sbd, 0); |
| 609 | 568 | ||
| 610 | } else if (!list_empty(&gl->gl_waiters2) && | 569 | } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { |
| 611 | !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) { | 570 | blocked = rq_demote(gl); |
| 612 | gh = list_entry(gl->gl_waiters2.next, | ||
| 613 | struct gfs2_holder, gh_list); | ||
| 614 | |||
| 615 | if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) | ||
| 616 | blocked = rq_demote(gh); | ||
| 617 | else | ||
| 618 | gfs2_assert_warn(gl->gl_sbd, 0); | ||
| 619 | |||
| 620 | } else if (!list_empty(&gl->gl_waiters3)) { | 571 | } else if (!list_empty(&gl->gl_waiters3)) { |
| 621 | gh = list_entry(gl->gl_waiters3.next, | 572 | gh = list_entry(gl->gl_waiters3.next, |
| 622 | struct gfs2_holder, gh_list); | 573 | struct gfs2_holder, gh_list); |
| @@ -654,7 +605,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl) | |||
| 654 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 605 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
| 655 | list_add_tail(&gh.gh_list, &gl->gl_waiters1); | 606 | list_add_tail(&gh.gh_list, &gl->gl_waiters1); |
| 656 | } else { | 607 | } else { |
| 657 | gl->gl_owner = current; | 608 | gl->gl_owner_pid = current->pid; |
| 658 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 609 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
| 659 | clear_bit(HIF_WAIT, &gh.gh_iflags); | 610 | clear_bit(HIF_WAIT, &gh.gh_iflags); |
| 660 | smp_mb(); | 611 | smp_mb(); |
| @@ -681,7 +632,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl) | |||
| 681 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 632 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
| 682 | acquired = 0; | 633 | acquired = 0; |
| 683 | } else { | 634 | } else { |
| 684 | gl->gl_owner = current; | 635 | gl->gl_owner_pid = current->pid; |
| 685 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 636 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
| 686 | } | 637 | } |
| 687 | spin_unlock(&gl->gl_spin); | 638 | spin_unlock(&gl->gl_spin); |
| @@ -699,7 +650,7 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl) | |||
| 699 | { | 650 | { |
| 700 | spin_lock(&gl->gl_spin); | 651 | spin_lock(&gl->gl_spin); |
| 701 | clear_bit(GLF_LOCK, &gl->gl_flags); | 652 | clear_bit(GLF_LOCK, &gl->gl_flags); |
| 702 | gl->gl_owner = NULL; | 653 | gl->gl_owner_pid = 0; |
| 703 | gl->gl_ip = 0; | 654 | gl->gl_ip = 0; |
| 704 | run_queue(gl); | 655 | run_queue(gl); |
| 705 | BUG_ON(!spin_is_locked(&gl->gl_spin)); | 656 | BUG_ON(!spin_is_locked(&gl->gl_spin)); |
| @@ -707,50 +658,24 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl) | |||
| 707 | } | 658 | } |
| 708 | 659 | ||
| 709 | /** | 660 | /** |
| 710 | * handle_callback - add a demote request to a lock's queue | 661 | * handle_callback - process a demote request |
| 711 | * @gl: the glock | 662 | * @gl: the glock |
| 712 | * @state: the state the caller wants us to change to | 663 | * @state: the state the caller wants us to change to |
| 713 | * | 664 | * |
| 714 | * Note: This may fail sliently if we are out of memory. | 665 | * There are only two requests that we are going to see in actual |
| 666 | * practise: LM_ST_SHARED and LM_ST_UNLOCKED | ||
| 715 | */ | 667 | */ |
| 716 | 668 | ||
| 717 | static void handle_callback(struct gfs2_glock *gl, unsigned int state) | 669 | static void handle_callback(struct gfs2_glock *gl, unsigned int state) |
| 718 | { | 670 | { |
| 719 | struct gfs2_holder *gh, *new_gh = NULL; | ||
| 720 | |||
| 721 | restart: | ||
| 722 | spin_lock(&gl->gl_spin); | 671 | spin_lock(&gl->gl_spin); |
| 723 | 672 | if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) { | |
| 724 | list_for_each_entry(gh, &gl->gl_waiters2, gh_list) { | 673 | gl->gl_demote_state = state; |
| 725 | if (test_bit(HIF_DEMOTE, &gh->gh_iflags) && | 674 | gl->gl_demote_time = jiffies; |
| 726 | gl->gl_req_gh != gh) { | 675 | } else if (gl->gl_demote_state != LM_ST_UNLOCKED) { |
| 727 | if (gh->gh_state != state) | 676 | gl->gl_demote_state = state; |
| 728 | gh->gh_state = LM_ST_UNLOCKED; | ||
| 729 | goto out; | ||
| 730 | } | ||
| 731 | } | ||
| 732 | |||
| 733 | if (new_gh) { | ||
| 734 | list_add_tail(&new_gh->gh_list, &gl->gl_waiters2); | ||
| 735 | new_gh = NULL; | ||
| 736 | } else { | ||
| 737 | spin_unlock(&gl->gl_spin); | ||
| 738 | |||
| 739 | new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS); | ||
| 740 | if (!new_gh) | ||
| 741 | return; | ||
| 742 | set_bit(HIF_DEMOTE, &new_gh->gh_iflags); | ||
| 743 | set_bit(HIF_DEALLOC, &new_gh->gh_iflags); | ||
| 744 | set_bit(HIF_WAIT, &new_gh->gh_iflags); | ||
| 745 | |||
| 746 | goto restart; | ||
| 747 | } | 677 | } |
| 748 | |||
| 749 | out: | ||
| 750 | spin_unlock(&gl->gl_spin); | 678 | spin_unlock(&gl->gl_spin); |
| 751 | |||
| 752 | if (new_gh) | ||
| 753 | gfs2_holder_put(new_gh); | ||
| 754 | } | 679 | } |
| 755 | 680 | ||
| 756 | /** | 681 | /** |
| @@ -810,56 +735,37 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
| 810 | 735 | ||
| 811 | /* Deal with each possible exit condition */ | 736 | /* Deal with each possible exit condition */ |
| 812 | 737 | ||
| 813 | if (!gh) | 738 | if (!gh) { |
| 814 | gl->gl_stamp = jiffies; | 739 | gl->gl_stamp = jiffies; |
| 815 | else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { | 740 | if (ret & LM_OUT_CANCELED) |
| 741 | op_done = 0; | ||
| 742 | else | ||
| 743 | clear_bit(GLF_DEMOTE, &gl->gl_flags); | ||
| 744 | } else { | ||
| 816 | spin_lock(&gl->gl_spin); | 745 | spin_lock(&gl->gl_spin); |
| 817 | list_del_init(&gh->gh_list); | 746 | list_del_init(&gh->gh_list); |
| 818 | gh->gh_error = -EIO; | 747 | gh->gh_error = -EIO; |
| 819 | spin_unlock(&gl->gl_spin); | 748 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) |
| 820 | } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) { | 749 | goto out; |
| 821 | spin_lock(&gl->gl_spin); | 750 | gh->gh_error = GLR_CANCELED; |
| 822 | list_del_init(&gh->gh_list); | 751 | if (ret & LM_OUT_CANCELED) |
| 823 | if (gl->gl_state == gh->gh_state || | 752 | goto out; |
| 824 | gl->gl_state == LM_ST_UNLOCKED) { | 753 | if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { |
| 754 | list_add_tail(&gh->gh_list, &gl->gl_holders); | ||
| 825 | gh->gh_error = 0; | 755 | gh->gh_error = 0; |
| 826 | } else { | 756 | set_bit(HIF_HOLDER, &gh->gh_iflags); |
| 827 | if (gfs2_assert_warn(sdp, gh->gh_flags & | 757 | set_bit(HIF_FIRST, &gh->gh_iflags); |
| 828 | (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1) | 758 | op_done = 0; |
| 829 | fs_warn(sdp, "ret = 0x%.8X\n", ret); | 759 | goto out; |
| 830 | gh->gh_error = GLR_TRYFAILED; | ||
| 831 | } | 760 | } |
| 832 | spin_unlock(&gl->gl_spin); | ||
| 833 | |||
| 834 | if (ret & LM_OUT_CANCELED) | ||
| 835 | handle_callback(gl, LM_ST_UNLOCKED); | ||
| 836 | |||
| 837 | } else if (ret & LM_OUT_CANCELED) { | ||
| 838 | spin_lock(&gl->gl_spin); | ||
| 839 | list_del_init(&gh->gh_list); | ||
| 840 | gh->gh_error = GLR_CANCELED; | ||
| 841 | spin_unlock(&gl->gl_spin); | ||
| 842 | |||
| 843 | } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { | ||
| 844 | spin_lock(&gl->gl_spin); | ||
| 845 | list_move_tail(&gh->gh_list, &gl->gl_holders); | ||
| 846 | gh->gh_error = 0; | ||
| 847 | set_bit(HIF_HOLDER, &gh->gh_iflags); | ||
| 848 | spin_unlock(&gl->gl_spin); | ||
| 849 | |||
| 850 | set_bit(HIF_FIRST, &gh->gh_iflags); | ||
| 851 | |||
| 852 | op_done = 0; | ||
| 853 | |||
| 854 | } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { | ||
| 855 | spin_lock(&gl->gl_spin); | ||
| 856 | list_del_init(&gh->gh_list); | ||
| 857 | gh->gh_error = GLR_TRYFAILED; | 761 | gh->gh_error = GLR_TRYFAILED; |
| 858 | spin_unlock(&gl->gl_spin); | 762 | if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) |
| 859 | 763 | goto out; | |
| 860 | } else { | 764 | gh->gh_error = -EINVAL; |
| 861 | if (gfs2_assert_withdraw(sdp, 0) == -1) | 765 | if (gfs2_assert_withdraw(sdp, 0) == -1) |
| 862 | fs_err(sdp, "ret = 0x%.8X\n", ret); | 766 | fs_err(sdp, "ret = 0x%.8X\n", ret); |
| 767 | out: | ||
| 768 | spin_unlock(&gl->gl_spin); | ||
| 863 | } | 769 | } |
| 864 | 770 | ||
| 865 | if (glops->go_xmote_bh) | 771 | if (glops->go_xmote_bh) |
| @@ -877,7 +783,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
| 877 | gfs2_glock_put(gl); | 783 | gfs2_glock_put(gl); |
| 878 | 784 | ||
| 879 | if (gh) | 785 | if (gh) |
| 880 | gfs2_holder_dispose_or_wake(gh); | 786 | gfs2_holder_wake(gh); |
| 881 | } | 787 | } |
| 882 | 788 | ||
| 883 | /** | 789 | /** |
| @@ -888,12 +794,11 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) | |||
| 888 | * | 794 | * |
| 889 | */ | 795 | */ |
| 890 | 796 | ||
| 891 | void gfs2_glock_xmote_th(struct gfs2_holder *gh) | 797 | void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh) |
| 892 | { | 798 | { |
| 893 | struct gfs2_glock *gl = gh->gh_gl; | ||
| 894 | struct gfs2_sbd *sdp = gl->gl_sbd; | 799 | struct gfs2_sbd *sdp = gl->gl_sbd; |
| 895 | int flags = gh->gh_flags; | 800 | int flags = gh ? gh->gh_flags : 0; |
| 896 | unsigned state = gh->gh_state; | 801 | unsigned state = gh ? gh->gh_state : gl->gl_demote_state; |
| 897 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 802 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
| 898 | int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | | 803 | int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | |
| 899 | LM_FLAG_NOEXP | LM_FLAG_ANY | | 804 | LM_FLAG_NOEXP | LM_FLAG_ANY | |
| @@ -943,6 +848,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) | |||
| 943 | gfs2_assert_warn(sdp, !ret); | 848 | gfs2_assert_warn(sdp, !ret); |
| 944 | 849 | ||
| 945 | state_change(gl, LM_ST_UNLOCKED); | 850 | state_change(gl, LM_ST_UNLOCKED); |
| 851 | clear_bit(GLF_DEMOTE, &gl->gl_flags); | ||
| 946 | 852 | ||
| 947 | if (glops->go_inval) | 853 | if (glops->go_inval) |
| 948 | glops->go_inval(gl, DIO_METADATA); | 854 | glops->go_inval(gl, DIO_METADATA); |
| @@ -964,7 +870,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) | |||
| 964 | gfs2_glock_put(gl); | 870 | gfs2_glock_put(gl); |
| 965 | 871 | ||
| 966 | if (gh) | 872 | if (gh) |
| 967 | gfs2_holder_dispose_or_wake(gh); | 873 | gfs2_holder_wake(gh); |
| 968 | } | 874 | } |
| 969 | 875 | ||
| 970 | /** | 876 | /** |
| @@ -1097,18 +1003,32 @@ static int glock_wait_internal(struct gfs2_holder *gh) | |||
| 1097 | } | 1003 | } |
| 1098 | 1004 | ||
| 1099 | static inline struct gfs2_holder * | 1005 | static inline struct gfs2_holder * |
| 1100 | find_holder_by_owner(struct list_head *head, struct task_struct *owner) | 1006 | find_holder_by_owner(struct list_head *head, pid_t pid) |
| 1101 | { | 1007 | { |
| 1102 | struct gfs2_holder *gh; | 1008 | struct gfs2_holder *gh; |
| 1103 | 1009 | ||
| 1104 | list_for_each_entry(gh, head, gh_list) { | 1010 | list_for_each_entry(gh, head, gh_list) { |
| 1105 | if (gh->gh_owner == owner) | 1011 | if (gh->gh_owner_pid == pid) |
| 1106 | return gh; | 1012 | return gh; |
| 1107 | } | 1013 | } |
| 1108 | 1014 | ||
| 1109 | return NULL; | 1015 | return NULL; |
| 1110 | } | 1016 | } |
| 1111 | 1017 | ||
| 1018 | static void print_dbg(struct glock_iter *gi, const char *fmt, ...) | ||
| 1019 | { | ||
| 1020 | va_list args; | ||
| 1021 | |||
| 1022 | va_start(args, fmt); | ||
| 1023 | if (gi) { | ||
| 1024 | vsprintf(gi->string, fmt, args); | ||
| 1025 | seq_printf(gi->seq, gi->string); | ||
| 1026 | } | ||
| 1027 | else | ||
| 1028 | vprintk(fmt, args); | ||
| 1029 | va_end(args); | ||
| 1030 | } | ||
| 1031 | |||
| 1112 | /** | 1032 | /** |
| 1113 | * add_to_queue - Add a holder to the wait queue (but look for recursion) | 1033 | * add_to_queue - Add a holder to the wait queue (but look for recursion) |
| 1114 | * @gh: the holder structure to add | 1034 | * @gh: the holder structure to add |
| @@ -1120,24 +1040,24 @@ static void add_to_queue(struct gfs2_holder *gh) | |||
| 1120 | struct gfs2_glock *gl = gh->gh_gl; | 1040 | struct gfs2_glock *gl = gh->gh_gl; |
| 1121 | struct gfs2_holder *existing; | 1041 | struct gfs2_holder *existing; |
| 1122 | 1042 | ||
| 1123 | BUG_ON(!gh->gh_owner); | 1043 | BUG_ON(!gh->gh_owner_pid); |
| 1124 | if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) | 1044 | if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) |
| 1125 | BUG(); | 1045 | BUG(); |
| 1126 | 1046 | ||
| 1127 | existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); | 1047 | existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid); |
| 1128 | if (existing) { | 1048 | if (existing) { |
| 1129 | print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip); | 1049 | print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip); |
| 1130 | printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid); | 1050 | printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid); |
| 1131 | printk(KERN_INFO "lock type : %d lock state : %d\n", | 1051 | printk(KERN_INFO "lock type : %d lock state : %d\n", |
| 1132 | existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state); | 1052 | existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state); |
| 1133 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); | 1053 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); |
| 1134 | printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid); | 1054 | printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid); |
| 1135 | printk(KERN_INFO "lock type : %d lock state : %d\n", | 1055 | printk(KERN_INFO "lock type : %d lock state : %d\n", |
| 1136 | gl->gl_name.ln_type, gl->gl_state); | 1056 | gl->gl_name.ln_type, gl->gl_state); |
| 1137 | BUG(); | 1057 | BUG(); |
| 1138 | } | 1058 | } |
| 1139 | 1059 | ||
| 1140 | existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner); | 1060 | existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid); |
| 1141 | if (existing) { | 1061 | if (existing) { |
| 1142 | print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip); | 1062 | print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip); |
| 1143 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); | 1063 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); |
| @@ -1267,9 +1187,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh) | |||
| 1267 | if (glops->go_unlock) | 1187 | if (glops->go_unlock) |
| 1268 | glops->go_unlock(gh); | 1188 | glops->go_unlock(gh); |
| 1269 | 1189 | ||
| 1270 | gl->gl_stamp = jiffies; | ||
| 1271 | |||
| 1272 | spin_lock(&gl->gl_spin); | 1190 | spin_lock(&gl->gl_spin); |
| 1191 | gl->gl_stamp = jiffies; | ||
| 1273 | } | 1192 | } |
| 1274 | 1193 | ||
| 1275 | clear_bit(GLF_LOCK, &gl->gl_flags); | 1194 | clear_bit(GLF_LOCK, &gl->gl_flags); |
| @@ -1841,6 +1760,15 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |||
| 1841 | * Diagnostic routines to help debug distributed deadlock | 1760 | * Diagnostic routines to help debug distributed deadlock |
| 1842 | */ | 1761 | */ |
| 1843 | 1762 | ||
| 1763 | static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, | ||
| 1764 | unsigned long address) | ||
| 1765 | { | ||
| 1766 | char buffer[KSYM_SYMBOL_LEN]; | ||
| 1767 | |||
| 1768 | sprint_symbol(buffer, address); | ||
| 1769 | print_dbg(gi, fmt, buffer); | ||
| 1770 | } | ||
| 1771 | |||
| 1844 | /** | 1772 | /** |
| 1845 | * dump_holder - print information about a glock holder | 1773 | * dump_holder - print information about a glock holder |
| 1846 | * @str: a string naming the type of holder | 1774 | * @str: a string naming the type of holder |
| @@ -1849,31 +1777,37 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |||
| 1849 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1777 | * Returns: 0 on success, -ENOBUFS when we run out of space |
| 1850 | */ | 1778 | */ |
| 1851 | 1779 | ||
| 1852 | static int dump_holder(char *str, struct gfs2_holder *gh) | 1780 | static int dump_holder(struct glock_iter *gi, char *str, |
| 1781 | struct gfs2_holder *gh) | ||
| 1853 | { | 1782 | { |
| 1854 | unsigned int x; | 1783 | unsigned int x; |
| 1855 | int error = -ENOBUFS; | 1784 | struct task_struct *gh_owner; |
| 1856 | 1785 | ||
| 1857 | printk(KERN_INFO " %s\n", str); | 1786 | print_dbg(gi, " %s\n", str); |
| 1858 | printk(KERN_INFO " owner = %ld\n", | 1787 | if (gh->gh_owner_pid) { |
| 1859 | (gh->gh_owner) ? (long)gh->gh_owner->pid : -1); | 1788 | print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid); |
| 1860 | printk(KERN_INFO " gh_state = %u\n", gh->gh_state); | 1789 | gh_owner = find_task_by_pid(gh->gh_owner_pid); |
| 1861 | printk(KERN_INFO " gh_flags ="); | 1790 | if (gh_owner) |
| 1791 | print_dbg(gi, "(%s)\n", gh_owner->comm); | ||
| 1792 | else | ||
| 1793 | print_dbg(gi, "(ended)\n"); | ||
| 1794 | } else | ||
| 1795 | print_dbg(gi, " owner = -1\n"); | ||
| 1796 | print_dbg(gi, " gh_state = %u\n", gh->gh_state); | ||
| 1797 | print_dbg(gi, " gh_flags ="); | ||
| 1862 | for (x = 0; x < 32; x++) | 1798 | for (x = 0; x < 32; x++) |
| 1863 | if (gh->gh_flags & (1 << x)) | 1799 | if (gh->gh_flags & (1 << x)) |
| 1864 | printk(" %u", x); | 1800 | print_dbg(gi, " %u", x); |
| 1865 | printk(" \n"); | 1801 | print_dbg(gi, " \n"); |
| 1866 | printk(KERN_INFO " error = %d\n", gh->gh_error); | 1802 | print_dbg(gi, " error = %d\n", gh->gh_error); |
| 1867 | printk(KERN_INFO " gh_iflags ="); | 1803 | print_dbg(gi, " gh_iflags ="); |
| 1868 | for (x = 0; x < 32; x++) | 1804 | for (x = 0; x < 32; x++) |
| 1869 | if (test_bit(x, &gh->gh_iflags)) | 1805 | if (test_bit(x, &gh->gh_iflags)) |
| 1870 | printk(" %u", x); | 1806 | print_dbg(gi, " %u", x); |
| 1871 | printk(" \n"); | 1807 | print_dbg(gi, " \n"); |
| 1872 | print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip); | 1808 | gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip); |
| 1873 | |||
| 1874 | error = 0; | ||
| 1875 | 1809 | ||
| 1876 | return error; | 1810 | return 0; |
| 1877 | } | 1811 | } |
| 1878 | 1812 | ||
| 1879 | /** | 1813 | /** |
| @@ -1883,25 +1817,20 @@ static int dump_holder(char *str, struct gfs2_holder *gh) | |||
| 1883 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1817 | * Returns: 0 on success, -ENOBUFS when we run out of space |
| 1884 | */ | 1818 | */ |
| 1885 | 1819 | ||
| 1886 | static int dump_inode(struct gfs2_inode *ip) | 1820 | static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) |
| 1887 | { | 1821 | { |
| 1888 | unsigned int x; | 1822 | unsigned int x; |
| 1889 | int error = -ENOBUFS; | ||
| 1890 | 1823 | ||
| 1891 | printk(KERN_INFO " Inode:\n"); | 1824 | print_dbg(gi, " Inode:\n"); |
| 1892 | printk(KERN_INFO " num = %llu %llu\n", | 1825 | print_dbg(gi, " num = %llu/%llu\n", |
| 1893 | (unsigned long long)ip->i_num.no_formal_ino, | 1826 | ip->i_num.no_formal_ino, ip->i_num.no_addr); |
| 1894 | (unsigned long long)ip->i_num.no_addr); | 1827 | print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); |
| 1895 | printk(KERN_INFO " type = %u\n", IF2DT(ip->i_inode.i_mode)); | 1828 | print_dbg(gi, " i_flags ="); |
| 1896 | printk(KERN_INFO " i_flags ="); | ||
| 1897 | for (x = 0; x < 32; x++) | 1829 | for (x = 0; x < 32; x++) |
| 1898 | if (test_bit(x, &ip->i_flags)) | 1830 | if (test_bit(x, &ip->i_flags)) |
| 1899 | printk(" %u", x); | 1831 | print_dbg(gi, " %u", x); |
| 1900 | printk(" \n"); | 1832 | print_dbg(gi, " \n"); |
| 1901 | 1833 | return 0; | |
| 1902 | error = 0; | ||
| 1903 | |||
| 1904 | return error; | ||
| 1905 | } | 1834 | } |
| 1906 | 1835 | ||
| 1907 | /** | 1836 | /** |
| @@ -1912,74 +1841,86 @@ static int dump_inode(struct gfs2_inode *ip) | |||
| 1912 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1841 | * Returns: 0 on success, -ENOBUFS when we run out of space |
| 1913 | */ | 1842 | */ |
| 1914 | 1843 | ||
| 1915 | static int dump_glock(struct gfs2_glock *gl) | 1844 | static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) |
| 1916 | { | 1845 | { |
| 1917 | struct gfs2_holder *gh; | 1846 | struct gfs2_holder *gh; |
| 1918 | unsigned int x; | 1847 | unsigned int x; |
| 1919 | int error = -ENOBUFS; | 1848 | int error = -ENOBUFS; |
| 1849 | struct task_struct *gl_owner; | ||
| 1920 | 1850 | ||
| 1921 | spin_lock(&gl->gl_spin); | 1851 | spin_lock(&gl->gl_spin); |
| 1922 | 1852 | ||
| 1923 | printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type, | 1853 | print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type, |
| 1924 | (unsigned long long)gl->gl_name.ln_number); | 1854 | (unsigned long long)gl->gl_name.ln_number); |
| 1925 | printk(KERN_INFO " gl_flags ="); | 1855 | print_dbg(gi, " gl_flags ="); |
| 1926 | for (x = 0; x < 32; x++) { | 1856 | for (x = 0; x < 32; x++) { |
| 1927 | if (test_bit(x, &gl->gl_flags)) | 1857 | if (test_bit(x, &gl->gl_flags)) |
| 1928 | printk(" %u", x); | 1858 | print_dbg(gi, " %u", x); |
| 1929 | } | 1859 | } |
| 1930 | printk(" \n"); | 1860 | if (!test_bit(GLF_LOCK, &gl->gl_flags)) |
| 1931 | printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref)); | 1861 | print_dbg(gi, " (unlocked)"); |
| 1932 | printk(KERN_INFO " gl_state = %u\n", gl->gl_state); | 1862 | print_dbg(gi, " \n"); |
| 1933 | printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm); | 1863 | print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); |
| 1934 | print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip); | 1864 | print_dbg(gi, " gl_state = %u\n", gl->gl_state); |
| 1935 | printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); | 1865 | if (gl->gl_owner_pid) { |
| 1936 | printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); | 1866 | gl_owner = find_task_by_pid(gl->gl_owner_pid); |
| 1937 | printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); | 1867 | if (gl_owner) |
| 1938 | printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no"); | 1868 | print_dbg(gi, " gl_owner = pid %d (%s)\n", |
| 1939 | printk(KERN_INFO " le = %s\n", | 1869 | gl->gl_owner_pid, gl_owner->comm); |
| 1870 | else | ||
| 1871 | print_dbg(gi, " gl_owner = %d (ended)\n", | ||
| 1872 | gl->gl_owner_pid); | ||
| 1873 | } else | ||
| 1874 | print_dbg(gi, " gl_owner = -1\n"); | ||
| 1875 | print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); | ||
| 1876 | print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); | ||
| 1877 | print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); | ||
| 1878 | print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); | ||
| 1879 | print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); | ||
| 1880 | print_dbg(gi, " le = %s\n", | ||
| 1940 | (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); | 1881 | (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); |
| 1941 | printk(KERN_INFO " reclaim = %s\n", | 1882 | print_dbg(gi, " reclaim = %s\n", |
| 1942 | (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); | 1883 | (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); |
| 1943 | if (gl->gl_aspace) | 1884 | if (gl->gl_aspace) |
| 1944 | printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, | 1885 | print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, |
| 1945 | gl->gl_aspace->i_mapping->nrpages); | 1886 | gl->gl_aspace->i_mapping->nrpages); |
| 1946 | else | 1887 | else |
| 1947 | printk(KERN_INFO " aspace = no\n"); | 1888 | print_dbg(gi, " aspace = no\n"); |
| 1948 | printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count)); | 1889 | print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); |
| 1949 | if (gl->gl_req_gh) { | 1890 | if (gl->gl_req_gh) { |
| 1950 | error = dump_holder("Request", gl->gl_req_gh); | 1891 | error = dump_holder(gi, "Request", gl->gl_req_gh); |
| 1951 | if (error) | 1892 | if (error) |
| 1952 | goto out; | 1893 | goto out; |
| 1953 | } | 1894 | } |
| 1954 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | 1895 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { |
| 1955 | error = dump_holder("Holder", gh); | 1896 | error = dump_holder(gi, "Holder", gh); |
| 1956 | if (error) | 1897 | if (error) |
| 1957 | goto out; | 1898 | goto out; |
| 1958 | } | 1899 | } |
| 1959 | list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { | 1900 | list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { |
| 1960 | error = dump_holder("Waiter1", gh); | 1901 | error = dump_holder(gi, "Waiter1", gh); |
| 1961 | if (error) | ||
| 1962 | goto out; | ||
| 1963 | } | ||
| 1964 | list_for_each_entry(gh, &gl->gl_waiters2, gh_list) { | ||
| 1965 | error = dump_holder("Waiter2", gh); | ||
| 1966 | if (error) | 1902 | if (error) |
| 1967 | goto out; | 1903 | goto out; |
| 1968 | } | 1904 | } |
| 1969 | list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { | 1905 | list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { |
| 1970 | error = dump_holder("Waiter3", gh); | 1906 | error = dump_holder(gi, "Waiter3", gh); |
| 1971 | if (error) | 1907 | if (error) |
| 1972 | goto out; | 1908 | goto out; |
| 1973 | } | 1909 | } |
| 1910 | if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { | ||
| 1911 | print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n", | ||
| 1912 | gl->gl_demote_state, | ||
| 1913 | (u64)(jiffies - gl->gl_demote_time)*(1000000/HZ)); | ||
| 1914 | } | ||
| 1974 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { | 1915 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { |
| 1975 | if (!test_bit(GLF_LOCK, &gl->gl_flags) && | 1916 | if (!test_bit(GLF_LOCK, &gl->gl_flags) && |
| 1976 | list_empty(&gl->gl_holders)) { | 1917 | list_empty(&gl->gl_holders)) { |
| 1977 | error = dump_inode(gl->gl_object); | 1918 | error = dump_inode(gi, gl->gl_object); |
| 1978 | if (error) | 1919 | if (error) |
| 1979 | goto out; | 1920 | goto out; |
| 1980 | } else { | 1921 | } else { |
| 1981 | error = -ENOBUFS; | 1922 | error = -ENOBUFS; |
| 1982 | printk(KERN_INFO " Inode: busy\n"); | 1923 | print_dbg(gi, " Inode: busy\n"); |
| 1983 | } | 1924 | } |
| 1984 | } | 1925 | } |
| 1985 | 1926 | ||
| @@ -2014,7 +1955,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp) | |||
| 2014 | if (gl->gl_sbd != sdp) | 1955 | if (gl->gl_sbd != sdp) |
| 2015 | continue; | 1956 | continue; |
| 2016 | 1957 | ||
| 2017 | error = dump_glock(gl); | 1958 | error = dump_glock(NULL, gl); |
| 2018 | if (error) | 1959 | if (error) |
| 2019 | break; | 1960 | break; |
| 2020 | } | 1961 | } |
| @@ -2043,3 +1984,189 @@ int __init gfs2_glock_init(void) | |||
| 2043 | return 0; | 1984 | return 0; |
| 2044 | } | 1985 | } |
| 2045 | 1986 | ||
| 1987 | static int gfs2_glock_iter_next(struct glock_iter *gi) | ||
| 1988 | { | ||
| 1989 | read_lock(gl_lock_addr(gi->hash)); | ||
| 1990 | while (1) { | ||
| 1991 | if (!gi->hb_list) { /* If we don't have a hash bucket yet */ | ||
| 1992 | gi->hb_list = &gl_hash_table[gi->hash].hb_list; | ||
| 1993 | if (hlist_empty(gi->hb_list)) { | ||
| 1994 | read_unlock(gl_lock_addr(gi->hash)); | ||
| 1995 | gi->hash++; | ||
| 1996 | read_lock(gl_lock_addr(gi->hash)); | ||
| 1997 | gi->hb_list = NULL; | ||
| 1998 | if (gi->hash >= GFS2_GL_HASH_SIZE) { | ||
| 1999 | read_unlock(gl_lock_addr(gi->hash)); | ||
| 2000 | return 1; | ||
| 2001 | } | ||
| 2002 | else | ||
| 2003 | continue; | ||
| 2004 | } | ||
| 2005 | if (!hlist_empty(gi->hb_list)) { | ||
| 2006 | gi->gl = list_entry(gi->hb_list->first, | ||
| 2007 | struct gfs2_glock, | ||
| 2008 | gl_list); | ||
| 2009 | } | ||
| 2010 | } else { | ||
| 2011 | if (gi->gl->gl_list.next == NULL) { | ||
| 2012 | read_unlock(gl_lock_addr(gi->hash)); | ||
| 2013 | gi->hash++; | ||
| 2014 | read_lock(gl_lock_addr(gi->hash)); | ||
| 2015 | gi->hb_list = NULL; | ||
| 2016 | continue; | ||
| 2017 | } | ||
| 2018 | gi->gl = list_entry(gi->gl->gl_list.next, | ||
| 2019 | struct gfs2_glock, gl_list); | ||
| 2020 | } | ||
| 2021 | if (gi->gl) | ||
| 2022 | break; | ||
| 2023 | } | ||
| 2024 | read_unlock(gl_lock_addr(gi->hash)); | ||
| 2025 | return 0; | ||
| 2026 | } | ||
| 2027 | |||
| 2028 | static void gfs2_glock_iter_free(struct glock_iter *gi) | ||
| 2029 | { | ||
| 2030 | kfree(gi); | ||
| 2031 | } | ||
| 2032 | |||
| 2033 | static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) | ||
| 2034 | { | ||
| 2035 | struct glock_iter *gi; | ||
| 2036 | |||
| 2037 | gi = kmalloc(sizeof (*gi), GFP_KERNEL); | ||
| 2038 | if (!gi) | ||
| 2039 | return NULL; | ||
| 2040 | |||
| 2041 | gi->sdp = sdp; | ||
| 2042 | gi->hash = 0; | ||
| 2043 | gi->gl = NULL; | ||
| 2044 | gi->hb_list = NULL; | ||
| 2045 | gi->seq = NULL; | ||
| 2046 | memset(gi->string, 0, sizeof(gi->string)); | ||
| 2047 | |||
| 2048 | if (gfs2_glock_iter_next(gi)) { | ||
| 2049 | gfs2_glock_iter_free(gi); | ||
| 2050 | return NULL; | ||
| 2051 | } | ||
| 2052 | |||
| 2053 | return gi; | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) | ||
| 2057 | { | ||
| 2058 | struct glock_iter *gi; | ||
| 2059 | loff_t n = *pos; | ||
| 2060 | |||
| 2061 | gi = gfs2_glock_iter_init(file->private); | ||
| 2062 | if (!gi) | ||
| 2063 | return NULL; | ||
| 2064 | |||
| 2065 | while (n--) { | ||
| 2066 | if (gfs2_glock_iter_next(gi)) { | ||
| 2067 | gfs2_glock_iter_free(gi); | ||
| 2068 | return NULL; | ||
| 2069 | } | ||
| 2070 | } | ||
| 2071 | |||
| 2072 | return gi; | ||
| 2073 | } | ||
| 2074 | |||
| 2075 | static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, | ||
| 2076 | loff_t *pos) | ||
| 2077 | { | ||
| 2078 | struct glock_iter *gi = iter_ptr; | ||
| 2079 | |||
| 2080 | (*pos)++; | ||
| 2081 | |||
| 2082 | if (gfs2_glock_iter_next(gi)) { | ||
| 2083 | gfs2_glock_iter_free(gi); | ||
| 2084 | return NULL; | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | return gi; | ||
| 2088 | } | ||
| 2089 | |||
| 2090 | static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr) | ||
| 2091 | { | ||
| 2092 | /* nothing for now */ | ||
| 2093 | } | ||
| 2094 | |||
| 2095 | static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) | ||
| 2096 | { | ||
| 2097 | struct glock_iter *gi = iter_ptr; | ||
| 2098 | |||
| 2099 | gi->seq = file; | ||
| 2100 | dump_glock(gi, gi->gl); | ||
| 2101 | |||
| 2102 | return 0; | ||
| 2103 | } | ||
| 2104 | |||
| 2105 | static struct seq_operations gfs2_glock_seq_ops = { | ||
| 2106 | .start = gfs2_glock_seq_start, | ||
| 2107 | .next = gfs2_glock_seq_next, | ||
| 2108 | .stop = gfs2_glock_seq_stop, | ||
| 2109 | .show = gfs2_glock_seq_show, | ||
| 2110 | }; | ||
| 2111 | |||
| 2112 | static int gfs2_debugfs_open(struct inode *inode, struct file *file) | ||
| 2113 | { | ||
| 2114 | struct seq_file *seq; | ||
| 2115 | int ret; | ||
| 2116 | |||
| 2117 | ret = seq_open(file, &gfs2_glock_seq_ops); | ||
| 2118 | if (ret) | ||
| 2119 | return ret; | ||
| 2120 | |||
| 2121 | seq = file->private_data; | ||
| 2122 | seq->private = inode->i_private; | ||
| 2123 | |||
| 2124 | return 0; | ||
| 2125 | } | ||
| 2126 | |||
| 2127 | static const struct file_operations gfs2_debug_fops = { | ||
| 2128 | .owner = THIS_MODULE, | ||
| 2129 | .open = gfs2_debugfs_open, | ||
| 2130 | .read = seq_read, | ||
| 2131 | .llseek = seq_lseek, | ||
| 2132 | .release = seq_release | ||
| 2133 | }; | ||
| 2134 | |||
| 2135 | int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) | ||
| 2136 | { | ||
| 2137 | sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); | ||
| 2138 | if (!sdp->debugfs_dir) | ||
| 2139 | return -ENOMEM; | ||
| 2140 | sdp->debugfs_dentry_glocks = debugfs_create_file("glocks", | ||
| 2141 | S_IFREG | S_IRUGO, | ||
| 2142 | sdp->debugfs_dir, sdp, | ||
| 2143 | &gfs2_debug_fops); | ||
| 2144 | if (!sdp->debugfs_dentry_glocks) | ||
| 2145 | return -ENOMEM; | ||
| 2146 | |||
| 2147 | return 0; | ||
| 2148 | } | ||
| 2149 | |||
| 2150 | void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) | ||
| 2151 | { | ||
| 2152 | if (sdp && sdp->debugfs_dir) { | ||
| 2153 | if (sdp->debugfs_dentry_glocks) { | ||
| 2154 | debugfs_remove(sdp->debugfs_dentry_glocks); | ||
| 2155 | sdp->debugfs_dentry_glocks = NULL; | ||
| 2156 | } | ||
| 2157 | debugfs_remove(sdp->debugfs_dir); | ||
| 2158 | sdp->debugfs_dir = NULL; | ||
| 2159 | } | ||
| 2160 | } | ||
| 2161 | |||
| 2162 | int gfs2_register_debugfs(void) | ||
| 2163 | { | ||
| 2164 | gfs2_root = debugfs_create_dir("gfs2", NULL); | ||
| 2165 | return gfs2_root ? 0 : -ENOMEM; | ||
| 2166 | } | ||
| 2167 | |||
| 2168 | void gfs2_unregister_debugfs(void) | ||
| 2169 | { | ||
| 2170 | debugfs_remove(gfs2_root); | ||
| 2171 | gfs2_root = NULL; | ||
| 2172 | } | ||
