path: root/fs/gfs2/glock.c
author	Steven Whitehouse <swhiteho@redhat.com>	2007-03-16 05:40:31 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2007-05-01 04:10:39 -0400
commit	3b8249f6178cb2b68b9d683587797270125cc06a (patch)
tree	5bcc9bbd5eb923d5905077ba0bd41b5fa3f29ddd /fs/gfs2/glock.c
parent	1de913909263ba7f7054debeda1b79771a7233db (diff)
[GFS2] Fix bz 224480 and cleanup glock demotion code
This patch prevents the printing of a warning message in cases where the fs is functioning normally, by handing off responsibility for unlinked, but still open, inodes to another node for eventual deallocation. Also, there is now an improved system for ensuring that such requests to other nodes do not get lost. The callback on the iopen lock is only ever called when i_nlink == 0 and when a node is unable to deallocate the inode due to it still being in use on another node. When a node receives the callback, therefore, it knows that i_nlink must be zero, so we mark it as such (in gfs2_drop_inode) in order that it will then attempt deallocation of the inode itself.

As an additional benefit, queuing a demote request no longer requires a memory allocation. This simplifies the code for dealing with gfs2_holders as it removes one special case.

There are two new fields in struct gfs2_glock: gl_demote_state is the state which the remote node has requested, and gl_demote_time is the time when the request came in. Both fields are only valid when the GLF_DEMOTE flag is set in gl_flags.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
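For reference, the allocation-free demote queuing described above comes down to the patch's new handle_callback(). The sketch below is condensed from the corresponding hunk further down in this diff, with explanatory comments added; it introduces nothing beyond what the patch itself adds:

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		/* First request: record which state was asked for and when. */
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
		/* A request is already pending; override it with the new state
		   (in practice this only tightens a SHARED demote to UNLOCKED). */
		gl->gl_demote_state = state;
	}
	spin_unlock(&gl->gl_spin);
}

The recorded request is later acted on by rq_demote() from run_queue(), and GLF_DEMOTE is cleared once the glock has reached the requested (or unlocked) state.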
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	233
1 file changed, 62 insertions, 171 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a3a24f2e99d2..e7075945b051 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -54,7 +54,7 @@ struct glock_iter {
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
-static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
+static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
@@ -212,7 +212,6 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
 	gfs2_assert(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
-	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
 	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
 	glock_free(gl);
 	rv = 1;
@@ -399,7 +398,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
 {
 	gh->gh_state = state;
 	gh->gh_flags = flags;
-	gh->gh_iflags &= 1 << HIF_ALLOCED;
+	gh->gh_iflags = 0;
 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
 }
 
@@ -416,54 +415,8 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 	gh->gh_ip = 0;
 }
 
-/**
- * gfs2_holder_get - get a struct gfs2_holder structure
- * @gl: the glock
- * @state: the state we're requesting
- * @flags: the modifier flags
- * @gfp_flags:
- *
- * Figure out how big an impact this function has. Either:
- * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
- * 2) Leave it like it is
- *
- * Returns: the holder structure, NULL on ENOMEM
- */
-
-static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
-					   unsigned int state,
-					   int flags, gfp_t gfp_flags)
-{
-	struct gfs2_holder *gh;
-
-	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
-	if (!gh)
-		return NULL;
-
-	gfs2_holder_init(gl, state, flags, gh);
-	set_bit(HIF_ALLOCED, &gh->gh_iflags);
-	gh->gh_ip = (unsigned long)__builtin_return_address(0);
-	return gh;
-}
-
-/**
- * gfs2_holder_put - get rid of a struct gfs2_holder structure
- * @gh: the holder structure
- *
- */
-
-static void gfs2_holder_put(struct gfs2_holder *gh)
-{
-	gfs2_holder_uninit(gh);
-	kfree(gh);
-}
-
-static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
+static void gfs2_holder_wake(struct gfs2_holder *gh)
 {
-	if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
-		gfs2_holder_put(gh);
-		return;
-	}
 	clear_bit(HIF_WAIT, &gh->gh_iflags);
 	smp_mb();
 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
@@ -529,7 +482,7 @@ static int rq_promote(struct gfs2_holder *gh)
 				gfs2_reclaim_glock(sdp);
 			}
 
-			gfs2_glock_xmote_th(gh);
+			gfs2_glock_xmote_th(gh->gh_gl, gh);
 			spin_lock(&gl->gl_spin);
 		}
 		return 1;
@@ -552,7 +505,7 @@ static int rq_promote(struct gfs2_holder *gh)
 	gh->gh_error = 0;
 	set_bit(HIF_HOLDER, &gh->gh_iflags);
 
-	gfs2_holder_dispose_or_wake(gh);
+	gfs2_holder_wake(gh);
 
 	return 0;
 }
@@ -564,32 +517,24 @@ static int rq_promote(struct gfs2_holder *gh)
  * Returns: 1 if the queue is blocked
  */
 
-static int rq_demote(struct gfs2_holder *gh)
+static int rq_demote(struct gfs2_glock *gl)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
-
 	if (!list_empty(&gl->gl_holders))
 		return 1;
 
-	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
-		list_del_init(&gh->gh_list);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-		gfs2_holder_dispose_or_wake(gh);
-		spin_lock(&gl->gl_spin);
-	} else {
-		gl->gl_req_gh = gh;
-		set_bit(GLF_LOCK, &gl->gl_flags);
-		spin_unlock(&gl->gl_spin);
-
-		if (gh->gh_state == LM_ST_UNLOCKED ||
-		    gl->gl_state != LM_ST_EXCLUSIVE)
-			gfs2_glock_drop_th(gl);
-		else
-			gfs2_glock_xmote_th(gh);
-
-		spin_lock(&gl->gl_spin);
+	if (gl->gl_state == gl->gl_demote_state ||
+	    gl->gl_state == LM_ST_UNLOCKED) {
+		clear_bit(GLF_DEMOTE, &gl->gl_flags);
+		return 0;
 	}
+	set_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
+	    gl->gl_state != LM_ST_EXCLUSIVE)
+		gfs2_glock_drop_th(gl);
+	else
+		gfs2_glock_xmote_th(gl, NULL);
+	spin_lock(&gl->gl_spin);
 
 	return 0;
 }
@@ -617,16 +562,8 @@ static void run_queue(struct gfs2_glock *gl)
 			else
 				gfs2_assert_warn(gl->gl_sbd, 0);
 
-		} else if (!list_empty(&gl->gl_waiters2) &&
-			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
-			gh = list_entry(gl->gl_waiters2.next,
-					struct gfs2_holder, gh_list);
-
-			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
-				blocked = rq_demote(gh);
-			else
-				gfs2_assert_warn(gl->gl_sbd, 0);
-
+		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+			blocked = rq_demote(gl);
 		} else if (!list_empty(&gl->gl_waiters3)) {
 			gh = list_entry(gl->gl_waiters3.next,
 					struct gfs2_holder, gh_list);
@@ -717,50 +654,24 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 }
 
 /**
- * handle_callback - add a demote request to a lock's queue
+ * handle_callback - process a demote request
  * @gl: the glock
  * @state: the state the caller wants us to change to
  *
- * Note: This may fail sliently if we are out of memory.
+ * There are only two requests that we are going to see in actual
+ * practise: LM_ST_SHARED and LM_ST_UNLOCKED
  */
 
 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
 {
-	struct gfs2_holder *gh, *new_gh = NULL;
-
-restart:
 	spin_lock(&gl->gl_spin);
-
-	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
-		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
-		    gl->gl_req_gh != gh) {
-			if (gh->gh_state != state)
-				gh->gh_state = LM_ST_UNLOCKED;
-			goto out;
-		}
-	}
-
-	if (new_gh) {
-		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
-		new_gh = NULL;
-	} else {
-		spin_unlock(&gl->gl_spin);
-
-		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
-		if (!new_gh)
-			return;
-		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
-		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
-		set_bit(HIF_WAIT, &new_gh->gh_iflags);
-
-		goto restart;
+	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
+		gl->gl_demote_state = state;
+		gl->gl_demote_time = jiffies;
+	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
+		gl->gl_demote_state = state;
 	}
-
-out:
 	spin_unlock(&gl->gl_spin);
-
-	if (new_gh)
-		gfs2_holder_put(new_gh);
 }
 
 /**
@@ -820,56 +731,37 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 
 	/* Deal with each possible exit condition */
 
-	if (!gh)
+	if (!gh) {
 		gl->gl_stamp = jiffies;
-	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
+		if (ret & LM_OUT_CANCELED)
+			op_done = 0;
+		else
+			clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	} else {
 		spin_lock(&gl->gl_spin);
 		list_del_init(&gh->gh_list);
 		gh->gh_error = -EIO;
-		spin_unlock(&gl->gl_spin);
-	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
-		if (gl->gl_state == gh->gh_state ||
-		    gl->gl_state == LM_ST_UNLOCKED) {
+		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+			goto out;
+		gh->gh_error = GLR_CANCELED;
+		if (ret & LM_OUT_CANCELED)
+			goto out;
+		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
+			list_add_tail(&gh->gh_list, &gl->gl_holders);
 			gh->gh_error = 0;
-		} else {
-			if (gfs2_assert_warn(sdp, gh->gh_flags &
-					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
-				fs_warn(sdp, "ret = 0x%.8X\n", ret);
-			gh->gh_error = GLR_TRYFAILED;
+			set_bit(HIF_HOLDER, &gh->gh_iflags);
+			set_bit(HIF_FIRST, &gh->gh_iflags);
+			op_done = 0;
+			goto out;
 		}
-		spin_unlock(&gl->gl_spin);
-
-		if (ret & LM_OUT_CANCELED)
-			handle_callback(gl, LM_ST_UNLOCKED);
-
-	} else if (ret & LM_OUT_CANCELED) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
-		gh->gh_error = GLR_CANCELED;
-		spin_unlock(&gl->gl_spin);
-
-	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-		spin_lock(&gl->gl_spin);
-		list_move_tail(&gh->gh_list, &gl->gl_holders);
-		gh->gh_error = 0;
-		set_bit(HIF_HOLDER, &gh->gh_iflags);
-		spin_unlock(&gl->gl_spin);
-
-		set_bit(HIF_FIRST, &gh->gh_iflags);
-
-		op_done = 0;
-
-	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		spin_lock(&gl->gl_spin);
-		list_del_init(&gh->gh_list);
 		gh->gh_error = GLR_TRYFAILED;
-		spin_unlock(&gl->gl_spin);
-
-	} else {
+		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+			goto out;
+		gh->gh_error = -EINVAL;
 		if (gfs2_assert_withdraw(sdp, 0) == -1)
 			fs_err(sdp, "ret = 0x%.8X\n", ret);
+out:
+		spin_unlock(&gl->gl_spin);
 	}
 
 	if (glops->go_xmote_bh)
@@ -887,7 +779,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	gfs2_glock_put(gl);
 
 	if (gh)
-		gfs2_holder_dispose_or_wake(gh);
+		gfs2_holder_wake(gh);
 }
 
 /**
@@ -898,12 +790,11 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
  *
  */
 
-void gfs2_glock_xmote_th(struct gfs2_holder *gh)
+void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int flags = gh->gh_flags;
-	unsigned state = gh->gh_state;
+	int flags = gh ? gh->gh_flags : 0;
+	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
 				 LM_FLAG_NOEXP | LM_FLAG_ANY |
@@ -953,6 +844,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	gfs2_assert_warn(sdp, !ret);
 
 	state_change(gl, LM_ST_UNLOCKED);
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
 
 	if (glops->go_inval)
 		glops->go_inval(gl, DIO_METADATA);
@@ -974,7 +866,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	gfs2_glock_put(gl);
 
 	if (gh)
-		gfs2_holder_dispose_or_wake(gh);
+		gfs2_holder_wake(gh);
 }
 
 /**
@@ -1291,9 +1183,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		if (glops->go_unlock)
 			glops->go_unlock(gh);
 
-		gl->gl_stamp = jiffies;
-
 		spin_lock(&gl->gl_spin);
+		gl->gl_stamp = jiffies;
 	}
 
 	clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1981,16 +1872,16 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 		if (error)
 			goto out;
 	}
-	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
-		error = dump_holder(gi, "Waiter2", gh);
-		if (error)
-			goto out;
-	}
 	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
 		error = dump_holder(gi, "Waiter3", gh);
 		if (error)
 			goto out;
 	}
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+		print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
+			  gl->gl_demote_state,
+			  (u64)(jiffies - gl->gl_demote_time)*1000000/HZ);
+	}
 	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
 		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
 		    list_empty(&gl->gl_holders)) {