author     Steven Whitehouse <swhiteho@redhat.com>    2007-03-16 05:40:31 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2007-05-01 04:10:39 -0400
commit     3b8249f6178cb2b68b9d683587797270125cc06a (patch)
tree       5bcc9bbd5eb923d5905077ba0bd41b5fa3f29ddd
parent     1de913909263ba7f7054debeda1b79771a7233db (diff)
[GFS2] Fix bz 224480 and cleanup glock demotion code
This patch prevents the printing of a warning message in cases where the fs is functioning normally, by handing off responsibility for unlinked, but still open, inodes to another node for eventual deallocation. Also, there is now an improved system for ensuring that such requests to other nodes do not get lost. The callback on the iopen lock is only ever called when i_nlink == 0 and when a node is unable to deallocate the inode due to it still being in use on another node. When a node receives the callback, it therefore knows that i_nlink must be zero, so we mark it as such (in gfs2_drop_inode) so that it will then attempt deallocation of the inode itself.

As an additional benefit, queuing a demote request no longer requires a memory allocation. This simplifies the code for dealing with gfs2_holders as it removes one special case.

There are two new fields in struct gfs2_glock. gl_demote_state is the state which the remote node has requested and gl_demote_time is the time when the request came in. Both fields are only valid when the GLF_DEMOTE flag is set in gl_flags.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
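In outline, a demote request is now recorded directly on the glock instead of in a freshly allocated gfs2_holder, and gfs2_drop_inode() uses that record to detect a remote unlink. The following is a condensed sketch of that logic, paraphrasing the handle_callback() and gfs2_drop_inode() hunks in the diff below (not a drop-in excerpt; surrounding context is abbreviated):

    /* Record a remote demote request on the glock itself; no allocation needed. */
    static void handle_callback(struct gfs2_glock *gl, unsigned int state)
    {
        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
            gl->gl_demote_state = state;   /* state the remote node wants */
            gl->gl_demote_time = jiffies;  /* time of the first request */
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
            gl->gl_demote_state = state;   /* don't weaken a pending request to fully unlock */
        }
        spin_unlock(&gl->gl_spin);
    }

    /* A pending demote on the iopen glock means the link count reached zero
     * on another node, so mark the inode for deallocation on this node. */
    static void gfs2_drop_inode(struct inode *inode)
    {
        if (inode->i_private && inode->i_nlink) {
            struct gfs2_glock *gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
            if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
                clear_nlink(inode);
        }
        generic_drop_inode(inode);
    }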
-rw-r--r--  fs/gfs2/glock.c      | 233
-rw-r--r--  fs/gfs2/glock.h      |   2
-rw-r--r--  fs/gfs2/incore.h     |   8
-rw-r--r--  fs/gfs2/main.c       |   1
-rw-r--r--  fs/gfs2/ops_super.c  |  28
5 files changed, 93 insertions, 179 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a3a24f2e99d2..e7075945b051 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -54,7 +54,7 @@ struct glock_iter {
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
-static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
+static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
@@ -212,7 +212,6 @@ int gfs2_glock_put(struct gfs2_glock *gl)
                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
-                gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                 glock_free(gl);
                 rv = 1;
@@ -399,7 +398,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
 {
         gh->gh_state = state;
         gh->gh_flags = flags;
-        gh->gh_iflags &= 1 << HIF_ALLOCED;
+        gh->gh_iflags = 0;
         gh->gh_ip = (unsigned long)__builtin_return_address(0);
 }
 
@@ -416,54 +415,8 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
         gh->gh_ip = 0;
 }
 
-/**
- * gfs2_holder_get - get a struct gfs2_holder structure
- * @gl: the glock
- * @state: the state we're requesting
- * @flags: the modifier flags
- * @gfp_flags:
- *
- * Figure out how big an impact this function has. Either:
- * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
- * 2) Leave it like it is
- *
- * Returns: the holder structure, NULL on ENOMEM
- */
-
-static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
-                                           unsigned int state,
-                                           int flags, gfp_t gfp_flags)
-{
-        struct gfs2_holder *gh;
-
-        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
-        if (!gh)
-                return NULL;
-
-        gfs2_holder_init(gl, state, flags, gh);
-        set_bit(HIF_ALLOCED, &gh->gh_iflags);
-        gh->gh_ip = (unsigned long)__builtin_return_address(0);
-        return gh;
-}
-
-/**
- * gfs2_holder_put - get rid of a struct gfs2_holder structure
- * @gh: the holder structure
- *
- */
-
-static void gfs2_holder_put(struct gfs2_holder *gh)
-{
-        gfs2_holder_uninit(gh);
-        kfree(gh);
-}
-
-static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
+static void gfs2_holder_wake(struct gfs2_holder *gh)
 {
-        if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
-                gfs2_holder_put(gh);
-                return;
-        }
         clear_bit(HIF_WAIT, &gh->gh_iflags);
         smp_mb();
         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
@@ -529,7 +482,7 @@ static int rq_promote(struct gfs2_holder *gh)
                                 gfs2_reclaim_glock(sdp);
                         }
 
-                        gfs2_glock_xmote_th(gh);
+                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                         spin_lock(&gl->gl_spin);
                 }
                 return 1;
@@ -552,7 +505,7 @@ static int rq_promote(struct gfs2_holder *gh)
         gh->gh_error = 0;
         set_bit(HIF_HOLDER, &gh->gh_iflags);
 
-        gfs2_holder_dispose_or_wake(gh);
+        gfs2_holder_wake(gh);
 
         return 0;
 }
@@ -564,32 +517,24 @@ static int rq_promote(struct gfs2_holder *gh)
  * Returns: 1 if the queue is blocked
  */
 
-static int rq_demote(struct gfs2_holder *gh)
+static int rq_demote(struct gfs2_glock *gl)
 {
-        struct gfs2_glock *gl = gh->gh_gl;
-
         if (!list_empty(&gl->gl_holders))
                 return 1;
 
-        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
-                list_del_init(&gh->gh_list);
-                gh->gh_error = 0;
-                spin_unlock(&gl->gl_spin);
-                gfs2_holder_dispose_or_wake(gh);
-                spin_lock(&gl->gl_spin);
-        } else {
-                gl->gl_req_gh = gh;
-                set_bit(GLF_LOCK, &gl->gl_flags);
-                spin_unlock(&gl->gl_spin);
-
-                if (gh->gh_state == LM_ST_UNLOCKED ||
-                    gl->gl_state != LM_ST_EXCLUSIVE)
-                        gfs2_glock_drop_th(gl);
-                else
-                        gfs2_glock_xmote_th(gh);
-
-                spin_lock(&gl->gl_spin);
+        if (gl->gl_state == gl->gl_demote_state ||
+            gl->gl_state == LM_ST_UNLOCKED) {
+                clear_bit(GLF_DEMOTE, &gl->gl_flags);
+                return 0;
         }
+        set_bit(GLF_LOCK, &gl->gl_flags);
+        spin_unlock(&gl->gl_spin);
+        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
+            gl->gl_state != LM_ST_EXCLUSIVE)
+                gfs2_glock_drop_th(gl);
+        else
+                gfs2_glock_xmote_th(gl, NULL);
+        spin_lock(&gl->gl_spin);
 
         return 0;
 }
@@ -617,16 +562,8 @@ static void run_queue(struct gfs2_glock *gl)
                         else
                                 gfs2_assert_warn(gl->gl_sbd, 0);
 
-                } else if (!list_empty(&gl->gl_waiters2) &&
-                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
-                        gh = list_entry(gl->gl_waiters2.next,
-                                        struct gfs2_holder, gh_list);
-
-                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
-                                blocked = rq_demote(gh);
-                        else
-                                gfs2_assert_warn(gl->gl_sbd, 0);
-
+                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+                        blocked = rq_demote(gl);
                 } else if (!list_empty(&gl->gl_waiters3)) {
                         gh = list_entry(gl->gl_waiters3.next,
                                         struct gfs2_holder, gh_list);
@@ -717,50 +654,24 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 }
 
 /**
- * handle_callback - add a demote request to a lock's queue
+ * handle_callback - process a demote request
  * @gl: the glock
  * @state: the state the caller wants us to change to
  *
- * Note: This may fail sliently if we are out of memory.
+ * There are only two requests that we are going to see in actual
+ * practise: LM_ST_SHARED and LM_ST_UNLOCKED
  */
 
 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
 {
-        struct gfs2_holder *gh, *new_gh = NULL;
-
-restart:
         spin_lock(&gl->gl_spin);
-
-        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
-                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
-                    gl->gl_req_gh != gh) {
-                        if (gh->gh_state != state)
-                                gh->gh_state = LM_ST_UNLOCKED;
-                        goto out;
-                }
-        }
-
-        if (new_gh) {
-                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
-                new_gh = NULL;
-        } else {
-                spin_unlock(&gl->gl_spin);
-
-                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
-                if (!new_gh)
-                        return;
-                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
-                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
-                set_bit(HIF_WAIT, &new_gh->gh_iflags);
-
-                goto restart;
+        if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
+                gl->gl_demote_state = state;
+                gl->gl_demote_time = jiffies;
+        } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
+                gl->gl_demote_state = state;
         }
-
-out:
         spin_unlock(&gl->gl_spin);
-
-        if (new_gh)
-                gfs2_holder_put(new_gh);
 }
 
 /**
@@ -820,56 +731,37 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 
         /* Deal with each possible exit condition */
 
-        if (!gh)
+        if (!gh) {
                 gl->gl_stamp = jiffies;
-        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
+                if (ret & LM_OUT_CANCELED)
+                        op_done = 0;
+                else
+                        clear_bit(GLF_DEMOTE, &gl->gl_flags);
+        } else {
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
                 gh->gh_error = -EIO;
-                spin_unlock(&gl->gl_spin);
-        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
-                spin_lock(&gl->gl_spin);
-                list_del_init(&gh->gh_list);
-                if (gl->gl_state == gh->gh_state ||
-                    gl->gl_state == LM_ST_UNLOCKED) {
+                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+                        goto out;
+                gh->gh_error = GLR_CANCELED;
+                if (ret & LM_OUT_CANCELED)
+                        goto out;
+                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
+                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                         gh->gh_error = 0;
-                } else {
-                        if (gfs2_assert_warn(sdp, gh->gh_flags &
-                                             (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
-                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
-                        gh->gh_error = GLR_TRYFAILED;
+                        set_bit(HIF_HOLDER, &gh->gh_iflags);
+                        set_bit(HIF_FIRST, &gh->gh_iflags);
+                        op_done = 0;
+                        goto out;
                 }
-                spin_unlock(&gl->gl_spin);
-
-                if (ret & LM_OUT_CANCELED)
-                        handle_callback(gl, LM_ST_UNLOCKED);
-
-        } else if (ret & LM_OUT_CANCELED) {
-                spin_lock(&gl->gl_spin);
-                list_del_init(&gh->gh_list);
-                gh->gh_error = GLR_CANCELED;
-                spin_unlock(&gl->gl_spin);
-
-        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-                spin_lock(&gl->gl_spin);
-                list_move_tail(&gh->gh_list, &gl->gl_holders);
-                gh->gh_error = 0;
-                set_bit(HIF_HOLDER, &gh->gh_iflags);
-                spin_unlock(&gl->gl_spin);
-
-                set_bit(HIF_FIRST, &gh->gh_iflags);
-
-                op_done = 0;
-
-        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-                spin_lock(&gl->gl_spin);
-                list_del_init(&gh->gh_list);
                 gh->gh_error = GLR_TRYFAILED;
-                spin_unlock(&gl->gl_spin);
-
-        } else {
+                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+                        goto out;
+                gh->gh_error = -EINVAL;
                 if (gfs2_assert_withdraw(sdp, 0) == -1)
                         fs_err(sdp, "ret = 0x%.8X\n", ret);
+out:
+                spin_unlock(&gl->gl_spin);
         }
 
         if (glops->go_xmote_bh)
@@ -887,7 +779,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
         gfs2_glock_put(gl);
 
         if (gh)
-                gfs2_holder_dispose_or_wake(gh);
+                gfs2_holder_wake(gh);
 }
 
 /**
@@ -898,12 +790,11 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
  *
  */
 
-void gfs2_glock_xmote_th(struct gfs2_holder *gh)
+void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
-        struct gfs2_glock *gl = gh->gh_gl;
         struct gfs2_sbd *sdp = gl->gl_sbd;
-        int flags = gh->gh_flags;
-        unsigned state = gh->gh_state;
+        int flags = gh ? gh->gh_flags : 0;
+        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
@@ -953,6 +844,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
         gfs2_assert_warn(sdp, !ret);
 
         state_change(gl, LM_ST_UNLOCKED);
+        clear_bit(GLF_DEMOTE, &gl->gl_flags);
 
         if (glops->go_inval)
                 glops->go_inval(gl, DIO_METADATA);
@@ -974,7 +866,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
         gfs2_glock_put(gl);
 
         if (gh)
-                gfs2_holder_dispose_or_wake(gh);
+                gfs2_holder_wake(gh);
 }
 
 /**
@@ -1291,9 +1183,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                 if (glops->go_unlock)
                         glops->go_unlock(gh);
 
-                gl->gl_stamp = jiffies;
-
                 spin_lock(&gl->gl_spin);
+                gl->gl_stamp = jiffies;
         }
 
         clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1981,16 +1872,16 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
                 if (error)
                         goto out;
         }
-        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
-                error = dump_holder(gi, "Waiter2", gh);
-                if (error)
-                        goto out;
-        }
         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                 error = dump_holder(gi, "Waiter3", gh);
                 if (error)
                         goto out;
         }
+        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+                print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
+                          gl->gl_demote_state,
+                          (u64)(jiffies - gl->gl_demote_time)*1000000/HZ);
+        }
         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                     list_empty(&gl->gl_holders)) {
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index d7cef7408728..5e662eadc6f2 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -67,7 +67,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 {
         int ret;
         spin_lock(&gl->gl_spin);
-        ret = !list_empty(&gl->gl_waiters2) || !list_empty(&gl->gl_waiters3);
+        ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
         spin_unlock(&gl->gl_spin);
         return ret;
 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7555261d911f..9c125823d760 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -115,11 +115,8 @@ enum {
         /* Actions */
         HIF_MUTEX = 0,
         HIF_PROMOTE = 1,
-        HIF_DEMOTE = 2,
 
         /* States */
-        HIF_ALLOCED = 4,
-        HIF_DEALLOC = 5,
         HIF_HOLDER = 6,
         HIF_FIRST = 7,
         HIF_ABORTED = 9,
@@ -142,8 +139,8 @@ struct gfs2_holder {
 enum {
         GLF_LOCK = 1,
         GLF_STICKY = 2,
+        GLF_DEMOTE = 3,
         GLF_DIRTY = 5,
-        GLF_SKIP_WAITERS2 = 6,
 };
 
 struct gfs2_glock {
@@ -156,11 +153,12 @@ struct gfs2_glock {
 
         unsigned int gl_state;
         unsigned int gl_hash;
+        unsigned int gl_demote_state; /* state requested by remote node */
+        unsigned long gl_demote_time; /* time of first demote request */
         struct task_struct *gl_owner;
         unsigned long gl_ip;
         struct list_head gl_holders;
         struct list_head gl_waiters1; /* HIF_MUTEX */
-        struct list_head gl_waiters2; /* HIF_DEMOTE */
         struct list_head gl_waiters3; /* HIF_PROMOTE */
 
         const struct gfs2_glock_operations *gl_ops;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 218395371dbe..c4bb374eaf92 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -45,7 +45,6 @@ static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned
         spin_lock_init(&gl->gl_spin);
         INIT_LIST_HEAD(&gl->gl_holders);
         INIT_LIST_HEAD(&gl->gl_waiters1);
-        INIT_LIST_HEAD(&gl->gl_waiters2);
         INIT_LIST_HEAD(&gl->gl_waiters3);
         gl->gl_lvb = NULL;
         atomic_set(&gl->gl_lvb_count, 0);
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index b89999d3a767..485ce3d49923 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -284,6 +284,31 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 }
 
 /**
+ * gfs2_drop_inode - Drop an inode (test for remote unlink)
+ * @inode: The inode to drop
+ *
+ * If we've received a callback on an iopen lock then its because a
+ * remote node tried to deallocate the inode but failed due to this node
+ * still having the inode open. Here we mark the link count zero
+ * since we know that it must have reached zero if the GLF_DEMOTE flag
+ * is set on the iopen glock. If we didn't do a disk read since the
+ * remote node removed the final link then we might otherwise miss
+ * this event. This check ensures that this node will deallocate the
+ * inode's blocks, or alternatively pass the baton on to another
+ * node for later deallocation.
+ */
+static void gfs2_drop_inode(struct inode *inode)
+{
+        if (inode->i_private && inode->i_nlink) {
+                struct gfs2_inode *ip = GFS2_I(inode);
+                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+                if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
+                        clear_nlink(inode);
+        }
+        generic_drop_inode(inode);
+}
+
+/**
  * gfs2_clear_inode - Deallocate an inode when VFS is done with it
  * @inode: The VFS inode
  *
@@ -441,7 +466,7 @@ out_unlock:
 out_uninit:
         gfs2_holder_uninit(&ip->i_iopen_gh);
         gfs2_glock_dq_uninit(&gh);
-        if (error)
+        if (error && error != GLR_TRYFAILED)
                 fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
 out:
         truncate_inode_pages(&inode->i_data, 0);
@@ -481,6 +506,7 @@ const struct super_operations gfs2_super_ops = {
         .statfs = gfs2_statfs,
         .remount_fs = gfs2_remount_fs,
         .clear_inode = gfs2_clear_inode,
+        .drop_inode = gfs2_drop_inode,
         .show_options = gfs2_show_options,
 };
 