author    Abhijith Das <adas@redhat.com>          2007-06-11 03:22:32 -0400
committer Steven Whitehouse <swhiteho@redhat.com> 2007-07-09 03:23:36 -0400
commit    d93cfa9884354dac2d8ccd894594a43e0b962b6f
tree      72704d54aaa99e0021d3cc0b025fb8c67b09e4ce /fs
parent    a7a2ff8a951ab373732116e7c31e2e1fe025d5e0
[GFS2] Fix deallocation issues
There were two issues during deallocation of unlinked inodes.

The first related to the use of "try" locks. For the inode lock, the try lock did not try hard enough to deallocate in all circumstances (it is now a normal glock), and for the iopen lock it did not wait for the demotion of the shared lock before attempting to acquire the exclusive lock, so that, depending on timing, the deallocation sometimes did not complete when it should have.

The second issue was the lack of a way to invalidate dcache entries on remote nodes, which meant that unlinks were taking a long time to return disk space to the fs. This patch adds code to invalidate the dcache entries across the cluster for unlinked inodes, fixing that.

This patch was written jointly by Abhijith Das and Steven Whitehouse.

Signed-off-by: Abhijith Das <adas@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
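The first fix hinges on being able to wait for a glock demotion to finish. The patch adds a wait/wake pair on the GLF_DEMOTE flag plus a gfs2_glock_dq_wait() helper that dequeues a holder and then blocks until the demote has completed. Condensed from the glock.c hunks below (surrounding code omitted), the pattern looks like this:

static int just_schedule(void *word)
{
	/* Simply sleep until the bit waitqueue wakes us. */
	schedule();
	return 0;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();	/* order the clear before the wake */
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule,
		    TASK_UNINTERRUPTIBLE);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	gfs2_glock_dq(gh);
	wait_on_demote(gl);	/* don't return until the demote is done */
}

gfs2_delete_inode() then calls gfs2_glock_dq_wait() on the shared iopen holder before re-acquiring it exclusively (see the ops_super.c hunk), which closes the timing window described above.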
Diffstat (limited to 'fs')
-rw-r--r--  fs/gfs2/glock.c     | 52
-rw-r--r--  fs/gfs2/glock.h     |  1
-rw-r--r--  fs/gfs2/inode.c     |  1
-rw-r--r--  fs/gfs2/ops_super.c |  8
4 files changed, 48 insertions(+), 14 deletions(-)
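The second fix piggybacks on the remote demote callback: when another node requests the iopen glock and that glock still has a GFS2 inode attached via gl_object, the callback grabs a reference to the inode, drops the glock spinlock, prunes the inode's dcache aliases and puts the reference, allowing the final iput to release the unlinked inode promptly. Sketched as a hypothetical standalone helper (the name iopen_demote_prune() is illustrative only; in the patch this logic sits inline in handle_callback(), see the glock.c hunk below):

/* Illustrative only -- in the patch this lives inline in handle_callback(),
 * which is entered with gl->gl_spin held.  gl_object is set to the GFS2
 * inode in gfs2_inode_lookup() and cleared again in gfs2_clear_inode(). */
static void iopen_demote_prune(struct gfs2_glock *gl)
{
	struct inode *inode = igrab(gl->gl_object);	/* may return NULL */

	spin_unlock(&gl->gl_spin);	/* can't prune under the spinlock */
	if (inode) {
		d_prune_aliases(inode);	/* drop unused cached dentries */
		iput(inode);		/* last ref triggers deallocation */
	}
}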
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b3ed58551e74..384cae623ed3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -422,11 +422,11 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 static void gfs2_holder_wake(struct gfs2_holder *gh)
 {
 	clear_bit(HIF_WAIT, &gh->gh_iflags);
-	smp_mb();
+	smp_mb__after_clear_bit();
 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 }
 
-static int holder_wait(void *word)
+static int just_schedule(void *word)
 {
 	schedule();
 	return 0;
@@ -435,7 +435,20 @@ static int holder_wait(void *word)
 static void wait_on_holder(struct gfs2_holder *gh)
 {
 	might_sleep();
-	wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+static void wait_on_demote(struct gfs2_glock *gl)
+{
+	might_sleep();
+	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
 }
 
 /**
@@ -528,7 +541,7 @@ static int rq_demote(struct gfs2_glock *gl)
 
 	if (gl->gl_state == gl->gl_demote_state ||
 	    gl->gl_state == LM_ST_UNLOCKED) {
-		clear_bit(GLF_DEMOTE, &gl->gl_flags);
+		gfs2_demote_wake(gl);
 		return 0;
 	}
 	set_bit(GLF_LOCK, &gl->gl_flags);
@@ -666,12 +679,22 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
  * practise: LM_ST_SHARED and LM_ST_UNLOCKED
  */
 
-static void handle_callback(struct gfs2_glock *gl, unsigned int state)
+static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
 {
 	spin_lock(&gl->gl_spin);
 	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
+		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
+		    gl->gl_object) {
+			struct inode *inode = igrab(gl->gl_object);
+			spin_unlock(&gl->gl_spin);
+			if (inode) {
+				d_prune_aliases(inode);
+				iput(inode);
+			}
+			return;
+		}
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
 		gl->gl_demote_state = state;
 	}
@@ -740,7 +763,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 		if (ret & LM_OUT_CANCELED)
 			op_done = 0;
 		else
-			clear_bit(GLF_DEMOTE, &gl->gl_flags);
+			gfs2_demote_wake(gl);
 	} else {
 		spin_lock(&gl->gl_spin);
 		list_del_init(&gh->gh_list);
@@ -848,7 +871,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	gfs2_assert_warn(sdp, !ret);
 
 	state_change(gl, LM_ST_UNLOCKED);
-	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	gfs2_demote_wake(gl);
 
 	if (glops->go_inval)
 		glops->go_inval(gl, DIO_METADATA);
@@ -1174,7 +1197,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 
 	if (gh->gh_flags & GL_NOCACHE)
-		handle_callback(gl, LM_ST_UNLOCKED);
+		handle_callback(gl, LM_ST_UNLOCKED, 0);
 
 	gfs2_glmutex_lock(gl);
 
@@ -1196,6 +1219,13 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	spin_unlock(&gl->gl_spin);
 }
 
+void gfs2_glock_dq_wait(struct gfs2_holder *gh)
+{
+	struct gfs2_glock *gl = gh->gh_gl;
+	gfs2_glock_dq(gh);
+	wait_on_demote(gl);
+}
+
 /**
  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
  * @gh: the holder structure
@@ -1456,7 +1486,7 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
 	if (!gl)
 		return;
 
-	handle_callback(gl, state);
+	handle_callback(gl, state, 1);
 
 	spin_lock(&gl->gl_spin);
 	run_queue(gl);
@@ -1596,7 +1626,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 	if (gfs2_glmutex_trylock(gl)) {
 		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
-			handle_callback(gl, LM_ST_UNLOCKED);
+			handle_callback(gl, LM_ST_UNLOCKED, 0);
 		gfs2_glmutex_unlock(gl);
 	}
 
@@ -1709,7 +1739,7 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (gfs2_glmutex_trylock(gl)) {
 		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED)
-			handle_callback(gl, LM_ST_UNLOCKED);
+			handle_callback(gl, LM_ST_UNLOCKED, 0);
 		gfs2_glmutex_unlock(gl);
 	}
 }
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b3e152db70c8..7721ca3fff9e 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -87,6 +87,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh);
 int gfs2_glock_poll(struct gfs2_holder *gh);
 int gfs2_glock_wait(struct gfs2_holder *gh);
 void gfs2_glock_dq(struct gfs2_holder *gh);
+void gfs2_glock_dq_wait(struct gfs2_holder *gh);
 
 void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
 int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 366235d6a5b5..792d64f69cc2 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -114,6 +114,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, u64 no_addr, unsigned in
 	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 	if (unlikely(error))
 		goto fail_iopen;
+	ip->i_iopen_gh.gh_gl->gl_object = ip;
 
 	gfs2_glock_put(io_gl);
 
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 485ce3d49923..603d940f1159 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -326,8 +326,10 @@ static void gfs2_clear_inode(struct inode *inode)
 		gfs2_glock_schedule_for_reclaim(ip->i_gl);
 		gfs2_glock_put(ip->i_gl);
 		ip->i_gl = NULL;
-		if (ip->i_iopen_gh.gh_gl)
+		if (ip->i_iopen_gh.gh_gl) {
+			ip->i_iopen_gh.gh_gl->gl_object = NULL;
 			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		}
 	}
 }
 
@@ -422,13 +424,13 @@ static void gfs2_delete_inode(struct inode *inode)
 	if (!inode->i_private)
 		goto out;
 
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh);
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	if (unlikely(error)) {
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 		goto out;
 	}
 
-	gfs2_glock_dq(&ip->i_iopen_gh);
+	gfs2_glock_dq_wait(&ip->i_iopen_gh);
 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
 	error = gfs2_glock_nq(&ip->i_iopen_gh);
 	if (error)