author	Benjamin Marzinski <bmarzins@redhat.com>	2009-07-23 19:52:34 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2009-07-30 06:01:03 -0400
commit	b94a170e96dc416828af9d350ae2e34b70ae7347 (patch)
tree	6000929d554359c7b520a49a63415b9fc18b48b9 /fs
parent	6b94617024bd6810cde1d0d491202c30d5a38d91 (diff)
GFS2: remove dcache entries for remote deleted inodes
When a file is deleted from a gfs2 filesystem on one node, a dcache entry for it may still exist on other nodes in the cluster. If this happens, gfs2 will be unable to free this file on disk. Because of this, it's possible to have a gfs2 filesystem with no files on it and no free space. With this patch, when a node receives a callback notifying it that the file is being deleted on another node, it schedules a new workqueue thread to remove the file's dcache entry.

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/glock.c	43
-rw-r--r--	fs/gfs2/glock.h	3
-rw-r--r--	fs/gfs2/glops.c	21
-rw-r--r--	fs/gfs2/incore.h	2
-rw-r--r--	fs/gfs2/super.c	1
5 files changed, 65 insertions(+), 5 deletions(-)
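
The patch relies on a reference-counting idiom around queue_work(): the work item owns one reference on the glock, taken before queueing and dropped either when the work function finishes or immediately if queue_work() reports that the item was already pending. The sketch below illustrates that idiom in isolation with a hypothetical kref-counted object (demo_obj, demo_schedule_delete and the other names are illustrative only, not part of this patch or of GFS2):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct kref ref;
	struct work_struct del_work;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

/* Deferred cleanup, runs in workqueue context. */
static void demo_del_work(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, del_work);

	/* ... prune dcache aliases, release resources, etc. ... */
	kref_put(&obj->ref, demo_release);	/* drop the work item's reference */
}

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	kref_init(&obj->ref);
	INIT_WORK(&obj->del_work, demo_del_work);
	return obj;
}

static void demo_schedule_delete(struct workqueue_struct *wq, struct demo_obj *obj)
{
	kref_get(&obj->ref);		/* reference now owned by the work item */
	if (queue_work(wq, &obj->del_work) == 0)
		kref_put(&obj->ref, demo_release);	/* already queued: drop it again */
}

The same pattern appears twice in the diff below: iopen_go_callback() takes a glock reference with gfs2_glock_hold() and gives it back with gfs2_glock_put_nolock() if the work was already queued, and delete_work_func() drops it with gfs2_glock_put() once the dcache entry has been pruned.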
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f041a89e1ab8..8b674b1f3a55 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
@@ -167,7 +168,7 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
 	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
 	atomic_inc(&gl->gl_ref);
@@ -222,7 +223,7 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
  * to the glock, in addition to the one it is dropping.
  */
 
-static void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
 {
 	if (atomic_dec_and_test(&gl->gl_ref))
 		GLOCK_BUG_ON(gl, 1);
@@ -679,6 +680,29 @@ out_unlock:
 	goto out;
 }
 
+static void delete_work_func(struct work_struct *work)
+{
+	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_inode *ip = NULL;
+	struct inode *inode;
+	u64 no_addr = 0;
+
+	spin_lock(&gl->gl_spin);
+	ip = (struct gfs2_inode *)gl->gl_object;
+	if (ip)
+		no_addr = ip->i_no_addr;
+	spin_unlock(&gl->gl_spin);
+	if (ip) {
+		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+		if (inode) {
+			d_prune_aliases(inode);
+			iput(inode);
+		}
+	}
+	gfs2_glock_put(gl);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
@@ -757,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+	INIT_WORK(&gl->gl_delete, delete_work_func);
 
 	/* If this glock protects actual on-disk data or metadata blocks,
 	   create a VFS inode to manage the pages/buffers holding them. */
@@ -898,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 			gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
+	if (gl->gl_ops->go_callback)
+		gl->gl_ops->go_callback(gl);
 	trace_gfs2_demote_rq(gl);
 }
 
@@ -1344,14 +1371,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 			spin_unlock(&lru_lock);
 			spin_lock(&gl->gl_spin);
 			may_demote = demote_ok(gl);
-			spin_unlock(&gl->gl_spin);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
 			if (may_demote) {
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
 			}
 			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-				gfs2_glock_put(gl);
+				gfs2_glock_put_nolock(gl);
+			spin_unlock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 			spin_lock(&lru_lock);
 			continue;
 		}
@@ -1738,6 +1765,11 @@ int __init gfs2_glock_init(void)
 	glock_workqueue = create_workqueue("glock_workqueue");
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
+	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+	if (IS_ERR(gfs2_delete_workqueue)) {
+		destroy_workqueue(glock_workqueue);
+		return PTR_ERR(gfs2_delete_workqueue);
+	}
 
 	register_shrinker(&glock_shrinker);
 
@@ -1748,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
 	unregister_shrinker(&glock_shrinker);
 	destroy_workqueue(glock_workqueue);
+	destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index a602a28f6f08..c609894ec0d0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -143,6 +143,7 @@ struct lm_lockops {
 
 #define GLR_TRYFAILED		13
 
+extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
@@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   u64 number, const struct gfs2_glock_operations *glops,
 		   int create, struct gfs2_glock **glp);
+void gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 int gfs2_glock_put(struct gfs2_glock *gl);
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d5e4ab155ca0..6985eef06c39 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -323,6 +323,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
 
 	if (gl->gl_state != LM_ST_UNLOCKED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+		flush_workqueue(gfs2_delete_workqueue);
 		gfs2_meta_syncfs(sdp);
 		gfs2_log_shutdown(sdp);
 	}
@@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 	return 0;
 }
 
+/**
+ * iopen_go_callback - schedule the dcache entry for the inode to be deleted
+ * @gl: the glock
+ *
+ * gl_spin lock is held while calling this
+ */
+static void iopen_go_callback(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+
+	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+	    gl->gl_state == LM_ST_SHARED &&
+	    ip && test_bit(GIF_USER, &ip->i_flags)) {
+		gfs2_glock_hold(gl);
+		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+			gfs2_glock_put_nolock(gl);
+	}
+}
+
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
 };
@@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
+	.go_callback = iopen_go_callback,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225347fbff3c..61801ada36f0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -159,6 +159,7 @@ struct gfs2_glock_operations {
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+	void (*go_callback) (struct gfs2_glock *gl);
 	const int go_type;
 	const unsigned long go_min_hold_time;
 };
@@ -228,6 +229,7 @@ struct gfs2_glock {
 	struct list_head gl_ail_list;
 	atomic_t gl_ail_count;
 	struct delayed_work gl_work;
+	struct work_struct gl_delete;
 };
 
 #define GFS2_MIN_LVB_SIZE	32	/* Min size of LVB that gfs2 supports */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 552e321cee5e..f522bb017973 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -691,6 +691,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	struct gfs2_holder t_gh;
 	int error;
 
+	flush_workqueue(gfs2_delete_workqueue);
 	gfs2_quota_sync(sdp);
 	gfs2_statfs_sync(sdp);
 