diff options
author | Benjamin Marzinski <bmarzins@redhat.com> | 2009-07-23 19:52:34 -0400 |
---|---|---|
committer | Steven Whitehouse <swhiteho@redhat.com> | 2009-07-30 06:01:03 -0400 |
commit | b94a170e96dc416828af9d350ae2e34b70ae7347 (patch) | |
tree | 6000929d554359c7b520a49a63415b9fc18b48b9 /fs/gfs2/glock.c | |
parent | 6b94617024bd6810cde1d0d491202c30d5a38d91 (diff) |
GFS2: remove dcache entries for remote deleted inodes
When a file is deleted from a gfs2 filesystem on one node, a dcache
entry for it may still exist on other nodes in the cluster. If this
happens, gfs2 will be unable to free this file on disk. Because of this,
it's possible to have a gfs2 filesystem with no files on it and no free
space. With this patch, when a node receives a callback notifying it
that the file is being deleted on another node, it schedules a new
workqueue thread to remove the file's dcache entry.
Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r-- | fs/gfs2/glock.c | 43 |
1 file changed, 38 insertions, 5 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f041a89e1ab8..8b674b1f3a55 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int | |||
63 | static DECLARE_RWSEM(gfs2_umount_flush_sem); | 63 | static DECLARE_RWSEM(gfs2_umount_flush_sem); |
64 | static struct dentry *gfs2_root; | 64 | static struct dentry *gfs2_root; |
65 | static struct workqueue_struct *glock_workqueue; | 65 | static struct workqueue_struct *glock_workqueue; |
66 | struct workqueue_struct *gfs2_delete_workqueue; | ||
66 | static LIST_HEAD(lru_list); | 67 | static LIST_HEAD(lru_list); |
67 | static atomic_t lru_count = ATOMIC_INIT(0); | 68 | static atomic_t lru_count = ATOMIC_INIT(0); |
68 | static DEFINE_SPINLOCK(lru_lock); | 69 | static DEFINE_SPINLOCK(lru_lock); |
@@ -167,7 +168,7 @@ static void glock_free(struct gfs2_glock *gl) | |||
167 | * | 168 | * |
168 | */ | 169 | */ |
169 | 170 | ||
170 | static void gfs2_glock_hold(struct gfs2_glock *gl) | 171 | void gfs2_glock_hold(struct gfs2_glock *gl) |
171 | { | 172 | { |
172 | GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0); | 173 | GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0); |
173 | atomic_inc(&gl->gl_ref); | 174 | atomic_inc(&gl->gl_ref); |
@@ -222,7 +223,7 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | |||
222 | * to the glock, in addition to the one it is dropping. | 223 | * to the glock, in addition to the one it is dropping. |
223 | */ | 224 | */ |
224 | 225 | ||
225 | static void gfs2_glock_put_nolock(struct gfs2_glock *gl) | 226 | void gfs2_glock_put_nolock(struct gfs2_glock *gl) |
226 | { | 227 | { |
227 | if (atomic_dec_and_test(&gl->gl_ref)) | 228 | if (atomic_dec_and_test(&gl->gl_ref)) |
228 | GLOCK_BUG_ON(gl, 1); | 229 | GLOCK_BUG_ON(gl, 1); |
@@ -679,6 +680,29 @@ out_unlock: | |||
679 | goto out; | 680 | goto out; |
680 | } | 681 | } |
681 | 682 | ||
683 | static void delete_work_func(struct work_struct *work) | ||
684 | { | ||
685 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); | ||
686 | struct gfs2_sbd *sdp = gl->gl_sbd; | ||
687 | struct gfs2_inode *ip = NULL; | ||
688 | struct inode *inode; | ||
689 | u64 no_addr = 0; | ||
690 | |||
691 | spin_lock(&gl->gl_spin); | ||
692 | ip = (struct gfs2_inode *)gl->gl_object; | ||
693 | if (ip) | ||
694 | no_addr = ip->i_no_addr; | ||
695 | spin_unlock(&gl->gl_spin); | ||
696 | if (ip) { | ||
697 | inode = gfs2_ilookup(sdp->sd_vfs, no_addr); | ||
698 | if (inode) { | ||
699 | d_prune_aliases(inode); | ||
700 | iput(inode); | ||
701 | } | ||
702 | } | ||
703 | gfs2_glock_put(gl); | ||
704 | } | ||
705 | |||
682 | static void glock_work_func(struct work_struct *work) | 706 | static void glock_work_func(struct work_struct *work) |
683 | { | 707 | { |
684 | unsigned long delay = 0; | 708 | unsigned long delay = 0; |
@@ -757,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, | |||
757 | gl->gl_sbd = sdp; | 781 | gl->gl_sbd = sdp; |
758 | gl->gl_aspace = NULL; | 782 | gl->gl_aspace = NULL; |
759 | INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); | 783 | INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); |
784 | INIT_WORK(&gl->gl_delete, delete_work_func); | ||
760 | 785 | ||
761 | /* If this glock protects actual on-disk data or metadata blocks, | 786 | /* If this glock protects actual on-disk data or metadata blocks, |
762 | create a VFS inode to manage the pages/buffers holding them. */ | 787 | create a VFS inode to manage the pages/buffers holding them. */ |
@@ -898,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, | |||
898 | gl->gl_demote_state != state) { | 923 | gl->gl_demote_state != state) { |
899 | gl->gl_demote_state = LM_ST_UNLOCKED; | 924 | gl->gl_demote_state = LM_ST_UNLOCKED; |
900 | } | 925 | } |
926 | if (gl->gl_ops->go_callback) | ||
927 | gl->gl_ops->go_callback(gl); | ||
901 | trace_gfs2_demote_rq(gl); | 928 | trace_gfs2_demote_rq(gl); |
902 | } | 929 | } |
903 | 930 | ||
@@ -1344,14 +1371,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) | |||
1344 | spin_unlock(&lru_lock); | 1371 | spin_unlock(&lru_lock); |
1345 | spin_lock(&gl->gl_spin); | 1372 | spin_lock(&gl->gl_spin); |
1346 | may_demote = demote_ok(gl); | 1373 | may_demote = demote_ok(gl); |
1347 | spin_unlock(&gl->gl_spin); | ||
1348 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
1349 | if (may_demote) { | 1374 | if (may_demote) { |
1350 | handle_callback(gl, LM_ST_UNLOCKED, 0); | 1375 | handle_callback(gl, LM_ST_UNLOCKED, 0); |
1351 | nr--; | 1376 | nr--; |
1352 | } | 1377 | } |
1353 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 1378 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
1354 | gfs2_glock_put(gl); | 1379 | gfs2_glock_put_nolock(gl); |
1380 | spin_unlock(&gl->gl_spin); | ||
1381 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
1355 | spin_lock(&lru_lock); | 1382 | spin_lock(&lru_lock); |
1356 | continue; | 1383 | continue; |
1357 | } | 1384 | } |
@@ -1738,6 +1765,11 @@ int __init gfs2_glock_init(void) | |||
1738 | glock_workqueue = create_workqueue("glock_workqueue"); | 1765 | glock_workqueue = create_workqueue("glock_workqueue"); |
1739 | if (IS_ERR(glock_workqueue)) | 1766 | if (IS_ERR(glock_workqueue)) |
1740 | return PTR_ERR(glock_workqueue); | 1767 | return PTR_ERR(glock_workqueue); |
1768 | gfs2_delete_workqueue = create_workqueue("delete_workqueue"); | ||
1769 | if (IS_ERR(gfs2_delete_workqueue)) { | ||
1770 | destroy_workqueue(glock_workqueue); | ||
1771 | return PTR_ERR(gfs2_delete_workqueue); | ||
1772 | } | ||
1741 | 1773 | ||
1742 | register_shrinker(&glock_shrinker); | 1774 | register_shrinker(&glock_shrinker); |
1743 | 1775 | ||
@@ -1748,6 +1780,7 @@ void gfs2_glock_exit(void) | |||
1748 | { | 1780 | { |
1749 | unregister_shrinker(&glock_shrinker); | 1781 | unregister_shrinker(&glock_shrinker); |
1750 | destroy_workqueue(glock_workqueue); | 1782 | destroy_workqueue(glock_workqueue); |
1783 | destroy_workqueue(gfs2_delete_workqueue); | ||
1751 | } | 1784 | } |
1752 | 1785 | ||
1753 | static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) | 1786 | static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) |