Diffstat (limited to 'fs/gfs2/glock.c')
 fs/gfs2/glock.c | 43 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f041a89e1ab8..8b674b1f3a55 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
@@ -167,7 +168,7 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
 	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
 	atomic_inc(&gl->gl_ref);
@@ -222,7 +223,7 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
  * to the glock, in addition to the one it is dropping.
  */
 
-static void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
 {
 	if (atomic_dec_and_test(&gl->gl_ref))
 		GLOCK_BUG_ON(gl, 1);
@@ -679,6 +680,29 @@ out_unlock:
 	goto out;
 }
 
+static void delete_work_func(struct work_struct *work)
+{
+	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_inode *ip = NULL;
+	struct inode *inode;
+	u64 no_addr = 0;
+
+	spin_lock(&gl->gl_spin);
+	ip = (struct gfs2_inode *)gl->gl_object;
+	if (ip)
+		no_addr = ip->i_no_addr;
+	spin_unlock(&gl->gl_spin);
+	if (ip) {
+		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+		if (inode) {
+			d_prune_aliases(inode);
+			iput(inode);
+		}
+	}
+	gfs2_glock_put(gl);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
@@ -757,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+	INIT_WORK(&gl->gl_delete, delete_work_func);
 
 	/* If this glock protects actual on-disk data or metadata blocks,
 	   create a VFS inode to manage the pages/buffers holding them. */
@@ -898,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 			gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
+	if (gl->gl_ops->go_callback)
+		gl->gl_ops->go_callback(gl);
 	trace_gfs2_demote_rq(gl);
 }
 
@@ -1344,14 +1371,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 			spin_unlock(&lru_lock);
 			spin_lock(&gl->gl_spin);
 			may_demote = demote_ok(gl);
-			spin_unlock(&gl->gl_spin);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
 			if (may_demote) {
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
 			}
 			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-				gfs2_glock_put(gl);
+				gfs2_glock_put_nolock(gl);
+			spin_unlock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 			spin_lock(&lru_lock);
 			continue;
 		}
@@ -1738,6 +1765,11 @@ int __init gfs2_glock_init(void)
 	glock_workqueue = create_workqueue("glock_workqueue");
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
+	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+	if (IS_ERR(gfs2_delete_workqueue)) {
+		destroy_workqueue(glock_workqueue);
+		return PTR_ERR(gfs2_delete_workqueue);
+	}
 
 	register_shrinker(&glock_shrinker);
 
@@ -1748,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
 	unregister_shrinker(&glock_shrinker);
 	destroy_workqueue(glock_workqueue);
+	destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
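
The hunks above only add the go_callback hook, the per-glock gl_delete work item and the gfs2_delete_workqueue; the glops side of the patch, which is not part of this file, is what actually queues the work. As a rough sketch (the name iopen_go_callback and the exact state checks are assumptions, not shown in this diff), a callback attached to the iopen glock could react to a remote demote request by queueing gl_delete, so that delete_work_func() above prunes the local dentries and inode of a file deleted on another node:

/* Sketch only: a plausible go_callback consumer of gfs2_delete_workqueue.
 * The function name and the state checks are assumptions; they are not
 * part of the fs/gfs2/glock.c diff shown above. */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;

	/* A remote node takes the iopen glock exclusively while deleting
	 * the inode, so a demote request against our shared holder is the
	 * cue to drop locally cached dentries and the inode. */
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gfs2_glock_hold(gl);
		/* The queued work owns the reference taken above; drop it
		 * here only if the work was already pending. */
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
	}
}

Note that delete_work_func() drops exactly one reference via gfs2_glock_put(), so whoever queues gl_delete is expected to take a glock reference first, as sketched here; this is also why gfs2_glock_hold() and gfs2_glock_put_nolock() are un-static'd by this patch.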