path: root/fs/gfs2
author    Steven Whitehouse <swhiteho@redhat.com>    2010-01-08 11:14:29 -0500
committer Steven Whitehouse <swhiteho@redhat.com>    2010-03-01 09:07:53 -0500
commit    c1184f8ab7ea26681f3cab18284a870aad678b0f (patch)
tree      fec6b61ba9b1aa16aab0d1dfabf63a21bbe47ac7 /fs/gfs2
parent    009d851837ab26cab18adda6169a813f70b0b21b (diff)
GFS2: Remove loopy umount code
As a consequence of the previous patch, we can now remove the loop which used to be required due to the circular dependency between the inodes and glocks. Instead we can just invalidate the inodes, and then clear up any glocks which are left. Also we no longer need the rwsem, since there is no longer any danger of the inode invalidation calling back into the glock code (and from there back into the inode code).

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
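For readers skimming the diff, here is a condensed sketch of the unmount ordering this patch establishes, paraphrased from the super.c and glock.c hunks below; the wrapper name is invented for illustration and the surrounding journal/rgrp teardown is omitted:

        /* Illustrative sketch only -- see gfs2_put_super() in fs/gfs2/super.c
         * and gfs2_gl_hash_clear() in fs/gfs2/glock.c for the real code. */
        static void gfs2_umount_teardown_sketch(struct gfs2_sbd *sdp)
        {
                /* 1. Drop the VFS inodes first. After the previous patch this
                 *    no longer calls back into the glock code, so the old
                 *    gfs2_umount_flush_sem is unnecessary. */
                invalidate_inodes(sdp->sd_vfs);

                /* 2. Clear the remaining glocks in a single pass over the hash
                 *    table -- no retry loop or stall_secs watchdog needed. */
                gfs2_gl_hash_clear(sdp);

                /* 3. Unmount the locking protocol. */
                gfs2_lm_unmount(sdp);
        }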
Diffstat (limited to 'fs/gfs2')
-rw-r--r--   fs/gfs2/glock.c        33
-rw-r--r--   fs/gfs2/incore.h        1
-rw-r--r--   fs/gfs2/ops_fstype.c    4
-rw-r--r--   fs/gfs2/super.c         1
-rw-r--r--   fs/gfs2/sys.c           2
5 files changed, 4 insertions(+), 37 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index dfb10a4d467e..4773f9098a41 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/module.h>
-#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
@@ -714,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
                 finish_xmote(gl, gl->gl_reply);
                 drop_ref = 1;
         }
-        down_read(&gfs2_umount_flush_sem);
         spin_lock(&gl->gl_spin);
         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
             gl->gl_state != LM_ST_UNLOCKED &&
@@ -727,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
         }
         run_queue(gl, 0);
         spin_unlock(&gl->gl_spin);
-        up_read(&gfs2_umount_flush_sem);
         if (!delay ||
             queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
@@ -1512,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
-        unsigned long t;
         unsigned int x;
-        int cont;
 
-        t = jiffies;
-
-        for (;;) {
-                cont = 0;
-                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                        if (examine_bucket(clear_glock, sdp, x))
-                                cont = 1;
-                }
-
-                if (!cont)
-                        break;
-
-                if (time_after_eq(jiffies,
-                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
-                        fs_warn(sdp, "Unmount seems to be stalled. "
-                                     "Dumping lock state...\n");
-                        gfs2_dump_lockstate(sdp);
-                        t = jiffies;
-                }
-
-                down_write(&gfs2_umount_flush_sem);
-                invalidate_inodes(sdp->sd_vfs);
-                up_write(&gfs2_umount_flush_sem);
-                msleep(10);
-        }
+        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+                examine_bucket(clear_glock, sdp, x);
         flush_workqueue(glock_workqueue);
         wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
         gfs2_dump_lockstate(sdp);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 1de7e1b7ce83..b8025e51cabf 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -451,7 +451,6 @@ struct gfs2_tune {
         unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
         unsigned int gt_new_files_jdata;
         unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
-        unsigned int gt_stall_secs; /* Detects trouble! */
         unsigned int gt_complain_secs;
         unsigned int gt_statfs_quantum;
         unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a86ed6381566..a054b526dc08 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
         gt->gt_quota_scale_den = 1;
         gt->gt_new_files_jdata = 0;
         gt->gt_max_readahead = 1 << 18;
-        gt->gt_stall_secs = 600;
         gt->gt_complain_secs = 10;
 }
 
@@ -1241,10 +1240,9 @@ fail_sb:
 fail_locking:
         init_locking(sdp, &mount_gh, UNDO);
 fail_lm:
+        invalidate_inodes(sb);
         gfs2_gl_hash_clear(sdp);
         gfs2_lm_unmount(sdp);
-        while (invalidate_inodes(sb))
-                yield();
 fail_sys:
         gfs2_sys_fs_del(sdp);
 fail:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ad7bc2d25ac2..e5e22629da67 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -859,6 +859,7 @@ restart:
         gfs2_clear_rgrpd(sdp);
         gfs2_jindex_free(sdp);
         /* Take apart glock structures and buffer lists */
+        invalidate_inodes(sdp->sd_vfs);
         gfs2_gl_hash_clear(sdp);
         /* Unmount the locking protocol */
         gfs2_lm_unmount(sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0dc34621f6a6..a0db1c94317d 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
 TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(stall_secs, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
@@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = {
         &tune_attr_complain_secs.attr,
         &tune_attr_statfs_slow.attr,
         &tune_attr_quota_simul_sync.attr,
-        &tune_attr_stall_secs.attr,
         &tune_attr_statfs_quantum.attr,
         &tune_attr_quota_scale.attr,
         &tune_attr_new_files_jdata.attr,