diff options
author | Steven Whitehouse <swhiteho@redhat.com> | 2008-11-20 08:39:47 -0500 |
---|---|---|
committer | Steven Whitehouse <swhiteho@redhat.com> | 2009-01-05 02:39:09 -0500 |
commit | 97cc1025b1a91c52e84f12478dcf0f853abc6564 (patch) | |
tree | cd71419049aeb13eea7012889d0ee0c715394e4d | |
parent | 9ac1b4d9b6f885ccd7d8f56bceb609003a920ff7 (diff) |
GFS2: Kill two daemons with one patch
This patch removes the two daemons, gfs2_scand and gfs2_glockd
and replaces them with a shrinker which is called from the VM.
The net result is that GFS2 responds better when there is memory
pressure, since it shrinks the glock cache at the same rate
as the VFS shrinks the dcache and icache. There are no longer
any time-based criteria for shrinking glocks; they are kept
until such time as the VM asks for more memory and then we
demote just as many glocks as required.
There are potential future changes to this code, including the
possibility of sorting the glocks which are to be written back
into inode number order, to get a better I/O ordering. It would
be very useful to have an elevator-based workqueue implementation
for this, as that would automatically deal with the read I/O cases
at the same time.
This patch is my answer to Andrew Morton's remark, made during
the initial review of GFS2, asking why GFS2 needs so many kernel
threads, the answer being that it doesn't :-) This patch is a
net loss of about 200 lines of code.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
-rw-r--r-- | fs/gfs2/Makefile | 2 | ||||
-rw-r--r-- | fs/gfs2/daemon.c | 61 | ||||
-rw-r--r-- | fs/gfs2/daemon.h | 15 | ||||
-rw-r--r-- | fs/gfs2/glock.c | 248 | ||||
-rw-r--r-- | fs/gfs2/glock.h | 1 | ||||
-rw-r--r-- | fs/gfs2/glops.c | 32 | ||||
-rw-r--r-- | fs/gfs2/incore.h | 16 | ||||
-rw-r--r-- | fs/gfs2/inode.c | 1 | ||||
-rw-r--r-- | fs/gfs2/main.c | 2 | ||||
-rw-r--r-- | fs/gfs2/mount.c | 21 | ||||
-rw-r--r-- | fs/gfs2/ops_fstype.c | 25 | ||||
-rw-r--r-- | fs/gfs2/ops_super.c | 5 | ||||
-rw-r--r-- | fs/gfs2/sys.c | 42 |
13 files changed, 130 insertions, 341 deletions
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile index ec65851ec80a..c1b4ec6a9650 100644 --- a/fs/gfs2/Makefile +++ b/fs/gfs2/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-$(CONFIG_GFS2_FS) += gfs2.o | 1 | obj-$(CONFIG_GFS2_FS) += gfs2.o |
2 | gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \ | 2 | gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \ |
3 | glops.o inode.o log.o lops.o locking.o main.o meta_io.o \ | 3 | glops.o inode.o log.o lops.o locking.o main.o meta_io.o \ |
4 | mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \ | 4 | mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \ |
5 | ops_fstype.o ops_inode.o ops_super.o quota.o \ | 5 | ops_fstype.o ops_inode.o ops_super.o quota.o \ |
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c deleted file mode 100644 index 2662df0d5b93..000000000000 --- a/fs/gfs2/daemon.c +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | ||
3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This copyrighted material is made available to anyone wishing to use, | ||
6 | * modify, copy, or redistribute it subject to the terms and conditions | ||
7 | * of the GNU General Public License version 2. | ||
8 | */ | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/completion.h> | ||
14 | #include <linux/buffer_head.h> | ||
15 | #include <linux/kthread.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/gfs2_ondisk.h> | ||
18 | #include <linux/lm_interface.h> | ||
19 | #include <linux/freezer.h> | ||
20 | |||
21 | #include "gfs2.h" | ||
22 | #include "incore.h" | ||
23 | #include "daemon.h" | ||
24 | #include "glock.h" | ||
25 | #include "log.h" | ||
26 | #include "recovery.h" | ||
27 | #include "super.h" | ||
28 | #include "util.h" | ||
29 | |||
30 | /* This uses schedule_timeout() instead of msleep() because it's good for | ||
31 | the daemons to wake up more often than the timeout when unmounting so | ||
32 | the user's unmount doesn't sit there forever. | ||
33 | |||
34 | The kthread functions used to start these daemons block and flush signals. */ | ||
35 | |||
36 | /** | ||
37 | * gfs2_glockd - Reclaim unused glock structures | ||
38 | * @sdp: Pointer to GFS2 superblock | ||
39 | * | ||
40 | * One or more of these daemons run, reclaiming glocks on sd_reclaim_list. | ||
41 | * Number of daemons can be set by user, with num_glockd mount option. | ||
42 | */ | ||
43 | |||
44 | int gfs2_glockd(void *data) | ||
45 | { | ||
46 | struct gfs2_sbd *sdp = data; | ||
47 | |||
48 | while (!kthread_should_stop()) { | ||
49 | while (atomic_read(&sdp->sd_reclaim_count)) | ||
50 | gfs2_reclaim_glock(sdp); | ||
51 | |||
52 | wait_event_interruptible(sdp->sd_reclaim_wq, | ||
53 | (atomic_read(&sdp->sd_reclaim_count) || | ||
54 | kthread_should_stop())); | ||
55 | if (freezing(current)) | ||
56 | refrigerator(); | ||
57 | } | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h deleted file mode 100644 index 5258954a234f..000000000000 --- a/fs/gfs2/daemon.h +++ /dev/null | |||
@@ -1,15 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | ||
3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This copyrighted material is made available to anyone wishing to use, | ||
6 | * modify, copy, or redistribute it subject to the terms and conditions | ||
7 | * of the GNU General Public License version 2. | ||
8 | */ | ||
9 | |||
10 | #ifndef __DAEMON_DOT_H__ | ||
11 | #define __DAEMON_DOT_H__ | ||
12 | |||
13 | int gfs2_glockd(void *data); | ||
14 | |||
15 | #endif /* __DAEMON_DOT_H__ */ | ||
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 4ddf3bd55dda..07ffc8123d74 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -62,9 +62,10 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int | |||
62 | 62 | ||
63 | static DECLARE_RWSEM(gfs2_umount_flush_sem); | 63 | static DECLARE_RWSEM(gfs2_umount_flush_sem); |
64 | static struct dentry *gfs2_root; | 64 | static struct dentry *gfs2_root; |
65 | static struct task_struct *scand_process; | ||
66 | static unsigned int scand_secs = 5; | ||
67 | static struct workqueue_struct *glock_workqueue; | 65 | static struct workqueue_struct *glock_workqueue; |
66 | static LIST_HEAD(lru_list); | ||
67 | static atomic_t lru_count = ATOMIC_INIT(0); | ||
68 | static spinlock_t lru_lock = SPIN_LOCK_UNLOCKED; | ||
68 | 69 | ||
69 | #define GFS2_GL_HASH_SHIFT 15 | 70 | #define GFS2_GL_HASH_SHIFT 15 |
70 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) | 71 | #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) |
@@ -175,6 +176,22 @@ static void gfs2_glock_hold(struct gfs2_glock *gl) | |||
175 | } | 176 | } |
176 | 177 | ||
177 | /** | 178 | /** |
179 | * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list | ||
180 | * @gl: the glock | ||
181 | * | ||
182 | */ | ||
183 | |||
184 | static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | ||
185 | { | ||
186 | spin_lock(&lru_lock); | ||
187 | if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { | ||
188 | list_add_tail(&gl->gl_lru, &lru_list); | ||
189 | atomic_inc(&lru_count); | ||
190 | } | ||
191 | spin_unlock(&lru_lock); | ||
192 | } | ||
193 | |||
194 | /** | ||
178 | * gfs2_glock_put() - Decrement reference count on glock | 195 | * gfs2_glock_put() - Decrement reference count on glock |
179 | * @gl: The glock to put | 196 | * @gl: The glock to put |
180 | * | 197 | * |
@@ -188,14 +205,23 @@ int gfs2_glock_put(struct gfs2_glock *gl) | |||
188 | if (atomic_dec_and_test(&gl->gl_ref)) { | 205 | if (atomic_dec_and_test(&gl->gl_ref)) { |
189 | hlist_del(&gl->gl_list); | 206 | hlist_del(&gl->gl_list); |
190 | write_unlock(gl_lock_addr(gl->gl_hash)); | 207 | write_unlock(gl_lock_addr(gl->gl_hash)); |
208 | spin_lock(&lru_lock); | ||
209 | if (!list_empty(&gl->gl_lru)) { | ||
210 | list_del_init(&gl->gl_lru); | ||
211 | atomic_dec(&lru_count); | ||
212 | } | ||
213 | spin_unlock(&lru_lock); | ||
191 | GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED); | 214 | GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED); |
192 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim)); | 215 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru)); |
193 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); | 216 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); |
194 | glock_free(gl); | 217 | glock_free(gl); |
195 | rv = 1; | 218 | rv = 1; |
196 | goto out; | 219 | goto out; |
197 | } | 220 | } |
198 | write_unlock(gl_lock_addr(gl->gl_hash)); | 221 | write_unlock(gl_lock_addr(gl->gl_hash)); |
222 | /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */ | ||
223 | if (atomic_read(&gl->gl_ref) == 2) | ||
224 | gfs2_glock_schedule_for_reclaim(gl); | ||
199 | out: | 225 | out: |
200 | return rv; | 226 | return rv; |
201 | } | 227 | } |
@@ -837,7 +863,7 @@ static void wait_on_demote(struct gfs2_glock *gl) | |||
837 | */ | 863 | */ |
838 | 864 | ||
839 | static void handle_callback(struct gfs2_glock *gl, unsigned int state, | 865 | static void handle_callback(struct gfs2_glock *gl, unsigned int state, |
840 | int remote, unsigned long delay) | 866 | unsigned long delay) |
841 | { | 867 | { |
842 | int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; | 868 | int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; |
843 | 869 | ||
@@ -845,9 +871,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, | |||
845 | if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { | 871 | if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { |
846 | gl->gl_demote_state = state; | 872 | gl->gl_demote_state = state; |
847 | gl->gl_demote_time = jiffies; | 873 | gl->gl_demote_time = jiffies; |
848 | if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && | ||
849 | gl->gl_object) | ||
850 | gfs2_glock_schedule_for_reclaim(gl); | ||
851 | } else if (gl->gl_demote_state != LM_ST_UNLOCKED && | 874 | } else if (gl->gl_demote_state != LM_ST_UNLOCKED && |
852 | gl->gl_demote_state != state) { | 875 | gl->gl_demote_state != state) { |
853 | gl->gl_demote_state = LM_ST_UNLOCKED; | 876 | gl->gl_demote_state = LM_ST_UNLOCKED; |
@@ -1017,7 +1040,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh) | |||
1017 | 1040 | ||
1018 | spin_lock(&gl->gl_spin); | 1041 | spin_lock(&gl->gl_spin); |
1019 | if (gh->gh_flags & GL_NOCACHE) | 1042 | if (gh->gh_flags & GL_NOCACHE) |
1020 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); | 1043 | handle_callback(gl, LM_ST_UNLOCKED, 0); |
1021 | 1044 | ||
1022 | list_del_init(&gh->gh_list); | 1045 | list_del_init(&gh->gh_list); |
1023 | if (find_first_holder(gl) == NULL) { | 1046 | if (find_first_holder(gl) == NULL) { |
@@ -1288,7 +1311,7 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, | |||
1288 | delay = gl->gl_ops->go_min_hold_time; | 1311 | delay = gl->gl_ops->go_min_hold_time; |
1289 | 1312 | ||
1290 | spin_lock(&gl->gl_spin); | 1313 | spin_lock(&gl->gl_spin); |
1291 | handle_callback(gl, state, 1, delay); | 1314 | handle_callback(gl, state, delay); |
1292 | spin_unlock(&gl->gl_spin); | 1315 | spin_unlock(&gl->gl_spin); |
1293 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) | 1316 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) |
1294 | gfs2_glock_put(gl); | 1317 | gfs2_glock_put(gl); |
@@ -1357,80 +1380,83 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | |||
1357 | * Returns: 1 if it's ok | 1380 | * Returns: 1 if it's ok |
1358 | */ | 1381 | */ |
1359 | 1382 | ||
1360 | static int demote_ok(struct gfs2_glock *gl) | 1383 | static int demote_ok(const struct gfs2_glock *gl) |
1361 | { | 1384 | { |
1362 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 1385 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
1363 | int demote = 1; | ||
1364 | |||
1365 | if (test_bit(GLF_STICKY, &gl->gl_flags)) | ||
1366 | demote = 0; | ||
1367 | else if (glops->go_demote_ok) | ||
1368 | demote = glops->go_demote_ok(gl); | ||
1369 | |||
1370 | return demote; | ||
1371 | } | ||
1372 | 1386 | ||
1373 | /** | 1387 | if (gl->gl_state == LM_ST_UNLOCKED) |
1374 | * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list | 1388 | return 0; |
1375 | * @gl: the glock | 1389 | if (!list_empty(&gl->gl_holders)) |
1376 | * | 1390 | return 0; |
1377 | */ | 1391 | if (glops->go_demote_ok) |
1378 | 1392 | return glops->go_demote_ok(gl); | |
1379 | void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | 1393 | return 1; |
1380 | { | ||
1381 | struct gfs2_sbd *sdp = gl->gl_sbd; | ||
1382 | |||
1383 | spin_lock(&sdp->sd_reclaim_lock); | ||
1384 | if (list_empty(&gl->gl_reclaim)) { | ||
1385 | gfs2_glock_hold(gl); | ||
1386 | list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); | ||
1387 | atomic_inc(&sdp->sd_reclaim_count); | ||
1388 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1389 | wake_up(&sdp->sd_reclaim_wq); | ||
1390 | } else | ||
1391 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1392 | } | 1394 | } |
1393 | 1395 | ||
1394 | /** | ||
1395 | * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list | ||
1396 | * @sdp: the filesystem | ||
1397 | * | ||
1398 | * Called from gfs2_glockd() glock reclaim daemon, or when promoting a | ||
1399 | * different glock and we notice that there are a lot of glocks in the | ||
1400 | * reclaim list. | ||
1401 | * | ||
1402 | */ | ||
1403 | 1396 | ||
1404 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | 1397 | static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) |
1405 | { | 1398 | { |
1406 | struct gfs2_glock *gl; | 1399 | struct gfs2_glock *gl; |
1407 | int done_callback = 0; | 1400 | int may_demote; |
1401 | int nr_skipped = 0; | ||
1402 | int got_ref = 0; | ||
1403 | LIST_HEAD(skipped); | ||
1408 | 1404 | ||
1409 | spin_lock(&sdp->sd_reclaim_lock); | 1405 | if (nr == 0) |
1410 | if (list_empty(&sdp->sd_reclaim_list)) { | 1406 | goto out; |
1411 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1412 | return; | ||
1413 | } | ||
1414 | gl = list_entry(sdp->sd_reclaim_list.next, | ||
1415 | struct gfs2_glock, gl_reclaim); | ||
1416 | list_del_init(&gl->gl_reclaim); | ||
1417 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1418 | 1407 | ||
1419 | atomic_dec(&sdp->sd_reclaim_count); | 1408 | if (!(gfp_mask & __GFP_FS)) |
1420 | atomic_inc(&sdp->sd_reclaimed); | 1409 | return -1; |
1421 | 1410 | ||
1422 | spin_lock(&gl->gl_spin); | 1411 | spin_lock(&lru_lock); |
1423 | if (find_first_holder(gl) == NULL && | 1412 | while(nr && !list_empty(&lru_list)) { |
1424 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { | 1413 | gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); |
1425 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); | 1414 | list_del_init(&gl->gl_lru); |
1426 | done_callback = 1; | 1415 | atomic_dec(&lru_count); |
1416 | |||
1417 | /* Test for being demotable */ | ||
1418 | if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | ||
1419 | gfs2_glock_hold(gl); | ||
1420 | got_ref = 1; | ||
1421 | spin_unlock(&lru_lock); | ||
1422 | spin_lock(&gl->gl_spin); | ||
1423 | may_demote = demote_ok(gl); | ||
1424 | spin_unlock(&gl->gl_spin); | ||
1425 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
1426 | if (may_demote) { | ||
1427 | handle_callback(gl, LM_ST_UNLOCKED, 0); | ||
1428 | nr--; | ||
1429 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | ||
1430 | gfs2_glock_put(gl); | ||
1431 | } | ||
1432 | spin_lock(&lru_lock); | ||
1433 | if (may_demote) | ||
1434 | continue; | ||
1435 | } | ||
1436 | if (list_empty(&gl->gl_lru) && | ||
1437 | (atomic_read(&gl->gl_ref) <= (2 + got_ref))) { | ||
1438 | nr_skipped++; | ||
1439 | list_add(&gl->gl_lru, &skipped); | ||
1440 | } | ||
1441 | if (got_ref) { | ||
1442 | spin_unlock(&lru_lock); | ||
1443 | gfs2_glock_put(gl); | ||
1444 | spin_lock(&lru_lock); | ||
1445 | got_ref = 0; | ||
1446 | } | ||
1427 | } | 1447 | } |
1428 | spin_unlock(&gl->gl_spin); | 1448 | list_splice(&skipped, &lru_list); |
1429 | if (!done_callback || | 1449 | atomic_add(nr_skipped, &lru_count); |
1430 | queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 1450 | spin_unlock(&lru_lock); |
1431 | gfs2_glock_put(gl); | 1451 | out: |
1452 | return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure; | ||
1432 | } | 1453 | } |
1433 | 1454 | ||
1455 | static struct shrinker glock_shrinker = { | ||
1456 | .shrink = gfs2_shrink_glock_memory, | ||
1457 | .seeks = DEFAULT_SEEKS, | ||
1458 | }; | ||
1459 | |||
1434 | /** | 1460 | /** |
1435 | * examine_bucket - Call a function for glock in a hash bucket | 1461 | * examine_bucket - Call a function for glock in a hash bucket |
1436 | * @examiner: the function | 1462 | * @examiner: the function |
@@ -1476,26 +1502,6 @@ out: | |||
1476 | } | 1502 | } |
1477 | 1503 | ||
1478 | /** | 1504 | /** |
1479 | * scan_glock - look at a glock and see if we can reclaim it | ||
1480 | * @gl: the glock to look at | ||
1481 | * | ||
1482 | */ | ||
1483 | |||
1484 | static void scan_glock(struct gfs2_glock *gl) | ||
1485 | { | ||
1486 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) | ||
1487 | return; | ||
1488 | if (test_bit(GLF_LOCK, &gl->gl_flags)) | ||
1489 | return; | ||
1490 | |||
1491 | spin_lock(&gl->gl_spin); | ||
1492 | if (find_first_holder(gl) == NULL && | ||
1493 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) | ||
1494 | gfs2_glock_schedule_for_reclaim(gl); | ||
1495 | spin_unlock(&gl->gl_spin); | ||
1496 | } | ||
1497 | |||
1498 | /** | ||
1499 | * clear_glock - look at a glock and see if we can free it from glock cache | 1505 | * clear_glock - look at a glock and see if we can free it from glock cache |
1500 | * @gl: the glock to look at | 1506 | * @gl: the glock to look at |
1501 | * | 1507 | * |
@@ -1503,23 +1509,16 @@ static void scan_glock(struct gfs2_glock *gl) | |||
1503 | 1509 | ||
1504 | static void clear_glock(struct gfs2_glock *gl) | 1510 | static void clear_glock(struct gfs2_glock *gl) |
1505 | { | 1511 | { |
1506 | struct gfs2_sbd *sdp = gl->gl_sbd; | 1512 | spin_lock(&lru_lock); |
1507 | int released; | 1513 | if (!list_empty(&gl->gl_lru)) { |
1508 | 1514 | list_del_init(&gl->gl_lru); | |
1509 | spin_lock(&sdp->sd_reclaim_lock); | 1515 | atomic_dec(&lru_count); |
1510 | if (!list_empty(&gl->gl_reclaim)) { | ||
1511 | list_del_init(&gl->gl_reclaim); | ||
1512 | atomic_dec(&sdp->sd_reclaim_count); | ||
1513 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1514 | released = gfs2_glock_put(gl); | ||
1515 | gfs2_assert(sdp, !released); | ||
1516 | } else { | ||
1517 | spin_unlock(&sdp->sd_reclaim_lock); | ||
1518 | } | 1516 | } |
1517 | spin_unlock(&lru_lock); | ||
1519 | 1518 | ||
1520 | spin_lock(&gl->gl_spin); | 1519 | spin_lock(&gl->gl_spin); |
1521 | if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) | 1520 | if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) |
1522 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); | 1521 | handle_callback(gl, LM_ST_UNLOCKED, 0); |
1523 | spin_unlock(&gl->gl_spin); | 1522 | spin_unlock(&gl->gl_spin); |
1524 | gfs2_glock_hold(gl); | 1523 | gfs2_glock_hold(gl); |
1525 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 1524 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
@@ -1656,8 +1655,6 @@ static const char *gflags2str(char *buf, const unsigned long *gflags) | |||
1656 | char *p = buf; | 1655 | char *p = buf; |
1657 | if (test_bit(GLF_LOCK, gflags)) | 1656 | if (test_bit(GLF_LOCK, gflags)) |
1658 | *p++ = 'l'; | 1657 | *p++ = 'l'; |
1659 | if (test_bit(GLF_STICKY, gflags)) | ||
1660 | *p++ = 's'; | ||
1661 | if (test_bit(GLF_DEMOTE, gflags)) | 1658 | if (test_bit(GLF_DEMOTE, gflags)) |
1662 | *p++ = 'D'; | 1659 | *p++ = 'D'; |
1663 | if (test_bit(GLF_PENDING_DEMOTE, gflags)) | 1660 | if (test_bit(GLF_PENDING_DEMOTE, gflags)) |
@@ -1776,34 +1773,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp) | |||
1776 | return error; | 1773 | return error; |
1777 | } | 1774 | } |
1778 | 1775 | ||
1779 | /** | ||
1780 | * gfs2_scand - Look for cached glocks and inodes to toss from memory | ||
1781 | * @sdp: Pointer to GFS2 superblock | ||
1782 | * | ||
1783 | * One of these daemons runs, finding candidates to add to sd_reclaim_list. | ||
1784 | * See gfs2_glockd() | ||
1785 | */ | ||
1786 | |||
1787 | static int gfs2_scand(void *data) | ||
1788 | { | ||
1789 | unsigned x; | ||
1790 | unsigned delay; | ||
1791 | |||
1792 | while (!kthread_should_stop()) { | ||
1793 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) | ||
1794 | examine_bucket(scan_glock, NULL, x); | ||
1795 | if (freezing(current)) | ||
1796 | refrigerator(); | ||
1797 | delay = scand_secs; | ||
1798 | if (delay < 1) | ||
1799 | delay = 1; | ||
1800 | schedule_timeout_interruptible(delay * HZ); | ||
1801 | } | ||
1802 | |||
1803 | return 0; | ||
1804 | } | ||
1805 | |||
1806 | |||
1807 | 1776 | ||
1808 | int __init gfs2_glock_init(void) | 1777 | int __init gfs2_glock_init(void) |
1809 | { | 1778 | { |
@@ -1817,28 +1786,21 @@ int __init gfs2_glock_init(void) | |||
1817 | } | 1786 | } |
1818 | #endif | 1787 | #endif |
1819 | 1788 | ||
1820 | scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand"); | ||
1821 | if (IS_ERR(scand_process)) | ||
1822 | return PTR_ERR(scand_process); | ||
1823 | |||
1824 | glock_workqueue = create_workqueue("glock_workqueue"); | 1789 | glock_workqueue = create_workqueue("glock_workqueue"); |
1825 | if (IS_ERR(glock_workqueue)) { | 1790 | if (IS_ERR(glock_workqueue)) |
1826 | kthread_stop(scand_process); | ||
1827 | return PTR_ERR(glock_workqueue); | 1791 | return PTR_ERR(glock_workqueue); |
1828 | } | 1792 | |
1793 | register_shrinker(&glock_shrinker); | ||
1829 | 1794 | ||
1830 | return 0; | 1795 | return 0; |
1831 | } | 1796 | } |
1832 | 1797 | ||
1833 | void gfs2_glock_exit(void) | 1798 | void gfs2_glock_exit(void) |
1834 | { | 1799 | { |
1800 | unregister_shrinker(&glock_shrinker); | ||
1835 | destroy_workqueue(glock_workqueue); | 1801 | destroy_workqueue(glock_workqueue); |
1836 | kthread_stop(scand_process); | ||
1837 | } | 1802 | } |
1838 | 1803 | ||
1839 | module_param(scand_secs, uint, S_IRUGO|S_IWUSR); | ||
1840 | MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); | ||
1841 | |||
1842 | static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) | 1804 | static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) |
1843 | { | 1805 | { |
1844 | struct gfs2_glock *gl; | 1806 | struct gfs2_glock *gl; |
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 13a64ee6523b..543ec7ecfbda 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h | |||
@@ -129,7 +129,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl); | |||
129 | void gfs2_lvb_unhold(struct gfs2_glock *gl); | 129 | void gfs2_lvb_unhold(struct gfs2_glock *gl); |
130 | 130 | ||
131 | void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); | 131 | void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); |
132 | void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); | ||
133 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp); | 132 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp); |
134 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); | 133 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); |
135 | void gfs2_glock_finish_truncate(struct gfs2_inode *ip); | 134 | void gfs2_glock_finish_truncate(struct gfs2_inode *ip); |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 8ebff8ebae20..8522d3aa64fc 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -201,19 +201,12 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) | |||
201 | * Returns: 1 if it's ok | 201 | * Returns: 1 if it's ok |
202 | */ | 202 | */ |
203 | 203 | ||
204 | static int inode_go_demote_ok(struct gfs2_glock *gl) | 204 | static int inode_go_demote_ok(const struct gfs2_glock *gl) |
205 | { | 205 | { |
206 | struct gfs2_sbd *sdp = gl->gl_sbd; | 206 | struct gfs2_sbd *sdp = gl->gl_sbd; |
207 | int demote = 0; | 207 | if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) |
208 | 208 | return 0; | |
209 | if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages) | 209 | return 1; |
210 | demote = 1; | ||
211 | else if (!sdp->sd_args.ar_localcaching && | ||
212 | time_after_eq(jiffies, gl->gl_stamp + | ||
213 | gfs2_tune_get(sdp, gt_demote_secs) * HZ)) | ||
214 | demote = 1; | ||
215 | |||
216 | return demote; | ||
217 | } | 210 | } |
218 | 211 | ||
219 | /** | 212 | /** |
@@ -284,7 +277,7 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) | |||
284 | * Returns: 1 if it's ok | 277 | * Returns: 1 if it's ok |
285 | */ | 278 | */ |
286 | 279 | ||
287 | static int rgrp_go_demote_ok(struct gfs2_glock *gl) | 280 | static int rgrp_go_demote_ok(const struct gfs2_glock *gl) |
288 | { | 281 | { |
289 | return !gl->gl_aspace->i_mapping->nrpages; | 282 | return !gl->gl_aspace->i_mapping->nrpages; |
290 | } | 283 | } |
@@ -386,13 +379,25 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) | |||
386 | } | 379 | } |
387 | 380 | ||
388 | /** | 381 | /** |
382 | * trans_go_demote_ok | ||
383 | * @gl: the glock | ||
384 | * | ||
385 | * Always returns 0 | ||
386 | */ | ||
387 | |||
388 | static int trans_go_demote_ok(const struct gfs2_glock *gl) | ||
389 | { | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | /** | ||
389 | * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock | 394 | * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock |
390 | * @gl: the glock | 395 | * @gl: the glock |
391 | * | 396 | * |
392 | * Returns: 1 if it's ok | 397 | * Returns: 1 if it's ok |
393 | */ | 398 | */ |
394 | 399 | ||
395 | static int quota_go_demote_ok(struct gfs2_glock *gl) | 400 | static int quota_go_demote_ok(const struct gfs2_glock *gl) |
396 | { | 401 | { |
397 | return !atomic_read(&gl->gl_lvb_count); | 402 | return !atomic_read(&gl->gl_lvb_count); |
398 | } | 403 | } |
@@ -426,6 +431,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = { | |||
426 | const struct gfs2_glock_operations gfs2_trans_glops = { | 431 | const struct gfs2_glock_operations gfs2_trans_glops = { |
427 | .go_xmote_th = trans_go_sync, | 432 | .go_xmote_th = trans_go_sync, |
428 | .go_xmote_bh = trans_go_xmote_bh, | 433 | .go_xmote_bh = trans_go_xmote_bh, |
434 | .go_demote_ok = trans_go_demote_ok, | ||
429 | .go_type = LM_TYPE_NONDISK, | 435 | .go_type = LM_TYPE_NONDISK, |
430 | }; | 436 | }; |
431 | 437 | ||
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index dd7d0f8f3575..608849d00021 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -125,7 +125,7 @@ struct gfs2_glock_operations { | |||
125 | void (*go_xmote_th) (struct gfs2_glock *gl); | 125 | void (*go_xmote_th) (struct gfs2_glock *gl); |
126 | int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); | 126 | int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); |
127 | void (*go_inval) (struct gfs2_glock *gl, int flags); | 127 | void (*go_inval) (struct gfs2_glock *gl, int flags); |
128 | int (*go_demote_ok) (struct gfs2_glock *gl); | 128 | int (*go_demote_ok) (const struct gfs2_glock *gl); |
129 | int (*go_lock) (struct gfs2_holder *gh); | 129 | int (*go_lock) (struct gfs2_holder *gh); |
130 | void (*go_unlock) (struct gfs2_holder *gh); | 130 | void (*go_unlock) (struct gfs2_holder *gh); |
131 | int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); | 131 | int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); |
@@ -155,7 +155,6 @@ struct gfs2_holder { | |||
155 | 155 | ||
156 | enum { | 156 | enum { |
157 | GLF_LOCK = 1, | 157 | GLF_LOCK = 1, |
158 | GLF_STICKY = 2, | ||
159 | GLF_DEMOTE = 3, | 158 | GLF_DEMOTE = 3, |
160 | GLF_PENDING_DEMOTE = 4, | 159 | GLF_PENDING_DEMOTE = 4, |
161 | GLF_DEMOTE_IN_PROGRESS = 5, | 160 | GLF_DEMOTE_IN_PROGRESS = 5, |
@@ -190,7 +189,7 @@ struct gfs2_glock { | |||
190 | unsigned long gl_tchange; | 189 | unsigned long gl_tchange; |
191 | void *gl_object; | 190 | void *gl_object; |
192 | 191 | ||
193 | struct list_head gl_reclaim; | 192 | struct list_head gl_lru; |
194 | 193 | ||
195 | struct gfs2_sbd *gl_sbd; | 194 | struct gfs2_sbd *gl_sbd; |
196 | 195 | ||
@@ -397,7 +396,6 @@ struct gfs2_args { | |||
397 | struct gfs2_tune { | 396 | struct gfs2_tune { |
398 | spinlock_t gt_spin; | 397 | spinlock_t gt_spin; |
399 | 398 | ||
400 | unsigned int gt_demote_secs; /* Cache retention for unheld glock */ | ||
401 | unsigned int gt_incore_log_blocks; | 399 | unsigned int gt_incore_log_blocks; |
402 | unsigned int gt_log_flush_secs; | 400 | unsigned int gt_log_flush_secs; |
403 | 401 | ||
@@ -478,10 +476,6 @@ struct gfs2_sbd { | |||
478 | /* Lock Stuff */ | 476 | /* Lock Stuff */ |
479 | 477 | ||
480 | struct lm_lockstruct sd_lockstruct; | 478 | struct lm_lockstruct sd_lockstruct; |
481 | struct list_head sd_reclaim_list; | ||
482 | spinlock_t sd_reclaim_lock; | ||
483 | wait_queue_head_t sd_reclaim_wq; | ||
484 | atomic_t sd_reclaim_count; | ||
485 | struct gfs2_holder sd_live_gh; | 479 | struct gfs2_holder sd_live_gh; |
486 | struct gfs2_glock *sd_rename_gl; | 480 | struct gfs2_glock *sd_rename_gl; |
487 | struct gfs2_glock *sd_trans_gl; | 481 | struct gfs2_glock *sd_trans_gl; |
@@ -541,8 +535,6 @@ struct gfs2_sbd { | |||
541 | struct task_struct *sd_recoverd_process; | 535 | struct task_struct *sd_recoverd_process; |
542 | struct task_struct *sd_logd_process; | 536 | struct task_struct *sd_logd_process; |
543 | struct task_struct *sd_quotad_process; | 537 | struct task_struct *sd_quotad_process; |
544 | struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX]; | ||
545 | unsigned int sd_glockd_num; | ||
546 | 538 | ||
547 | /* Quota stuff */ | 539 | /* Quota stuff */ |
548 | 540 | ||
@@ -615,10 +607,6 @@ struct gfs2_sbd { | |||
615 | struct mutex sd_freeze_lock; | 607 | struct mutex sd_freeze_lock; |
616 | unsigned int sd_freeze_count; | 608 | unsigned int sd_freeze_count; |
617 | 609 | ||
618 | /* Counters */ | ||
619 | |||
620 | atomic_t sd_reclaimed; | ||
621 | |||
622 | char sd_fsname[GFS2_FSNAME_LEN]; | 610 | char sd_fsname[GFS2_FSNAME_LEN]; |
623 | char sd_table_name[GFS2_FSNAME_LEN]; | 611 | char sd_table_name[GFS2_FSNAME_LEN]; |
624 | char sd_proto_name[GFS2_FSNAME_LEN]; | 612 | char sd_proto_name[GFS2_FSNAME_LEN]; |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 97d3ce65e26f..3b87c188da41 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -386,7 +386,6 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip) | |||
386 | gfs2_free_di(rgd, ip); | 386 | gfs2_free_di(rgd, ip); |
387 | 387 | ||
388 | gfs2_trans_end(sdp); | 388 | gfs2_trans_end(sdp); |
389 | clear_bit(GLF_STICKY, &ip->i_gl->gl_flags); | ||
390 | 389 | ||
391 | out_rg_gunlock: | 390 | out_rg_gunlock: |
392 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 391 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index cf39295ccb90..7cacfde32194 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -43,7 +43,7 @@ static void gfs2_init_glock_once(void *foo) | |||
43 | INIT_LIST_HEAD(&gl->gl_holders); | 43 | INIT_LIST_HEAD(&gl->gl_holders); |
44 | gl->gl_lvb = NULL; | 44 | gl->gl_lvb = NULL; |
45 | atomic_set(&gl->gl_lvb_count, 0); | 45 | atomic_set(&gl->gl_lvb_count, 0); |
46 | INIT_LIST_HEAD(&gl->gl_reclaim); | 46 | INIT_LIST_HEAD(&gl->gl_lru); |
47 | INIT_LIST_HEAD(&gl->gl_ail_list); | 47 | INIT_LIST_HEAD(&gl->gl_ail_list); |
48 | atomic_set(&gl->gl_ail_count, 0); | 48 | atomic_set(&gl->gl_ail_count, 0); |
49 | } | 49 | } |
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c index f96eb90a2cfa..8c0f16e301f6 100644 --- a/fs/gfs2/mount.c +++ b/fs/gfs2/mount.c | |||
@@ -32,7 +32,6 @@ enum { | |||
32 | Opt_debug, | 32 | Opt_debug, |
33 | Opt_nodebug, | 33 | Opt_nodebug, |
34 | Opt_upgrade, | 34 | Opt_upgrade, |
35 | Opt_num_glockd, | ||
36 | Opt_acl, | 35 | Opt_acl, |
37 | Opt_noacl, | 36 | Opt_noacl, |
38 | Opt_quota_off, | 37 | Opt_quota_off, |
@@ -57,7 +56,6 @@ static const match_table_t tokens = { | |||
57 | {Opt_debug, "debug"}, | 56 | {Opt_debug, "debug"}, |
58 | {Opt_nodebug, "nodebug"}, | 57 | {Opt_nodebug, "nodebug"}, |
59 | {Opt_upgrade, "upgrade"}, | 58 | {Opt_upgrade, "upgrade"}, |
60 | {Opt_num_glockd, "num_glockd=%d"}, | ||
61 | {Opt_acl, "acl"}, | 59 | {Opt_acl, "acl"}, |
62 | {Opt_noacl, "noacl"}, | 60 | {Opt_noacl, "noacl"}, |
63 | {Opt_quota_off, "quota=off"}, | 61 | {Opt_quota_off, "quota=off"}, |
@@ -96,7 +94,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) | |||
96 | spin_unlock(&gfs2_sys_margs_lock); | 94 | spin_unlock(&gfs2_sys_margs_lock); |
97 | 95 | ||
98 | /* Set some defaults */ | 96 | /* Set some defaults */ |
99 | args->ar_num_glockd = GFS2_GLOCKD_DEFAULT; | ||
100 | args->ar_quota = GFS2_QUOTA_DEFAULT; | 97 | args->ar_quota = GFS2_QUOTA_DEFAULT; |
101 | args->ar_data = GFS2_DATA_DEFAULT; | 98 | args->ar_data = GFS2_DATA_DEFAULT; |
102 | } | 99 | } |
@@ -105,7 +102,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) | |||
105 | process them */ | 102 | process them */ |
106 | 103 | ||
107 | for (options = data; (o = strsep(&options, ",")); ) { | 104 | for (options = data; (o = strsep(&options, ",")); ) { |
108 | int token, option; | 105 | int token; |
109 | substring_t tmp[MAX_OPT_ARGS]; | 106 | substring_t tmp[MAX_OPT_ARGS]; |
110 | 107 | ||
111 | if (!*o) | 108 | if (!*o) |
@@ -196,22 +193,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount) | |||
196 | goto cant_remount; | 193 | goto cant_remount; |
197 | args->ar_upgrade = 1; | 194 | args->ar_upgrade = 1; |
198 | break; | 195 | break; |
199 | case Opt_num_glockd: | ||
200 | if ((error = match_int(&tmp[0], &option))) { | ||
201 | fs_info(sdp, "problem getting num_glockd\n"); | ||
202 | goto out_error; | ||
203 | } | ||
204 | |||
205 | if (remount && option != args->ar_num_glockd) | ||
206 | goto cant_remount; | ||
207 | if (!option || option > GFS2_GLOCKD_MAX) { | ||
208 | fs_info(sdp, "0 < num_glockd <= %u (not %u)\n", | ||
209 | GFS2_GLOCKD_MAX, option); | ||
210 | error = -EINVAL; | ||
211 | goto out_error; | ||
212 | } | ||
213 | args->ar_num_glockd = option; | ||
214 | break; | ||
215 | case Opt_acl: | 196 | case Opt_acl: |
216 | args->ar_posix_acl = 1; | 197 | args->ar_posix_acl = 1; |
217 | sdp->sd_vfs->s_flags |= MS_POSIXACL; | 198 | sdp->sd_vfs->s_flags |= MS_POSIXACL; |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index d159e7e72722..fc300eafda84 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include "gfs2.h" | 22 | #include "gfs2.h" |
23 | #include "incore.h" | 23 | #include "incore.h" |
24 | #include "bmap.h" | 24 | #include "bmap.h" |
25 | #include "daemon.h" | ||
26 | #include "glock.h" | 25 | #include "glock.h" |
27 | #include "glops.h" | 26 | #include "glops.h" |
28 | #include "inode.h" | 27 | #include "inode.h" |
@@ -56,7 +55,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt) | |||
56 | { | 55 | { |
57 | spin_lock_init(>->gt_spin); | 56 | spin_lock_init(>->gt_spin); |
58 | 57 | ||
59 | gt->gt_demote_secs = 300; | ||
60 | gt->gt_incore_log_blocks = 1024; | 58 | gt->gt_incore_log_blocks = 1024; |
61 | gt->gt_log_flush_secs = 60; | 59 | gt->gt_log_flush_secs = 60; |
62 | gt->gt_recoverd_secs = 60; | 60 | gt->gt_recoverd_secs = 60; |
@@ -88,10 +86,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
88 | 86 | ||
89 | gfs2_tune_init(&sdp->sd_tune); | 87 | gfs2_tune_init(&sdp->sd_tune); |
90 | 88 | ||
91 | INIT_LIST_HEAD(&sdp->sd_reclaim_list); | ||
92 | spin_lock_init(&sdp->sd_reclaim_lock); | ||
93 | init_waitqueue_head(&sdp->sd_reclaim_wq); | ||
94 | |||
95 | mutex_init(&sdp->sd_inum_mutex); | 89 | mutex_init(&sdp->sd_inum_mutex); |
96 | spin_lock_init(&sdp->sd_statfs_spin); | 90 | spin_lock_init(&sdp->sd_statfs_spin); |
97 | 91 | ||
@@ -443,24 +437,11 @@ out: | |||
443 | static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, | 437 | static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, |
444 | int undo) | 438 | int undo) |
445 | { | 439 | { |
446 | struct task_struct *p; | ||
447 | int error = 0; | 440 | int error = 0; |
448 | 441 | ||
449 | if (undo) | 442 | if (undo) |
450 | goto fail_trans; | 443 | goto fail_trans; |
451 | 444 | ||
452 | for (sdp->sd_glockd_num = 0; | ||
453 | sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd; | ||
454 | sdp->sd_glockd_num++) { | ||
455 | p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd"); | ||
456 | error = IS_ERR(p); | ||
457 | if (error) { | ||
458 | fs_err(sdp, "can't start glockd thread: %d\n", error); | ||
459 | goto fail; | ||
460 | } | ||
461 | sdp->sd_glockd_process[sdp->sd_glockd_num] = p; | ||
462 | } | ||
463 | |||
464 | error = gfs2_glock_nq_num(sdp, | 445 | error = gfs2_glock_nq_num(sdp, |
465 | GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, | 446 | GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, |
466 | LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE, | 447 | LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE, |
@@ -493,7 +474,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, | |||
493 | fs_err(sdp, "can't create transaction glock: %d\n", error); | 474 | fs_err(sdp, "can't create transaction glock: %d\n", error); |
494 | goto fail_rename; | 475 | goto fail_rename; |
495 | } | 476 | } |
496 | set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags); | ||
497 | 477 | ||
498 | return 0; | 478 | return 0; |
499 | 479 | ||
@@ -506,9 +486,6 @@ fail_live: | |||
506 | fail_mount: | 486 | fail_mount: |
507 | gfs2_glock_dq_uninit(mount_gh); | 487 | gfs2_glock_dq_uninit(mount_gh); |
508 | fail: | 488 | fail: |
509 | while (sdp->sd_glockd_num--) | ||
510 | kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); | ||
511 | |||
512 | return error; | 489 | return error; |
513 | } | 490 | } |
514 | 491 | ||
@@ -681,7 +658,6 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) | |||
681 | return PTR_ERR(sdp->sd_jindex); | 658 | return PTR_ERR(sdp->sd_jindex); |
682 | } | 659 | } |
683 | ip = GFS2_I(sdp->sd_jindex); | 660 | ip = GFS2_I(sdp->sd_jindex); |
684 | set_bit(GLF_STICKY, &ip->i_gl->gl_flags); | ||
685 | 661 | ||
686 | /* Load in the journal index special file */ | 662 | /* Load in the journal index special file */ |
687 | 663 | ||
@@ -832,7 +808,6 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) | |||
832 | goto fail_statfs; | 808 | goto fail_statfs; |
833 | } | 809 | } |
834 | ip = GFS2_I(sdp->sd_rindex); | 810 | ip = GFS2_I(sdp->sd_rindex); |
835 | set_bit(GLF_STICKY, &ip->i_gl->gl_flags); | ||
836 | sdp->sd_rindex_uptodate = 0; | 811 | sdp->sd_rindex_uptodate = 0; |
837 | 812 | ||
838 | /* Read in the quota inode */ | 813 | /* Read in the quota inode */ |
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index ad36af254fee..29f8a5c0b45b 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c | |||
@@ -142,8 +142,6 @@ static void gfs2_put_super(struct super_block *sb) | |||
142 | kthread_stop(sdp->sd_quotad_process); | 142 | kthread_stop(sdp->sd_quotad_process); |
143 | kthread_stop(sdp->sd_logd_process); | 143 | kthread_stop(sdp->sd_logd_process); |
144 | kthread_stop(sdp->sd_recoverd_process); | 144 | kthread_stop(sdp->sd_recoverd_process); |
145 | while (sdp->sd_glockd_num--) | ||
146 | kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); | ||
147 | 145 | ||
148 | if (!(sb->s_flags & MS_RDONLY)) { | 146 | if (!(sb->s_flags & MS_RDONLY)) { |
149 | error = gfs2_make_fs_ro(sdp); | 147 | error = gfs2_make_fs_ro(sdp); |
@@ -369,7 +367,6 @@ static void gfs2_clear_inode(struct inode *inode) | |||
369 | */ | 367 | */ |
370 | if (test_bit(GIF_USER, &ip->i_flags)) { | 368 | if (test_bit(GIF_USER, &ip->i_flags)) { |
371 | ip->i_gl->gl_object = NULL; | 369 | ip->i_gl->gl_object = NULL; |
372 | gfs2_glock_schedule_for_reclaim(ip->i_gl); | ||
373 | gfs2_glock_put(ip->i_gl); | 370 | gfs2_glock_put(ip->i_gl); |
374 | ip->i_gl = NULL; | 371 | ip->i_gl = NULL; |
375 | if (ip->i_iopen_gh.gh_gl) { | 372 | if (ip->i_iopen_gh.gh_gl) { |
@@ -422,8 +419,6 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
422 | seq_printf(s, ",debug"); | 419 | seq_printf(s, ",debug"); |
423 | if (args->ar_upgrade) | 420 | if (args->ar_upgrade) |
424 | seq_printf(s, ",upgrade"); | 421 | seq_printf(s, ",upgrade"); |
425 | if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT) | ||
426 | seq_printf(s, ",num_glockd=%u", args->ar_num_glockd); | ||
427 | if (args->ar_posix_acl) | 422 | if (args->ar_posix_acl) |
428 | seq_printf(s, ",acl"); | 423 | seq_printf(s, ",acl"); |
429 | if (args->ar_quota != GFS2_QUOTA_DEFAULT) { | 424 | if (args->ar_quota != GFS2_QUOTA_DEFAULT) { |
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 59e36fd80903..67ba5b7b759b 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
@@ -263,7 +263,6 @@ ARGS_ATTR(localcaching, "%d\n"); | |||
263 | ARGS_ATTR(localflocks, "%d\n"); | 263 | ARGS_ATTR(localflocks, "%d\n"); |
264 | ARGS_ATTR(debug, "%d\n"); | 264 | ARGS_ATTR(debug, "%d\n"); |
265 | ARGS_ATTR(upgrade, "%d\n"); | 265 | ARGS_ATTR(upgrade, "%d\n"); |
266 | ARGS_ATTR(num_glockd, "%u\n"); | ||
267 | ARGS_ATTR(posix_acl, "%d\n"); | 266 | ARGS_ATTR(posix_acl, "%d\n"); |
268 | ARGS_ATTR(quota, "%u\n"); | 267 | ARGS_ATTR(quota, "%u\n"); |
269 | ARGS_ATTR(suiddir, "%d\n"); | 268 | ARGS_ATTR(suiddir, "%d\n"); |
@@ -279,7 +278,6 @@ static struct attribute *args_attrs[] = { | |||
279 | &args_attr_localflocks.attr, | 278 | &args_attr_localflocks.attr, |
280 | &args_attr_debug.attr, | 279 | &args_attr_debug.attr, |
281 | &args_attr_upgrade.attr, | 280 | &args_attr_upgrade.attr, |
282 | &args_attr_num_glockd.attr, | ||
283 | &args_attr_posix_acl.attr, | 281 | &args_attr_posix_acl.attr, |
284 | &args_attr_quota.attr, | 282 | &args_attr_quota.attr, |
285 | &args_attr_suiddir.attr, | 283 | &args_attr_suiddir.attr, |
@@ -288,30 +286,6 @@ static struct attribute *args_attrs[] = { | |||
288 | }; | 286 | }; |
289 | 287 | ||
290 | /* | 288 | /* |
291 | * display counters from superblock | ||
292 | */ | ||
293 | |||
294 | struct counters_attr { | ||
295 | struct attribute attr; | ||
296 | ssize_t (*show)(struct gfs2_sbd *, char *); | ||
297 | }; | ||
298 | |||
299 | #define COUNTERS_ATTR(name, fmt) \ | ||
300 | static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \ | ||
301 | { \ | ||
302 | return snprintf(buf, PAGE_SIZE, fmt, \ | ||
303 | (unsigned int)atomic_read(&sdp->sd_##name)); \ | ||
304 | } \ | ||
305 | static struct counters_attr counters_attr_##name = __ATTR_RO(name) | ||
306 | |||
307 | COUNTERS_ATTR(reclaimed, "%u\n"); | ||
308 | |||
309 | static struct attribute *counters_attrs[] = { | ||
310 | &counters_attr_reclaimed.attr, | ||
311 | NULL, | ||
312 | }; | ||
313 | |||
314 | /* | ||
315 | * get and set struct gfs2_tune fields | 289 | * get and set struct gfs2_tune fields |
316 | */ | 290 | */ |
317 | 291 | ||
@@ -393,7 +367,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\ | |||
393 | } \ | 367 | } \ |
394 | TUNE_ATTR_2(name, name##_store) | 368 | TUNE_ATTR_2(name, name##_store) |
395 | 369 | ||
396 | TUNE_ATTR(demote_secs, 0); | ||
397 | TUNE_ATTR(incore_log_blocks, 0); | 370 | TUNE_ATTR(incore_log_blocks, 0); |
398 | TUNE_ATTR(log_flush_secs, 0); | 371 | TUNE_ATTR(log_flush_secs, 0); |
399 | TUNE_ATTR(quota_warn_period, 0); | 372 | TUNE_ATTR(quota_warn_period, 0); |
@@ -411,7 +384,6 @@ TUNE_ATTR_DAEMON(logd_secs, logd_process); | |||
411 | TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); | 384 | TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); |
412 | 385 | ||
413 | static struct attribute *tune_attrs[] = { | 386 | static struct attribute *tune_attrs[] = { |
414 | &tune_attr_demote_secs.attr, | ||
415 | &tune_attr_incore_log_blocks.attr, | 387 | &tune_attr_incore_log_blocks.attr, |
416 | &tune_attr_log_flush_secs.attr, | 388 | &tune_attr_log_flush_secs.attr, |
417 | &tune_attr_quota_warn_period.attr, | 389 | &tune_attr_quota_warn_period.attr, |
@@ -435,11 +407,6 @@ static struct attribute_group lockstruct_group = { | |||
435 | .attrs = lockstruct_attrs, | 407 | .attrs = lockstruct_attrs, |
436 | }; | 408 | }; |
437 | 409 | ||
438 | static struct attribute_group counters_group = { | ||
439 | .name = "counters", | ||
440 | .attrs = counters_attrs, | ||
441 | }; | ||
442 | |||
443 | static struct attribute_group args_group = { | 410 | static struct attribute_group args_group = { |
444 | .name = "args", | 411 | .name = "args", |
445 | .attrs = args_attrs, | 412 | .attrs = args_attrs, |
@@ -464,13 +431,9 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) | |||
464 | if (error) | 431 | if (error) |
465 | goto fail_reg; | 432 | goto fail_reg; |
466 | 433 | ||
467 | error = sysfs_create_group(&sdp->sd_kobj, &counters_group); | ||
468 | if (error) | ||
469 | goto fail_lockstruct; | ||
470 | |||
471 | error = sysfs_create_group(&sdp->sd_kobj, &args_group); | 434 | error = sysfs_create_group(&sdp->sd_kobj, &args_group); |
472 | if (error) | 435 | if (error) |
473 | goto fail_counters; | 436 | goto fail_lockstruct; |
474 | 437 | ||
475 | error = sysfs_create_group(&sdp->sd_kobj, &tune_group); | 438 | error = sysfs_create_group(&sdp->sd_kobj, &tune_group); |
476 | if (error) | 439 | if (error) |
@@ -481,8 +444,6 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) | |||
481 | 444 | ||
482 | fail_args: | 445 | fail_args: |
483 | sysfs_remove_group(&sdp->sd_kobj, &args_group); | 446 | sysfs_remove_group(&sdp->sd_kobj, &args_group); |
484 | fail_counters: | ||
485 | sysfs_remove_group(&sdp->sd_kobj, &counters_group); | ||
486 | fail_lockstruct: | 447 | fail_lockstruct: |
487 | sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); | 448 | sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); |
488 | fail_reg: | 449 | fail_reg: |
@@ -496,7 +457,6 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp) | |||
496 | { | 457 | { |
497 | sysfs_remove_group(&sdp->sd_kobj, &tune_group); | 458 | sysfs_remove_group(&sdp->sd_kobj, &tune_group); |
498 | sysfs_remove_group(&sdp->sd_kobj, &args_group); | 459 | sysfs_remove_group(&sdp->sd_kobj, &args_group); |
499 | sysfs_remove_group(&sdp->sd_kobj, &counters_group); | ||
500 | sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); | 460 | sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); |
501 | kobject_put(&sdp->sd_kobj); | 461 | kobject_put(&sdp->sd_kobj); |
502 | } | 462 | } |