| author | Dave Chinner <dchinner@redhat.com> | 2011-07-08 00:14:42 -0400 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2011-07-20 20:47:10 -0400 |
| commit | b0d40c92adafde7c2d81203ce7c1c69275f41140 | |
| tree | f75a19dcd1a37aff23dc43323b58f014b1297c6b /fs/super.c | |
| parent | 12ad3ab66103e6582ca69c0c9de18b13487eaaef | |
superblock: introduce per-sb cache shrinker infrastructure
With context based shrinkers, we can implement a per-superblock
shrinker that shrinks the caches attached to the superblock. We
currently have global shrinkers for the inode and dentry caches that
split up into per-superblock operations via a coarse proportioning
method that does not batch very well. The global shrinkers also
have a dependency - dentries pin inodes - so we have to be very
careful about how we register the global shrinkers so that the
implicit call order is always correct.
With a per-sb shrinker callout, we can encode this dependency
directly into the per-sb shrinker, hence avoiding the need for
strictly ordering shrinker registrations. We also have no need for
any proportioning code, as the shrinker subsystem already provides
this functionality across all shrinkers. Allowing the shrinker to
operate on a single superblock at a time means fewer superblock list
traversals and less locking, and reclaim should batch more
effectively. This should result in less CPU overhead for reclaim and
potentially faster reclaim of items from each filesystem.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/super.c')

 fs/super.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)
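Before reading the diff, it helps to have the shrinker interface of this patch's era (Linux 3.0/3.1) in mind. The sketch below is abridged from memory of the `include/linux/mm.h` of that period and is a reference, not a verbatim copy: a `->shrink()` callback is invoked by reclaim either with `nr_to_scan == 0`, meaning "report how many freeable objects you hold", or with a positive scan count, and returns the remaining object count, or `-1` for "cannot reclaim in this context".

```c
/*
 * Abridged sketch of the 2011-era shrinker API (not verbatim; see the
 * include/linux/mm.h of that period for the authoritative definitions).
 */
struct shrink_control {
	gfp_t gfp_mask;           /* allocation context reclaim is running in */
	unsigned long nr_to_scan; /* 0: just report size; >0: scan this many */
};

struct shrinker {
	/* returns remaining object count, or -1 if reclaim must be skipped */
	int (*shrink)(struct shrinker *, struct shrink_control *sc);
	int seeks;                /* cost to recreate an object; DEFAULT_SEEKS == 2 */
	/* list linkage and batching state managed by vmscan omitted */
};

void register_shrinker(struct shrinker *shrinker);
void unregister_shrinker(struct shrinker *shrinker);
```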
```diff
diff --git a/fs/super.c b/fs/super.c
index e63c754447ce..37a75410079e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -38,6 +38,48 @@
 LIST_HEAD(super_blocks);
 DEFINE_SPINLOCK(sb_lock);
 
+/*
+ * One thing we have to be careful of with a per-sb shrinker is that we don't
+ * drop the last active reference to the superblock from within the shrinker.
+ * If that happens we could trigger unregistering the shrinker from within the
+ * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
+ * take a passive reference to the superblock to avoid this from occurring.
+ */
+static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct super_block *sb;
+	int count;
+
+	sb = container_of(shrink, struct super_block, s_shrink);
+
+	/*
+	 * Deadlock avoidance. We may hold various FS locks, and we don't want
+	 * to recurse into the FS that called us in clear_inode() and friends..
+	 */
+	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+		return -1;
+
+	if (!grab_super_passive(sb))
+		return -1;
+
+	if (sc->nr_to_scan) {
+		/* proportion the scan between the two caches */
+		int total;
+
+		total = sb->s_nr_dentry_unused + sb->s_nr_inodes_unused + 1;
+		count = (sc->nr_to_scan * sb->s_nr_dentry_unused) / total;
+
+		/* prune dcache first as icache is pinned by it */
+		prune_dcache_sb(sb, count);
+		prune_icache_sb(sb, sc->nr_to_scan - count);
+	}
+
+	count = ((sb->s_nr_dentry_unused + sb->s_nr_inodes_unused) / 100)
+						* sysctl_vfs_cache_pressure;
+	drop_super(sb);
+	return count;
+}
+
 /**
  * alloc_super -	create new superblock
  * @type:	filesystem type superblock should belong to
```
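To make the proportioning in `prune_super()` concrete, here is a stand-alone arithmetic check with purely illustrative cache sizes (the numbers are made up for the example):

```c
#include <stdio.h>

/* Mirrors prune_super()'s split: suppose one sb holds 300 unused
 * dentries and 100 unused inodes, and reclaim asks to scan 128. */
int main(void)
{
	int nr_to_scan = 128, dentries = 300, inodes = 100;
	int total = dentries + inodes + 1;	/* +1 avoids division by zero */
	int count = (nr_to_scan * dentries) / total;

	printf("prune_dcache_sb scans %d dentries\n", count);             /* 95 */
	printf("prune_icache_sb scans %d inodes\n", nr_to_scan - count);  /* 33 */
	return 0;
}
```

The dcache gets the larger share because it holds most of the unused objects, and it is pruned first since unused dentries pin the inodes behind them. When `nr_to_scan` is zero, the function instead reports the cache size back to reclaim as roughly (unused objects / 100) * `vfs_cache_pressure`.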
```diff
@@ -116,6 +158,9 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		s->s_op = &default_op;
 		s->s_time_gran = 1000000000;
 		s->cleancache_poolid = -1;
+
+		s->s_shrink.seeks = DEFAULT_SEEKS;
+		s->s_shrink.shrink = prune_super;
 	}
 out:
 	return s;
```
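Note that `alloc_super()` only initializes the shrinker; `DEFAULT_SEEKS` (2 at the time) tells vmscan how expensive it is to recreate an evicted object relative to reclaiming a page. Registration itself is deferred to `sget()` in the last hunk, presumably so that reclaim can never call `prune_super()` on a superblock that is not yet fully constructed and linked on the lists.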
```diff
@@ -183,6 +228,10 @@ void deactivate_locked_super(struct super_block *s)
 	if (atomic_dec_and_test(&s->s_active)) {
 		cleancache_flush_fs(s);
 		fs->kill_sb(s);
+
+		/* caches are now gone, we can safely kill the shrinker now */
+		unregister_shrinker(&s->s_shrink);
+
 		/*
 		 * We need to call rcu_barrier so all the delayed rcu free
 		 * inodes are flushed before we release the fs module.
```
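This hunk is the other half of the deadlock the top-of-file comment worries about: `unregister_shrinker()` takes `shrinker_rwsem`, which reclaim already holds while running `prune_super()`, so dropping the last active superblock reference from inside the callback would self-deadlock. That is why `prune_super()` uses `grab_super_passive()`, introduced by the parent commit (12ad3ab66103e). From memory of the fs/super.c of that era it behaves roughly like the sketch below; treat it as a reconstruction, not a verbatim copy:

```c
/*
 * Sketch (not verbatim) of grab_super_passive() from the parent commit:
 * take only a passive s_count reference and try-lock s_umount, so the
 * shrinker never owns the last *active* reference and can never end up
 * calling unregister_shrinker() on itself.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {	/* already being torn down */
		spin_unlock(&sb_lock);
		return false;
	}
	sb->s_count++;				/* passive reference only */
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)			/* still mounted */
			return true;
		up_read(&sb->s_umount);
	}
	put_super(sb);
	return false;
}
```

The matching `drop_super()` at the end of `prune_super()` releases both the `s_umount` read lock and the passive reference.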
```diff
@@ -311,7 +360,6 @@ void generic_shutdown_super(struct super_block *sb)
 {
 	const struct super_operations *sop = sb->s_op;
 
-
 	if (sb->s_root) {
 		shrink_dcache_for_umount(sb);
 		sync_filesystem(sb);
```
```diff
@@ -399,6 +447,7 @@ retry:
 		list_add(&s->s_instances, &type->fs_supers);
 		spin_unlock(&sb_lock);
 		get_filesystem(type);
+		register_shrinker(&s->s_shrink);
 		return s;
 	}
 
```
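Taken together, the shrinker's lifetime now brackets the superblock's active lifetime: `sget()` registers `s_shrink` only after the superblock is fully set up and on the per-type list, and `deactivate_locked_super()` unregisters it only after `->kill_sb()` has emptied the sb's dentry and inode caches, so `prune_super()` can never run against a half-built or half-torn-down superblock.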