Diffstat (limited to 'fs/super.c')

 fs/super.c | 59 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index eae088f6aaae..1facd2c282e5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -36,8 +36,8 @@
 #include "internal.h"
 
 
-LIST_HEAD(super_blocks);
-DEFINE_SPINLOCK(sb_lock);
+static LIST_HEAD(super_blocks);
+static DEFINE_SPINLOCK(sb_lock);
 
 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 	"sb_writers",
@@ -75,10 +75,10 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
-		fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);
+		fs_objects = sb->s_op->nr_cached_objects(sb, sc);
 
-	inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
-	dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
+	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
 	total_objects = dentries + inodes + fs_objects + 1;
 	if (!total_objects)
 		total_objects = 1;
@@ -86,19 +86,23 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	/* proportion the scan between the caches */
 	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
 	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
+	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
 
 	/*
 	 * prune the dcache first as the icache is pinned by it, then
 	 * prune the icache, followed by the filesystem specific caches
+	 *
+	 * Ensure that we always scan at least one object - memcg kmem
+	 * accounting uses this to fully empty the caches.
 	 */
-	freed = prune_dcache_sb(sb, dentries, sc->nid);
-	freed += prune_icache_sb(sb, inodes, sc->nid);
+	sc->nr_to_scan = dentries + 1;
+	freed = prune_dcache_sb(sb, sc);
+	sc->nr_to_scan = inodes + 1;
+	freed += prune_icache_sb(sb, sc);
 
 	if (fs_objects) {
-		fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
-				       total_objects);
-		freed += sb->s_op->free_cached_objects(sb, fs_objects,
-						       sc->nid);
+		sc->nr_to_scan = fs_objects + 1;
+		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
 	drop_super(sb);
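The rewritten super_cache_scan() above splits the incoming scan target proportionally between the dentry, inode and fs-private caches, then bumps each share by one so that memcg kmem accounting can fully empty even very small caches. Below is a minimal standalone C sketch of that arithmetic, assuming a userspace stand-in for the kernel's mult_frac() macro and made-up cache sizes; it is an illustration, not code from this patch.

#include <stdio.h>

/* userspace stand-in mirroring the kernel's mult_frac(): x * numer / denom,
 * computed in two steps to reduce the risk of intermediate overflow */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        unsigned long quot = x / denom;
        unsigned long rem  = x % denom;

        return quot * numer + (rem * numer) / denom;
}

int main(void)
{
        unsigned long nr_to_scan = 128;   /* pressure handed in by the shrinker core */
        unsigned long dentries = 600, inodes = 300, fs_objects = 100;
        unsigned long total = dentries + inodes + fs_objects + 1;

        /* each cache is asked to scan its proportional share, plus one
         * object so even a tiny cache always sees some pressure */
        printf("dentries:   scan %lu\n", mult_frac(nr_to_scan, dentries, total) + 1);
        printf("inodes:     scan %lu\n", mult_frac(nr_to_scan, inodes, total) + 1);
        printf("fs objects: scan %lu\n", mult_frac(nr_to_scan, fs_objects, total) + 1);
        return 0;
}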
@@ -118,17 +122,14 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	 * scalability bottleneck. The counts could get updated
 	 * between super_cache_count and super_cache_scan anyway.
 	 * Call to super_cache_count with shrinker_rwsem held
-	 * ensures the safety of call to list_lru_count_node() and
+	 * ensures the safety of call to list_lru_shrink_count() and
 	 * s_op->nr_cached_objects().
 	 */
 	if (sb->s_op && sb->s_op->nr_cached_objects)
-		total_objects = sb->s_op->nr_cached_objects(sb,
-						 sc->nid);
+		total_objects = sb->s_op->nr_cached_objects(sb, sc);
 
-	total_objects += list_lru_count_node(&sb->s_dentry_lru,
-						 sc->nid);
-	total_objects += list_lru_count_node(&sb->s_inode_lru,
-						 sc->nid);
+	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
+	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
 
 	total_objects = vfs_pressure_ratio(total_objects);
 	return total_objects;
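With this change both superblock callbacks take the shrink_control itself rather than a bare NUMA node id, so a filesystem can pass it straight through to the list_lru helpers. A hypothetical, illustration-only implementation might look like the sketch below; "myfs", its sb_info layout and the isolate callback are invented names, not anything defined by this patch.

/* hypothetical filesystem callbacks following the new prototypes */
static long myfs_nr_cached_objects(struct super_block *sb,
				   struct shrink_control *sc)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;	/* invented layout */

	/* report how many fs-private objects are reclaimable for the
	 * node/memcg described by the shrink_control */
	return list_lru_shrink_count(&sbi->private_lru, sc);
}

static long myfs_free_cached_objects(struct super_block *sb,
				     struct shrink_control *sc)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;	/* invented layout */

	/* sc->nr_to_scan was set up by super_cache_scan() above */
	return list_lru_shrink_walk(&sbi->private_lru, sc,
				    myfs_lru_isolate, NULL);	/* invented callback */
}

These would be wired into the filesystem's struct super_operations via .nr_cached_objects and .free_cached_objects.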
@@ -185,15 +186,15 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	}
 	init_waitqueue_head(&s->s_writers.wait);
 	init_waitqueue_head(&s->s_writers.wait_unfrozen);
+	s->s_bdi = &noop_backing_dev_info;
 	s->s_flags = flags;
-	s->s_bdi = &default_backing_dev_info;
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
 
-	if (list_lru_init(&s->s_dentry_lru))
+	if (list_lru_init_memcg(&s->s_dentry_lru))
 		goto fail;
-	if (list_lru_init(&s->s_inode_lru))
+	if (list_lru_init_memcg(&s->s_inode_lru))
 		goto fail;
 
 	init_rwsem(&s->s_umount);
@@ -229,7 +230,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	s->s_shrink.scan_objects = super_cache_scan;
 	s->s_shrink.count_objects = super_cache_count;
 	s->s_shrink.batch = 1024;
-	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
 	return s;
 
 fail:
@@ -284,6 +285,14 @@ void deactivate_locked_super(struct super_block *s)
 		unregister_shrinker(&s->s_shrink);
 		fs->kill_sb(s);
 
+		/*
+		 * Since list_lru_destroy() may sleep, we cannot call it from
+		 * put_super(), where we hold the sb_lock. Therefore we destroy
+		 * the lru lists right now.
+		 */
+		list_lru_destroy(&s->s_dentry_lru);
+		list_lru_destroy(&s->s_inode_lru);
+
 		put_filesystem(fs);
 		put_super(s);
 	} else {
@@ -863,10 +872,7 @@ EXPORT_SYMBOL(free_anon_bdev);
 
 int set_anon_super(struct super_block *s, void *data)
 {
-	int error = get_anon_bdev(&s->s_dev);
-	if (!error)
-		s->s_bdi = &noop_backing_dev_info;
-	return error;
+	return get_anon_bdev(&s->s_dev);
 }
 
 EXPORT_SYMBOL(set_anon_super);
@@ -1111,7 +1117,6 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
 	sb = root->d_sb;
 	BUG_ON(!sb);
 	WARN_ON(!sb->s_bdi);
-	WARN_ON(sb->s_bdi == &default_backing_dev_info);
 	sb->s_flags |= MS_BORN;
 
 	error = security_sb_kern_mount(sb, flags, secdata);