diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 21:54:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 21:54:28 -0500 |
commit | 818099574b04c5301eacbbcd441022b353a65466 (patch) | |
tree | 77b3645b375105cb0389df2b4ea5ffa90329f7f8 /fs/super.c | |
parent | 802ea9d8645d33d24b7b4cd4537c14f3e698bde0 (diff) | |
parent | 6016daed58ee482a2f7684e93342e89139cf4419 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge third set of updates from Andrew Morton:
- the rest of MM
[ This includes getting rid of the numa hinting bits, in favor of
just generic protnone logic. Yay. - Linus ]
- core kernel
- procfs
- some of lib/ (lots of lib/ material this time)
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (104 commits)
lib/lcm.c: replace include
lib/percpu_ida.c: remove redundant includes
lib/strncpy_from_user.c: replace module.h include
lib/stmp_device.c: replace module.h include
lib/sort.c: move include inside #if 0
lib/show_mem.c: remove redundant include
lib/radix-tree.c: change to simpler include
lib/plist.c: remove redundant include
lib/nlattr.c: remove redundant include
lib/kobject_uevent.c: remove redundant include
lib/llist.c: remove redundant include
lib/md5.c: simplify include
lib/list_sort.c: rearrange includes
lib/genalloc.c: remove redundant include
lib/idr.c: remove redundant include
lib/halfmd4.c: simplify includes
lib/dynamic_queue_limits.c: simplify includes
lib/sort.c: use simpler includes
lib/interval_tree.c: simplify includes
hexdump: make it return number of bytes placed in buffer
...
Diffstat (limited to 'fs/super.c')
-rw-r--r-- | fs/super.c | 47 |
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index 05a021638b11..1facd2c282e5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -75,10 +75,10 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
-		fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);
+		fs_objects = sb->s_op->nr_cached_objects(sb, sc);
 
-	inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
-	dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
+	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
 	total_objects = dentries + inodes + fs_objects + 1;
 	if (!total_objects)
 		total_objects = 1;
@@ -86,19 +86,23 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	/* proportion the scan between the caches */
 	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
 	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
+	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
 
 	/*
 	 * prune the dcache first as the icache is pinned by it, then
 	 * prune the icache, followed by the filesystem specific caches
+	 *
+	 * Ensure that we always scan at least one object - memcg kmem
+	 * accounting uses this to fully empty the caches.
 	 */
-	freed = prune_dcache_sb(sb, dentries, sc->nid);
-	freed += prune_icache_sb(sb, inodes, sc->nid);
+	sc->nr_to_scan = dentries + 1;
+	freed = prune_dcache_sb(sb, sc);
+	sc->nr_to_scan = inodes + 1;
+	freed += prune_icache_sb(sb, sc);
 
 	if (fs_objects) {
-		fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
-				       total_objects);
-		freed += sb->s_op->free_cached_objects(sb, fs_objects,
-						       sc->nid);
+		sc->nr_to_scan = fs_objects + 1;
+		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
 	drop_super(sb);
@@ -118,17 +122,14 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	 * scalability bottleneck. The counts could get updated
 	 * between super_cache_count and super_cache_scan anyway.
 	 * Call to super_cache_count with shrinker_rwsem held
-	 * ensures the safety of call to list_lru_count_node() and
+	 * ensures the safety of call to list_lru_shrink_count() and
 	 * s_op->nr_cached_objects().
 	 */
 	if (sb->s_op && sb->s_op->nr_cached_objects)
-		total_objects = sb->s_op->nr_cached_objects(sb,
-						 sc->nid);
+		total_objects = sb->s_op->nr_cached_objects(sb, sc);
 
-	total_objects += list_lru_count_node(&sb->s_dentry_lru,
-						 sc->nid);
-	total_objects += list_lru_count_node(&sb->s_inode_lru,
-						 sc->nid);
+	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
+	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
 
 	total_objects = vfs_pressure_ratio(total_objects);
 	return total_objects;
@@ -191,9 +192,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
 
-	if (list_lru_init(&s->s_dentry_lru))
+	if (list_lru_init_memcg(&s->s_dentry_lru))
 		goto fail;
-	if (list_lru_init(&s->s_inode_lru))
+	if (list_lru_init_memcg(&s->s_inode_lru))
 		goto fail;
 
 	init_rwsem(&s->s_umount);
@@ -229,7 +230,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	s->s_shrink.scan_objects = super_cache_scan;
 	s->s_shrink.count_objects = super_cache_count;
 	s->s_shrink.batch = 1024;
-	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
 	return s;
 
 fail:
@@ -284,6 +285,14 @@ void deactivate_locked_super(struct super_block *s)
 		unregister_shrinker(&s->s_shrink);
 		fs->kill_sb(s);
 
+		/*
+		 * Since list_lru_destroy() may sleep, we cannot call it from
+		 * put_super(), where we hold the sb_lock. Therefore we destroy
+		 * the lru lists right now.
+		 */
+		list_lru_destroy(&s->s_dentry_lru);
+		list_lru_destroy(&s->s_inode_lru);
+
 		put_filesystem(fs);
 		put_super(s);
 	} else {