author     Vladimir Davydov <vdavydov@parallels.com>       2015-02-12 17:59:07 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 21:54:09 -0500
commit     c0a5b560938a0f2fd2fbf66ddc446c7c2b41383a (patch)
tree       f69922a1bffc5d841d5323ba018a61d2e548db6c
parent     ff0b67ef5b1687692bc1fd3ce4bc3d1ff83587c7 (diff)
list_lru: organize all list_lrus to list
To make list_lru memcg aware, we need all list_lrus to be kept on a list protected by a mutex, so that we can sleep while walking over the list. Therefore, after this change list_lru_destroy may sleep. Fortunately, there is only one user that calls it from an atomic context - put_super - and we can easily fix that by calling list_lru_destroy before put_super in deactivate_locked_super; we no longer need the lrus by that time anyway.

Another point worth noting is that list_lru_destroy is allowed to be called on an uninitialized, zeroed-out object, in which case it is a no-op. Before this patch that was guaranteed by kfree, but now we need an explicit check there.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/super.c                 8
-rw-r--r--  include/linux/list_lru.h   3
-rw-r--r--  mm/list_lru.c             34
3 files changed, 45 insertions, 0 deletions
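The commit message's key point is that the new global list exists so that future memcg code can walk every registered lru and sleep while doing so. Purely as an illustration (this helper is hypothetical and not part of the patch), such a walker would sit next to the registry in mm/list_lru.c, under CONFIG_MEMCG_KMEM, and could look roughly like this:

/*
 * Hypothetical walker, NOT part of this patch: visit every registered
 * list_lru under list_lrus_mutex.  Because the registry is protected by
 * a mutex rather than a spinlock, the callback is allowed to sleep,
 * e.g. to allocate per-memcg data with GFP_KERNEL.
 */
static int walk_all_list_lrus(int (*fn)(struct list_lru *lru, void *arg),
                              void *arg)
{
        struct list_lru *lru;
        int ret = 0;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = fn(lru, arg);
                if (ret)
                        break;
        }
        mutex_unlock(&list_lrus_mutex);
        return ret;
}

This sleepable walk is also exactly why list_lru_destroy() itself may now sleep: it has to take the same mutex to unregister the lru.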
diff --git a/fs/super.c b/fs/super.c
index a2b735a42e74..b027849d92d2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -282,6 +282,14 @@ void deactivate_locked_super(struct super_block *s)
         unregister_shrinker(&s->s_shrink);
         fs->kill_sb(s);
 
+        /*
+         * Since list_lru_destroy() may sleep, we cannot call it from
+         * put_super(), where we hold the sb_lock. Therefore we destroy
+         * the lru lists right now.
+         */
+        list_lru_destroy(&s->s_dentry_lru);
+        list_lru_destroy(&s->s_inode_lru);
+
         put_filesystem(fs);
         put_super(s);
 } else {
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 53c1d6b78270..ee9486ac0621 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -31,6 +31,9 @@ struct list_lru_node {
 
 struct list_lru {
         struct list_lru_node *node;
+#ifdef CONFIG_MEMCG_KMEM
+        struct list_head list;
+#endif
 };
 
 void list_lru_destroy(struct list_lru *lru);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 07e198c77888..a9021cb3ccde 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -9,6 +9,34 @@
 #include <linux/mm.h>
 #include <linux/list_lru.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_MEMCG_KMEM
+static LIST_HEAD(list_lrus);
+static DEFINE_MUTEX(list_lrus_mutex);
+
+static void list_lru_register(struct list_lru *lru)
+{
+        mutex_lock(&list_lrus_mutex);
+        list_add(&lru->list, &list_lrus);
+        mutex_unlock(&list_lrus_mutex);
+}
+
+static void list_lru_unregister(struct list_lru *lru)
+{
+        mutex_lock(&list_lrus_mutex);
+        list_del(&lru->list);
+        mutex_unlock(&list_lrus_mutex);
+}
+#else
+static void list_lru_register(struct list_lru *lru)
+{
+}
+
+static void list_lru_unregister(struct list_lru *lru)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -137,12 +165,18 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
                 INIT_LIST_HEAD(&lru->node[i].list);
                 lru->node[i].nr_items = 0;
         }
+        list_lru_register(lru);
         return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init_key);
 
 void list_lru_destroy(struct list_lru *lru)
 {
+        /* Already destroyed or not yet initialized? */
+        if (!lru->node)
+                return;
+        list_lru_unregister(lru);
         kfree(lru->node);
+        lru->node = NULL;
 }
 EXPORT_SYMBOL_GPL(list_lru_destroy);
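Finally, the explicit check for a NULL ->node and the reset to NULL give callers an idempotent destroy. A hypothetical caller (foo_cache is made up; list_lru_init_key() and list_lru_destroy() are the functions touched above) might rely on it like this:

/*
 * Hypothetical caller, NOT from this patch: a zeroed-out struct
 * list_lru may be passed to list_lru_destroy() even if it was never
 * initialized, and destroying it twice is equally harmless, because
 * ->node is checked and reset to NULL.
 */
struct foo_cache {
        struct list_lru lru;            /* kzalloc'ed, so lru.node == NULL */
};

static int foo_cache_setup(struct foo_cache *c)
{
        return list_lru_init_key(&c->lru, NULL);
}

static void foo_cache_teardown(struct foo_cache *c)
{
        list_lru_destroy(&c->lru);      /* no-op if setup never ran */
}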