diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2016-01-20 18:02:32 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-20 20:09:18 -0500 |
commit | 127424c86bb6cb87f0b563d9fdcfbbaf3c86ecec (patch) | |
tree | 7748cabc717161032e479be17e20f5046c9a8335 /mm | |
parent | 3893e302f6a377c4ef0f077f190bf760bf84e0be (diff) |
mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
The cgroup2 memory controller will account important in-kernel memory
consumers by default. Move all necessary components to CONFIG_MEMCG.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/list_lru.c | 12 | ||||
-rw-r--r-- | mm/memcontrol.c | 69 | ||||
-rw-r--r-- | mm/slab.h | 6 | ||||
-rw-r--r-- | mm/slab_common.c | 10 | ||||
-rw-r--r-- | mm/slub.c | 10 |
5 files changed, 60 insertions, 47 deletions
diff --git a/mm/list_lru.c b/mm/list_lru.c index afc71ea9a381..1d05cb9d363d 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/memcontrol.h> | 13 | #include <linux/memcontrol.h> |
14 | 14 | ||
15 | #ifdef CONFIG_MEMCG_KMEM | 15 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
16 | static LIST_HEAD(list_lrus); | 16 | static LIST_HEAD(list_lrus); |
17 | static DEFINE_MUTEX(list_lrus_mutex); | 17 | static DEFINE_MUTEX(list_lrus_mutex); |
18 | 18 | ||
@@ -37,9 +37,9 @@ static void list_lru_register(struct list_lru *lru) | |||
37 | static void list_lru_unregister(struct list_lru *lru) | 37 | static void list_lru_unregister(struct list_lru *lru) |
38 | { | 38 | { |
39 | } | 39 | } |
40 | #endif /* CONFIG_MEMCG_KMEM */ | 40 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
41 | 41 | ||
42 | #ifdef CONFIG_MEMCG_KMEM | 42 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
43 | static inline bool list_lru_memcg_aware(struct list_lru *lru) | 43 | static inline bool list_lru_memcg_aware(struct list_lru *lru) |
44 | { | 44 | { |
45 | /* | 45 | /* |
@@ -104,7 +104,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) | |||
104 | { | 104 | { |
105 | return &nlru->lru; | 105 | return &nlru->lru; |
106 | } | 106 | } |
107 | #endif /* CONFIG_MEMCG_KMEM */ | 107 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
108 | 108 | ||
109 | bool list_lru_add(struct list_lru *lru, struct list_head *item) | 109 | bool list_lru_add(struct list_lru *lru, struct list_head *item) |
110 | { | 110 | { |
@@ -292,7 +292,7 @@ static void init_one_lru(struct list_lru_one *l) | |||
292 | l->nr_items = 0; | 292 | l->nr_items = 0; |
293 | } | 293 | } |
294 | 294 | ||
295 | #ifdef CONFIG_MEMCG_KMEM | 295 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
296 | static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, | 296 | static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, |
297 | int begin, int end) | 297 | int begin, int end) |
298 | { | 298 | { |
@@ -529,7 +529,7 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) | |||
529 | static void memcg_destroy_list_lru(struct list_lru *lru) | 529 | static void memcg_destroy_list_lru(struct list_lru *lru) |
530 | { | 530 | { |
531 | } | 531 | } |
532 | #endif /* CONFIG_MEMCG_KMEM */ | 532 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
533 | 533 | ||
534 | int __list_lru_init(struct list_lru *lru, bool memcg_aware, | 534 | int __list_lru_init(struct list_lru *lru, bool memcg_aware, |
535 | struct lock_class_key *key) | 535 | struct lock_class_key *key) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7f8219b58e0c..fe51d5e61389 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -297,7 +297,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) | |||
297 | return mem_cgroup_from_css(css); | 297 | return mem_cgroup_from_css(css); |
298 | } | 298 | } |
299 | 299 | ||
300 | #ifdef CONFIG_MEMCG_KMEM | 300 | #ifndef CONFIG_SLOB |
301 | /* | 301 | /* |
302 | * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. | 302 | * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. |
303 | * The main reason for not using cgroup id for this: | 303 | * The main reason for not using cgroup id for this: |
@@ -349,7 +349,7 @@ void memcg_put_cache_ids(void) | |||
349 | DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); | 349 | DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); |
350 | EXPORT_SYMBOL(memcg_kmem_enabled_key); | 350 | EXPORT_SYMBOL(memcg_kmem_enabled_key); |
351 | 351 | ||
352 | #endif /* CONFIG_MEMCG_KMEM */ | 352 | #endif /* !CONFIG_SLOB */ |
353 | 353 | ||
354 | static struct mem_cgroup_per_zone * | 354 | static struct mem_cgroup_per_zone * |
355 | mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) | 355 | mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) |
@@ -2203,7 +2203,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, | |||
2203 | unlock_page_lru(page, isolated); | 2203 | unlock_page_lru(page, isolated); |
2204 | } | 2204 | } |
2205 | 2205 | ||
2206 | #ifdef CONFIG_MEMCG_KMEM | 2206 | #ifndef CONFIG_SLOB |
2207 | static int memcg_alloc_cache_id(void) | 2207 | static int memcg_alloc_cache_id(void) |
2208 | { | 2208 | { |
2209 | int id, size; | 2209 | int id, size; |
@@ -2424,7 +2424,7 @@ void __memcg_kmem_uncharge(struct page *page, int order) | |||
2424 | page->mem_cgroup = NULL; | 2424 | page->mem_cgroup = NULL; |
2425 | css_put_many(&memcg->css, nr_pages); | 2425 | css_put_many(&memcg->css, nr_pages); |
2426 | } | 2426 | } |
2427 | #endif /* CONFIG_MEMCG_KMEM */ | 2427 | #endif /* !CONFIG_SLOB */ |
2428 | 2428 | ||
2429 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 2429 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
2430 | 2430 | ||
@@ -2860,7 +2860,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | |||
2860 | } | 2860 | } |
2861 | } | 2861 | } |
2862 | 2862 | ||
2863 | #ifdef CONFIG_MEMCG_KMEM | 2863 | #ifndef CONFIG_SLOB |
2864 | static int memcg_online_kmem(struct mem_cgroup *memcg) | 2864 | static int memcg_online_kmem(struct mem_cgroup *memcg) |
2865 | { | 2865 | { |
2866 | int err = 0; | 2866 | int err = 0; |
@@ -2908,24 +2908,6 @@ out: | |||
2908 | return err; | 2908 | return err; |
2909 | } | 2909 | } |
2910 | 2910 | ||
2911 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | ||
2912 | unsigned long limit) | ||
2913 | { | ||
2914 | int ret; | ||
2915 | |||
2916 | mutex_lock(&memcg_limit_mutex); | ||
2917 | /* Top-level cgroup doesn't propagate from root */ | ||
2918 | if (!memcg_kmem_online(memcg)) { | ||
2919 | ret = memcg_online_kmem(memcg); | ||
2920 | if (ret) | ||
2921 | goto out; | ||
2922 | } | ||
2923 | ret = page_counter_limit(&memcg->kmem, limit); | ||
2924 | out: | ||
2925 | mutex_unlock(&memcg_limit_mutex); | ||
2926 | return ret; | ||
2927 | } | ||
2928 | |||
2929 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) | 2911 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) |
2930 | { | 2912 | { |
2931 | int ret = 0; | 2913 | int ret = 0; |
@@ -3000,16 +2982,45 @@ static void memcg_free_kmem(struct mem_cgroup *memcg) | |||
3000 | } | 2982 | } |
3001 | } | 2983 | } |
3002 | #else | 2984 | #else |
2985 | static int memcg_propagate_kmem(struct mem_cgroup *memcg) | ||
2986 | { | ||
2987 | return 0; | ||
2988 | } | ||
2989 | static void memcg_offline_kmem(struct mem_cgroup *memcg) | ||
2990 | { | ||
2991 | } | ||
2992 | static void memcg_free_kmem(struct mem_cgroup *memcg) | ||
2993 | { | ||
2994 | } | ||
2995 | #endif /* !CONFIG_SLOB */ | ||
2996 | |||
2997 | #ifdef CONFIG_MEMCG_KMEM | ||
3003 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | 2998 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, |
3004 | unsigned long limit) | 2999 | unsigned long limit) |
3005 | { | 3000 | { |
3006 | return -EINVAL; | 3001 | int ret; |
3002 | |||
3003 | mutex_lock(&memcg_limit_mutex); | ||
3004 | /* Top-level cgroup doesn't propagate from root */ | ||
3005 | if (!memcg_kmem_online(memcg)) { | ||
3006 | ret = memcg_online_kmem(memcg); | ||
3007 | if (ret) | ||
3008 | goto out; | ||
3009 | } | ||
3010 | ret = page_counter_limit(&memcg->kmem, limit); | ||
3011 | out: | ||
3012 | mutex_unlock(&memcg_limit_mutex); | ||
3013 | return ret; | ||
3007 | } | 3014 | } |
3008 | static void memcg_offline_kmem(struct mem_cgroup *memcg) | 3015 | #else |
3016 | static int memcg_update_kmem_limit(struct mem_cgroup *memcg, | ||
3017 | unsigned long limit) | ||
3009 | { | 3018 | { |
3019 | return -EINVAL; | ||
3010 | } | 3020 | } |
3011 | #endif /* CONFIG_MEMCG_KMEM */ | 3021 | #endif /* CONFIG_MEMCG_KMEM */ |
3012 | 3022 | ||
3023 | |||
3013 | /* | 3024 | /* |
3014 | * The user of this function is... | 3025 | * The user of this function is... |
3015 | * RES_LIMIT. | 3026 | * RES_LIMIT. |
@@ -4182,7 +4193,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |||
4182 | vmpressure_init(&memcg->vmpressure); | 4193 | vmpressure_init(&memcg->vmpressure); |
4183 | INIT_LIST_HEAD(&memcg->event_list); | 4194 | INIT_LIST_HEAD(&memcg->event_list); |
4184 | spin_lock_init(&memcg->event_list_lock); | 4195 | spin_lock_init(&memcg->event_list_lock); |
4185 | #ifdef CONFIG_MEMCG_KMEM | 4196 | #ifndef CONFIG_SLOB |
4186 | memcg->kmemcg_id = -1; | 4197 | memcg->kmemcg_id = -1; |
4187 | #endif | 4198 | #endif |
4188 | #ifdef CONFIG_CGROUP_WRITEBACK | 4199 | #ifdef CONFIG_CGROUP_WRITEBACK |
@@ -4244,10 +4255,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
4244 | } | 4255 | } |
4245 | mutex_unlock(&memcg_create_mutex); | 4256 | mutex_unlock(&memcg_create_mutex); |
4246 | 4257 | ||
4247 | #ifdef CONFIG_MEMCG_KMEM | ||
4248 | ret = memcg_propagate_kmem(memcg); | 4258 | ret = memcg_propagate_kmem(memcg); |
4249 | if (ret) | 4259 | if (ret) |
4250 | return ret; | 4260 | return ret; |
4261 | |||
4262 | #ifdef CONFIG_MEMCG_KMEM | ||
4251 | ret = tcp_init_cgroup(memcg); | 4263 | ret = tcp_init_cgroup(memcg); |
4252 | if (ret) | 4264 | if (ret) |
4253 | return ret; | 4265 | return ret; |
@@ -4308,8 +4320,9 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) | |||
4308 | static_branch_dec(&memcg_sockets_enabled_key); | 4320 | static_branch_dec(&memcg_sockets_enabled_key); |
4309 | #endif | 4321 | #endif |
4310 | 4322 | ||
4311 | #ifdef CONFIG_MEMCG_KMEM | ||
4312 | memcg_free_kmem(memcg); | 4323 | memcg_free_kmem(memcg); |
4324 | |||
4325 | #ifdef CONFIG_MEMCG_KMEM | ||
4313 | tcp_destroy_cgroup(memcg); | 4326 | tcp_destroy_cgroup(memcg); |
4314 | #endif | 4327 | #endif |
4315 | 4328 | ||
@@ -173,7 +173,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
173 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 173 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
174 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 174 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
175 | 175 | ||
176 | #ifdef CONFIG_MEMCG_KMEM | 176 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
177 | /* | 177 | /* |
178 | * Iterate over all memcg caches of the given root cache. The caller must hold | 178 | * Iterate over all memcg caches of the given root cache. The caller must hold |
179 | * slab_mutex. | 179 | * slab_mutex. |
@@ -251,7 +251,7 @@ static __always_inline int memcg_charge_slab(struct page *page, | |||
251 | 251 | ||
252 | extern void slab_init_memcg_params(struct kmem_cache *); | 252 | extern void slab_init_memcg_params(struct kmem_cache *); |
253 | 253 | ||
254 | #else /* !CONFIG_MEMCG_KMEM */ | 254 | #else /* CONFIG_MEMCG && !CONFIG_SLOB */ |
255 | 255 | ||
256 | #define for_each_memcg_cache(iter, root) \ | 256 | #define for_each_memcg_cache(iter, root) \ |
257 | for ((void)(iter), (void)(root); 0; ) | 257 | for ((void)(iter), (void)(root); 0; ) |
@@ -292,7 +292,7 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order, | |||
292 | static inline void slab_init_memcg_params(struct kmem_cache *s) | 292 | static inline void slab_init_memcg_params(struct kmem_cache *s) |
293 | { | 293 | { |
294 | } | 294 | } |
295 | #endif /* CONFIG_MEMCG_KMEM */ | 295 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
296 | 296 | ||
297 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) | 297 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) |
298 | { | 298 | { |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 8c262e6dc33e..b50aef01ccf7 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -128,7 +128,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | |||
128 | return i; | 128 | return i; |
129 | } | 129 | } |
130 | 130 | ||
131 | #ifdef CONFIG_MEMCG_KMEM | 131 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
132 | void slab_init_memcg_params(struct kmem_cache *s) | 132 | void slab_init_memcg_params(struct kmem_cache *s) |
133 | { | 133 | { |
134 | s->memcg_params.is_root_cache = true; | 134 | s->memcg_params.is_root_cache = true; |
@@ -221,7 +221,7 @@ static inline int init_memcg_params(struct kmem_cache *s, | |||
221 | static inline void destroy_memcg_params(struct kmem_cache *s) | 221 | static inline void destroy_memcg_params(struct kmem_cache *s) |
222 | { | 222 | { |
223 | } | 223 | } |
224 | #endif /* CONFIG_MEMCG_KMEM */ | 224 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * Find a mergeable slab cache | 227 | * Find a mergeable slab cache |
@@ -477,7 +477,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier) | |||
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | #ifdef CONFIG_MEMCG_KMEM | 480 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
481 | /* | 481 | /* |
482 | * memcg_create_kmem_cache - Create a cache for a memory cgroup. | 482 | * memcg_create_kmem_cache - Create a cache for a memory cgroup. |
483 | * @memcg: The memory cgroup the new cache is for. | 483 | * @memcg: The memory cgroup the new cache is for. |
@@ -689,7 +689,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s, | |||
689 | { | 689 | { |
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | #endif /* CONFIG_MEMCG_KMEM */ | 692 | #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ |
693 | 693 | ||
694 | void slab_kmem_cache_release(struct kmem_cache *s) | 694 | void slab_kmem_cache_release(struct kmem_cache *s) |
695 | { | 695 | { |
@@ -1123,7 +1123,7 @@ static int slab_show(struct seq_file *m, void *p) | |||
1123 | return 0; | 1123 | return 0; |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | #ifdef CONFIG_MEMCG_KMEM | 1126 | #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) |
1127 | int memcg_slab_show(struct seq_file *m, void *p) | 1127 | int memcg_slab_show(struct seq_file *m, void *p) |
1128 | { | 1128 | { |
1129 | struct kmem_cache *s = list_entry(p, struct kmem_cache, list); | 1129 | struct kmem_cache *s = list_entry(p, struct kmem_cache, list); |
@@ -5207,7 +5207,7 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
5207 | return -EIO; | 5207 | return -EIO; |
5208 | 5208 | ||
5209 | err = attribute->store(s, buf, len); | 5209 | err = attribute->store(s, buf, len); |
5210 | #ifdef CONFIG_MEMCG_KMEM | 5210 | #ifdef CONFIG_MEMCG |
5211 | if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { | 5211 | if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { |
5212 | struct kmem_cache *c; | 5212 | struct kmem_cache *c; |
5213 | 5213 | ||
@@ -5242,7 +5242,7 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
5242 | 5242 | ||
5243 | static void memcg_propagate_slab_attrs(struct kmem_cache *s) | 5243 | static void memcg_propagate_slab_attrs(struct kmem_cache *s) |
5244 | { | 5244 | { |
5245 | #ifdef CONFIG_MEMCG_KMEM | 5245 | #ifdef CONFIG_MEMCG |
5246 | int i; | 5246 | int i; |
5247 | char *buffer = NULL; | 5247 | char *buffer = NULL; |
5248 | struct kmem_cache *root_cache; | 5248 | struct kmem_cache *root_cache; |
@@ -5328,7 +5328,7 @@ static struct kset *slab_kset; | |||
5328 | 5328 | ||
5329 | static inline struct kset *cache_kset(struct kmem_cache *s) | 5329 | static inline struct kset *cache_kset(struct kmem_cache *s) |
5330 | { | 5330 | { |
5331 | #ifdef CONFIG_MEMCG_KMEM | 5331 | #ifdef CONFIG_MEMCG |
5332 | if (!is_root_cache(s)) | 5332 | if (!is_root_cache(s)) |
5333 | return s->memcg_params.root_cache->memcg_kset; | 5333 | return s->memcg_params.root_cache->memcg_kset; |
5334 | #endif | 5334 | #endif |
@@ -5405,7 +5405,7 @@ static int sysfs_slab_add(struct kmem_cache *s) | |||
5405 | if (err) | 5405 | if (err) |
5406 | goto out_del_kobj; | 5406 | goto out_del_kobj; |
5407 | 5407 | ||
5408 | #ifdef CONFIG_MEMCG_KMEM | 5408 | #ifdef CONFIG_MEMCG |
5409 | if (is_root_cache(s)) { | 5409 | if (is_root_cache(s)) { |
5410 | s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); | 5410 | s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); |
5411 | if (!s->memcg_kset) { | 5411 | if (!s->memcg_kset) { |
@@ -5438,7 +5438,7 @@ void sysfs_slab_remove(struct kmem_cache *s) | |||
5438 | */ | 5438 | */ |
5439 | return; | 5439 | return; |
5440 | 5440 | ||
5441 | #ifdef CONFIG_MEMCG_KMEM | 5441 | #ifdef CONFIG_MEMCG |
5442 | kset_unregister(s->memcg_kset); | 5442 | kset_unregister(s->memcg_kset); |
5443 | #endif | 5443 | #endif |
5444 | kobject_uevent(&s->kobj, KOBJ_REMOVE); | 5444 | kobject_uevent(&s->kobj, KOBJ_REMOVE); |