author	Johannes Weiner <hannes@cmpxchg.org>	2016-01-20 18:02:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:09:18 -0500
commit	d886f4e483ce63a3304adc9eda87031b93341c28 (patch)
tree	2a638984e7b055680ea4d93e2897659954fcd3be /mm
parent	d55f90bfab40e3b5db323711d28186ff09461692 (diff)
mm: memcontrol: rein in the CONFIG space madness
What CONFIG_INET and CONFIG_LEGACY_KMEM guard inside the memory controller code is insignificant; having these conditionals is not worth the complication and fragility that comes with them.

[akpm@linux-foundation.org: rework mem_cgroup_css_free() statement ordering]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
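The pattern applied throughout the diff below is to make previously #ifdef-guarded fields and code unconditional rather than keep compile-time variants. A minimal userspace sketch of that pattern, under the assumption that the guarded field always exists after the cleanup; the names (fake_memcg, tcp_pages, charge_tcp) are hypothetical and not taken from the kernel source:

#include <stdio.h>

/*
 * Hypothetical stand-in for a struct whose tcp_pages member used to be
 * compiled in only under a CONFIG option; here it is always present.
 */
struct fake_memcg {
	long pages;
	long tcp_pages;
};

/*
 * Before such a cleanup, a helper like this would typically sit inside
 * #ifdef CONFIG_FOO ... #else return -EINVAL; #endif. Afterwards the
 * body compiles unconditionally and the #else stub disappears.
 */
static int charge_tcp(struct fake_memcg *memcg, long nr_pages)
{
	memcg->tcp_pages += nr_pages;
	return 0;
}

int main(void)
{
	struct fake_memcg memcg = { .pages = 0, .tcp_pages = 0 };

	charge_tcp(&memcg, 4);
	printf("tcp pages charged: %ld\n", memcg.tcp_pages);
	return 0;
}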
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	53
-rw-r--r--	mm/vmpressure.c	2
2 files changed, 4 insertions(+), 51 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 15896708429b..379f9911b87b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2842,11 +2842,9 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	case _KMEM:
 		counter = &memcg->kmem;
 		break;
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 	case _TCP:
 		counter = &memcg->tcp_mem.memory_allocated;
 		break;
-#endif
 	default:
 		BUG();
 	}
@@ -3006,7 +3004,6 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
 }
 #endif /* !CONFIG_SLOB */
 
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
 				   unsigned long limit)
 {
@@ -3024,16 +3021,7 @@ out:
 	mutex_unlock(&memcg_limit_mutex);
 	return ret;
 }
-#else
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
-				   unsigned long limit)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_MEMCG_LEGACY_KMEM */
 
-
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 {
 	int ret;
@@ -3068,12 +3056,6 @@ out:
 	mutex_unlock(&memcg_limit_mutex);
 	return ret;
 }
-#else
-static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_MEMCG_LEGACY_KMEM && CONFIG_INET */
 
 /*
  * The user of this function is...
@@ -3136,11 +3118,9 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
 	case _KMEM:
 		counter = &memcg->kmem;
 		break;
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 	case _TCP:
 		counter = &memcg->tcp_mem.memory_allocated;
 		break;
-#endif
 	default:
 		BUG();
 	}
@@ -4094,7 +4074,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
 		.seq_show = memcg_numa_stat_show,
 	},
 #endif
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
 	{
 		.name = "kmem.limit_in_bytes",
 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
@@ -4127,7 +4106,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
 		.seq_show = memcg_slab_show,
 	},
 #endif
-#ifdef CONFIG_INET
 	{
 		.name = "kmem.tcp.limit_in_bytes",
 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
@@ -4151,8 +4129,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
 		.write = mem_cgroup_reset,
 		.read_u64 = mem_cgroup_read_u64,
 	},
-#endif
-#endif
 	{ },	/* terminate */
 };
 
@@ -4280,15 +4256,13 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	vmpressure_init(&memcg->vmpressure);
 	INIT_LIST_HEAD(&memcg->event_list);
 	spin_lock_init(&memcg->event_list_lock);
+	memcg->socket_pressure = jiffies;
 #ifndef CONFIG_SLOB
 	memcg->kmemcg_id = -1;
 #endif
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
-#ifdef CONFIG_INET
-	memcg->socket_pressure = jiffies;
-#endif
 	return &memcg->css;
 
 free_out:
@@ -4321,10 +4295,8 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		memcg->soft_limit = PAGE_COUNTER_MAX;
 		page_counter_init(&memcg->memsw, &parent->memsw);
 		page_counter_init(&memcg->kmem, &parent->kmem);
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 		page_counter_init(&memcg->tcp_mem.memory_allocated,
 				  &parent->tcp_mem.memory_allocated);
-#endif
 
 		/*
 		 * No need to take a reference to the parent because cgroup
@@ -4336,9 +4308,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		memcg->soft_limit = PAGE_COUNTER_MAX;
 		page_counter_init(&memcg->memsw, NULL);
 		page_counter_init(&memcg->kmem, NULL);
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 		page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
-#endif
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
@@ -4353,10 +4323,8 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_INET
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
 		static_branch_inc(&memcg_sockets_enabled_key);
-#endif
 
 	/*
 	 * Make sure the memcg is initialized: mem_cgroup_iter()
@@ -4403,18 +4371,13 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-#ifdef CONFIG_INET
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
 		static_branch_dec(&memcg_sockets_enabled_key);
-#endif
-
-	memcg_free_kmem(memcg);
 
-#if defined(CONFIG_MEMCG_LEGACY_KMEM) && defined(CONFIG_INET)
 	if (memcg->tcp_mem.active)
 		static_branch_dec(&memcg_sockets_enabled_key);
-#endif
 
+	memcg_free_kmem(memcg);
 	__mem_cgroup_free(memcg);
 }
 
@@ -5613,8 +5576,6 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 	commit_charge(newpage, memcg, true);
 }
 
-#ifdef CONFIG_INET
-
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
 EXPORT_SYMBOL(memcg_sockets_enabled_key);
 
@@ -5640,10 +5601,8 @@ void sock_update_memcg(struct sock *sk)
 	memcg = mem_cgroup_from_task(current);
 	if (memcg == root_mem_cgroup)
 		goto out;
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
 		goto out;
-#endif
 	if (css_tryget_online(&memcg->css))
 		sk->sk_memcg = memcg;
 out:
@@ -5669,7 +5628,6 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	gfp_t gfp_mask = GFP_KERNEL;
 
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
 		struct page_counter *counter;
 
@@ -5682,7 +5640,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 		memcg->tcp_mem.memory_pressure = 1;
 		return false;
 	}
-#endif
+
 	/* Don't block in the packet receive path */
 	if (in_softirq())
 		gfp_mask = GFP_NOWAIT;
@@ -5701,19 +5659,16 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
  */
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-#ifdef CONFIG_MEMCG_LEGACY_KMEM
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
 		page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
 				      nr_pages);
 		return;
 	}
-#endif
+
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	css_put_many(&memcg->css, nr_pages);
 }
 
-#endif /* CONFIG_INET */
-
 static int __init cgroup_memory(char *s)
 {
 	char *token;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 89b1d441af4b..9a6c0704211c 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -275,7 +275,6 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
 
 		level = vmpressure_calc_level(scanned, reclaimed);
 
-#ifdef CONFIG_INET
 		if (level > VMPRESSURE_LOW) {
 			/*
 			 * Let the socket buffer allocator know that
@@ -287,7 +286,6 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
 			 */
 			memcg->socket_pressure = jiffies + HZ;
 		}
-#endif
 	}
 }
 