aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2016-01-14 18:21:26 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-14 19:00:49 -0500
commit1109208766d9fa7059a9b66ad488e66d99ce49af (patch)
treeddca91a9ba2b6ae0e3b63fad0b94b89d50cd166f /mm/memcontrol.c
parent7941d2145abc4def5583f9d8d0b2e02647b6d1de (diff)
mm: memcontrol: move socket code for unified hierarchy accounting
The unified hierarchy memory controller will account socket memory. Move the infrastructure functions accordingly.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  148
1 file changed, 74 insertions, 74 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6903e7d8c7be..6aac8d2e31d7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -294,80 +294,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
294 	return mem_cgroup_from_css(css);
295 }
296
297/* Writing them here to avoid exposing memcg's inner layout */
298#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
299
300struct static_key memcg_sockets_enabled_key;
301EXPORT_SYMBOL(memcg_sockets_enabled_key);
302
303void sock_update_memcg(struct sock *sk)
304{
305 struct mem_cgroup *memcg;
306
307 /* Socket cloning can throw us here with sk_cgrp already
308 * filled. It won't however, necessarily happen from
309 * process context. So the test for root memcg given
310 * the current task's memcg won't help us in this case.
311 *
312 * Respecting the original socket's memcg is a better
313 * decision in this case.
314 */
315 if (sk->sk_memcg) {
316 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
317 css_get(&sk->sk_memcg->css);
318 return;
319 }
320
321 rcu_read_lock();
322 memcg = mem_cgroup_from_task(current);
323 if (memcg != root_mem_cgroup &&
324 memcg->tcp_mem.active &&
325 css_tryget_online(&memcg->css))
326 sk->sk_memcg = memcg;
327 rcu_read_unlock();
328}
329EXPORT_SYMBOL(sock_update_memcg);
330
331void sock_release_memcg(struct sock *sk)
332{
333 WARN_ON(!sk->sk_memcg);
334 css_put(&sk->sk_memcg->css);
335}
336
337/**
338 * mem_cgroup_charge_skmem - charge socket memory
339 * @memcg: memcg to charge
340 * @nr_pages: number of pages to charge
341 *
342 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
343 * @memcg's configured limit, %false if the charge had to be forced.
344 */
345bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
346{
347 struct page_counter *counter;
348
349 if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
350 nr_pages, &counter)) {
351 memcg->tcp_mem.memory_pressure = 0;
352 return true;
353 }
354 page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
355 memcg->tcp_mem.memory_pressure = 1;
356 return false;
357}
358
359/**
360 * mem_cgroup_uncharge_skmem - uncharge socket memory
361 * @memcg - memcg to uncharge
362 * @nr_pages - number of pages to uncharge
363 */
364void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
365{
366 page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
367}
368
369#endif
370
371#ifdef CONFIG_MEMCG_KMEM 297#ifdef CONFIG_MEMCG_KMEM
372/* 298/*
373 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 299 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
@@ -5607,6 +5533,80 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5607 commit_charge(newpage, memcg, true); 5533 commit_charge(newpage, memcg, true);
5608} 5534}
5609 5535
5536/* Writing them here to avoid exposing memcg's inner layout */
5537#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
5538
5539struct static_key memcg_sockets_enabled_key;
5540EXPORT_SYMBOL(memcg_sockets_enabled_key);
5541
5542void sock_update_memcg(struct sock *sk)
5543{
5544 struct mem_cgroup *memcg;
5545
5546 /* Socket cloning can throw us here with sk_cgrp already
5547 * filled. It won't however, necessarily happen from
5548 * process context. So the test for root memcg given
5549 * the current task's memcg won't help us in this case.
5550 *
5551 * Respecting the original socket's memcg is a better
5552 * decision in this case.
5553 */
5554 if (sk->sk_memcg) {
5555 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5556 css_get(&sk->sk_memcg->css);
5557 return;
5558 }
5559
5560 rcu_read_lock();
5561 memcg = mem_cgroup_from_task(current);
5562 if (memcg != root_mem_cgroup &&
5563 memcg->tcp_mem.active &&
5564 css_tryget_online(&memcg->css))
5565 sk->sk_memcg = memcg;
5566 rcu_read_unlock();
5567}
5568EXPORT_SYMBOL(sock_update_memcg);
5569
5570void sock_release_memcg(struct sock *sk)
5571{
5572 WARN_ON(!sk->sk_memcg);
5573 css_put(&sk->sk_memcg->css);
5574}
5575
5576/**
5577 * mem_cgroup_charge_skmem - charge socket memory
5578 * @memcg: memcg to charge
5579 * @nr_pages: number of pages to charge
5580 *
5581 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5582 * @memcg's configured limit, %false if the charge had to be forced.
5583 */
5584bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5585{
5586 struct page_counter *counter;
5587
5588 if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
5589 nr_pages, &counter)) {
5590 memcg->tcp_mem.memory_pressure = 0;
5591 return true;
5592 }
5593 page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
5594 memcg->tcp_mem.memory_pressure = 1;
5595 return false;
5596}
5597
5598/**
5599 * mem_cgroup_uncharge_skmem - uncharge socket memory
5600 * @memcg - memcg to uncharge
5601 * @nr_pages - number of pages to uncharge
5602 */
5603void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5604{
5605 page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
5606}
5607
5608#endif
5609
5610 /*
5611  * subsys_initcall() for memory controller.
5612  *