author	Michal Hocko <mhocko@suse.cz>	2014-04-07 18:37:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-07 19:35:56 -0400
commit	b6b6cc72bc404c952968530d7df4c3a4ab82b65b (patch)
tree	c04f019844dacfafdd61ad2f97bb1a0a845066ee /mm
parent	df381975463996178d685f6ef7d3555c5f887201 (diff)
memcg: do not replicate get_mem_cgroup_from_mm in __mem_cgroup_try_charge
__mem_cgroup_try_charge duplicates get_mem_cgroup_from_mm for charges which
came without a memcg.  The only reason seems to be a tiny optimization when
css_tryget is not called if the charge can be consumed from the stock.
Nevertheless css_tryget is very cheap since it has been reworked to use
per-cpu counting so this optimization doesn't give us anything these days.

So let's drop the code duplication so that the code is more readable.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
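For readers following the change below: the block removed from the else branch is essentially what get_mem_cgroup_from_mm() (introduced by the parent commit, df381975) already does. The following is only a rough sketch of that helper's shape at the time, reconstructed from context rather than copied from this commit, so the exact upstream code may differ:

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		/*
		 * Sketch: mm->owner (or mm itself, e.g. for swapoff-style
		 * charges against the mm_struct) can be NULL, in which
		 * case the charge falls back to the root cgroup.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
		/* retry until a reference on a live css is obtained */
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

Because the helper already takes a css reference via css_tryget(), the caller can drop it unconditionally at the end of the charge path, which is why the css_put() moves below the done: label in the second hunk.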
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	50
1 file changed, 6 insertions(+), 44 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 87c3ec37dd26..7480022d4655 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2697,52 +2697,14 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 again:
 	if (*ptr) { /* css should be a valid one */
 		memcg = *ptr;
-		if (mem_cgroup_is_root(memcg))
-			goto done;
-		if (consume_stock(memcg, nr_pages))
-			goto done;
 		css_get(&memcg->css);
 	} else {
-		struct task_struct *p;
-
-		rcu_read_lock();
-		p = rcu_dereference(mm->owner);
-		/*
-		 * Because we don't have task_lock(), "p" can exit.
-		 * In that case, "memcg" can point to root or p can be NULL with
-		 * race with swapoff. Then, we have small risk of mis-accouning.
-		 * But such kind of mis-account by race always happens because
-		 * we don't have cgroup_mutex(). It's overkill and we allo that
-		 * small race, here.
-		 * (*) swapoff at el will charge against mm-struct not against
-		 *     task-struct. So, mm->owner can be NULL.
-		 */
-		memcg = mem_cgroup_from_task(p);
-		if (!memcg)
-			memcg = root_mem_cgroup;
-		if (mem_cgroup_is_root(memcg)) {
-			rcu_read_unlock();
-			goto done;
-		}
-		if (consume_stock(memcg, nr_pages)) {
-			/*
-			 * It seems dagerous to access memcg without css_get().
-			 * But considering how consume_stok works, it's not
-			 * necessary. If consume_stock success, some charges
-			 * from this memcg are cached on this cpu. So, we
-			 * don't need to call css_get()/css_tryget() before
-			 * calling consume_stock().
-			 */
-			rcu_read_unlock();
-			goto done;
-		}
-		/* after here, we may be blocked. we need to get refcnt */
-		if (!css_tryget(&memcg->css)) {
-			rcu_read_unlock();
-			goto again;
-		}
-		rcu_read_unlock();
+		memcg = get_mem_cgroup_from_mm(mm);
 	}
+	if (mem_cgroup_is_root(memcg))
+		goto done;
+	if (consume_stock(memcg, nr_pages))
+		goto done;
 
 	do {
 		bool invoke_oom = oom && !nr_oom_retries;
@@ -2778,8 +2740,8 @@ again:
 
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
-	css_put(&memcg->css);
 done:
+	css_put(&memcg->css);
 	*ptr = memcg;
 	return 0;
 nomem: