about · summary · refs · log · tree · commit · diff · stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
author: Johannes Weiner <hannes@cmpxchg.org> — 2014-08-06 19:06:04 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> — 2014-08-06 21:01:17 -0400
commita840cda63e543d41270698525542a82b7a8a18d7 (patch)
tree4e16e7a67514e3e3cbcd9368fdfeab8fd76f01f2 /mm/memcontrol.c
parent9a2385eef9f28fb5260c48c45fc8fe01f1da70a6 (diff)
mm: memcontrol: do not acquire page_cgroup lock for kmem pages
Kmem page charging and uncharging is serialized by means of exclusive access to the page. Do not take the page_cgroup lock and don't set pc->flags atomically. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Vladimir Davydov <vdavydov@parallels.com> Cc: Hugh Dickins <hughd@google.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 21
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c31bc40a5827..a6a062e409eb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3407,12 +3407,13 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 		return;
 	}
-
+	/*
+	 * The page is freshly allocated and not visible to any
+	 * outside callers yet. Set up pc non-atomically.
+	 */
 	pc = lookup_page_cgroup(page);
-	lock_page_cgroup(pc);
 	pc->mem_cgroup = memcg;
-	SetPageCgroupUsed(pc);
-	unlock_page_cgroup(pc);
+	pc->flags = PCG_USED;
 }
3417 3418
3418void __memcg_kmem_uncharge_pages(struct page *page, int order) 3419void __memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -3422,19 +3423,11 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Fast unlocked return. Theoretically might have changed, have to
-	 * check again after locking.
-	 */
 	if (!PageCgroupUsed(pc))
 		return;
 
-	lock_page_cgroup(pc);
-	if (PageCgroupUsed(pc)) {
-		memcg = pc->mem_cgroup;
-		ClearPageCgroupUsed(pc);
-	}
-	unlock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	pc->flags = 0;
 
 	/*
 	 * We trust that only if there is a memcg associated with the page, it