aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-08-06 19:06:01 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-06 21:01:17 -0400
commit9a2385eef9f28fb5260c48c45fc8fe01f1da70a6 (patch)
tree1bd5538a1901519316b5b59f73dbf3ed508a8501 /mm
parent05b8430123359886ef6a4146fba384e30d771b3f (diff)
mm: memcontrol: remove ordering between pc->mem_cgroup and PageCgroupUsed
There is a write barrier between setting pc->mem_cgroup and PageCgroupUsed, which was added to allow LRU operations to look up the memcg LRU list of a page without acquiring the page_cgroup lock.

But ever since commit 38c5d72f3ebe ("memcg: simplify LRU handling by new rule"), pages are ensured to be off-LRU while charging, so nobody else is changing LRU state while pc->mem_cgroup is being written, and there are no read barriers anymore.

Remove the unnecessary write barrier.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c9
1 file changed, 0 insertions, 9 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 07908ea954b6..c31bc40a5827 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2795,14 +2795,6 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	}
 
 	pc->mem_cgroup = memcg;
-	/*
-	 * We access a page_cgroup asynchronously without lock_page_cgroup().
-	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
-	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
-	 * before USED bit, we need memory barrier here.
-	 * See mem_cgroup_add_lru_list(), etc.
-	 */
-	smp_wmb();
 	SetPageCgroupUsed(pc);
 
 	if (lrucare) {
@@ -3483,7 +3475,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		pc = head_pc + i;
 		pc->mem_cgroup = memcg;
-		smp_wmb();/* see __commit_charge() */
 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
 	}
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],