| author | Johannes Weiner <hannes@cmpxchg.org> | 2011-01-20 17:44:31 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-20 20:02:06 -0500 |
| commit | 713735b4233fad3ae35b5cad656baa41413887ca (patch) | |
| tree | bba0310f23fb911106e04d75eebe1c6ee7e8d423 /mm | |
| parent | 2550326ac7a062fdfc204f9a3b98bdb9179638fc (diff) | |
memcg: correctly order reading PCG_USED and pc->mem_cgroup
The placement of the read-side barrier is confused: the writer first
sets pc->mem_cgroup, then PCG_USED. The read-side barrier has to be
between testing PCG_USED and reading pc->mem_cgroup.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
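The requirement is the usual publish/consume pairing between the charge path (which sets PCG_USED) and these lookup paths. Below is a minimal sketch of that pairing, assuming the memcontrol.c context of this era so the page_cgroup types and barrier primitives are in scope; the two helper names are hypothetical, and only the barrier placement mirrors what the patch enforces.

```c
/*
 * Simplified sketch of the ordering contract; the helper names are
 * hypothetical, the primitives are the ones the patch is about.
 */

/* Writer: store the payload, then publish it by setting PCG_USED. */
static void sketch_commit_charge(struct page_cgroup *pc,
				 struct mem_cgroup *mem)
{
	pc->mem_cgroup = mem;		/* payload store */
	smp_wmb();			/* order payload before the flag */
	SetPageCgroupUsed(pc);		/* publish: set PCG_USED */
}

/* Reader: test PCG_USED first, then order the payload load after it. */
static struct mem_cgroup *sketch_lookup(struct page_cgroup *pc)
{
	if (!PageCgroupUsed(pc))	/* flag load */
		return NULL;
	smp_rmb();			/* must sit between the two loads */
	return pc->mem_cgroup;		/* payload load, now ordered */
}
```

An smp_rmb() placed before the PageCgroupUsed() test, as in the old code, provides no useful ordering: it only orders the flag and payload loads against loads issued before the barrier, not against each other.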
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 27
1 file changed, 9 insertions, 18 deletions
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5b562b375cbd..db76ef726293 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -836,13 +836,12 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	/* unused or root page is not rotated. */
-	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+	if (!PageCgroupUsed(pc))
+		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
+	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	mz = page_cgroup_zoneinfo(pc);
 	list_move(&pc->lru, &mz->lists[lru]);
@@ -857,14 +856,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 		return;
 	pc = lookup_page_cgroup(page);
 	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -1031,14 +1026,10 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 		return NULL;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return NULL;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	if (!mz)
 		return NULL;
```
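Moving the barrier also keeps it off the early-return path: for pages without PCG_USED set, the smp_rmb() is no longer executed at all. In mem_cgroup_rotate_lru_list() this requires splitting the combined used/root test, so that mem_cgroup_is_root() reads pc->mem_cgroup only after the barrier.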