author      Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:02 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:05 -0500
commit      ad2b8e601099a23dffffb53f91c18d874fe98854 (patch)
tree        6b7706f3005fd799f23d0ddc21fe5b33fd1e79ac /mm/memcontrol.c
parent      5660048ccac8735d9bc0a46325a02e6a6518b5b2 (diff)
mm: memcg: remove optimization of keeping the root_mem_cgroup LRU lists empty
root_mem_cgroup, lacking a configurable limit, was never subject to
limit reclaim, so the pages charged to it could be kept off its LRU
lists. They would still be found on the global per-zone LRU lists
under physical memory pressure, and it made sense to avoid uselessly
linking them to both sets of lists.
The global per-zone LRU lists are about to go away on memcg-enabled
kernels, with all pages being exclusively linked to their respective
per-memcg LRU lists. As a result, pages of the root_mem_cgroup must
also be linked to its LRU lists again. This is purely about the LRU
lists; root_mem_cgroup is still not charged.
The overhead is temporary until the double-LRU scheme goes away
completely.
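To make the before-and-after concrete, here is a minimal,
self-contained userspace model of the linking rule. Everything in it
(add_to_lru, keep_root_off_memcg_lru, the stripped-down struct memcg
and list helpers) is an illustrative stand-in, not the kernel's actual
structures or API:

/*
 * Toy model, not kernel code: pre-patch, pages of the root group were
 * linked only to the global per-zone LRU; post-patch they are linked
 * to the root group's own LRU list as well.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

struct memcg { bool is_root; struct list_head lru; };
struct page { struct memcg *cgroup; struct list_head global_lru, memcg_lru; };

/* One flag toggles between the old and the new linking rule. */
static void add_to_lru(struct page *page, struct list_head *zone_lru,
		       bool keep_root_off_memcg_lru)
{
	list_add(&page->global_lru, zone_lru);
	/* Old optimization: root-owned pages skip their memcg's LRU. */
	if (keep_root_off_memcg_lru && page->cgroup->is_root)
		return;
	list_add(&page->memcg_lru, &page->cgroup->lru);
}

int main(void)
{
	bool old_rule[] = { true, false };

	for (int i = 0; i < 2; i++) {
		struct list_head zone_lru;
		struct memcg root = { .is_root = true };
		struct page page = { .cgroup = &root };

		list_init(&zone_lru);
		list_init(&root.lru);
		list_init(&page.memcg_lru);

		add_to_lru(&page, &zone_lru, old_rule[i]);
		printf("%s: global LRU %s, root memcg LRU %s\n",
		       old_rule[i] ? "pre-patch " : "post-patch",
		       list_empty(&zone_lru) ? "empty" : "linked",
		       list_empty(&root.lru) ? "empty" : "linked");
	}
	return 0;
}

Pre-patch the root memcg's LRU stays empty while the page remains
reachable via the global list; post-patch both lists are linked, which
is the temporary double bookkeeping the message refers to. The hunks
below remove exactly that early return at four call sites.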
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  12
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 750ed1449955..ad7f36f676ff 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1031,8 +1031,6 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	VM_BUG_ON(list_empty(&pc->lru));
 	list_del_init(&pc->lru);
 }
@@ -1057,13 +1055,11 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/* unused or root page is not rotated. */
+	/* unused page is not rotated. */
 	if (!PageCgroupUsed(pc))
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move_tail(&pc->lru, &mz->lists[lru]);
 }
@@ -1077,13 +1073,11 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/* unused or root page is not rotated. */
+	/* unused page is not rotated. */
 	if (!PageCgroupUsed(pc))
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move(&pc->lru, &mz->lists[lru]);
 }
@@ -1115,8 +1109,6 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	list_add(&pc->lru, &mz->lists[lru]);
 }
 