author	Minchan Kim <minchan.kim@gmail.com>	2011-03-22 19:32:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-22 20:44:03 -0400
commit	3f58a82943337fb6e79acfa5346719a97d3c0b98 (patch)
tree	667441ac13c845edac93c937d0baba03a2021ea8 /mm
parent	315601809d124d046abd6c3ffa346d0dbd7aa29d (diff)
memcg: move memcg reclaimable page into tail of inactive list
The rotate_reclaimable_page() function moves pages that have just been written out, and which the VM wanted to reclaim, to the tail of the inactive list. That way the VM will find those pages first the next time it needs to free memory. This patch applies the same rule inside memcg, which helps prevent unnecessary eviction of a memcg's working set pages.

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
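For context, the rotation described above is driven from the writeback completion path: when writeback ends on a page that was marked PG_reclaim, the page is batched onto a per-CPU pagevec and later moved to the tail of its LRU list under zone->lru_lock. A rough sketch of that pre-existing chain, abbreviated from mm/filemap.c and mm/swap.c of this kernel era (a sketch for orientation, not part of this patch):

	/* mm/filemap.c (pre-existing, abbreviated) */
	void end_page_writeback(struct page *page)
	{
		/* PG_reclaim was set when the VM queued the page for writeback. */
		if (TestClearPageReclaim(page))
			rotate_reclaimable_page(page);	/* defer to a per-CPU pagevec */

		/* ... clear PG_writeback and wake waiters (elided) ... */
	}

	/* mm/swap.c (pre-existing, abbreviated) */
	void rotate_reclaimable_page(struct page *page)
	{
		if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
		    !PageUnevictable(page) && PageLRU(page)) {
			struct pagevec *pvec;
			unsigned long flags;

			page_cache_get(page);
			local_irq_save(flags);
			pvec = &__get_cpu_var(lru_rotate_pvecs);
			if (!pagevec_add(pvec, page))
				pagevec_move_tail(pvec);	/* drains under zone->lru_lock */
			local_irq_restore(flags);
		}
	}

pagevec_move_tail() is where the actual list_move_tail() happens, and it is exactly where this patch adds the memcg hook (see the mm/swap.c hunk below).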
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	26
-rw-r--r--	mm/swap.c	3
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6ef5c53dffcb..9e0f05efd114 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -829,6 +829,32 @@ void mem_cgroup_del_lru(struct page *page)
 	mem_cgroup_del_lru_list(page, page_lru(page));
 }
 
+/*
+ * Writeback is about to end against a page which has been marked for immediate
+ * reclaim. If it still appears to be reclaimable, move it to the tail of the
+ * inactive list.
+ */
+void mem_cgroup_rotate_reclaimable_page(struct page *page)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct page_cgroup *pc;
+	enum lru_list lru = page_lru(page);
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(page);
+	/* unused or root page is not rotated. */
+	if (!PageCgroupUsed(pc))
+		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
+	if (mem_cgroup_is_root(pc->mem_cgroup))
+		return;
+	mz = page_cgroup_zoneinfo(pc);
+	list_move_tail(&pc->lru, &mz->lists[lru]);
+}
+
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
diff --git a/mm/swap.c b/mm/swap.c
index 4aea806d0d44..1b9e4ebaffc8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -200,8 +200,9 @@ static void pagevec_move_tail(struct pagevec *pvec)
 			spin_lock(&zone->lru_lock);
 		}
 		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-			int lru = page_lru_base_type(page);
+			enum lru_list lru = page_lru_base_type(page);
 			list_move_tail(&page->lru, &zone->lru[lru].list);
+			mem_cgroup_rotate_reclaimable_page(page);
 			pgmoved++;
 		}
 	}
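A note on the smp_rmb() in the mm/memcontrol.c hunk: pc->flags and pc->mem_cgroup are read here without lock_page_cgroup(), so the read barrier pairs with an smp_wmb() on the charge side, which stores pc->mem_cgroup before setting the PCG_USED bit. A minimal sketch of that pairing (the charge-side fragment is abbreviated from __mem_cgroup_commit_charge() of this era, for illustration only):

	/* Charge side (abbreviated): publish the pointer before the flag. */
	pc->mem_cgroup = mem;
	smp_wmb();			/* order the store before PCG_USED */
	SetPageCgroupUsed(pc);

	/* Reader side, as in mem_cgroup_rotate_reclaimable_page() above. */
	if (!PageCgroupUsed(pc))
		return;
	smp_rmb();			/* pairs with the smp_wmb() above */
	/* pc->mem_cgroup can now be dereferenced safely. */

Without this pairing, the rotate path could observe PCG_USED set but still read a stale pc->mem_cgroup on a weakly ordered architecture.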