author		Johannes Weiner <jweiner@redhat.com>		2012-01-12 20:18:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:05 -0500
commit		925b7673cce39116ce61e7a06683a4a0dad1e72a (patch)
tree		66c134db836e531e196ee3dfc23c124ff74ac827 /mm/swap.c
parent		6290df545814990ca2663baf6e894669132d5f73 (diff)
mm: make per-memcg LRU lists exclusive
Now that all code that operated on the global per-zone LRU lists has been
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from the
page_cgroup descriptor that exists for every page frame in the system.
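To make the layout change concrete, here is a simplified before/after
sketch (a sketch only: the field lists are abbreviated, and the pre-patch
page_cgroup layout is reconstructed from the description above rather
than quoted from the tree):

	/* Before: every page frame had a companion page_cgroup descriptor
	 * with its own list head, so a page could be linked on two LRUs at
	 * once: the global per-zone list via page->lru and the memcg list
	 * via pc->lru. (Reconstructed layout, not quoted verbatim.) */
	struct page_cgroup {
		unsigned long flags;
		struct mem_cgroup *mem_cgroup;
		struct list_head lru;	/* removed by this patch */
	};

	/* After: the lruvec introduced by the parent commit (6290df545814)
	 * holds the only LRU lists, and page->lru links into them directly,
	 * saving one list_head -- two pointers -- per page frame. */
	struct lruvec {
		struct list_head lists[NR_LRU_LISTS];
	};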
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -232,12 +232,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 static void pagevec_move_tail_fn(struct page *page, void *arg)
 {
 	int *pgmoved = arg;
-	struct zone *zone = page_zone(page);
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		struct lruvec *lruvec;
+
+		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+						   page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
 }
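The rotate path above shows the new pattern: instead of touching
zone->lruvec directly and then calling mem_cgroup_rotate_reclaimable_page()
separately, a single call both notifies memcg and returns the lruvec whose
list page->lru actually lives on. The helper itself is added in
mm/memcontrol.c and so does not appear in this mm/swap.c diffstat; a
plausible sketch of its shape, inferred from the call sites (the del/add
pairing inside it is an assumption):

	/* Sketch only -- reconstructed from the call sites in this patch;
	 * the real definition lives in mm/memcontrol.c, outside this
	 * diffstat. */
	struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						 struct page *page,
						 enum lru_list from,
						 enum lru_list to)
	{
		mem_cgroup_lru_del_list(page, from);	/* unaccount @from */
		return mem_cgroup_lru_add_list(zone, page, to);
	}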
@@ -476,12 +478,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 */
 		SetPageReclaim(page);
 	} else {
+		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
@@ -663,6 +666,8 @@ void lru_add_page_tail(struct zone* zone,
 	SetPageLRU(page_tail);
 
 	if (page_evictable(page_tail, NULL)) {
+		struct lruvec *lruvec;
+
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
@@ -672,11 +677,13 @@ void lru_add_page_tail(struct zone* zone,
 			lru = LRU_INACTIVE_ANON;
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
+		lruvec = mem_cgroup_lru_add_list(zone, page_tail, lru);
 		if (likely(PageLRU(page)))
-			__add_page_to_lru_list(zone, page_tail, lru,
-					       page->lru.prev);
+			list_add(&page_tail->lru, page->lru.prev);
 		else
-			add_page_to_lru_list(zone, page_tail, lru);
+			list_add(&page_tail->lru, &lruvec->lists[lru]);
+		__mod_zone_page_state(zone, NR_LRU_BASE + lru,
+				      hpage_nr_pages(page_tail));
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
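Note why this last hunk open-codes list_add() rather than keeping the
add_page_to_lru_list() helpers: a compound tail page must sometimes be
spliced in directly behind its head page (page->lru.prev) instead of at
the head of the list, so the zone counter update the old helper performed
implicitly is made explicit here via __mod_zone_page_state() with
hpage_nr_pages(page_tail). The generic helper presumably ends up with the
same shape after this patch (a sketch assuming include/linux/mm_inline.h,
which is outside this mm/swap.c diffstat, was converted along the same
lines):

	/* Sketch, assuming the generic helper mirrors the open-coded
	 * sequence above; not quoted from the actual change to
	 * include/linux/mm_inline.h. */
	static inline void
	add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
	{
		struct lruvec *lruvec = mem_cgroup_lru_add_list(zone, page, lru);

		list_add(&page->lru, &lruvec->lists[lru]);
		__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
	}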