diff options
author | Johannes Weiner <jweiner@redhat.com> | 2012-01-12 20:18:10 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-12 23:13:05 -0500 |
commit | 6290df545814990ca2663baf6e894669132d5f73 (patch) | |
tree | c62472270ba81a7146bed0854be74e2e2338c629 /mm/swap.c | |
parent | b95a2f2d486d0d768a92879c023a03757b9c7e58 (diff) |
mm: collect LRU list heads into struct lruvec
Having a unified structure with an LRU list set for both global zones and
per-memcg zones allows the code that deals with LRU lists, and does not
care about the container itself, to be kept simple.
Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg
case and the global LRU case.
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 11 |
1 files changed, 5 insertions, 6 deletions
@@ -236,7 +236,7 @@ static void pagevec_move_tail_fn(struct page *page, void *arg) | |||
236 | 236 | ||
237 | if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { | 237 | if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { |
238 | enum lru_list lru = page_lru_base_type(page); | 238 | enum lru_list lru = page_lru_base_type(page); |
239 | list_move_tail(&page->lru, &zone->lru[lru].list); | 239 | list_move_tail(&page->lru, &zone->lruvec.lists[lru]); |
240 | mem_cgroup_rotate_reclaimable_page(page); | 240 | mem_cgroup_rotate_reclaimable_page(page); |
241 | (*pgmoved)++; | 241 | (*pgmoved)++; |
242 | } | 242 | } |
@@ -480,7 +480,7 @@ static void lru_deactivate_fn(struct page *page, void *arg) | |||
480 | * The page's writeback ends up during pagevec | 480 | * The page's writeback ends up during pagevec |
481 | * We move the page into tail of inactive. | 481 | * We move the page into tail of inactive. |
482 | */ | 482 | */ |
483 | list_move_tail(&page->lru, &zone->lru[lru].list); | 483 | list_move_tail(&page->lru, &zone->lruvec.lists[lru]); |
484 | mem_cgroup_rotate_reclaimable_page(page); | 484 | mem_cgroup_rotate_reclaimable_page(page); |
485 | __count_vm_event(PGROTATED); | 485 | __count_vm_event(PGROTATED); |
486 | } | 486 | } |
@@ -654,7 +654,6 @@ void lru_add_page_tail(struct zone* zone, | |||
654 | int active; | 654 | int active; |
655 | enum lru_list lru; | 655 | enum lru_list lru; |
656 | const int file = 0; | 656 | const int file = 0; |
657 | struct list_head *head; | ||
658 | 657 | ||
659 | VM_BUG_ON(!PageHead(page)); | 658 | VM_BUG_ON(!PageHead(page)); |
660 | VM_BUG_ON(PageCompound(page_tail)); | 659 | VM_BUG_ON(PageCompound(page_tail)); |
@@ -674,10 +673,10 @@ void lru_add_page_tail(struct zone* zone, | |||
674 | } | 673 | } |
675 | update_page_reclaim_stat(zone, page_tail, file, active); | 674 | update_page_reclaim_stat(zone, page_tail, file, active); |
676 | if (likely(PageLRU(page))) | 675 | if (likely(PageLRU(page))) |
677 | head = page->lru.prev; | 676 | __add_page_to_lru_list(zone, page_tail, lru, |
677 | page->lru.prev); | ||
678 | else | 678 | else |
679 | head = &zone->lru[lru].list; | 679 | add_page_to_lru_list(zone, page_tail, lru); |
680 | __add_page_to_lru_list(zone, page_tail, lru, head); | ||
681 | } else { | 680 | } else { |
682 | SetPageUnevictable(page_tail); | 681 | SetPageUnevictable(page_tail); |
683 | add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE); | 682 | add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE); |