author    Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:05 -0500
commit    6290df545814990ca2663baf6e894669132d5f73 (patch)
tree      c62472270ba81a7146bed0854be74e2e2338c629
parent    b95a2f2d486d0d768a92879c023a03757b9c7e58 (diff)
mm: collect LRU list heads into struct lruvec
Having a unified structure with an LRU list set for both global zones and
per-memcg zones allows the code that deals with LRU lists, and does not
care about the container itself, to stay simple.

Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg
case and the global LRU case.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
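[Editor's note: to make the point of the unification concrete, here is a
minimal, compilable userspace sketch of the pattern, not the kernel code
itself; the demo_* names and the simplified list helpers are hypothetical
stand-ins. Both container types embed the same struct lruvec, so a list
helper can take a struct lruvec pointer and never needs to know which
container it lives in.]

/*
 * Sketch only: hypothetical demo_* names, simplified list primitives.
 * The point is the layout: both containers embed the same struct lruvec.
 */
#include <stdio.h>

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, NR_LRU_LISTS };

struct list_head { struct list_head *prev, *next; };

static void init_list_head(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_add_head(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

struct lruvec { struct list_head lists[NR_LRU_LISTS]; };

/* Stand-ins for struct zone and struct mem_cgroup_per_zone. */
struct demo_zone       { struct lruvec lruvec; };
struct demo_memcg_zone { struct lruvec lruvec; };

struct demo_page { struct list_head lru; };

/* Shared helper: operates on the lruvec, ignorant of its container. */
static void demo_add_page(struct lruvec *lruvec, struct demo_page *page,
			  enum lru_list l)
{
	list_add_head(&page->lru, &lruvec->lists[l]);
}

int main(void)
{
	struct demo_zone zone;
	struct demo_memcg_zone mz;
	struct demo_page p1, p2;
	int l;

	for (l = 0; l < NR_LRU_LISTS; l++) {
		init_list_head(&zone.lruvec.lists[l]);
		init_list_head(&mz.lruvec.lists[l]);
	}

	/* One helper serves both the global and the per-memcg container. */
	demo_add_page(&zone.lruvec, &p1, LRU_ACTIVE_ANON);
	demo_add_page(&mz.lruvec, &p2, LRU_ACTIVE_ANON);

	printf("pages linked: %d\n",
	       zone.lruvec.lists[LRU_ACTIVE_ANON].next == &p1.lru &&
	       mz.lruvec.lists[LRU_ACTIVE_ANON].next == &p2.lru);
	return 0;
}

This is exactly the layout the hunks below introduce: struct zone and
struct mem_cgroup_per_zone each gain an embedded struct lruvec, and all
LRU list accesses go through lruvec.lists[].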
-rw-r--r--  include/linux/mm_inline.h |  2
-rw-r--r--  include/linux/mmzone.h    | 10
-rw-r--r--  mm/memcontrol.c           | 17
-rw-r--r--  mm/page_alloc.c           |  2
-rw-r--r--  mm/swap.c                 | 11
-rw-r--r--  mm/vmscan.c               | 10
6 files changed, 25 insertions, 27 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8f7d24712dc1..e6a7ffe16d31 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -33,7 +33,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
 }
 
 static inline void
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca6ca92418a6..42e544cd4c9f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -159,6 +159,10 @@ static inline int is_unevictable_lru(enum lru_list l)
 	return (l == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head	lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -364,10 +368,8 @@ struct zone {
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ad7f36f676ff..6e7f849a1a9e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_zone {
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	struct list_head	lists[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 	unsigned long		count[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lists[lru]);
+	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lists[lru]);
+	list_move(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lists[lru]);
+	list_add(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 /*
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lists[lru];
+	src = &mz->lruvec.lists[lru];
 
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 	zone = &NODE_DATA(node)->node_zones[zid];
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lists[lru];
+	list = &mz->lruvec.lists[lru];
 
 	loop = MEM_CGROUP_ZSTAT(mz, lru);
 	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
-			INIT_LIST_HEAD(&mz->lists[l]);
+			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->mem = memcg;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 794e6715c226..25c248eb7d5f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4288,7 +4288,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
 	zone_pcp_init(zone);
 	for_each_lru(l)
-		INIT_LIST_HEAD(&zone->lru[l].list);
+		INIT_LIST_HEAD(&zone->lruvec.lists[l]);
 	zone->reclaim_stat.recent_rotated[0] = 0;
 	zone->reclaim_stat.recent_rotated[1] = 0;
 	zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 67a09a633a09..76ef79d3857c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -236,7 +236,7 @@ static void pagevec_move_tail_fn(struct page *page, void *arg)
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		(*pgmoved)++;
 	}
@@ -480,7 +480,7 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		__count_vm_event(PGROTATED);
 	}
@@ -654,7 +654,6 @@ void lru_add_page_tail(struct zone* zone,
 	int active;
 	enum lru_list lru;
 	const int file = 0;
-	struct list_head *head;
 
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
@@ -674,10 +673,10 @@ void lru_add_page_tail(struct zone* zone,
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
 		if (likely(PageLRU(page)))
-			head = page->lru.prev;
+			__add_page_to_lru_list(zone, page_tail, lru,
+					       page->lru.prev);
 		else
-			head = &zone->lru[lru].list;
-		__add_page_to_lru_list(zone, page_tail, lru, head);
+			add_page_to_lru_list(zone, page_tail, lru);
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 024168cfdcb0..93cdc44a1693 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1250,8 +1250,8 @@ static unsigned long isolate_pages_global(unsigned long nr,
 		lru += LRU_ACTIVE;
 	if (file)
 		lru += LRU_FILE;
-	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-						mode, file);
+	return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
+				 scanned, order, mode, file);
 }
 
 /*
@@ -1630,7 +1630,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		list_move(&page->lru, &zone->lru[lru].list);
+		list_move(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved += hpage_nr_pages(page);
 
@@ -3448,7 +3448,7 @@ retry:
 		enum lru_list l = page_lru_base_type(page);
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lru[l].list);
+		list_move(&page->lru, &zone->lruvec.lists[l]);
 		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
@@ -3457,7 +3457,7 @@ retry:
 		 * rotate unevictable list
 		 */
 		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
 		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;