author     Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:05 -0500
commit     925b7673cce39116ce61e7a06683a4a0dad1e72a
tree       66c134db836e531e196ee3dfc23c124ff74ac827 /mm/vmscan.c
parent     6290df545814990ca2663baf6e894669132d5f73
mm: make per-memcg LRU lists exclusive
Now that all code that operated on global per-zone LRU lists is
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.
The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a
descriptor that exists for every page frame in the system.
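To make the structural change concrete, here is a minimal userspace sketch; the struct names and the simplified list_head are illustrative stand-ins, not the kernel definitions. Under the old double-LRU scheme every page frame was linked onto two LRU lists at once: through page->lru into the zone's global lists, and through pc->lru in its struct page_cgroup into the memcg's private lists. With this patch the per-memcg lruvec holds the only LRU lists and page->lru links into them directly:

#include <stdio.h>

/* Stand-in for the kernel's struct list_head: two pointers. */
struct list_head { struct list_head *next, *prev; };

/* Before: double-LRU scheme, two linkages per page frame. */
struct old_page        { struct list_head lru; }; /* zone's global LRU   */
struct old_page_cgroup { struct list_head lru; }; /* memcg's private LRU */

/* After: the lruvec owns the lists, page->lru is the only linkage,
 * and page_cgroup no longer carries an lru member at all. */
#define NR_LRU_LISTS 5  /* anon/file x inactive/active, plus unevictable */
struct lruvec   { struct list_head lists[NR_LRU_LISTS]; };
struct new_page { struct list_head lru; };

int main(void)
{
	/* One list_head (two pointers) saved per page frame descriptor. */
	printf("LRU linkage bytes per frame, before: %zu\n",
	       sizeof(struct old_page) + sizeof(struct old_page_cgroup));
	printf("LRU linkage bytes per frame, after:  %zu\n",
	       sizeof(struct new_page));
	return 0;
}

On 64-bit, the removed list_head is two 8-byte pointers, i.e. 16 bytes per page frame; with 4 KiB pages that is roughly 0.4% of total memory handed back wherever page_cgroup descriptors are allocated.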
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 64 +++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 93cdc44a1693..813aae820a27 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1139,15 +1139,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		switch (__isolate_lru_page(page, mode, file)) {
 		case 0:
+			mem_cgroup_lru_del(page);
 			list_move(&page->lru, dst);
-			mem_cgroup_del_lru(page);
 			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
 			/* else it is being freed elsewhere */
 			list_move(&page->lru, src);
-			mem_cgroup_rotate_lru_list(page, page_lru(page));
 			continue;
 
 		default:
@@ -1197,8 +1196,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				break;
 
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+				mem_cgroup_lru_del(cursor_page);
 				list_move(&cursor_page->lru, dst);
-				mem_cgroup_del_lru(cursor_page);
 				nr_taken += hpage_nr_pages(cursor_page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
@@ -1239,18 +1238,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	return nr_taken;
 }
 
-static unsigned long isolate_pages_global(unsigned long nr,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z, int active, int file)
+static unsigned long isolate_pages(unsigned long nr, struct mem_cgroup_zone *mz,
+				   struct list_head *dst,
+				   unsigned long *scanned, int order,
+				   isolate_mode_t mode, int active, int file)
 {
+	struct lruvec *lruvec;
 	int lru = LRU_BASE;
+
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	if (active)
 		lru += LRU_ACTIVE;
 	if (file)
 		lru += LRU_FILE;
-	return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
+	return isolate_lru_pages(nr, &lruvec->lists[lru], dst,
 				 scanned, order, mode, file);
 }
 
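For orientation, the lru index arithmetic in the new isolate_pages() (start at LRU_BASE, then += LRU_ACTIVE and += LRU_FILE) works because the LRU list indices are laid out additively. The sketch below mirrors the enum lru_list definitions from include/linux/mmzone.h of this era; it is reproduced for reference and is not part of this patch.

/* LRU_ACTIVE and LRU_FILE are additive offsets from LRU_BASE, so
 * LRU_BASE + active + file selects one of the four evictable lists;
 * the unevictable list sits at the end. */
#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};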
@@ -1518,14 +1519,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	spin_lock_irq(&zone->lru_lock);
 
-	if (scanning_global_lru(mz)) {
-		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone,
-			mz->mem_cgroup, 0, file);
-	}
+	nr_taken = isolate_pages(nr_to_scan, mz, &page_list,
+				 &nr_scanned, sc->order,
+				 reclaim_mode, 0, file);
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1625,13 +1621,15 @@ static void move_active_pages_to_lru(struct zone *zone,
 	pagevec_init(&pvec, 1);
 
 	while (!list_empty(list)) {
+		struct lruvec *lruvec;
+
 		page = lru_to_page(list);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		list_move(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_add_lru_list(page, lru);
+		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		list_move(&page->lru, &lruvec->lists[lru]);
 		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
@@ -1672,17 +1670,10 @@ static void shrink_active_list(unsigned long nr_pages,
 		reclaim_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&zone->lru_lock);
-	if (scanning_global_lru(mz)) {
-		nr_taken = isolate_pages_global(nr_pages, &l_hold,
-						&pgscanned, sc->order,
-						reclaim_mode, zone,
-						1, file);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
-						&pgscanned, sc->order,
-						reclaim_mode, zone,
-						mz->mem_cgroup, 1, file);
-	}
+
+	nr_taken = isolate_pages(nr_pages, mz, &l_hold,
+				 &pgscanned, sc->order,
+				 reclaim_mode, 1, file);
 
 	if (global_reclaim(sc))
 		zone->pages_scanned += pgscanned;
@@ -3440,16 +3431,18 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
  */
 static void check_move_unevictable_page(struct page *page, struct zone *zone)
 {
-	VM_BUG_ON(PageActive(page));
+	struct lruvec *lruvec;
 
+	VM_BUG_ON(PageActive(page));
 retry:
 	ClearPageUnevictable(page);
 	if (page_evictable(page, NULL)) {
 		enum lru_list l = page_lru_base_type(page);
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lruvec.lists[l]);
-		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+		lruvec = mem_cgroup_lru_move_lists(zone, page,
+						   LRU_UNEVICTABLE, l);
+		list_move(&page->lru, &lruvec->lists[l]);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
 	} else {
@@ -3457,8 +3450,9 @@ retry:
 		 * rotate unevictable list
 		 */
 		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
-		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
+						   LRU_UNEVICTABLE);
+		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
 		if (page_evictable(page, NULL))
 			goto retry;
 	}