diff options
| author | Mel Gorman <mel@csn.ul.ie> | 2011-01-13 18:46:00 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:34 -0500 |
| commit | f3a310bc4e5ce7e55e1c8e25c31e63af017f3e50 (patch) | |
| tree | 0c78777bd505f44edeb9bbcc50fb3154896574aa | |
| parent | 9927af740b1b9b1e769310bd0b91425e8047b803 (diff) | |
mm: vmscan: rename lumpy_mode to reclaim_mode
With compaction being used instead of lumpy reclaim, the name lumpy_mode
and the associated variables are a bit misleading. Rename lumpy_mode to
reclaim_mode, which is a better fit. There is no functional change.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | include/trace/events/vmscan.h | 6 | ||||
| -rw-r--r-- | mm/vmscan.c | 70 |
2 files changed, 38 insertions, 38 deletions
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index be76429962ca..ea422aaa23e1 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h | |||
| @@ -25,13 +25,13 @@ | |||
| 25 | 25 | ||
| 26 | #define trace_reclaim_flags(page, sync) ( \ | 26 | #define trace_reclaim_flags(page, sync) ( \ |
| 27 | (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ | 27 | (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ |
| 28 | (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ | 28 | (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ |
| 29 | ) | 29 | ) |
| 30 | 30 | ||
| 31 | #define trace_shrink_flags(file, sync) ( \ | 31 | #define trace_shrink_flags(file, sync) ( \ |
| 32 | (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ | 32 | (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \ |
| 33 | (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ | 33 | (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ |
| 34 | (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ | 34 | (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ |
| 35 | ) | 35 | ) |
| 36 | 36 | ||
| 37 | TRACE_EVENT(mm_vmscan_kswapd_sleep, | 37 | TRACE_EVENT(mm_vmscan_kswapd_sleep, |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8320d115c85d..7037cc8c60b6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -53,22 +53,22 @@ | |||
| 53 | #include <trace/events/vmscan.h> | 53 | #include <trace/events/vmscan.h> |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * lumpy_mode determines how the inactive list is shrunk | 56 | * reclaim_mode determines how the inactive list is shrunk |
| 57 | * LUMPY_MODE_SINGLE: Reclaim only order-0 pages | 57 | * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages |
| 58 | * LUMPY_MODE_ASYNC: Do not block | 58 | * RECLAIM_MODE_ASYNC: Do not block |
| 59 | * LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback | 59 | * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback |
| 60 | * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference | 60 | * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference |
| 61 | * page from the LRU and reclaim all pages within a | 61 | * page from the LRU and reclaim all pages within a |
| 62 | * naturally aligned range | 62 | * naturally aligned range |
| 63 | * LUMPY_MODE_COMPACTION: For high-order allocations, reclaim a number of | 63 | * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of |
| 64 | * order-0 pages and then compact the zone | 64 | * order-0 pages and then compact the zone |
| 65 | */ | 65 | */ |
| 66 | typedef unsigned __bitwise__ lumpy_mode; | 66 | typedef unsigned __bitwise__ reclaim_mode_t; |
| 67 | #define LUMPY_MODE_SINGLE ((__force lumpy_mode)0x01u) | 67 | #define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u) |
| 68 | #define LUMPY_MODE_ASYNC ((__force lumpy_mode)0x02u) | 68 | #define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u) |
| 69 | #define LUMPY_MODE_SYNC ((__force lumpy_mode)0x04u) | 69 | #define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u) |
| 70 | #define LUMPY_MODE_CONTIGRECLAIM ((__force lumpy_mode)0x08u) | 70 | #define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u) |
| 71 | #define LUMPY_MODE_COMPACTION ((__force lumpy_mode)0x10u) | 71 | #define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u) |
| 72 | 72 | ||
| 73 | struct scan_control { | 73 | struct scan_control { |
| 74 | /* Incremented by the number of inactive pages that were scanned */ | 74 | /* Incremented by the number of inactive pages that were scanned */ |
| @@ -101,7 +101,7 @@ struct scan_control { | |||
| 101 | * Intend to reclaim enough continuous memory rather than reclaim | 101 | * Intend to reclaim enough continuous memory rather than reclaim |
| 102 | * enough amount of memory. i.e, mode for high order allocation. | 102 | * enough amount of memory. i.e, mode for high order allocation. |
| 103 | */ | 103 | */ |
| 104 | lumpy_mode lumpy_reclaim_mode; | 104 | reclaim_mode_t reclaim_mode; |
| 105 | 105 | ||
| 106 | /* Which cgroup do we reclaim from */ | 106 | /* Which cgroup do we reclaim from */ |
| 107 | struct mem_cgroup *mem_cgroup; | 107 | struct mem_cgroup *mem_cgroup; |
| @@ -284,10 +284,10 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, | |||
| 284 | return ret; | 284 | return ret; |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, | 287 | static void set_reclaim_mode(int priority, struct scan_control *sc, |
| 288 | bool sync) | 288 | bool sync) |
| 289 | { | 289 | { |
| 290 | lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC; | 290 | reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC; |
| 291 | 291 | ||
| 292 | /* | 292 | /* |
| 293 | * Initially assume we are entering either lumpy reclaim or | 293 | * Initially assume we are entering either lumpy reclaim or |
| @@ -295,9 +295,9 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, | |||
| 295 | * sync mode or just reclaim order-0 pages later. | 295 | * sync mode or just reclaim order-0 pages later. |
| 296 | */ | 296 | */ |
| 297 | if (COMPACTION_BUILD) | 297 | if (COMPACTION_BUILD) |
| 298 | sc->lumpy_reclaim_mode = LUMPY_MODE_COMPACTION; | 298 | sc->reclaim_mode = RECLAIM_MODE_COMPACTION; |
| 299 | else | 299 | else |
| 300 | sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM; | 300 | sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM; |
| 301 | 301 | ||
| 302 | /* | 302 | /* |
| 303 | * Avoid using lumpy reclaim or reclaim/compaction if possible by | 303 | * Avoid using lumpy reclaim or reclaim/compaction if possible by |
| @@ -305,16 +305,16 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, | |||
| 305 | * under memory pressure | 305 | * under memory pressure |
| 306 | */ | 306 | */ |
| 307 | if (sc->order > PAGE_ALLOC_COSTLY_ORDER) | 307 | if (sc->order > PAGE_ALLOC_COSTLY_ORDER) |
| 308 | sc->lumpy_reclaim_mode |= syncmode; | 308 | sc->reclaim_mode |= syncmode; |
| 309 | else if (sc->order && priority < DEF_PRIORITY - 2) | 309 | else if (sc->order && priority < DEF_PRIORITY - 2) |
| 310 | sc->lumpy_reclaim_mode |= syncmode; | 310 | sc->reclaim_mode |= syncmode; |
| 311 | else | 311 | else |
| 312 | sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC; | 312 | sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static void disable_lumpy_reclaim_mode(struct scan_control *sc) | 315 | static void reset_reclaim_mode(struct scan_control *sc) |
| 316 | { | 316 | { |
| 317 | sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC; | 317 | sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | static inline int is_page_cache_freeable(struct page *page) | 320 | static inline int is_page_cache_freeable(struct page *page) |
| @@ -445,7 +445,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 445 | * first attempt to free a range of pages fails. | 445 | * first attempt to free a range of pages fails. |
| 446 | */ | 446 | */ |
| 447 | if (PageWriteback(page) && | 447 | if (PageWriteback(page) && |
| 448 | (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC)) | 448 | (sc->reclaim_mode & RECLAIM_MODE_SYNC)) |
| 449 | wait_on_page_writeback(page); | 449 | wait_on_page_writeback(page); |
| 450 | 450 | ||
| 451 | if (!PageWriteback(page)) { | 451 | if (!PageWriteback(page)) { |
| @@ -453,7 +453,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 453 | ClearPageReclaim(page); | 453 | ClearPageReclaim(page); |
| 454 | } | 454 | } |
| 455 | trace_mm_vmscan_writepage(page, | 455 | trace_mm_vmscan_writepage(page, |
| 456 | trace_reclaim_flags(page, sc->lumpy_reclaim_mode)); | 456 | trace_reclaim_flags(page, sc->reclaim_mode)); |
| 457 | inc_zone_page_state(page, NR_VMSCAN_WRITE); | 457 | inc_zone_page_state(page, NR_VMSCAN_WRITE); |
| 458 | return PAGE_SUCCESS; | 458 | return PAGE_SUCCESS; |
| 459 | } | 459 | } |
| @@ -638,7 +638,7 @@ static enum page_references page_check_references(struct page *page, | |||
| 638 | referenced_page = TestClearPageReferenced(page); | 638 | referenced_page = TestClearPageReferenced(page); |
| 639 | 639 | ||
| 640 | /* Lumpy reclaim - ignore references */ | 640 | /* Lumpy reclaim - ignore references */ |
| 641 | if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM) | 641 | if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) |
| 642 | return PAGEREF_RECLAIM; | 642 | return PAGEREF_RECLAIM; |
| 643 | 643 | ||
| 644 | /* | 644 | /* |
| @@ -755,7 +755,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 755 | * for any page for which writeback has already | 755 | * for any page for which writeback has already |
| 756 | * started. | 756 | * started. |
| 757 | */ | 757 | */ |
| 758 | if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) && | 758 | if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) && |
| 759 | may_enter_fs) | 759 | may_enter_fs) |
| 760 | wait_on_page_writeback(page); | 760 | wait_on_page_writeback(page); |
| 761 | else { | 761 | else { |
| @@ -911,7 +911,7 @@ cull_mlocked: | |||
| 911 | try_to_free_swap(page); | 911 | try_to_free_swap(page); |
| 912 | unlock_page(page); | 912 | unlock_page(page); |
| 913 | putback_lru_page(page); | 913 | putback_lru_page(page); |
| 914 | disable_lumpy_reclaim_mode(sc); | 914 | reset_reclaim_mode(sc); |
| 915 | continue; | 915 | continue; |
| 916 | 916 | ||
| 917 | activate_locked: | 917 | activate_locked: |
| @@ -924,7 +924,7 @@ activate_locked: | |||
| 924 | keep_locked: | 924 | keep_locked: |
| 925 | unlock_page(page); | 925 | unlock_page(page); |
| 926 | keep: | 926 | keep: |
| 927 | disable_lumpy_reclaim_mode(sc); | 927 | reset_reclaim_mode(sc); |
| 928 | keep_lumpy: | 928 | keep_lumpy: |
| 929 | list_add(&page->lru, &ret_pages); | 929 | list_add(&page->lru, &ret_pages); |
| 930 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); | 930 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); |
| @@ -1340,7 +1340,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken, | |||
| 1340 | return false; | 1340 | return false; |
| 1341 | 1341 | ||
| 1342 | /* Only stall on lumpy reclaim */ | 1342 | /* Only stall on lumpy reclaim */ |
| 1343 | if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE) | 1343 | if (sc->reclaim_mode & RECLAIM_MODE_SINGLE) |
| 1344 | return false; | 1344 | return false; |
| 1345 | 1345 | ||
| 1346 | /* If we have reclaimed everything on the isolated list, no stall */ | 1346 | /* If we have reclaimed everything on the isolated list, no stall */ |
| @@ -1384,14 +1384,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1384 | return SWAP_CLUSTER_MAX; | 1384 | return SWAP_CLUSTER_MAX; |
| 1385 | } | 1385 | } |
| 1386 | 1386 | ||
| 1387 | set_lumpy_reclaim_mode(priority, sc, false); | 1387 | set_reclaim_mode(priority, sc, false); |
| 1388 | lru_add_drain(); | 1388 | lru_add_drain(); |
| 1389 | spin_lock_irq(&zone->lru_lock); | 1389 | spin_lock_irq(&zone->lru_lock); |
| 1390 | 1390 | ||
| 1391 | if (scanning_global_lru(sc)) { | 1391 | if (scanning_global_lru(sc)) { |
| 1392 | nr_taken = isolate_pages_global(nr_to_scan, | 1392 | nr_taken = isolate_pages_global(nr_to_scan, |
| 1393 | &page_list, &nr_scanned, sc->order, | 1393 | &page_list, &nr_scanned, sc->order, |
| 1394 | sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ? | 1394 | sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? |
| 1395 | ISOLATE_BOTH : ISOLATE_INACTIVE, | 1395 | ISOLATE_BOTH : ISOLATE_INACTIVE, |
| 1396 | zone, 0, file); | 1396 | zone, 0, file); |
| 1397 | zone->pages_scanned += nr_scanned; | 1397 | zone->pages_scanned += nr_scanned; |
| @@ -1404,7 +1404,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1404 | } else { | 1404 | } else { |
| 1405 | nr_taken = mem_cgroup_isolate_pages(nr_to_scan, | 1405 | nr_taken = mem_cgroup_isolate_pages(nr_to_scan, |
| 1406 | &page_list, &nr_scanned, sc->order, | 1406 | &page_list, &nr_scanned, sc->order, |
| 1407 | sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ? | 1407 | sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? |
| 1408 | ISOLATE_BOTH : ISOLATE_INACTIVE, | 1408 | ISOLATE_BOTH : ISOLATE_INACTIVE, |
| 1409 | zone, sc->mem_cgroup, | 1409 | zone, sc->mem_cgroup, |
| 1410 | 0, file); | 1410 | 0, file); |
| @@ -1427,7 +1427,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1427 | 1427 | ||
| 1428 | /* Check if we should synchronously wait for writeback */ | 1428 | /* Check if we should synchronously wait for writeback */ |
| 1429 | if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { | 1429 | if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { |
| 1430 | set_lumpy_reclaim_mode(priority, sc, true); | 1430 | set_reclaim_mode(priority, sc, true); |
| 1431 | nr_reclaimed += shrink_page_list(&page_list, zone, sc); | 1431 | nr_reclaimed += shrink_page_list(&page_list, zone, sc); |
| 1432 | } | 1432 | } |
| 1433 | 1433 | ||
| @@ -1442,7 +1442,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1442 | zone_idx(zone), | 1442 | zone_idx(zone), |
| 1443 | nr_scanned, nr_reclaimed, | 1443 | nr_scanned, nr_reclaimed, |
| 1444 | priority, | 1444 | priority, |
| 1445 | trace_shrink_flags(file, sc->lumpy_reclaim_mode)); | 1445 | trace_shrink_flags(file, sc->reclaim_mode)); |
| 1446 | return nr_reclaimed; | 1446 | return nr_reclaimed; |
| 1447 | } | 1447 | } |
| 1448 | 1448 | ||
| @@ -1836,7 +1836,7 @@ static inline bool should_continue_reclaim(struct zone *zone, | |||
| 1836 | unsigned long inactive_lru_pages; | 1836 | unsigned long inactive_lru_pages; |
| 1837 | 1837 | ||
| 1838 | /* If not in reclaim/compaction mode, stop */ | 1838 | /* If not in reclaim/compaction mode, stop */ |
| 1839 | if (!(sc->lumpy_reclaim_mode & LUMPY_MODE_COMPACTION)) | 1839 | if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) |
| 1840 | return false; | 1840 | return false; |
| 1841 | 1841 | ||
| 1842 | /* | 1842 | /* |
