author		Mel Gorman <mel@csn.ul.ie>	2011-01-13 18:45:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:33 -0500
commit		ee64fc9354e515a79c7232cfde65c88ec627308b
tree		fb5fb6c0045ff5467ed5870d5f64806784deba2d /mm
parent		b7aba6984dc048503b69c2a885098cdd430832bf
mm: vmscan: convert lumpy_mode into a bitmask
Currently lumpy_mode is an enum that determines whether lumpy reclaim is
off, synchronous or asynchronous. In preparation for using compaction
instead of lumpy reclaim, this patch converts the mode into a bitmask.
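[Editorial note: the __bitwise__/__force idiom the patch adopts comes from the kernel's sparse annotations. Below is a minimal, userspace-compilable sketch of the pattern; the stub macros stand in for the kernel's <linux/types.h> definitions and none of this is part of the commit itself.]

#include <stdio.h>

/* Stubs for sparse's annotations: when building with `sparse`
 * (__CHECKER__ defined) these become attributes that forbid silently
 * mixing the typedef'd flag type with plain integers. */
#ifdef __CHECKER__
#define __bitwise__	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise__
#define __force
#endif

typedef unsigned __bitwise__ lumpy_mode;
#define LUMPY_MODE_SINGLE		((__force lumpy_mode)0x01u)
#define LUMPY_MODE_ASYNC		((__force lumpy_mode)0x02u)
#define LUMPY_MODE_SYNC			((__force lumpy_mode)0x04u)
#define LUMPY_MODE_CONTIGRECLAIM	((__force lumpy_mode)0x08u)

int main(void)
{
	/* Unlike the old mutually exclusive enum, flags now combine: */
	lumpy_mode mode = LUMPY_MODE_CONTIGRECLAIM | LUMPY_MODE_SYNC;

	if (mode & LUMPY_MODE_SYNC)
		printf("synchronous lumpy reclaim: may block\n");
	if (!(mode & LUMPY_MODE_SINGLE))
		printf("high-order reclaim enabled\n");
	return 0;
}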
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	| 46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cacdf6684971..3464312bde07 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -51,11 +51,20 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-enum lumpy_mode {
-	LUMPY_MODE_NONE,
-	LUMPY_MODE_ASYNC,
-	LUMPY_MODE_SYNC,
-};
+/*
+ * lumpy_mode determines how the inactive list is shrunk
+ * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
+ * LUMPY_MODE_ASYNC: Do not block
+ * LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
+ * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
+ *			page from the LRU and reclaim all pages within a
+ *			naturally aligned range
+ */
+typedef unsigned __bitwise__ lumpy_mode;
+#define LUMPY_MODE_SINGLE		((__force lumpy_mode)0x01u)
+#define LUMPY_MODE_ASYNC		((__force lumpy_mode)0x02u)
+#define LUMPY_MODE_SYNC			((__force lumpy_mode)0x04u)
+#define LUMPY_MODE_CONTIGRECLAIM	((__force lumpy_mode)0x08u)
 
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
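[The comment block documents intent, but nothing enforces that the four values stay distinct single bits. A hypothetical compile-time guard like the one below (C11 _Static_assert; a kernel of this era would use BUILD_BUG_ON instead) would catch an accidental overlap if more modes were added later. It is not in the commit.]

/* Hypothetical guard: every flag must be a distinct power of two so
 * that |, & and ~ compose without clobbering neighbouring bits. */
#define IS_SINGLE_BIT(x)	((x) != 0 && ((x) & ((x) - 1)) == 0)

_Static_assert(IS_SINGLE_BIT(0x01u) && IS_SINGLE_BIT(0x02u) &&
	       IS_SINGLE_BIT(0x04u) && IS_SINGLE_BIT(0x08u) &&
	       (0x01u | 0x02u | 0x04u | 0x08u) == 0x0fu,
	       "lumpy_mode flags must be distinct single bits");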
@@ -88,7 +97,7 @@ struct scan_control {
 	 * Intend to reclaim enough continuous memory rather than reclaim
 	 * enough amount of memory. i.e, mode for high order allocation.
 	 */
-	enum lumpy_mode lumpy_reclaim_mode;
+	lumpy_mode lumpy_reclaim_mode;
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
 				   bool sync)
 {
-	enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+	lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
 
 	/*
 	 * Some reclaim have alredy been failed. No worth to try synchronous
 	 * lumpy reclaim.
 	 */
-	if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sync && sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
 		return;
 
 	/*
@@ -288,17 +297,18 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
 	 * trouble getting a small set of contiguous pages, we
 	 * will reclaim both active and inactive pages.
 	 */
+	sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
 	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
 	else
-		sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+		sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static void disable_lumpy_reclaim_mode(struct scan_control *sc)
 {
-	sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+	sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static inline int is_page_cache_freeable(struct page *page)
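[The two hunks above are the heart of the change: LUMPY_MODE_CONTIGRECLAIM is now set unconditionally and only replaced by SINGLE|ASYNC in the fallback branch, which is why the earlier hunk's early return can test LUMPY_MODE_SINGLE to detect that a previous pass already gave up on lumpy reclaim. The standalone sketch below reproduces the selection logic with kernel context reduced to plain constants; the name pick_mode is illustrative, not from the patch.]

#include <stdbool.h>
#include <stdio.h>

typedef unsigned lumpy_mode;		/* sparse annotations omitted */
#define LUMPY_MODE_SINGLE		0x01u
#define LUMPY_MODE_ASYNC		0x02u
#define LUMPY_MODE_SYNC			0x04u
#define LUMPY_MODE_CONTIGRECLAIM	0x08u

#define PAGE_ALLOC_COSTLY_ORDER	3	/* kernel value of this era */
#define DEF_PRIORITY		12

static lumpy_mode pick_mode(int order, int priority, bool sync)
{
	lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
	lumpy_mode mode = LUMPY_MODE_CONTIGRECLAIM;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		mode |= syncmode;	/* costly order: always go lumpy */
	else if (order && priority < DEF_PRIORITY - 2)
		mode |= syncmode;	/* small order, but under pressure */
	else
		mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC; /* order-0 only */
	return mode;
}

int main(void)
{
	printf("%#x\n", pick_mode(0, DEF_PRIORITY, false));	/* 0x3 */
	printf("%#x\n", pick_mode(4, DEF_PRIORITY, true));	/* 0xc */
	return 0;
}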
@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 		 * first attempt to free a range of pages fails.
 		 */
 		if (PageWriteback(page) &&
-		    sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+		    (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
 			wait_on_page_writeback(page);
 
 		if (!PageWriteback(page)) {
@@ -622,7 +632,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -739,7 +749,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 * for any page for which writeback has already
 			 * started.
 			 */
-			if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+			if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
 			    may_enter_fs)
 				wait_on_page_writeback(page);
 			else {
@@ -1324,7 +1334,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
 		return false;
 
 	/* Only stall on lumpy reclaim */
-	if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
 		return false;
 
 	/* If we have relaimed everything on the isolated list, no stall */
@@ -1375,7 +1385,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
 					ISOLATE_INACTIVE : ISOLATE_BOTH,
 			zone, 0, file);
 		zone->pages_scanned += nr_scanned;
@@ -1388,7 +1398,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
 					ISOLATE_INACTIVE : ISOLATE_BOTH,
 			zone, sc->mem_cgroup,
 			0, file);
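[The remaining hunks apply the same mechanical translation at each use site. As a summary, the hypothetical helpers below (not in the patch) name the predicate each new bitmask test implements; shrink_inactive_list, for instance, now isolates only inactive pages exactly when the first of these holds.]

#include <stdbool.h>

typedef unsigned lumpy_mode;		/* sparse annotations omitted */
#define LUMPY_MODE_SINGLE		0x01u
#define LUMPY_MODE_SYNC			0x04u
#define LUMPY_MODE_CONTIGRECLAIM	0x08u

/* was: sc->lumpy_reclaim_mode == LUMPY_MODE_NONE */
static inline bool lumpy_disabled(lumpy_mode mode)
{
	return mode & LUMPY_MODE_SINGLE;
}

/* was: sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC */
static inline bool lumpy_may_block(lumpy_mode mode)
{
	return mode & LUMPY_MODE_SYNC;
}

/* was: sc->lumpy_reclaim_mode != LUMPY_MODE_NONE */
static inline bool lumpy_enabled(lumpy_mode mode)
{
	return mode & LUMPY_MODE_CONTIGRECLAIM;
}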