Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	41
1 file changed, 38 insertions(+), 3 deletions(-)
@@ -354,26 +354,61 @@ void add_page_to_unevictable_list(struct page *page)
 	 * head of the list, rather than the tail, to give the flusher
 	 * threads some time to write it out, as this is much more
 	 * effective than the single-page writeout from reclaim.
+	 *
+	 * If the page isn't page_mapped and is dirty/writeback, the
+	 * page can be reclaimed ASAP by setting PG_reclaim.
+	 *
+	 * 1. active, mapped page -> none
+	 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
+	 * 3. inactive, mapped page -> none
+	 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+	 * 5. inactive, clean -> inactive, tail
+	 * 6. Others -> none
+	 *
+	 * In case 4 the page is moved to the head of the inactive list
+	 * because the VM expects flusher threads to write it out, which
+	 * is much more effective than the single-page writeout from reclaim.
 	 */
 static void lru_deactivate(struct page *page, struct zone *zone)
 {
 	int lru, file;
+	bool active;
 
-	if (!PageLRU(page) || !PageActive(page))
+	if (!PageLRU(page))
 		return;
 
 	/* Some processes are using the page */
 	if (page_mapped(page))
 		return;
 
+	active = PageActive(page);
+
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + LRU_ACTIVE);
+	del_page_from_lru_list(zone, page, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
 	add_page_to_lru_list(zone, page, lru);
-	__count_vm_event(PGDEACTIVATE);
 
+	if (PageWriteback(page) || PageDirty(page)) {
+		/*
+		 * Setting PG_reclaim can race with end_page_writeback()
+		 * and confuse readahead, but the race window is
+		 * _really_ small and it is a non-critical problem.
+		 */
+		SetPageReclaim(page);
+	} else {
+		/*
+		 * The page's writeback ended while it was in the pagevec;
+		 * move the page to the tail of the inactive list.
+		 */
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
+		__count_vm_event(PGROTATED);
+	}
+
+	if (active)
+		__count_vm_event(PGDEACTIVATE);
 	update_page_reclaim_stat(zone, page, file, 0);
 }
 
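For readers skimming the hunk, the six-case table above collapses to a small decision: unmapped LRU pages that are dirty or under writeback go to the inactive head with PG_reclaim set, unmapped clean pages go to the inactive tail, and everything else is left alone (the active bit only affects the PGDEACTIVATE accounting, not the placement). A minimal userspace sketch of that table follows; struct model_page, enum lru_action, and classify() are hypothetical names for illustration only, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of lru_deactivate()'s policy; not kernel code. */
struct model_page {
	bool on_lru;		/* PageLRU()			  */
	bool mapped;		/* page_mapped()		  */
	bool dirty_or_wb;	/* PageDirty() || PageWriteback() */
};

enum lru_action {
	ACT_NONE,		/* cases 1, 3, 6: leave the page alone	  */
	ACT_HEAD_RECLAIM,	/* cases 2, 4: inactive head + PG_reclaim */
	ACT_TAIL,		/* case 5: tail of the inactive list	  */
};

static enum lru_action classify(const struct model_page *p)
{
	if (!p->on_lru || p->mapped)
		return ACT_NONE;
	if (p->dirty_or_wb)
		return ACT_HEAD_RECLAIM;
	return ACT_TAIL;
}

int main(void)
{
	const struct model_page dirty = { .on_lru = true, .dirty_or_wb = true };
	const struct model_page clean = { .on_lru = true };

	printf("dirty -> %d, clean -> %d\n", classify(&dirty), classify(&clean));
	return 0;
}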
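The hunk itself never shows who calls lru_deactivate(); the comment's mention of a pagevec suggests deactivations are batched rather than taking the zone LRU lock per page. A hedged sketch of that calling pattern, assuming helper names (deactivate_page(), lru_deactivate_pvecs, ____pagevec_lru_deactivate()) from the surrounding patch and simplifying the structure:

/* Sketch only: the batching helpers named above are assumptions. */
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

void deactivate_page(struct page *page)
{
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		/*
		 * Collect pages per CPU; once the pagevec fills, drain
		 * the whole batch through lru_deactivate() under a
		 * single acquisition of the zone's LRU lock.
		 */
		if (!pagevec_add(pvec, page))
			____pagevec_lru_deactivate(pvec);
		put_cpu_var(lru_deactivate_pvecs);
	}
}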