 mm/page-writeback.c | 12 +++++++++++-
 mm/swap.c           | 41 ++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 49 insertions(+), 4 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6ec5d0..b437fe6257b0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1211,6 +1211,17 @@ int set_page_dirty(struct page *page)
 
 	if (likely(mapping)) {
 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+		/*
+		 * readahead/lru_deactivate_page could leave
+		 * PG_readahead/PG_reclaim set due to a race with
+		 * end_page_writeback.
+		 * For readahead: a write to the page resets the flag,
+		 * so there is no problem.
+		 * For lru_deactivate_page: redirtying resets the flag;
+		 * a readahead hit instead confuses readahead and restarts
+		 * its size rampup, but that is only a trivial problem.
+		 */
+		ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
 		if (!spd)
 			spd = __set_page_dirty_buffers;
@@ -1266,7 +1277,6 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
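
The comment added to set_page_dirty() leans on a subtlety worth spelling out: in the kernel, PG_readahead is defined as an alias for PG_reclaim (they share a single page-flag bit), which is why a PG_reclaim bit left stale by the end_page_writeback() race can later be misread as PG_readahead. Below is a minimal userspace sketch of that aliasing and of the fix-up this hunk adds; the flag macros are toy stand-ins, not kernel code.

#include <stdio.h>

/* Toy stand-ins for the kernel's page flags. The key fact modeled here
 * is real: PG_readahead is defined as PG_reclaim in
 * include/linux/page-flags.h, so the two "flags" are one bit. */
#define PG_reclaim   (1u << 0)
#define PG_readahead PG_reclaim	/* aliased, as in the kernel */
#define PG_dirty     (1u << 1)

int main(void)
{
	unsigned int flags = 0;

	/* lru_deactivate() tags a dirty/writeback page for fast reclaim... */
	flags |= PG_reclaim;

	/* ...but end_page_writeback() has already run, so nothing clears
	 * the bit; readahead code would now see PG_readahead set: */
	printf("stale PG_readahead without fix: %u\n", !!(flags & PG_readahead));

	/* The patched set_page_dirty() clears the bit when the page is
	 * redirtied, closing that window: */
	flags &= ~PG_reclaim;	/* ClearPageReclaim(page) */
	flags |= PG_dirty;
	printf("PG_readahead after redirty:     %u\n", !!(flags & PG_readahead));
	return 0;
}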
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -354,26 +354,61 @@ void add_page_to_unevictable_list(struct page *page)
  * head of the list, rather than the tail, to give the flusher
  * threads some time to write it out, as this is much more
  * effective than the single-page writeout from reclaim.
+ *
+ * If the page isn't page_mapped and is dirty/under writeback, the
+ * page can be reclaimed ASAP by using PG_reclaim.
+ *
+ * 1. active, mapped page            -> none
+ * 2. active, dirty/writeback page   -> inactive, head, PG_reclaim
+ * 3. inactive, mapped page          -> none
+ * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 5. inactive, clean page           -> inactive, tail
+ * 6. others                         -> none
+ *
+ * In case 4 the page is moved to the head of the inactive list because
+ * the VM expects the flusher threads to write it out, which is much
+ * more effective than the single-page writeout from reclaim.
  */
 static void lru_deactivate(struct page *page, struct zone *zone)
 {
 	int lru, file;
+	bool active;
 
-	if (!PageLRU(page) || !PageActive(page))
+	if (!PageLRU(page))
 		return;
 
 	/* Some processes are using the page */
 	if (page_mapped(page))
 		return;
 
+	active = PageActive(page);
+
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + LRU_ACTIVE);
+	del_page_from_lru_list(zone, page, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
 	add_page_to_lru_list(zone, page, lru);
-	__count_vm_event(PGDEACTIVATE);
 
+	if (PageWriteback(page) || PageDirty(page)) {
+		/*
+		 * Setting PG_reclaim can race with end_page_writeback,
+		 * which can confuse readahead. But the race window is
+		 * _really_ small, and it is a non-critical problem.
+		 */
+		SetPageReclaim(page);
+	} else {
+		/*
+		 * The page's writeback ended while it was in the pagevec,
+		 * so move the page to the tail of the inactive list.
+		 */
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
+		__count_vm_event(PGROTATED);
+	}
+
+	if (active)
+		__count_vm_event(PGDEACTIVATE);
 	update_page_reclaim_stat(zone, page, file, 0);
 }
 
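
To make the state table in the new lru_deactivate() comment concrete, here is a small userspace model of the decision logic. The struct and function names are hypothetical; this is a sketch of the branch structure, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flattening of the page state lru_deactivate() inspects. */
struct page_state {
	bool on_lru;		/* PageLRU()                       */
	bool mapped;		/* page_mapped()                   */
	bool dirty_or_wb;	/* PageDirty() || PageWriteback()  */
};

/* Mirrors the branch structure of lru_deactivate() above. */
static const char *deactivate_outcome(struct page_state p)
{
	if (!p.on_lru)
		return "none";				/* case 6 */
	if (p.mapped)
		return "none";				/* cases 1 and 3 */
	if (p.dirty_or_wb)
		return "inactive, head, PG_reclaim";	/* cases 2 and 4 */
	return "inactive, tail";			/* case 5 */
}

int main(void)
{
	/* An unmapped page under writeback: cases 2/4 in the comment. */
	struct page_state p = { .on_lru = true, .mapped = false,
				.dirty_or_wb = true };
	printf("%s\n", deactivate_outcome(p));	/* inactive, head, PG_reclaim */
	return 0;
}

Note that the real function also keeps the event counters honest: PGDEACTIVATE is bumped only when the page was actually active, while moving a clean page to the tail accounts a PGROTATED event instead.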