author    Minchan Kim <minchan.kim@gmail.com>  2011-03-22 19:32:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:44:04 -0400
commit    278df9f451dc71dcd002246be48358a473504ad0 (patch)
tree      3b79e956f2f0b9381f62518ff2fcf94df4ff9c3f /mm
parent    3f58a82943337fb6e79acfa5346719a97d3c0b98 (diff)
mm: reclaim invalidated page ASAP
invalidate_mapping_pages is a very strong hint to the reclaimer: it means the user does not want to use the page any more. So, to keep working-set pages from being evicted, this patch moves invalidated pages to the tail of the inactive list by setting PG_reclaim.

Remember that pages on the inactive list are part of the working set just as pages on the active list are. If we don't move invalidated pages to the tail of the inactive list, pages near the tail can be evicted even though we have a strong clue that the invalidated pages are useless. That is clearly wrong.

PG_readahead and PG_reclaim currently share one flag bit. Commit fe3cba17 added ClearPageReclaim to clear_page_dirty_for_io to keep a readahead-marker page from being reclaimed too quickly. In this series PG_reclaim is also used for invalidated pages: if the VM finds that a page has been invalidated and is dirty, it sets PG_reclaim so the page is reclaimed as soon as possible. But when such a dirty page is later written back, clear_page_dirty_for_io clears PG_reclaim unconditionally, which defeats this series' goal. It should be fine to clear PG_readahead when the page is dirtied rather than at writeback time, so this patch moves the ClearPageReclaim call into set_page_dirty.

In v4, the ClearPageReclaim in set_page_dirty had a problem, reported by Steven Barrett, caused by compound pages: some drivers (e.g. audio) call set_page_dirty with a compound page that is not on the LRU, and the patch then called ClearPageReclaim on that compound page. Without CONFIG_PAGEFLAGS_EXTENDED, that corrupts the PageTail flag. I don't think this affects THP, and my tests pass with THP enabled, but Andrea is Cc'ed for a double check.

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Reported-by: Steven Barrett <damentz@liquorix.net>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
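For context, the invalidation path this patch optimizes is commonly reached from user space through posix_fadvise(POSIX_FADV_DONTNEED), which ends up calling invalidate_mapping_pages on the file's page cache. A minimal sketch of such a caller (an illustrative user-space program, not part of this patch):

/* Illustration only: hint the kernel that a file's cached pages are no
 * longer needed.  With this patch applied, clean invalidated pages move
 * to the tail of the inactive list and dirty ones get PG_reclaim, so
 * they are reclaimed before untouched working-set pages. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ... read the file once, e.g. a backup pass or log scan ... */

	/* Ask the kernel to drop the whole file from the page cache. */
	if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
		perror("posix_fadvise");
	close(fd);
	return 0;
}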
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  12
-rw-r--r--  mm/swap.c            41
2 files changed, 49 insertions(+), 4 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6ec5d0..b437fe6257b0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1211,6 +1211,17 @@ int set_page_dirty(struct page *page)
 
 	if (likely(mapping)) {
 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+		/*
+		 * readahead/lru_deactivate_page could remain
+		 * PG_readahead/PG_reclaim due to race with end_page_writeback
+		 * About readahead, if the page is written, the flags would be
+		 * reset. So no problem.
+		 * About lru_deactivate_page, if the page is redirty, the flag
+		 * will be reset. So no problem. but if the page is used by readahead
+		 * it will confuse readahead and make it restart the size rampup
+		 * process. But it's a trivial problem.
+		 */
+		ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
 		if (!spd)
 			spd = __set_page_dirty_buffers;
@@ -1266,7 +1277,6 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
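To see what moving ClearPageReclaim buys, consider a simplified model of the shared flag's lifecycle (an illustrative sketch with invented toy types, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one page-cache page; "reclaim" stands in for the single
 * bit shared by PG_reclaim and PG_readahead.  All names are invented. */
struct toy_page {
	bool dirty;
	bool reclaim;
};

/* Old scheme: starting writeback cleared the shared bit, discarding
 * the "reclaim ASAP" hint that invalidation had set on a dirty page. */
static void writeback_old(struct toy_page *p)
{
	p->reclaim = false;	/* hint lost right before writeback */
	p->dirty = false;
}

/* New scheme (this patch): the bit is cleared when the page is
 * dirtied, so a hint set afterwards survives until writeback ends. */
static void dirty_new(struct toy_page *p)
{
	p->reclaim = false;	/* stale readahead marker cleared here */
	p->dirty = true;
}

static void writeback_new(struct toy_page *p)
{
	p->dirty = false;	/* PG_reclaim left alone */
}

int main(void)
{
	struct toy_page a = { .dirty = true, .reclaim = true };
	struct toy_page b = { 0 };

	writeback_old(&a);	/* old behaviour: hint gone */

	dirty_new(&b);
	b.reclaim = true;	/* invalidation sets the hint */
	writeback_new(&b);	/* new behaviour: hint survives */

	printf("old hint: %d, new hint: %d\n", a.reclaim, b.reclaim);
	return 0;
}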
diff --git a/mm/swap.c b/mm/swap.c
index 1b9e4ebaffc8..0a33714a7cba 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -354,26 +354,61 @@ void add_page_to_unevictable_list(struct page *page)
  * head of the list, rather than the tail, to give the flusher
  * threads some time to write it out, as this is much more
  * effective than the single-page writeout from reclaim.
+ *
+ * If the page isn't page_mapped and dirty/writeback, the page
+ * could reclaim asap using PG_reclaim.
+ *
+ * 1. active, mapped page -> none
+ * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
+ * 3. inactive, mapped page -> none
+ * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 5. inactive, clean -> inactive, tail
+ * 6. Others -> none
+ *
+ * In 4, why it moves inactive's head, the VM expects the page would
+ * be write it out by flusher threads as this is much more effective
+ * than the single-page writeout from reclaim.
  */
 static void lru_deactivate(struct page *page, struct zone *zone)
 {
 	int lru, file;
+	bool active;
 
-	if (!PageLRU(page) || !PageActive(page))
+	if (!PageLRU(page))
 		return;
 
 	/* Some processes are using the page */
 	if (page_mapped(page))
 		return;
 
+	active = PageActive(page);
+
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + LRU_ACTIVE);
+	del_page_from_lru_list(zone, page, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
 	add_page_to_lru_list(zone, page, lru);
-	__count_vm_event(PGDEACTIVATE);
 
+	if (PageWriteback(page) || PageDirty(page)) {
+		/*
+		 * PG_reclaim could be raced with end_page_writeback
+		 * It can make readahead confusing. But race window
+		 * is _really_ small and it's non-critical problem.
+		 */
+		SetPageReclaim(page);
+	} else {
+		/*
+		 * The page's writeback ends up during pagevec
+		 * We moves tha page into tail of inactive.
+		 */
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
+		__count_vm_event(PGROTATED);
+	}
+
+	if (active)
+		__count_vm_event(PGDEACTIVATE);
 	update_page_reclaim_stat(zone, page, file, 0);
 }
 
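Read together, the six cases in the new comment block reduce to a small decision function. A self-contained restatement (plain C with an invented page_state type; it mirrors the choices lru_deactivate makes but is not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum action { NONE, INACTIVE_HEAD_RECLAIM, INACTIVE_TAIL };

/* Invented struct standing in for the relevant struct page state. */
struct page_state {
	bool on_lru;
	bool mapped;
	bool dirty_or_writeback;
};

/* The decision lru_deactivate() makes for an invalidated page. */
static enum action deactivate_action(struct page_state p)
{
	if (!p.on_lru || p.mapped)
		return NONE;			/* cases 1, 3, 6 */
	if (p.dirty_or_writeback)
		return INACTIVE_HEAD_RECLAIM;	/* cases 2, 4 */
	return INACTIVE_TAIL;			/* case 5 */
}

int main(void)
{
	struct page_state dirty = { true, false, true };
	struct page_state clean = { true, false, false };

	printf("dirty -> %d, clean -> %d\n",
	       deactivate_action(dirty), deactivate_action(clean));
	return 0;
}

Note the design choice this encodes: a dirty or under-writeback page cannot be rotated straight to the tail, because reclaim would then issue inefficient single-page writeout; instead it goes to the inactive head with PG_reclaim set, giving the flusher threads time to write it out, after which end_page_writeback rotates it to the tail.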