 mm/swap.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
@@ -446,13 +446,22 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
+/*
+ * Order of operations is important: flush the pagevec when it's already
+ * full, not when adding the last page, to make sure that last page is
+ * not added to the LRU directly when passed to this function. Because
+ * mark_page_accessed() (called after this when writing) only activates
+ * pages that are on the LRU, linear writes in subpage chunks would see
+ * every PAGEVEC_SIZE page activated, which is unexpected.
+ */
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
 	page_cache_get(page);
-	if (!pagevec_add(pvec, page))
+	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec, lru);
+	pagevec_add(pvec, page);
 	put_cpu_var(lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
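
The comment added above describes why the drain happens before the add rather than after: the page passed into __lru_cache_add() should remain in the per-CPU pagevec, not land on the LRU in the same call, so that a following mark_page_accessed() does not activate it. The following is a minimal userspace sketch, not kernel code: the batch buffer, BATCH_SIZE, and function names (add_to_batch, flush) are illustrative stand-ins for the pagevec, PAGEVEC_SIZE, pagevec_add()/pagevec_space(), and __pagevec_lru_add() used in the patch.

/*
 * Illustrative model (assumption: a plain fixed-size buffer standing in
 * for a per-CPU pagevec). Flushing only when the buffer is already full
 * means the page handed to add_to_batch() always stays in the buffer
 * after the call, mirroring the new ordering in __lru_cache_add().
 */
#include <stdio.h>

#define BATCH_SIZE 4                    /* stand-in for PAGEVEC_SIZE */

struct batch {
        int pages[BATCH_SIZE];
        int count;
};

static void flush(struct batch *b)      /* stand-in for __pagevec_lru_add() */
{
        printf("flush:");
        for (int i = 0; i < b->count; i++)
                printf(" %d", b->pages[i]);
        printf("\n");
        b->count = 0;
}

/*
 * New ordering: drain first if the buffer is already full, then add,
 * so the page added in this call is never part of the batch flushed
 * by this call.
 */
static void add_to_batch(struct batch *b, int page)
{
        if (b->count == BATCH_SIZE)     /* analogous to !pagevec_space() */
                flush(b);
        b->pages[b->count++] = page;    /* analogous to pagevec_add() */
}

int main(void)
{
        struct batch b = { .count = 0 };

        /*
         * Linear writes in small chunks: page N is only flushed on the
         * call that adds page N+BATCH_SIZE, never on its own call, so a
         * "mark accessed" step right after each add would not find it
         * on the drained (LRU) side yet.
         */
        for (int page = 0; page < 10; page++)
                add_to_batch(&b, page);
        return 0;
}

With the old ordering (add first, flush when the add fills the buffer), every BATCH_SIZE-th page would be drained in the same call that added it, which is exactly the periodic activation the comment calls unexpected.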