 mm/swap.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/swap.c b/mm/swap.c
index 77825883298f..f76c76c7501b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -446,13 +446,22 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
+/*
+ * Order of operations is important: flush the pagevec when it's already
+ * full, not when adding the last page, to make sure that last page is
+ * not added to the LRU directly when passed to this function. Because
+ * mark_page_accessed() (called after this when writing) only activates
+ * pages that are on the LRU, linear writes in subpage chunks would see
+ * every PAGEVEC_SIZE page activated, which is unexpected.
+ */
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
 	page_cache_get(page);
-	if (!pagevec_add(pvec, page))
+	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec, lru);
+	pagevec_add(pvec, page);
 	put_cpu_var(lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
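For context, a minimal standalone userspace sketch of why the flush order matters (hypothetical code, not from the kernel tree; PAGEVEC_SIZE is shrunk to 4 for readability). pagevec_add() reports the slots remaining *after* the page is added, so the old code drained the pagevec only once the current page was already inside it, sending that page straight to the LRU every PAGEVEC_SIZE pages. The new code drains a full pagevec first, so the page being added always stays cached per-CPU:

/* Sketch only: simplified pagevec, not the kernel's definition. */
#include <stdio.h>

#define PAGEVEC_SIZE 4	/* shrunk for readability */

struct pagevec {
	unsigned long nr;
	int pages[PAGEVEC_SIZE];
};

/* Returns the slots left *after* adding, mirroring the kernel helper. */
static unsigned long pagevec_add(struct pagevec *pvec, int page)
{
	pvec->pages[pvec->nr++] = page;
	return PAGEVEC_SIZE - pvec->nr;
}

static unsigned long pagevec_space(struct pagevec *pvec)
{
	return PAGEVEC_SIZE - pvec->nr;
}

/* Stand-in for __pagevec_lru_add(): move everything to the LRU. */
static void flush(struct pagevec *pvec, const char *who)
{
	printf("%s: flush %lu page(s) to LRU\n", who, pvec->nr);
	pvec->nr = 0;
}

int main(void)
{
	struct pagevec old_pv = { 0 }, new_pv = { 0 };
	int page;

	for (page = 1; page <= PAGEVEC_SIZE; page++) {
		/* Old order: the last page added triggers the flush,
		 * so it lands on the LRU immediately. */
		if (!pagevec_add(&old_pv, page))
			flush(&old_pv, "old");

		/* New order: flush only an already-full pagevec, then
		 * add; the current page always stays cached. */
		if (!pagevec_space(&new_pv))
			flush(&new_pv, "new");
		pagevec_add(&new_pv, page);
	}

	printf("old pagevec holds %lu page(s); new holds %lu\n",
	       old_pv.nr, new_pv.nr);
	return 0;
}

Running this, the old ordering leaves old_pv empty: page 4 is already on the LRU, so a subsequent mark_page_accessed() would activate it. The new ordering leaves all four pages in new_pv, so the page just written is not yet on the LRU and activation is deferred, which is exactly what the new comment in the diff describes.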