Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 77825883298f..6310dc2008ff 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -446,13 +446,22 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
+/*
+ * Order of operations is important: flush the pagevec when it's already
+ * full, not when adding the last page, to make sure that last page is
+ * not added to the LRU directly when passed to this function. Because
+ * mark_page_accessed() (called after this when writing) only activates
+ * pages that are on the LRU, linear writes in subpage chunks would see
+ * every PAGEVEC_SIZE page activated, which is unexpected.
+ */
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 
         page_cache_get(page);
-        if (!pagevec_add(pvec, page))
+        if (!pagevec_space(pvec))
                 __pagevec_lru_add(pvec, lru);
+        pagevec_add(pvec, page);
         put_cpu_var(lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
@@ -742,7 +751,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 
         SetPageLRU(page_tail);
 
-        if (page_evictable(page_tail, NULL)) {
+        if (page_evictable(page_tail)) {
                 if (PageActive(page)) {
                         SetPageActive(page_tail);
                         active = 1;
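
The comment added in the first hunk documents a subtle ordering: the pagevec is drained when it is already full, before the new page is added, so the page being added in this call is never pushed to the LRU immediately. Below is a minimal userspace C sketch of that batching pattern; the names batch, batch_add and batch_drain are illustrative stand-ins for the pagevec API, not kernel code.

/*
 * Minimal userspace sketch (not kernel code) of the "drain first if
 * full, then add" ordering. The most recently added item always stays
 * in the batch rather than being flushed in the same call that added it.
 */
#include <stdio.h>

#define BATCH_SIZE 4	/* stands in for PAGEVEC_SIZE */

struct batch {
	int nr;
	int items[BATCH_SIZE];
};

/* Flush everything currently batched (stands in for __pagevec_lru_add()). */
static void batch_drain(struct batch *b)
{
	for (int i = 0; i < b->nr; i++)
		printf("draining item %d\n", b->items[i]);
	b->nr = 0;
}

/* Drain first if already full, then add: the new item is never drained here. */
static void batch_add(struct batch *b, int item)
{
	if (b->nr == BATCH_SIZE)	/* like !pagevec_space(pvec) */
		batch_drain(b);		/* like __pagevec_lru_add(pvec, lru) */
	b->items[b->nr++] = item;	/* like pagevec_add(pvec, page) */
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int i = 0; i < 10; i++)
		batch_add(&b, i);
	/* Items 8 and 9 are still batched here, as the kernel comment intends. */
	batch_drain(&b);
	return 0;
}

With the old ordering (add first, drain when pagevec_add() reports the vector full), every BATCH_SIZE-th item would be flushed in the very call that added it, which is exactly the behaviour the kernel comment says confuses mark_page_accessed() during linear sub-page writes.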