-rw-r--r--  include/linux/swap.h | 11
-rw-r--r--  mm/rmap.c            |  7
-rw-r--r--  mm/swap.c            | 17
-rw-r--r--  mm/vmscan.c          |  5
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1701ce4be746..85d74373002c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -10,6 +10,7 @@
 #include <linux/node.h>
 #include <linux/fs.h>
 #include <linux/atomic.h>
+#include <linux/page-flags.h>
 #include <asm/page.h>
 
 struct notifier_block;
@@ -233,8 +234,8 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 
 /* linux/mm/swap.c */
-extern void __lru_cache_add(struct page *, enum lru_list lru);
-extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void __lru_cache_add(struct page *);
+extern void lru_cache_add(struct page *);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			 struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
@@ -254,12 +255,14 @@ extern void add_page_to_unevictable_list(struct page *page);
  */
 static inline void lru_cache_add_anon(struct page *page)
 {
-	__lru_cache_add(page, LRU_INACTIVE_ANON);
+	ClearPageActive(page);
+	__lru_cache_add(page);
 }
 
 static inline void lru_cache_add_file(struct page *page)
 {
-	__lru_cache_add(page, LRU_INACTIVE_FILE);
+	ClearPageActive(page);
+	__lru_cache_add(page);
 }
 
 /* linux/mm/vmscan.c */
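
Note on the new calling convention (a sketch, not part of the patch): the target LRU list is no longer passed as an argument; the caller encodes the choice in PG_active before handing the page to the pagevec layer, and the flag is read when the pagevec drains. A minimal example with a hypothetical caller name:

	#include <linux/swap.h>
	#include <linux/page-flags.h>

	/* Hypothetical caller: queue a freshly read file page as inactive. */
	static void example_cache_file_page(struct page *page)
	{
		/* Before this patch: __lru_cache_add(page, LRU_INACTIVE_FILE); */
		ClearPageActive(page);	/* a clear PG_active selects the inactive list */
		__lru_cache_add(page);	/* flag is inspected at pagevec drain time */
	}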
diff --git a/mm/rmap.c b/mm/rmap.c
index 6280da86b5d6..e22ceeb6e5ec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page,
 	else
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page))
-		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
-	else
+	if (!mlocked_vma_newpage(vma, page)) {
+		SetPageActive(page);
+		lru_cache_add(page);
+	} else
 		add_page_to_unevictable_list(page);
 }
 
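
The active case is symmetric, as the rmap.c hunk above shows (sketch; the helper name is hypothetical):

	/* Hypothetical caller: a new anonymous page starts on the active list. */
	static void example_cache_active_anon(struct page *page)
	{
		/* Before this patch: lru_cache_add_lru(page, LRU_ACTIVE_ANON); */
		SetPageActive(page);	/* PG_active set selects the active list */
		lru_cache_add(page);	/* VM_BUG_ONs if PG_unevictable is also set */
	}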
diff --git a/mm/swap.c b/mm/swap.c
index 6a9d0c43924a..4a1d0d2c52fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,15 +494,10 @@ EXPORT_SYMBOL(mark_page_accessed);
  * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
  * have the page added to the active list using mark_page_accessed().
  */
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-	if (is_active_lru(lru))
-		SetPageActive(page);
-	else
-		ClearPageActive(page);
-
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec);
@@ -512,11 +507,10 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 EXPORT_SYMBOL(__lru_cache_add);
 
 /**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
  */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
 {
 	if (PageActive(page)) {
 		VM_BUG_ON(PageUnevictable(page));
@@ -525,7 +519,7 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 	}
 
 	VM_BUG_ON(PageLRU(page));
-	__lru_cache_add(page, lru);
+	__lru_cache_add(page);
 }
 
 /**
@@ -745,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
+		/* Clear Active bit in case of parallel mark_page_accessed */
+		ClearPageActive(page);
+
 		list_add(&page->lru, &pages_to_free);
 	}
 	if (zone)
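
Why release_pages() now clears PG_active (my reading of the comment in the hunk above): with callers setting the flag outside the zone lru_lock, a parallel mark_page_accessed() can mark a page active while it still sits in a per-cpu pagevec; if the last reference is dropped before the pagevec drains, the page would reach the allocator with PG_active set and trip the bad-page checks. A rough sketch of the assumed interleaving:

	/*
	 * Illustrative interleaving, reconstructed from the comment:
	 *
	 *	CPU A				CPU B
	 *	-----				-----
	 *	lru_cache_add(page);
	 *	  page parked in the
	 *	  per-cpu pagevec		mark_page_accessed(page);
	 *					  SetPageActive(page);
	 *	release_pages(&page, 1, 0);
	 *	  put_page_testzero() succeeds
	 *	  ClearPageActive(page);	<-- without this, the free
	 *					    path would see PG_active
	 *	  page returned to the buddy allocator
	 */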
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c85794399848..99b3ac7771ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 void putback_lru_page(struct page *page)
 {
 	int lru;
-	int active = !!TestClearPageActive(page);
 	int was_unevictable = PageUnevictable(page);
 
 	VM_BUG_ON(PageLRU(page));
@@ -561,8 +560,8 @@ redo:
 		 * unevictable page on [in]active list.
 		 * We know how to handle that.
 		 */
-		lru = active + page_lru_base_type(page);
-		lru_cache_add_lru(page, lru);
+		lru = page_lru_base_type(page);
+		lru_cache_add(page);
 	} else {
 		/*
 		 * Put unevictable pages directly on zone's unevictable
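
The vmscan change follows directly: putback_lru_page() no longer folds PG_active into an index because the drain path derives the list from the flag itself. A before/after sketch (hypothetical function name; page_lru_base_type() comes from linux/mm_inline.h):

	#include <linux/swap.h>
	#include <linux/mm_inline.h>

	/* Hypothetical putback path for an evictable page. */
	static void example_putback_evictable(struct page *page)
	{
		/*
		 * Old: fold PG_active into an enum lru_list index by hand,
		 * relying on LRU_ACTIVE_* == LRU_INACTIVE_* + 1:
		 *
		 *	int active = !!TestClearPageActive(page);
		 *	enum lru_list lru = active + page_lru_base_type(page);
		 *	lru_cache_add_lru(page, lru);
		 */

		/* New: leave PG_active alone; no index is computed here. */
		lru_cache_add(page);
	}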