author     Mel Gorman <mgorman@suse.de>  2013-07-03 18:02:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 19:07:31 -0400
commit     c53954a092d07c5684d31ea1fc813d262cff08a5 (patch)
tree       257a36dd4b413e2d82aff23b5b7194a01b57df40 /mm
parent     a0b8cab3b9b2efadabdcff264c450ca515e2619c (diff)
mm: remove lru parameter from __lru_cache_add and lru_cache_add_lru
Similar to __pagevec_lru_add, this patch removes the LRU parameter from __lru_cache_add and lru_cache_add_lru, as the caller does not control the exact LRU the page gets added to. lru_cache_add_lru is renamed to lru_cache_add, since the name is silly without the lru parameter. With the parameter removed, the caller must indicate whether it wants the page added to the active or inactive list by setting or clearing PageActive, respectively.

[akpm@linux-foundation.org: Suggested the patch]
[gang.chen@asianux.com: fix used-uninitialized warning]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Alexey Lyahkov <alexey.lyashkov@gmail.com>
Cc: Andrew Perepechko <anserper@ya.ru>
Cc: Robin Dong <sanbai@taobao.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Bernd Schubert <bernd.schubert@fastmail.fm>
Cc: David Howells <dhowells@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
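The conversion for callers is mechanical. As a minimal sketch, here is the old and new form, taken from the mm/rmap.c hunk below (a new anonymous page that should start on the active anon list):

    /* Before: the caller named the target LRU list explicitly. */
    lru_cache_add_lru(page, LRU_ACTIVE_ANON);

    /* After: the caller records the same intent in the page's Active
     * flag, then calls the list-agnostic helper. */
    SetPageActive(page);
    lru_cache_add(page);

Callers that want the inactive list call ClearPageActive(page) first instead.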
Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c    7
-rw-r--r--  mm/swap.c   17
-rw-r--r--  mm/vmscan.c  5
3 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 6280da86b5d6..e22ceeb6e5ec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page,
         else
                 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
         __page_set_anon_rmap(page, vma, address, 1);
-        if (!mlocked_vma_newpage(vma, page))
-                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
-        else
+        if (!mlocked_vma_newpage(vma, page)) {
+                SetPageActive(page);
+                lru_cache_add(page);
+        } else
                 add_page_to_unevictable_list(page);
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index 6a9d0c43924a..4a1d0d2c52fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,15 +494,10 @@ EXPORT_SYMBOL(mark_page_accessed);
  * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
  * have the page added to the active list using mark_page_accessed().
  */
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-        if (is_active_lru(lru))
-                SetPageActive(page);
-        else
-                ClearPageActive(page);
-
         page_cache_get(page);
         if (!pagevec_space(pvec))
                 __pagevec_lru_add(pvec);
@@ -512,11 +507,10 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 EXPORT_SYMBOL(__lru_cache_add);
 
 /**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
  */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
 {
         if (PageActive(page)) {
                 VM_BUG_ON(PageUnevictable(page));
@@ -525,7 +519,7 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
         }
 
         VM_BUG_ON(PageLRU(page));
-        __lru_cache_add(page, lru);
+        __lru_cache_add(page);
 }
 
 /**
@@ -745,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
                 }
 
+                /* Clear Active bit in case of parallel mark_page_accessed */
+                ClearPageActive(page);
+
                 list_add(&page->lru, &pages_to_free);
         }
         if (zone)
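Note the batching in the __lru_cache_add() hunk above: it does not touch the LRU lists directly; it takes a reference on the page, queues it in the per-CPU lru_add_pvec, and drains the whole batch with __pagevec_lru_add() once the pagevec is full. The following is a standalone userspace model of that pattern, not kernel code: struct page is a toy stand-in, and PAGEVEC_SIZE is assumed to be 14 (the kernel's batch size in this era).

#include <stdio.h>

#define PAGEVEC_SIZE 14  /* assumed batch size, mirroring the kernel's */

struct page { int id; int active; };  /* toy stand-in for struct page */

struct pagevec {
        unsigned int nr;
        struct page *pages[PAGEVEC_SIZE];
};

/* Stand-in for __pagevec_lru_add(): move the whole batch onto the LRU.
 * After this patch, the page's own Active flag (not an lru argument)
 * decides which list each page lands on. */
static void model_pagevec_lru_add(struct pagevec *pvec)
{
        for (unsigned int i = 0; i < pvec->nr; i++)
                printf("page %d -> %s list\n", pvec->pages[i]->id,
                       pvec->pages[i]->active ? "active" : "inactive");
        pvec->nr = 0;  /* batch is now empty */
}

/* Model of the new __lru_cache_add(): drain first if the batch is full,
 * then queue the page. The real code also pins the page with
 * page_cache_get() so it cannot be freed while queued. */
static void model_lru_cache_add(struct pagevec *pvec, struct page *page)
{
        if (pvec->nr == PAGEVEC_SIZE)
                model_pagevec_lru_add(pvec);
        pvec->pages[pvec->nr++] = page;
}

int main(void)
{
        struct pagevec pvec = { 0 };
        struct page p1 = { 1, 1 };  /* as if SetPageActive() had run */
        struct page p2 = { 2, 0 };  /* as if ClearPageActive() had run */

        model_lru_cache_add(&pvec, &p1);
        model_lru_cache_add(&pvec, &p2);
        model_pagevec_lru_add(&pvec);  /* final drain */
        return 0;
}

The new ClearPageActive() in release_pages() guards the same contract from the other side: a parallel mark_page_accessed() may have set the flag on a page that is about to be freed, and it must not reach the free list with the flag still set.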
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c85794399848..99b3ac7771ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 void putback_lru_page(struct page *page)
 {
         int lru;
-        int active = !!TestClearPageActive(page);
         int was_unevictable = PageUnevictable(page);
 
         VM_BUG_ON(PageLRU(page));
@@ -561,8 +560,8 @@ redo:
                  * unevictable page on [in]active list.
                  * We know how to handle that.
                  */
-                lru = active + page_lru_base_type(page);
-                lru_cache_add_lru(page, lru);
+                lru = page_lru_base_type(page);
+                lru_cache_add(page);
         } else {
                 /*
                  * Put unevictable pages directly on zone's unevictable
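A note on the dropped arithmetic in putback_lru_page(): "lru = active + page_lru_base_type(page)" worked because, in enum lru_list, each active list is defined immediately after its inactive counterpart, so adding the truth value of "active" to the base type selects the active variant. A sketch of that layout (values as in include/linux/mmzone.h of this era):

enum lru_list {
        LRU_INACTIVE_ANON,   /* 0 */
        LRU_ACTIVE_ANON,     /* 1 = LRU_INACTIVE_ANON + 1 */
        LRU_INACTIVE_FILE,   /* 2 */
        LRU_ACTIVE_FILE,     /* 3 = LRU_INACTIVE_FILE + 1 */
        LRU_UNEVICTABLE,     /* 4 */
};

After the patch, the page keeps its PageActive flag across putback and the pagevec drain reads the flag instead, so the caller no longer clears and re-derives it.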