author	Hugh Dickins <hughd@google.com>	2012-10-08 19:33:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:55 -0400
commit	39b5f29ac1f988c1615fbc9c69f6651ab0d0c3c7 (patch)
tree	d8030f58d542bb3e811d83676b38c5b4b3a16c02 /mm
parent	ec4d9f626d5908b6052c2973f37992f1db52e967 (diff)
mm: remove vma arg from page_evictable
page_evictable(page, vma) is an irritant: almost all its callers pass NULL for vma.  Remove the vma arg and use mlocked_vma_newpage(vma, page) explicitly in the couple of places it's needed.  But in those places we don't even need page_evictable() itself!  They're dealing with a freshly allocated anonymous page, which has no "mapping" and cannot be mlocked yet.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
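For readers skimming past the hunks, the net effect on the interface is sketched below.  This is distilled from the diff that follows rather than additional code: the two fault-path callers that previously passed a vma (page_add_new_anon_rmap() and ksm_does_need_to_copy()) now test mlocked_vma_newpage(vma, page) directly, since a freshly allocated anonymous page has no mapping and cannot be mlocked yet.

```c
/* Old interface: almost every caller passed NULL for vma. */
int page_evictable(struct page *page, struct vm_area_struct *vma);

/*
 * New interface and implementation, as introduced in mm/vmscan.c below:
 * a page is evictable unless its mapping is marked unevictable or the
 * page itself is mlocked.
 */
int page_evictable(struct page *page)
{
	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
}
```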
Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	 5
-rw-r--r--	mm/ksm.c	 2
-rw-r--r--	mm/rmap.c	 2
-rw-r--r--	mm/swap.c	 2
-rw-r--r--	mm/vmscan.c	27
5 files changed, 14 insertions(+), 24 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 7f72f249bc29..78f25d6cc6a7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -168,9 +168,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path via page_evictable() for a new page
- * to determine if it's being mapped into a LOCKED vma.
- * If so, mark page as mlocked.
+ * Called only in fault path, to determine if a new page is being
+ * mapped into a LOCKED vma.  If it is, mark page as mlocked.
  */
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 					struct page *page)
diff --git a/mm/ksm.c b/mm/ksm.c
index 14ee5cf8a513..ecbc090cdaad 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1586,7 +1586,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 	SetPageSwapBacked(new_page);
 	__set_page_locked(new_page);
 
-	if (page_evictable(new_page, vma))
+	if (!mlocked_vma_newpage(vma, new_page))
 		lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
 	else
 		add_page_to_unevictable_list(new_page);
diff --git a/mm/rmap.c b/mm/rmap.c
index 28777412de62..0d86433e42d7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1080,7 +1080,7 @@ void page_add_new_anon_rmap(struct page *page,
 	else
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (page_evictable(page, vma))
+	if (!mlocked_vma_newpage(vma, page))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
 		add_page_to_unevictable_list(page);
diff --git a/mm/swap.c b/mm/swap.c
index f76c76c7501b..6310dc2008ff 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -751,7 +751,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 
 	SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail, NULL)) {
+	if (page_evictable(page_tail)) {
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b010efc43891..8b627309dd44 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -553,7 +553,7 @@ void putback_lru_page(struct page *page)
 redo:
 	ClearPageUnevictable(page);
 
-	if (page_evictable(page, NULL)) {
+	if (page_evictable(page)) {
 		/*
 		 * For evictable pages, we can use the cache.
 		 * In event of a race, worst case is we end up with an
@@ -587,7 +587,7 @@ redo:
 	 * page is on unevictable list, it never be freed. To avoid that,
 	 * check after we added it to the list, again.
 	 */
-	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
 		if (!isolate_lru_page(page)) {
 			put_page(page);
 			goto redo;
@@ -709,7 +709,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		sc->nr_scanned++;
 
-		if (unlikely(!page_evictable(page, NULL)))
+		if (unlikely(!page_evictable(page)))
 			goto cull_mlocked;
 
 		if (!sc->may_unmap && page_mapped(page))
@@ -1217,7 +1217,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 
 		VM_BUG_ON(PageLRU(page));
 		list_del(&page->lru);
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			spin_unlock_irq(&zone->lru_lock);
 			putback_lru_page(page);
 			spin_lock_irq(&zone->lru_lock);
@@ -1470,7 +1470,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
 			continue;
 		}
@@ -3414,27 +3414,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
- * @vma: the VMA in which the page is or will be mapped, may be NULL
  *
  * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.  The vma argument is !NULL when called from the
- * fault path to determine how to instantate a new page.
+ * lists vs unevictable list.
  *
  * Reasons page might not be evictable:
  * (1) page's mapping marked unevictable
  * (2) page is part of an mlocked VMA
  *
  */
-int page_evictable(struct page *page, struct vm_area_struct *vma)
+int page_evictable(struct page *page)
 {
-
-	if (mapping_unevictable(page_mapping(page)))
-		return 0;
-
-	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
-		return 0;
-
-	return 1;
+	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
 }
 
 #ifdef CONFIG_SHMEM
@@ -3472,7 +3463,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
 
-		if (page_evictable(page, NULL)) {
+		if (page_evictable(page)) {
 			enum lru_list lru = page_lru_base_type(page);
 
 			VM_BUG_ON(PageActive(page));