 Documentation/vm/unevictable-lru.txt | 10
 include/linux/swap.h                 |  2
 mm/internal.h                        |  5
 mm/ksm.c                             |  2
 mm/rmap.c                            |  2
 mm/swap.c                            |  2
 mm/vmscan.c                          | 27
 7 files changed, 18 insertions(+), 32 deletions(-)
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index 323ff5dba1cc..a68db7692ee8 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -197,12 +197,8 @@ the pages are also "rescued" from the unevictable list in the process of
 freeing them.
 
 page_evictable() also checks for mlocked pages by testing an additional page
-flag, PG_mlocked (as wrapped by PageMlocked()). If the page is NOT mlocked,
-and a non-NULL VMA is supplied, page_evictable() will check whether the VMA is
-VM_LOCKED via is_mlocked_vma(). is_mlocked_vma() will SetPageMlocked() and
-update the appropriate statistics if the vma is VM_LOCKED. This method allows
-efficient "culling" of pages in the fault path that are being faulted in to
-VM_LOCKED VMAs.
+flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
+faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
 
 
 VMSCAN'S HANDLING OF UNEVICTABLE PAGES
@@ -651,7 +647,7 @@ PAGE RECLAIM IN shrink_*_list()
 -------------------------------
 
 shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page, NULL) - diverting these to the unevictable list.
+!page_evictable(page) - diverting these to the unevictable list.
 However, shrink_active_list() only sees unevictable pages that made it onto the
 active/inactive lru lists. Note that these pages do not have PageUnevictable
 set - otherwise they would be on the unevictable list and shrink_active_list
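
The new documentation text names two ways PG_mlocked gets set: a page faulted
into a VM_LOCKED vma (mlocked_vma_newpage(), see the mm/internal.h hunk below)
and a page found while an existing range is being mlocked (mlock_vma_page() in
mm/mlock.c). The latter is unchanged by this patch; for context it reads
roughly as follows in kernels of this vintage (a reconstructed sketch, not
verbatim source):

void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		/* first to set PG_mlocked: account the page ... */
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		/* ... and move it to the unevictable list right away */
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}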
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 388e70601413..68df9c17fbbb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -281,7 +281,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern int page_evictable(struct page *page);
 extern void check_move_unevictable_pages(struct page **, int nr_pages);
 
 extern unsigned long scan_unevictable_pages;
diff --git a/mm/internal.h b/mm/internal.h
index 7f72f249bc29..78f25d6cc6a7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -168,9 +168,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path via page_evictable() for a new page
- * to determine if it's being mapped into a LOCKED vma.
- * If so, mark page as mlocked.
+ * Called only in fault path, to determine if a new page is being
+ * mapped into a LOCKED vma. If it is, mark page as mlocked.
  */
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 				      struct page *page)
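
The body below this comment is untouched by the patch; to make the mm/ksm.c
and mm/rmap.c hunks that follow easier to read, here is a rough reconstruction
of mlocked_vma_newpage() as of this era (a sketch, not verbatim):

static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				      struct page *page)
{
	/* a freshly faulted page cannot already be on an LRU list */
	VM_BUG_ON(PageLRU(page));

	/* VM_SPECIAL vmas are never mlocked, even when VM_LOCKED is set */
	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	/* account the page only the first time PG_mlocked is set */
	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}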
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1586,7 +1586,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 	SetPageSwapBacked(new_page);
 	__set_page_locked(new_page);
 
-	if (page_evictable(new_page, vma))
+	if (!mlocked_vma_newpage(vma, new_page))
 		lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
 	else
 		add_page_to_unevictable_list(new_page);
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1080,7 +1080,7 @@ void page_add_new_anon_rmap(struct page *page,
 	else
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (page_evictable(page, vma))
+	if (!mlocked_vma_newpage(vma, page))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
 		add_page_to_unevictable_list(page);
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -751,7 +751,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 
 	SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail, NULL)) {
+	if (page_evictable(page_tail)) {
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b010efc43891..8b627309dd44 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -553,7 +553,7 @@ void putback_lru_page(struct page *page)
 redo:
 	ClearPageUnevictable(page);
 
-	if (page_evictable(page, NULL)) {
+	if (page_evictable(page)) {
 		/*
 		 * For evictable pages, we can use the cache.
 		 * In event of a race, worst case is we end up with an
@@ -587,7 +587,7 @@ redo:
 	 * page is on unevictable list, it never be freed. To avoid that,
 	 * check after we added it to the list, again.
 	 */
-	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
 		if (!isolate_lru_page(page)) {
 			put_page(page);
 			goto redo;
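
The race that the comment in this hunk guards against is easier to follow in a
condensed sketch of the whole of putback_lru_page() (a reconstruction, not the
verbatim function):

void putback_lru_page(struct page *page)	/* sketch */
{
	int lru;
redo:
	ClearPageUnevictable(page);
	if (page_evictable(page)) {
		/* evictable: add via the per-CPU pagevec cache */
		lru = page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/* unevictable: bypass the cache, go straight to the list */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
	}
	/*
	 * The page may have been munlocked between the test above and the
	 * list insertion; a page left on the unevictable list would never
	 * be reclaimed, so test again and, if we can re-isolate it, retry.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
	}
	put_page(page);		/* drop the isolate_lru_page() reference */
}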
@@ -709,7 +709,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		sc->nr_scanned++;
 
-		if (unlikely(!page_evictable(page, NULL)))
+		if (unlikely(!page_evictable(page)))
 			goto cull_mlocked;
 
 		if (!sc->may_unmap && page_mapped(page))
@@ -1217,7 +1217,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 
 		VM_BUG_ON(PageLRU(page));
 		list_del(&page->lru);
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			spin_unlock_irq(&zone->lru_lock);
 			putback_lru_page(page);
 			spin_lock_irq(&zone->lru_lock);
@@ -1470,7 +1470,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
 			continue;
 		}
@@ -3414,27 +3414,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
- * @vma: the VMA in which the page is or will be mapped, may be NULL
  *
  * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list. The vma argument is !NULL when called from the
- * fault path to determine how to instantate a new page.
+ * lists vs unevictable list.
  *
  * Reasons page might not be evictable:
  * (1) page's mapping marked unevictable
  * (2) page is part of an mlocked VMA
  *
  */
-int page_evictable(struct page *page, struct vm_area_struct *vma)
+int page_evictable(struct page *page)
 {
-
-	if (mapping_unevictable(page_mapping(page)))
-		return 0;
-
-	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
-		return 0;
-
-	return 1;
+	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
 }
 
 #ifdef CONFIG_SHMEM
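
For every caller that passed a NULL vma, the new one-line body is equivalent
to the old three tests. Only the fault-path callers had a non-NULL vma, and as
the mm/ksm.c and mm/rmap.c hunks above show, they now invoke the mlock check
themselves, which makes the old hidden side effect explicit:

	/* fault-path callers, before */
	if (page_evictable(new_page, vma))	/* could SetPageMlocked() inside */
		lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(new_page);

	/* fault-path callers, after: the mlock side effect is explicit */
	if (!mlocked_vma_newpage(vma, new_page))
		lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(new_page);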
@@ -3472,7 +3463,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
 
-		if (page_evictable(page, NULL)) {
+		if (page_evictable(page)) {
 			enum lru_list lru = page_lru_base_type(page);
 
 			VM_BUG_ON(PageActive(page));
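
check_move_unevictable_pages() is the SHM_UNLOCK rescue path:
shmem_unlock_mapping() hands it batches of pages so that anything stranded on
the unevictable list while the segment was SHM_LOCKed can return to the normal
LRUs. A condensed sketch of that caller, which is unchanged by this patch
(loop details approximate):

void shmem_unlock_mapping(struct address_space *mapping)	/* sketch */
{
	struct pagevec pvec;
	pgoff_t next = 0;

	pagevec_init(&pvec, 0);
	/* stop early if someone SHM_LOCKs the segment again */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		next = pvec.pages[pvec.nr - 1]->index + 1;
		/* rescue any pages that are no longer unevictable */
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}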