-rw-r--r--	include/linux/swap.h	 2
-rw-r--r--	mm/mlock.c	 6
-rw-r--r--	mm/swap.c	82
-rw-r--r--	mm/vmscan.c	59
4 files changed, 54 insertions(+), 95 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b6a59f722a3..a1a3f4ed94ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void add_page_to_unevictable_list(struct page *page);
-
 extern void lru_cache_add_active_or_unevictable(struct page *page,
 					struct vm_area_struct *vma);
 
diff --git a/mm/mlock.c b/mm/mlock.c
index 79398200e423..74e5a6547c3d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page)
 		mod_zone_page_state(page_zone(page), NR_MLOCK,
 				    -hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGCLEARED);
+		/*
+		 * The previous TestClearPageMlocked() corresponds to the
+		 * smp_mb() in __pagevec_lru_add_fn().
+		 *
+		 * See __pagevec_lru_add_fn for more explanation.
+		 */
 		if (!isolate_lru_page(page)) {
 			putback_lru_page(page);
 		} else {
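Note that clear_page_mlock() itself gains no explicit barrier: in the kernel memory model, a value-returning atomic RMW such as TestClearPageMlocked() is documented (Documentation/memory-barriers.txt) to imply a full smp_mb() on either side of the operation, so only the __pagevec_lru_add_fn() side of the pairing needs an explicit barrier. A runnable model of the two-sided protocol follows the mm/swap.c hunks below.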
diff --git a/mm/swap.c b/mm/swap.c
index 567a7b96e41d..2d337710218f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -446,30 +446,6 @@ void lru_cache_add(struct page *page)
 }
 
 /**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page:  the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list.  To avoid races with
- * tasks that might be making the page evictable, through eg. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks.  This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
-	struct pglist_data *pgdat = page_pgdat(page);
-	struct lruvec *lruvec;
-
-	spin_lock_irq(&pgdat->lru_lock);
-	lruvec = mem_cgroup_page_lruvec(page, pgdat);
-	ClearPageActive(page);
-	SetPageUnevictable(page);
-	SetPageLRU(page);
-	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&pgdat->lru_lock);
-}
-
-/**
  * lru_cache_add_active_or_unevictable
  * @page:  the page to be added to LRU
  * @vma:   vma in which page is mapped for determining reclaimability
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		SetPageActive(page);
-		lru_cache_add(page);
-		return;
-	}
-
-	if (!TestSetPageMlocked(page)) {
+	else if (!TestSetPageMlocked(page)) {
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
-	add_page_to_unevictable_list(page);
+	lru_cache_add(page);
 }
 
 /*
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	int file = page_is_file_cache(page);
-	int active = PageActive(page);
-	enum lru_list lru = page_lru(page);
+	enum lru_list lru;
+	int was_unevictable = TestClearPageUnevictable(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	SetPageLRU(page);
+	/*
+	 * Page becomes evictable in two ways:
+	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
+	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
+	 *    b) do PageLRU check before lock [clear_page_mlock]
+	 *
+	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
+	 * the following strict ordering:
+	 *
+	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
+	 *
+	 * SetPageLRU()				TestClearPageMlocked()
+	 * smp_mb() // explicit ordering	// above provides strict
+	 *					// ordering
+	 * PageMlocked()			PageLRU()
+	 *
+	 *
+	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
+	 * the isolation, the explicit barrier makes sure that the
+	 * page_evictable() check puts the page on the correct LRU. Without
+	 * smp_mb(), SetPageLRU can be reordered after the PageMlocked check,
+	 * making '#1' fail the isolation of the page whose Mlocked bit was
+	 * just cleared (#0 is also looking at the same page), and the
+	 * evictable page would be stranded on an unevictable LRU.
+	 */
+	smp_mb();
+
+	if (page_evictable(page)) {
+		lru = page_lru(page);
+		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+					 PageActive(page));
+		if (was_unevictable)
+			count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		lru = LRU_UNEVICTABLE;
+		ClearPageActive(page);
+		SetPageUnevictable(page);
+		if (!was_unevictable)
+			count_vm_event(UNEVICTABLE_PGCULLED);
+	}
+
 	add_page_to_lru_list(page, lruvec, lru);
-	update_page_reclaim_stat(lruvec, file, active);
 	trace_mm_lru_insertion(page, lru);
 }
 
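The ordering argument in the comment above is the classic store-buffering pattern. What follows is a minimal userspace sketch of it, not kernel code: C11 atomics and pthreads stand in for the kernel primitives, pg_lru and pg_mlocked model the page flags, and the culled/rescued flags record the outcome each side would choose. On the clear_page_mlock() side the seq_cst fence models the full barrier implied by TestClearPageMlocked(). All names here are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pg_lru;		/* PG_lru, initially clear */
static atomic_bool pg_mlocked = true;	/* PG_mlocked, initially set */
static atomic_bool culled, rescued;

/* CPU #0: models __pagevec_lru_add_fn() */
static void *pagevec_add(void *arg)
{
	atomic_store_explicit(&pg_lru, true, memory_order_relaxed);	/* SetPageLRU() */
	atomic_thread_fence(memory_order_seq_cst);			/* smp_mb() */
	if (atomic_load_explicit(&pg_mlocked, memory_order_relaxed))	/* !page_evictable() */
		atomic_store(&culled, true);	/* page goes to the unevictable LRU */
	return NULL;
}

/* CPU #1: models clear_page_mlock() */
static void *clear_mlock(void *arg)
{
	/* TestClearPageMlocked(); the fence models its implied full barrier */
	atomic_exchange_explicit(&pg_mlocked, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&pg_lru, memory_order_relaxed))	/* PageLRU() */
		atomic_store(&rescued, true);	/* isolation can succeed, page is moved */
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, pagevec_add, NULL);
	pthread_create(&t1, NULL, clear_mlock, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);

	/*
	 * The paired barriers forbid the store-buffering outcome in which
	 * each side misses the other's store: if the page was culled, the
	 * clearing side must have observed PG_lru and rescued it. Remove
	 * either fence and this message becomes reachable.
	 */
	if (atomic_load(&culled) && !atomic_load(&rescued))
		puts("bug: evictable page stranded on the unevictable LRU");
	return 0;
}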
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 444749669187..bee53495a829 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  */
 void putback_lru_page(struct page *page)
 {
-	bool is_unevictable;
-	int was_unevictable = PageUnevictable(page);
-
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-redo:
-	ClearPageUnevictable(page);
-
-	if (page_evictable(page)) {
-		/*
-		 * For evictable pages, we can use the cache.
-		 * In event of a race, worst case is we end up with an
-		 * unevictable page on [in]active list.
-		 * We know how to handle that.
-		 */
-		is_unevictable = false;
-		lru_cache_add(page);
-	} else {
-		/*
-		 * Put unevictable pages directly on zone's unevictable
-		 * list.
-		 */
-		is_unevictable = true;
-		add_page_to_unevictable_list(page);
-		/*
-		 * When racing with an mlock or AS_UNEVICTABLE clearing
-		 * (page is unlocked) make sure that if the other thread
-		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_pages,
-		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
-		 * the page back to the evictable list.
-		 *
-		 * The other side is TestClearPageMlocked() or shmem_lock().
-		 */
-		smp_mb();
-	}
-
-	/*
-	 * page's status can change while we move it among lru. If an evictable
-	 * page is on unevictable list, it never be freed. To avoid that,
-	 * check after we added it to the list, again.
-	 */
-	if (is_unevictable && page_evictable(page)) {
-		if (!isolate_lru_page(page)) {
-			put_page(page);
-			goto redo;
-		}
-		/* This means someone else dropped this page from LRU
-		 * So, it will be freed or putback to LRU again. There is
-		 * nothing to do here.
-		 */
-	}
-
-	if (was_unevictable && !is_unevictable)
-		count_vm_event(UNEVICTABLE_PGRESCUED);
-	else if (!was_unevictable && is_unevictable)
-		count_vm_event(UNEVICTABLE_PGCULLED);
-
+	lru_cache_add(page);
 	put_page(page);		/* drop ref from isolate */
 }
 
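With the pagevec no longer bypassed, putback_lru_page() shrinks to lru_cache_add() plus the reference drop: every putback now batches page additions under the LRU lock, and the evictable-versus-unevictable placement, the UNEVICTABLE_PGRESCUED/PGCULLED accounting, and the race handling are centralized in __pagevec_lru_add_fn(). The old open-coded redo: loop becomes unnecessary because a page that turns evictable after the placement decision is rescued by the other side of the barrier pairing shown above.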