 include/linux/rmap.h | 21 +++++++++++----------
 mm/internal.h        |  3 ++-
 mm/page_alloc.c      |  2 ++
 mm/vmscan.c          | 43 +++++++++++++++++++++++++++++++++++++------
 4 files changed, 52 insertions(+), 17 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b2cce644ffc7..bfe1f4780644 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,6 +71,17 @@ struct anon_vma_chain {
 #endif
 };
 
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
@@ -164,16 +175,6 @@ int page_referenced(struct page *, int is_locked,
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
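The two hunks above only relocate enum ttu_flags: it moves out of the CONFIG_MMU-guarded part of the header so the new enum ttu_flags parameter of shrink_page_list() (see the mm/vmscan.c diff below) has a visible type even on !CONFIG_MMU builds. The layout is a mode/modifier split: the low byte (TTU_ACTION_MASK) holds exactly one mode, while the high bits OR in independent modifiers. Below is a standalone illustration of how that composes, mirroring the enum as plain C; the printf harness is mine, not part of the patch.

#include <stdio.h>

/* Mirror of the ttu_flags layout above, compiled standalone for illustration. */
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),
	TTU_IGNORE_ACCESS = (1 << 9),
	TTU_IGNORE_HWPOISON = (1 << 10),
};

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int main(void)
{
	/* The combination reclaim_clean_pages_from_list() passes down. */
	enum ttu_flags flags = TTU_UNMAP | TTU_IGNORE_ACCESS;

	/* The low byte still decodes to the unmap mode... */
	printf("action = %d\n", TTU_ACTION(flags));
	/* ...while each modifier bit is tested independently. */
	printf("ignore_access = %d\n", !!(flags & TTU_IGNORE_ACCESS));
	return 0;
}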
diff --git a/mm/internal.h b/mm/internal.h
index bbd7b34db4ea..8312d4fadf59 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 	unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cfd565dbe124..cefd14e6dcf2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5700,6 +5700,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 			break;
 		}
 
+		reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
 		ret = migrate_pages(&cc.migratepages,
 				    __alloc_contig_migrate_alloc,
 				    0, false, MIGRATE_SYNC);
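The call is deliberately placed ahead of migrate_pages() in the contiguous-allocation (CMA) path: a clean file-backed page can simply be dropped and re-read from backing storage later, which is cheaper than copying it and rewriting its mappings. A standalone stub sketch of that ordering follows; the functions and counts here are stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Stand-in: drop the clean file pages, report how many went away. */
static int reclaim_clean_stub(int *nr_isolated)
{
	int dropped = 24;	/* assumed count, illustration only */

	*nr_isolated -= dropped;
	return dropped;
}

/* Stand-in: migrate whatever remains (dirty file pages, anon pages). */
static int migrate_stub(int nr_isolated)
{
	return nr_isolated;
}

int main(void)
{
	int nr_isolated = 64;

	/* 1. Discard first: no page copy, no rmap rewrite for clean cache. */
	int dropped = reclaim_clean_stub(&nr_isolated);

	/* 2. Pay the migration cost only for the remainder. */
	int migrated = migrate_stub(nr_isolated);

	printf("dropped %d, migrated %d of 64 isolated pages\n",
	       dropped, migrated);
	return 0;
}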
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d16bf5a53266..1ee4b69a28a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
+				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_writeback)
+				      unsigned long *ret_nr_writeback,
+				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 	mem_cgroup_uncharge_start();
 	while (!list_empty(page_list)) {
-		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
+		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 
 		cond_resched();
 
@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			wait_on_page_writeback(page);
 		}
 
-		references = page_check_references(page, sc);
+		if (!force_reclaim)
+			references = page_check_references(page, sc);
+
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, TTU_UNMAP)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -960,6 +964,33 @@ keep:
 	return nr_reclaimed;
 }
 
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_unmap = 1,
+	};
+	unsigned long ret, dummy1, dummy2;
+	struct page *page, *next;
+	LIST_HEAD(clean_pages);
+
+	list_for_each_entry_safe(page, next, page_list, lru) {
+		if (page_is_file_cache(page) && !PageDirty(page)) {
+			ClearPageActive(page);
+			list_move(&page->lru, &clean_pages);
+		}
+	}
+
+	ret = shrink_page_list(&clean_pages, zone, &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&dummy1, &dummy2, true);
+	list_splice(&clean_pages, page_list);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	return ret;
+}
+
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being
@@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
-					&nr_dirty, &nr_writeback);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+					&nr_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
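The new reclaim_clean_pages_from_list() above is a two-pass helper: a safe walk moves clean file-backed pages onto a private clean_pages list, shrink_page_list() runs on that list with TTU_UNMAP|TTU_IGNORE_ACCESS and force_reclaim=true (so the page_check_references() aging heuristics are skipped), and the survivors are spliced back for the migration path. Below is a userspace sketch of the first-pass list discipline, with minimal stand-ins for the kernel's list_head helpers; these are illustration-only substitutes, not the real API.

#include <stdio.h>
#include <stdbool.h>

/* Minimal stand-ins for the kernel's list_head helpers; illustration only. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_move_tail_stub(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add_tail(e, head);
}

/* lru is deliberately the first member so the cast below is valid. */
struct page_stub {
	struct list_head lru;
	bool file_backed;
	bool dirty;
};

int main(void)
{
	struct list_head page_list = LIST_HEAD_INIT(page_list);
	struct list_head clean_pages = LIST_HEAD_INIT(clean_pages);
	struct page_stub pages[4] = {
		{ .file_backed = true,  .dirty = false },	/* moved */
		{ .file_backed = true,  .dirty = true  },	/* stays */
		{ .file_backed = false, .dirty = false },	/* anon: stays */
		{ .file_backed = true,  .dirty = false },	/* moved */
	};
	struct list_head *pos, *next;
	int moved = 0;

	for (int i = 0; i < 4; i++)
		list_add_tail(&pages[i].lru, &page_list);

	/*
	 * First pass, as in the patch: a "safe" iterator caches the next
	 * pointer because entries are unlinked mid-walk.
	 */
	for (pos = page_list.next; pos != &page_list; pos = next) {
		struct page_stub *p = (struct page_stub *)pos;

		next = pos->next;
		if (p->file_backed && !p->dirty) {
			list_move_tail_stub(pos, &clean_pages);
			moved++;
		}
	}

	/* The patch then runs shrink_page_list() on clean_pages and splices
	 * any survivors back into page_list; both steps are elided here. */
	printf("moved %d of 4 pages to the clean list\n", moved);
	return 0;
}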