path: root/mm
author     Minchan Kim <minchan@kernel.org>                2012-10-08 19:31:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-09 03:22:43 -0400
commit     02c6de8d757cb32c0829a45d81c3dfcbcafd998b (patch)
tree       0d8f0d182a44ba4ec4af0c909d01eb663e03e254 /mm
parent     70400303ce0c4ced3139499c676d5c79636b0c72 (diff)
mm: cma: discard clean pages during contiguous allocation instead of migration
Drop clean cache pages instead of migrating them during alloc_contig_range(),
to minimise allocation latency by reducing the amount of migration that is
necessary.  This is useful for CMA because the latency of the allocation
matters more than preserving the background process's working set.  In
addition, because pages are reclaimed rather than migrated, fewer free pages
are needed as migration targets, so the path avoids reclaiming memory just to
obtain free pages, which is a contributory factor to increased latency.

I measured the elapsed time of __alloc_contig_migrate_range(), which migrates
10M in a 40M movable zone on a QEMU machine.

Before - 146ms, After - 7ms

[akpm@linux-foundation.org: fix nommu build]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Mel Gorman <mgorman@suse.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Rik van Riel <riel@redhat.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
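To make the idea concrete before walking through the hunks, here is a minimal,
userspace-only sketch of the pattern the patch introduces: scan a list of
isolated pages, discard the clean file-backed ones outright, and hand only the
remainder to migration.  Everything in the sketch (toy_page, toy_reclaim_clean
and so on) is invented for illustration; the real implementation is
reclaim_clean_pages_from_list() in mm/vmscan.c and its call site in
__alloc_contig_migrate_range(), shown in the diff below.

/*
 * Illustrative, userspace-only sketch of the idea in this patch: before
 * "migrating" a set of isolated pages, drop the clean file-backed ones so
 * only dirty/anonymous pages pay the migration cost.  The names and struct
 * layout are invented for the example, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool file_backed;	/* stands in for page_is_file_cache() */
	bool dirty;		/* stands in for PageDirty() */
};

/* Mark clean file-backed pages as discarded; return how many were dropped. */
static int toy_reclaim_clean(const struct toy_page *pages, int n, bool *discarded)
{
	int dropped = 0;

	for (int i = 0; i < n; i++) {
		discarded[i] = pages[i].file_backed && !pages[i].dirty;
		if (discarded[i])
			dropped++;	/* clean cache: just free it, no migration */
	}
	return dropped;
}

int main(void)
{
	struct toy_page pages[] = {
		{ .file_backed = true,  .dirty = false },	/* clean cache */
		{ .file_backed = true,  .dirty = true  },	/* dirty cache */
		{ .file_backed = false, .dirty = false },	/* anonymous   */
		{ .file_backed = true,  .dirty = false },	/* clean cache */
	};
	int n = sizeof(pages) / sizeof(pages[0]);
	bool discarded[4];

	int dropped = toy_reclaim_clean(pages, n, discarded);

	/* Only the pages that were not dropped still need to be migrated. */
	printf("discarded %d clean pages, migrating %d pages\n",
	       dropped, n - dropped);
	return 0;
}

Built with any C99 compiler, this prints "discarded 2 clean pages, migrating 2
pages", which is the whole point of the patch: clean page-cache pages never
reach migrate_pages() at all.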
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h     3
-rw-r--r--  mm/page_alloc.c   2
-rw-r--r--  mm/vmscan.c      43
3 files changed, 41 insertions, 7 deletions
diff --git a/mm/internal.h b/mm/internal.h
index bbd7b34db4ea..8312d4fadf59 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 		  unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cfd565dbe124..cefd14e6dcf2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5700,6 +5700,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 			break;
 		}
 
+		reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
 		ret = migrate_pages(&cc.migratepages,
 				    __alloc_contig_migrate_alloc,
 				    0, false, MIGRATE_SYNC);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d16bf5a53266..1ee4b69a28a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
+				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_writeback)
+				      unsigned long *ret_nr_writeback,
+				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 	mem_cgroup_uncharge_start();
 	while (!list_empty(page_list)) {
-		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
+		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 
 		cond_resched();
 
@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			wait_on_page_writeback(page);
 		}
 
-		references = page_check_references(page, sc);
+		if (!force_reclaim)
+			references = page_check_references(page, sc);
+
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, TTU_UNMAP)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -960,6 +964,33 @@ keep:
 	return nr_reclaimed;
 }
 
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_unmap = 1,
+	};
+	unsigned long ret, dummy1, dummy2;
+	struct page *page, *next;
+	LIST_HEAD(clean_pages);
+
+	list_for_each_entry_safe(page, next, page_list, lru) {
+		if (page_is_file_cache(page) && !PageDirty(page)) {
+			ClearPageActive(page);
+			list_move(&page->lru, &clean_pages);
+		}
+	}
+
+	ret = shrink_page_list(&clean_pages, zone, &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&dummy1, &dummy2, true);
+	list_splice(&clean_pages, page_list);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	return ret;
+}
+
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being
@@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
-					&nr_dirty, &nr_writeback);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+					&nr_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 