author     Minchan Kim <minchan@kernel.org>                 2012-10-08 19:33:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:00 -0400
commit     e46a28790e594c0876d1a84270926abf75460f61 (patch)
tree       febfaa6c20dab69490308190729f1d898e4df930 /mm
parent     7a71932d5676b7410ab64d149bad8bde6b0d8632 (diff)
CMA: migrate mlocked pages
Presently CMA cannot migrate mlocked pages, so it ends up failing to allocate
contiguous memory space.  This patch makes mlocked pages be migrated out.  Of
course, it can affect realtime processes, but in the CMA use case, failing to
allocate contiguous memory is far worse than the access latency to an mlocked
page being variable while CMA is running.  If someone wants to make the system
realtime, he shouldn't enable CMA, because stalls can still happen at random
times.

[akpm@linux-foundation.org: tweak comment text, per Mel]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
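In essence, the patch threads one "allow unevictable" bit from the CMA entry
point down to the LRU isolation check, as the hunks below show.  The following
is a minimal user-space sketch of that check, not kernel code: struct page,
can_isolate() and the flag's numeric value here are simplified stand-ins for
the kernel's struct page, __isolate_lru_page() and the isolate_mode_t bit
ISOLATE_UNEVICTABLE.

/*
 * Minimal user-space model of the isolation check this patch changes.
 * Everything here is a simplified stand-in for the kernel's types.
 */
#include <stdbool.h>
#include <stdio.h>

#define ISOLATE_UNEVICTABLE 0x8	/* illustrative value, not the kernel's */

struct page {
	bool on_lru;		/* stands in for PageLRU() */
	bool unevictable;	/* stands in for PageUnevictable(), e.g. mlocked */
};

/*
 * Mirrors the patched __isolate_lru_page() logic: unevictable pages are
 * skipped for plain compaction, but allowed through when the caller sets
 * ISOLATE_UNEVICTABLE in the isolation mode.
 */
static bool can_isolate(const struct page *page, unsigned int mode)
{
	if (!page->on_lru)
		return false;
	/* Compaction should not handle unevictable pages but CMA can do so */
	if (page->unevictable && !(mode & ISOLATE_UNEVICTABLE))
		return false;
	return true;
}

int main(void)
{
	struct page mlocked = { .on_lru = true, .unevictable = true };

	/* compaction path: isolate_migratepages() passes unevictable=false */
	printf("compaction isolates mlocked page: %d\n",
	       can_isolate(&mlocked, 0));			/* prints 0 */

	/* CMA path: __alloc_contig_migrate_range() passes unevictable=true,
	 * which isolate_migratepages_range() maps to ISOLATE_UNEVICTABLE */
	printf("CMA isolates mlocked page: %d\n",
	       can_isolate(&mlocked, ISOLATE_UNEVICTABLE));	/* prints 1 */
	return 0;
}

Compaction callers keep passing false, so their behaviour is unchanged; only
the CMA path opts in to isolating mlocked pages.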
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c |  8
-rw-r--r--  mm/internal.h   |  2
-rw-r--r--  mm/page_alloc.c |  2
-rw-r--r--  mm/vmscan.c     |  4
4 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d8187f9cabbf..2c4ce17651d8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -461,6 +461,7 @@ static bool too_many_isolated(struct zone *zone)
  * @cc: Compaction control structure.
  * @low_pfn: The first PFN of the range.
  * @end_pfn: The one-past-the-last PFN of the range.
+ * @unevictable: true if it allows to isolate unevictable pages
  *
  * Isolate all pages that can be migrated from the range specified by
  * [low_pfn, end_pfn). Returns zero if there is a fatal signal
@@ -476,7 +477,7 @@ static bool too_many_isolated(struct zone *zone)
  */
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			unsigned long low_pfn, unsigned long end_pfn)
+			unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 {
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
@@ -602,6 +603,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync)
 			mode |= ISOLATE_ASYNC_MIGRATE;

+		if (unevictable)
+			mode |= ISOLATE_UNEVICTABLE;
+
 		lruvec = mem_cgroup_page_lruvec(page, zone);

 		/* Try isolate the page */
@@ -807,7 +811,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	}

 	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
 	if (!low_pfn || cc->contended)
 		return ISOLATE_ABORT;

diff --git a/mm/internal.h b/mm/internal.h
index 4dc93e2fe69e..f5f295fe11e1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -138,7 +138,7 @@ isolate_freepages_range(struct compact_control *cc,
 			unsigned long start_pfn, unsigned long end_pfn);
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			unsigned long low_pfn, unsigned long end_pfn);
+			unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

 #endif

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5485f0ef4ec3..fd86c47de86f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5690,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		if (list_empty(&cc->migratepages)) {
 			cc->nr_migratepages = 0;
 			pfn = isolate_migratepages_range(cc->zone, cc,
-							 pfn, end);
+							 pfn, end, true);
 			if (!pfn) {
 				ret = -EINTR;
 				break;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b627309dd44..2624edcfb420 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1009,8 +1009,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 	if (!PageLRU(page))
 		return ret;

-	/* Do not give back unevictable pages for compaction */
-	if (PageUnevictable(page))
+	/* Compaction should not handle unevictable pages but CMA can do so */
+	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
 		return ret;

 	ret = -EBUSY;
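Worth noting when reading the vmscan.c hunk: the ISOLATE_UNEVICTABLE bit
consumed here is defined alongside the other isolate_mode_t flags outside mm/
(in include/linux/mmzone.h at the time), so the hunk introducing it falls
outside this mm-limited diffstat.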