author	Michal Hocko <mhocko@suse.com>	2017-11-15 20:33:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 21:21:02 -0500
commit	4da2ce250f986060750fcc5b29112914e31803ba (patch)
tree	779c48d833cfea11124b1e57f8082372d0a156a6 /mm/page_isolation.c
parent	d7b236e10cedd95373a79fd53b7e9c105bea4f08 (diff)
mm: distinguish CMA and MOVABLE isolation in has_unmovable_pages()
Joonsoo has noticed that "mm: drop migrate type checks from has_unmovable_pages" would break the CMA allocator, because it relies on has_unmovable_pages returning false even for CMA pageblocks, which in fact don't have to be movable:

    alloc_contig_range
      start_isolate_page_range
        set_migratetype_isolate
          has_unmovable_pages

This is a result of the code sharing between CMA and memory hotplug, while each one has a different idea of what has_unmovable_pages should return. This is unfortunate, but fixing it properly would require a lot of code duplication.

Fix the issue by introducing the requested migrate type argument and special-casing MIGRATE_CMA, where CMA page blocks are handled properly. This will work for memory hotplug because it requires MIGRATE_MOVABLE.

Link: http://lkml.kernel.org/r/20171019122118.y6cndierwl2vnguj@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Tested-by: Ran Wang <ran.wang_1@nxp.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
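The matching has_unmovable_pages() change lives in mm/page_alloc.c and is therefore not part of the diffstat below. As a rough, simplified sketch of the idea (not the literal hunk from that file; it only assumes the existing kernel helpers is_migrate_cma() and get_pageblock_migratetype(), and elides the rest of the function): a MIGRATE_CMA request treats a pageblock already marked CMA as free of unmovable pages, while the memory-hotplug path, which passes MIGRATE_MOVABLE, keeps the stricter check.

/* Sketch only -- the real change edits has_unmovable_pages() in mm/page_alloc.c. */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype, bool skip_hwpoisoned_pages)
{
	/*
	 * CMA allocations (alloc_contig_range) must be able to isolate CMA
	 * pageblocks even though their contents are not strictly movable,
	 * so a MIGRATE_CMA request considers a CMA pageblock movable.
	 */
	if (is_migrate_cma(migratetype) &&
	    is_migrate_cma(get_pageblock_migratetype(page)))
		return false;

	/* ... the existing page-by-page scan of the block continues here ... */
	return false;
}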
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 44f213935bf6..165ed8117bd1 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,7 +15,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/page_isolation.h>
 
-static int set_migratetype_isolate(struct page *page,
+static int set_migratetype_isolate(struct page *page, int migratetype,
 			bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
@@ -52,7 +52,7 @@ static int set_migratetype_isolate(struct page *page,
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found,
+	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
 				 skip_hwpoisoned_pages))
 		ret = 0;
 
@@ -64,14 +64,14 @@ static int set_migratetype_isolate(struct page *page,
 out:
 	if (!ret) {
 		unsigned long nr_pages;
-		int migratetype = get_pageblock_migratetype(page);
+		int mt = get_pageblock_migratetype(page);
 
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
 									NULL);
 
-		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
+		__mod_zone_freepage_state(zone, -nr_pages, mt);
 	}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -183,7 +183,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (page &&
-		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
+		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
 			undo_pfn = pfn;
 			goto undo;
 		}
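For context only (no hunks from other files are shown in this diff): the distinction works because the two callers of start_isolate_page_range() already supply different migrate types, as the commit message notes. A hedged sketch of those call sites follows, with pfn bounds, surrounding code, and error handling simplified; the exact argument lists in mm/page_alloc.c and mm/memory_hotplug.c may differ slightly.

/* Sketch of the existing callers, simplified. */

/* CMA path, alloc_contig_range(): forwards the requested type, MIGRATE_CMA for CMA. */
ret = start_isolate_page_range(start_pfn, end_pfn, migratetype, false);

/* Memory-hotplug path, offlining: always isolates as MIGRATE_MOVABLE. */
ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true);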