 include/linux/page-isolation.h | 10
 mm/memory-failure.c            |  2
 mm/memory_hotplug.c            |  5
 mm/page_alloc.c                | 27
 mm/page_isolation.c            | 27
 5 files changed, 53 insertions(+), 18 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 76a9539cfd3f..a92061e08d48 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,7 +2,8 @@
 #define __LINUX_PAGEISOLATION_H


-bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+                         bool skip_hwpoisoned_pages);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype);
@@ -21,7 +22,7 @@ int move_freepages(struct zone *zone,
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-                         unsigned migratetype);
+                         unsigned migratetype, bool skip_hwpoisoned_pages);

 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
@@ -34,12 +35,13 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 /*
  * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
-int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+                        bool skip_hwpoisoned_pages);

 /*
  * Internal functions. Changes pageblock's migrate type.
  */
-int set_migratetype_isolate(struct page *page);
+int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
 void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                   int **resultp);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8b20278be6a6..2c9fc7340b12 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1385,7 +1385,7 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
          * Isolate the page, so that it doesn't get reallocated if it
          * was free.
          */
-        set_migratetype_isolate(p);
+        set_migratetype_isolate(p, true);
         /*
          * When the target page is a free hugepage, just remove it
          * from free hugepage list.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e4eeacae2b91..0095d156324a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -847,7 +847,7 @@ check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 {
         int ret;
         long offlined = *(long *)data;
-        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
+        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
         offlined = nr_pages;
         if (!ret)
                 *(long *)data += offlined;
@@ -894,7 +894,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
         nr_pages = end_pfn - start_pfn;

         /* set above range as isolated */
-        ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+        ret = start_isolate_page_range(start_pfn, end_pfn,
+                                       MIGRATE_MOVABLE, true);
         if (ret)
                 goto out;

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49b0ea3cc2f..6f50cfe98a7b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5616,7 +5616,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  * expect this function should be exact.
  */
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+                         bool skip_hwpoisoned_pages)
 {
         unsigned long pfn, iter, found;
         int mt;
@@ -5651,6 +5652,13 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
                         continue;
                 }

+                /*
+                 * The HWPoisoned page may be not in buddy system, and
+                 * page_count() is not 0.
+                 */
+                if (skip_hwpoisoned_pages && PageHWPoison(page))
+                        continue;
+
                 if (!PageLRU(page))
                         found++;
                 /*
@@ -5693,7 +5701,7 @@ bool is_pageblock_removable_nolock(struct page *page)
                         zone->zone_start_pfn + zone->spanned_pages <= pfn)
                 return false;

-        return !has_unmovable_pages(zone, page, 0);
+        return !has_unmovable_pages(zone, page, 0, true);
 }

 #ifdef CONFIG_CMA
@@ -5864,7 +5872,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
          */

         ret = start_isolate_page_range(pfn_max_align_down(start),
-                                       pfn_max_align_up(end), migratetype);
+                                       pfn_max_align_up(end), migratetype,
+                                       false);
         if (ret)
                 return ret;

@@ -5903,7 +5912,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         }

         /* Make sure the range is really isolated. */
-        if (test_pages_isolated(outer_start, end)) {
+        if (test_pages_isolated(outer_start, end, false)) {
                 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
                        outer_start, end);
                 ret = -EBUSY;
@@ -6018,6 +6027,16 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                         continue;
                 }
                 page = pfn_to_page(pfn);
+                /*
+                 * The HWPoisoned page may be not in buddy system, and
+                 * page_count() is not 0.
+                 */
+                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
+                        pfn++;
+                        SetPageReserved(page);
+                        continue;
+                }
+
                 BUG_ON(page_count(page));
                 BUG_ON(!PageBuddy(page));
                 order = page_order(page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index f2f5b4818e94..9d2264ea4606 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -30,7 +30,7 @@ static void restore_pageblock_isolate(struct page *page, int migratetype)
         zone->nr_pageblock_isolate--;
 }

-int set_migratetype_isolate(struct page *page)
+int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
 {
         struct zone *zone;
         unsigned long flags, pfn;
@@ -66,7 +66,8 @@ int set_migratetype_isolate(struct page *page)
          * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
          * We just check MOVABLE pages.
          */
-        if (!has_unmovable_pages(zone, page, arg.pages_found))
+        if (!has_unmovable_pages(zone, page, arg.pages_found,
+                                 skip_hwpoisoned_pages))
                 ret = 0;

         /*
@@ -134,7 +135,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-                             unsigned migratetype)
+                             unsigned migratetype, bool skip_hwpoisoned_pages)
 {
         unsigned long pfn;
         unsigned long undo_pfn;
@@ -147,7 +148,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
              pfn < end_pfn;
              pfn += pageblock_nr_pages) {
                 page = __first_valid_page(pfn, pageblock_nr_pages);
-                if (page && set_migratetype_isolate(page)) {
+                if (page &&
+                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                         undo_pfn = pfn;
                         goto undo;
                 }
@@ -190,7 +192,8 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Returns 1 if all pages in the range are isolated.
  */
 static int
-__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
+__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
+                                  bool skip_hwpoisoned_pages)
 {
         struct page *page;

@@ -220,6 +223,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
                 else if (page_count(page) == 0 &&
                         get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                         pfn += 1;
+                else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
+                        /*
+                         * The HWPoisoned page may be not in buddy
+                         * system, and page_count() is not 0.
+                         */
+                        pfn++;
+                        continue;
+                }
                 else
                         break;
         }
@@ -228,7 +239,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
         return 1;
 }

-int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+                        bool skip_hwpoisoned_pages)
 {
         unsigned long pfn, flags;
         struct page *page;
@@ -251,7 +263,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
         /* Check all pages are free or Marked as ISOLATED */
         zone = page_zone(page);
         spin_lock_irqsave(&zone->lock, flags);
-        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
+                                                skip_hwpoisoned_pages);
         spin_unlock_irqrestore(&zone->lock, flags);
         return ret ? 0 : -EBUSY;
 }
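
The check this patch adds to __test_page_isolated_in_pageblock() can be modelled outside the kernel. The sketch below is a minimal, self-contained userspace approximation of that loop, not kernel code: struct toy_page, toy_block_isolated(), and the buddy/hwpoison/count fields are hypothetical stand-ins for PageBuddy(), PageHWPoison(), and page_count(). It only illustrates why a hwpoisoned page with a nonzero refcount no longer makes the isolation test report a range as busy when skip_hwpoisoned_pages is true (as memory offlining now passes), while alloc_contig_range, which passes false, still treats it as busy.

/*
 * Toy userspace model of the skip_hwpoisoned_pages check added to
 * __test_page_isolated_in_pageblock(): a hwpoisoned page may sit outside
 * the buddy allocator with a nonzero refcount, so the isolation test can
 * be told to tolerate it instead of reporting the whole range as busy.
 * All names below are hypothetical stand-ins, not kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
        bool buddy;       /* stand-in for PageBuddy() */
        bool hwpoison;    /* stand-in for PageHWPoison() */
        int count;        /* stand-in for page_count() */
};

/* Returns true if every page in the block looks isolated or free. */
static bool toy_block_isolated(const struct toy_page *pages, int n,
                               bool skip_hwpoisoned_pages)
{
        for (int i = 0; i < n; i++) {
                const struct toy_page *p = &pages[i];

                if (p->buddy || p->count == 0)
                        continue;        /* free page or isolated freepage */
                if (skip_hwpoisoned_pages && p->hwpoison)
                        continue;        /* poisoned page: tolerated */
                return false;            /* genuinely busy page */
        }
        return true;
}

int main(void)
{
        struct toy_page block[3] = {
                { .buddy = true },
                { .hwpoison = true, .count = 1 },  /* poisoned, still refcounted */
                { .buddy = true },
        };

        /* Memory offlining passes true, so the poisoned page is skipped. */
        printf("skip=true  -> %s\n",
               toy_block_isolated(block, 3, true) ? "isolated" : "busy");
        /* alloc_contig_range passes false and sees the same block as busy. */
        printf("skip=false -> %s\n",
               toy_block_isolated(block, 3, false) ? "isolated" : "busy");
        return 0;
}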