author    Wen Congyang <wency@cn.fujitsu.com>	2012-12-11 19:00:45 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:22 -0500
commit    b023f46813cde6e3b8a8c24f432ff9c1fd8e9a64
tree      edac3509e7c44136450e20d7f9cc81de35926650 /mm
parent    fa7194eb99b8e9fefe96f045002648ffb55f53c0
memory-hotplug: skip HWPoisoned page when offlining pages
The HWPoisoned flag may be set when we offline a page via the sysfs interface /sys/devices/system/memory/soft_offline_page or /sys/devices/system/memory/hard_offline_page. If we don't clear this flag when onlining pages, such a page cannot be freed and will not be in the free list, so we cannot offline it again. We should therefore skip such pages when offlining pages.

Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
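For illustration only (not part of the commit): an HWPoisoned page is neither free in the buddy allocator nor on the LRU, so an isolation scan that does not skip it will report the whole range as busy. The following is a minimal userspace sketch of the loop this patch changes in __test_page_isolated_in_pageblock(); the names fake_page and range_isolated are hypothetical and only model the logic.

/*
 * Hypothetical userspace model (not kernel code) of the isolation scan
 * after this patch: a poisoned page is treated as "already out of the way"
 * only when skip_hwpoisoned_pages is true.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool buddy;     /* free page sitting in the buddy allocator */
	bool hwpoison;  /* PageHWPoison() would return true */
};

/* Returns true if every page in [0, n) may be treated as isolated. */
static bool range_isolated(const struct fake_page *pages, int n,
			   bool skip_hwpoisoned_pages)
{
	for (int i = 0; i < n; ) {
		if (pages[i].buddy)
			i++;		/* free page: fine */
		else if (skip_hwpoisoned_pages && pages[i].hwpoison)
			i++;		/* poisoned page: skip it, as the patch does */
		else
			return false;	/* in-use page: range is busy */
	}
	return true;
}

int main(void)
{
	const struct fake_page block[4] = {
		{ .buddy = true },
		{ .hwpoison = true },	/* e.g. poisoned earlier via soft_offline_page */
		{ .buddy = true },
		{ .buddy = true },
	};

	printf("skip_hwpoisoned_pages=false -> isolated=%d\n",
	       range_isolated(block, 4, false));	/* 0: scan stops at the poisoned page */
	printf("skip_hwpoisoned_pages=true  -> isolated=%d\n",
	       range_isolated(block, 4, true));		/* 1: poisoned page is skipped */
	return 0;
}

As the hunks below show, the memory-offlining path passes skip_hwpoisoned_pages=true, while alloc_contig_range() passes false, since a poisoned page inside a contiguous allocation really does make the range unusable.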
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory-failure.c	 2
-rw-r--r--	mm/memory_hotplug.c	 5
-rw-r--r--	mm/page_alloc.c		27
-rw-r--r--	mm/page_isolation.c	27
4 files changed, 47 insertions, 14 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8b20278be6a6..2c9fc7340b12 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1385,7 +1385,7 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
 	 * Isolate the page, so that it doesn't get reallocated if it
 	 * was free.
 	 */
-	set_migratetype_isolate(p);
+	set_migratetype_isolate(p, true);
 	/*
 	 * When the target page is a free hugepage, just remove it
 	 * from free hugepage list.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e4eeacae2b91..0095d156324a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -847,7 +847,7 @@ check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 {
 	int ret;
 	long offlined = *(long *)data;
-	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
+	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
 	offlined = nr_pages;
 	if (!ret)
 		*(long *)data += offlined;
@@ -894,7 +894,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	nr_pages = end_pfn - start_pfn;
 
 	/* set above range as isolated */
-	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+	ret = start_isolate_page_range(start_pfn, end_pfn,
+				       MIGRATE_MOVABLE, true);
 	if (ret)
 		goto out;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49b0ea3cc2f..6f50cfe98a7b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5616,7 +5616,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  * expect this function should be exact.
  */
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+			 bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn, iter, found;
 	int mt;
@@ -5651,6 +5652,13 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
 			continue;
 		}
 
+		/*
+		 * The HWPoisoned page may be not in buddy system, and
+		 * page_count() is not 0.
+		 */
+		if (skip_hwpoisoned_pages && PageHWPoison(page))
+			continue;
+
 		if (!PageLRU(page))
 			found++;
 		/*
@@ -5693,7 +5701,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	    zone->zone_start_pfn + zone->spanned_pages <= pfn)
 		return false;
 
-	return !has_unmovable_pages(zone, page, 0);
+	return !has_unmovable_pages(zone, page, 0, true);
 }
 
 #ifdef CONFIG_CMA
@@ -5864,7 +5872,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 */
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
-				       pfn_max_align_up(end), migratetype);
+				       pfn_max_align_up(end), migratetype,
+				       false);
 	if (ret)
 		return ret;
 
@@ -5903,7 +5912,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	}
 
 	/* Make sure the range is really isolated. */
-	if (test_pages_isolated(outer_start, end)) {
+	if (test_pages_isolated(outer_start, end, false)) {
 		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
 			outer_start, end);
 		ret = -EBUSY;
@@ -6018,6 +6027,16 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 		page = pfn_to_page(pfn);
+		/*
+		 * The HWPoisoned page may be not in buddy system, and
+		 * page_count() is not 0.
+		 */
+		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
+			pfn++;
+			SetPageReserved(page);
+			continue;
+		}
+
 		BUG_ON(page_count(page));
 		BUG_ON(!PageBuddy(page));
 		order = page_order(page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index f2f5b4818e94..9d2264ea4606 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -30,7 +30,7 @@ static void restore_pageblock_isolate(struct page *page, int migratetype)
 	zone->nr_pageblock_isolate--;
 }
 
-int set_migratetype_isolate(struct page *page)
+int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
 	unsigned long flags, pfn;
@@ -66,7 +66,8 @@ int set_migratetype_isolate(struct page *page)
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found))
+	if (!has_unmovable_pages(zone, page, arg.pages_found,
+				 skip_hwpoisoned_pages))
 		ret = 0;
 
 	/*
@@ -134,7 +135,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			     unsigned migratetype)
+			     unsigned migratetype, bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -147,7 +148,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page && set_migratetype_isolate(page)) {
+		if (page &&
+		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
 			undo_pfn = pfn;
 			goto undo;
 		}
@@ -190,7 +192,8 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Returns 1 if all pages in the range are isolated.
  */
 static int
-__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
+__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
+				  bool skip_hwpoisoned_pages)
 {
 	struct page *page;
 
@@ -220,6 +223,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 		else if (page_count(page) == 0 &&
 			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
 			pfn += 1;
+		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
+			/*
+			 * The HWPoisoned page may be not in buddy
+			 * system, and page_count() is not 0.
+			 */
+			pfn++;
+			continue;
+		}
 		else
 			break;
 	}
@@ -228,7 +239,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 	return 1;
 }
 
-int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+			bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn, flags;
 	struct page *page;
@@ -251,7 +263,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	/* Check all pages are free or Marked as ISOLATED */
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
-	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
+						skip_hwpoisoned_pages);
 	spin_unlock_irqrestore(&zone->lock, flags);
 	return ret ? 0 : -EBUSY;
 }