author     Wen Congyang <wency@cn.fujitsu.com>            2012-12-11 19:00:45 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2012-12-11 20:22:22 -0500
commit     b023f46813cde6e3b8a8c24f432ff9c1fd8e9a64
tree       edac3509e7c44136450e20d7f9cc81de35926650  /mm/page_isolation.c
parent     fa7194eb99b8e9fefe96f045002648ffb55f53c0
memory-hotplug: skip HWPoisoned page when offlining pages
The HWPoison flag may be set when we offline a page via the sysfs interface /sys/devices/system/memory/soft_offline_page or /sys/devices/system/memory/hard_offline_page. If we don't clear this flag when onlining pages, such a page cannot be freed and will never appear on the free list, so we cannot offline these pages again. Therefore, skip HWPoisoned pages when offlining a page range.

Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
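For context, here is a minimal sketch of how an offlining caller is expected to thread the new skip_hwpoisoned_pages argument through the two entry points changed below. The wrapper offline_range_sketch(), its error handling, and the use of MIGRATE_MOVABLE are illustrative assumptions, not a copy of the actual mm/memory_hotplug.c code; only the call signatures come from this patch.

#include <linux/page-isolation.h>	/* start_isolate_page_range(), test_pages_isolated() */

/*
 * Hypothetical illustration of a memory-offlining caller after this
 * patch: pass skip_hwpoisoned_pages = true so HWPoisoned pages block
 * neither the isolation step nor the final "is everything isolated?"
 * check.
 */
static int offline_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* Mark the pageblocks MIGRATE_ISOLATE, tolerating HWPoisoned pages. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;

	/* ... migrate the movable pages out of the range here ... */

	/* Recheck the range; returns 0 on success, -EBUSY otherwise. */
	return test_pages_isolated(start_pfn, end_pfn, true);
}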
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index f2f5b4818e94..9d2264ea4606 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -30,7 +30,7 @@ static void restore_pageblock_isolate(struct page *page, int migratetype)
 	zone->nr_pageblock_isolate--;
 }
 
-int set_migratetype_isolate(struct page *page)
+int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
 	unsigned long flags, pfn;
@@ -66,7 +66,8 @@ int set_migratetype_isolate(struct page *page)
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found))
+	if (!has_unmovable_pages(zone, page, arg.pages_found,
+				 skip_hwpoisoned_pages))
 		ret = 0;
 
 	/*
@@ -134,7 +135,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			     unsigned migratetype)
+			     unsigned migratetype, bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -147,7 +148,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page && set_migratetype_isolate(page)) {
+		if (page &&
+		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
 			undo_pfn = pfn;
 			goto undo;
 		}
@@ -190,7 +192,8 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Returns 1 if all pages in the range are isolated.
  */
 static int
-__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
+__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
+				  bool skip_hwpoisoned_pages)
 {
 	struct page *page;
 
@@ -220,6 +223,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 		else if (page_count(page) == 0 &&
 			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
 			pfn += 1;
+		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
+			/*
+			 * The HWPoisoned page may be not in buddy
+			 * system, and page_count() is not 0.
+			 */
+			pfn++;
+			continue;
+		}
 		else
 			break;
 	}
@@ -228,7 +239,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 	return 1;
 }
 
-int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+			bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn, flags;
 	struct page *page;
@@ -251,7 +263,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	/* Check all pages are free or Marked as ISOLATED */
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
-	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
+						skip_hwpoisoned_pages);
 	spin_unlock_irqrestore(&zone->lock, flags);
 	return ret ? 0 : -EBUSY;
 }
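The effect of the new PageHWPoison() branch in __test_page_isolated_in_pageblock() can also be seen with a small stand-alone model. Everything below (struct fake_page, range_is_isolated(), the field names) is made up for illustration: it is not kernel code, builds as a plain user-space program, and only mirrors the scan order used in the hunk above, where a poisoned page is skipped instead of failing the check when the flag is set.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the page states the kernel checks. */
struct fake_page {
	bool buddy;      /* free in the buddy allocator                      */
	bool isolated;   /* free page whose freelist type is MIGRATE_ISOLATE */
	bool hwpoison;   /* PageHWPoison() would return true                 */
};

/* Mirrors the scan order of __test_page_isolated_in_pageblock(). */
static bool range_is_isolated(const struct fake_page *pages, int n,
			      bool skip_hwpoisoned_pages)
{
	for (int i = 0; i < n; ) {
		if (pages[i].buddy || pages[i].isolated)
			i++;			/* free or isolated: fine */
		else if (skip_hwpoisoned_pages && pages[i].hwpoison)
			i++;			/* tolerate the poisoned page */
		else
			return false;		/* page still in use: not isolated */
	}
	return true;
}

int main(void)
{
	const struct fake_page range[] = {
		{ .buddy = true },
		{ .hwpoison = true },	/* previously this aborted the check */
		{ .isolated = true },
	};

	printf("without skip: %d\n", range_is_isolated(range, 3, false));	/* prints 0 */
	printf("with skip:    %d\n", range_is_isolated(range, 3, true));	/* prints 1 */
	return 0;
}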