author     Minchan Kim <minchan@kernel.org>  2012-10-08 19:32:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-09 03:22:46 -0400
commit     435b405c06119d93333738172b8060b0ed12af41
tree       a87f9a493f5c677ab23eeab1eab2e45caeb79bc3
parent     41d575ad4a511b71a4a41c8313004212f5c229b1
memory-hotplug: fix pages missed by race rather than failing
If a race between allocation and isolation happens during memory-hotplug
offline, some pages can end up on the MIGRATE_MOVABLE free_list even
though the migratetype of their pageblock is MIGRATE_ISOLATE. The race
can be detected by get_freepage_migratetype() in
__test_page_isolated_in_pageblock(). Currently, when it is detected,
-EBUSY gets bubbled all the way up and the hotplug operation fails.

A better idea is, instead of returning and failing memory hot-remove, to
move the free page to the correct list at the time the race is detected.
Although the race is really rare, this improves the success ratio of the
memory hot-remove operation. Suggested by Mel Gorman.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
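The failure mode is easiest to see as an explicit interleaving. The
following is a minimal, hypothetical userspace sketch in plain C (not
kernel code): pageblock_migratetype stands in for the kernel's
per-pageblock bitmap, freepage_migratetype for the free list the buddy
page sits on, and the check at the end is a simplified stand-in for
__test_page_isolated_in_pageblock().

/*
 * Hypothetical sketch, not kernel code: replays the
 * allocation-vs-isolation interleaving on a single thread.
 */
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_ISOLATE };

static enum migratetype pageblock_migratetype = MIGRATE_MOVABLE;
static enum migratetype freepage_migratetype;	/* list the page is on */

int main(void)
{
	/* CPU A (allocator) samples the pageblock type... */
	enum migratetype cached = pageblock_migratetype;

	/* ...CPU B (offline path) isolates the pageblock... */
	pageblock_migratetype = MIGRATE_ISOLATE;

	/* ...and CPU A frees the page using its stale sample, so the
	 * page lands on the MIGRATE_MOVABLE free list. */
	freepage_migratetype = cached;

	/* The isolation check then sees the mismatch. */
	if (freepage_migratetype != pageblock_migratetype) {
		/* Before this patch: bail out, the offline fails with
		 * -EBUSY. After this patch: move the page to the
		 * correct list and carry on. */
		freepage_migratetype = MIGRATE_ISOLATE;
		printf("race detected: free page moved to MIGRATE_ISOLATE\n");
	}
	return 0;
}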
-rw-r--r--  include/linux/page-isolation.h   4
-rw-r--r--  mm/page_alloc.c                  2
-rw-r--r--  mm/page_isolation.c             16
3 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 105077aa7685..fca8c0a5c188 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -6,6 +6,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype);
+int move_freepages(struct zone *zone,
+			struct page *start_page, struct page *end_page,
+			int migratetype);
+
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
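The header gains the move_freepages() declaration so that
mm/page_isolation.c can call it directly; the matching definition in
mm/page_alloc.c loses its static qualifier in the next hunk.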
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 94fd283dde98..82f0b2f54f81 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -925,7 +925,7 @@ static int fallbacks[MIGRATE_TYPES][4] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone,
+int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
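Only the linkage changes here: dropping static makes the existing
move_freepages() definition visible outside mm/page_alloc.c, matching
the declaration just added to the header. The function body is
unchanged.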
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 6744235d2d0e..5f34a9053ce0 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -201,8 +201,20 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 		}
 		page = pfn_to_page(pfn);
 		if (PageBuddy(page)) {
-			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE)
-				break;
+			/*
+			 * If a race between isolation and allocation
+			 * happens, some free pages could be in the
+			 * MIGRATE_MOVABLE list although the migratetype
+			 * of their pageblock is MIGRATE_ISOLATE. Catch it
+			 * and move the page into the MIGRATE_ISOLATE list.
+			 */
+			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
+				struct page *end_page;
+
+				end_page = page + (1 << page_order(page)) - 1;
+				move_freepages(page_zone(page), page, end_page,
+					       MIGRATE_ISOLATE);
+			}
 			pfn += 1 << page_order(page);
 		}
 		else if (page_count(page) == 0 &&
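Where the old code hit break on the first misplaced free page, so that
the caller reported the range as not isolated and the offline failed
with -EBUSY, the new code repairs the bookkeeping on the spot:
end_page = page + (1 << page_order(page)) - 1 spans the whole buddy
block (for an order-3 page, page through page + 7), move_freepages()
moves it to the MIGRATE_ISOLATE free list, and the scan continues with
the next buddy block via pfn += 1 << page_order(page).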