Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  85
1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..1ad28323fb9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1232,12 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	struct page *end_page = page + nr_pages;
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
@@ -1273,6 +1277,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 		i++;
 		if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 			continue;
+		/* Check if we got outside of the zone */
+		if (zone && !zone_spans_pfn(zone, pfn + i))
+			return 0;
 		page = pfn_to_page(pfn + i);
 		if (zone && page_zone(page) != zone)
 			return 0;
@@ -1301,23 +1308,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
+
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				if (hugepage_migration_supported(page_hstate(page)) &&
-				    page_huge_active(page))
-					return pfn;
-				else
-					pfn = round_up(pfn + 1,
-						1 << compound_order(page)) - 1;
-			}
-		}
+		struct page *page, *head;
+		unsigned long skip;
+
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
+
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
@@ -1344,7 +1355,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
 	struct page *page;
-	int not_managed = 0;
 	int ret = 0;
 	LIST_HEAD(source);
 
@@ -1392,7 +1402,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		else
 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
 		if (!ret) { /* Success */
-			put_page(page);
 			list_add_tail(&page->lru, &source);
 			if (!__PageMovable(page))
 				inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1410,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		} else {
 			pr_warn("failed to isolate pfn %lx\n", pfn);
 			dump_page(page, "isolation failed");
-			put_page(page);
-			/* Because we don't have big zone->lock. we should
-			   check this again here. */
-			if (page_count(page)) {
-				not_managed++;
-				ret = -EBUSY;
-				break;
-			}
 		}
+		put_page(page);
 	}
 	if (!list_empty(&source)) {
-		if (not_managed) {
-			putback_movable_pages(&source);
-			goto out;
-		}
-
 		/* Allocate a new page from the nearest neighbor node */
 		ret = migrate_pages(&source, new_node_page, NULL, 0,
 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1426,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			putback_movable_pages(&source);
 		}
 	}
-out:
+
 	return ret;
 }
 
@@ -1576,7 +1573,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	   we assume this for now. .*/
 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
 				  &valid_end)) {
-		mem_hotplug_done();
 		ret = -EINVAL;
 		reason = "multizone range";
 		goto failed_removal;
@@ -1591,7 +1587,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 				       MIGRATE_MOVABLE,
 				       SKIP_HWPOISON | REPORT_FAILURE);
 	if (ret) {
-		mem_hotplug_done();
 		reason = "failure to isolate range";
 		goto failed_removal;
 	}
