about summary refs log tree commit diff stats
path: root/mm/memory_hotplug.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c | 62
1 file changed, 27 insertions(+), 35 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..124e794867c5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1233,7 +1233,8 @@ static bool is_pageblock_removable_nolock(struct page *page)
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
 	struct page *page = pfn_to_page(start_pfn);
-	struct page *end_page = page + nr_pages;
+	unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+	struct page *end_page = pfn_to_page(end_pfn);
 
 	/* Check the starting page of each pageblock within the range */
 	for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1273,6 +1274,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 			i++;
 		if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 			continue;
+		/* Check if we got outside of the zone */
+		if (zone && !zone_spans_pfn(zone, pfn + i))
+			return 0;
 		page = pfn_to_page(pfn + i);
 		if (zone && page_zone(page) != zone)
 			return 0;
@@ -1301,23 +1305,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
+
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				if (hugepage_migration_supported(page_hstate(page)) &&
-				    page_huge_active(page))
-					return pfn;
-				else
-					pfn = round_up(pfn + 1,
-						1 << compound_order(page)) - 1;
-			}
-		}
+		struct page *page, *head;
+		unsigned long skip;
+
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
+
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
@@ -1344,7 +1352,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
 	struct page *page;
-	int not_managed = 0;
 	int ret = 0;
 	LIST_HEAD(source);
 
@@ -1392,7 +1399,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		else
 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
 		if (!ret) { /* Success */
-			put_page(page);
 			list_add_tail(&page->lru, &source);
 			if (!__PageMovable(page))
 				inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1407,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		} else {
 			pr_warn("failed to isolate pfn %lx\n", pfn);
 			dump_page(page, "isolation failed");
-			put_page(page);
-			/* Because we don't have big zone->lock. we should
-			   check this again here. */
-			if (page_count(page)) {
-				not_managed++;
-				ret = -EBUSY;
-				break;
-			}
 		}
+		put_page(page);
 	}
 	if (!list_empty(&source)) {
-		if (not_managed) {
-			putback_movable_pages(&source);
-			goto out;
-		}
-
 		/* Allocate a new page from the nearest neighbor node */
 		ret = migrate_pages(&source, new_node_page, NULL, 0,
 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1423,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			putback_movable_pages(&source);
 		}
 	}
-out:
+
 	return ret;
 }
 
@@ -1576,7 +1570,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	   we assume this for now. .*/
 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
 				  &valid_end)) {
-		mem_hotplug_done();
 		ret = -EINVAL;
 		reason = "multizone range";
 		goto failed_removal;
@@ -1591,7 +1584,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 				       MIGRATE_MOVABLE,
 				       SKIP_HWPOISON | REPORT_FAILURE);
 	if (ret) {
-		mem_hotplug_done();
		reason = "failure to isolate range";
		goto failed_removal;
	}