Diffstat (limited to 'mm/page_alloc.c')

 mm/page_alloc.c | 64 +++++-------------------------------------------------
 1 file changed, 9 insertions(+), 55 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4ba5e37127fc..df2022ff0c8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-/*
- * NOTE:
- * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
- * Instead, use {un}set_pageblock_isolate.
- */
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
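The NOTE can go because this patch also removes the set_pageblock_isolate wrappers and the zone->nr_pageblock_isolate counter they maintained (see the watermark hunks below), so calling set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly is safe again. For orientation, a standalone userspace sketch of the kind of bit packing these helpers perform on the zone's pageblock bitmap; the field width and the type values are illustrative assumptions, not copied from the kernel:

    /*
     * Sketch: pack a small migratetype value per pageblock into one
     * bitmap word, in the spirit of set/get_pageblock_migratetype().
     */
    #include <assert.h>
    #include <stdio.h>

    #define MT_BITS 3u                        /* assumed bits per block */
    #define MT_MASK ((1ul << MT_BITS) - 1)

    static unsigned long pageblock_flags;     /* one word, many blocks */

    static void set_block_mt(unsigned int block, unsigned long mt)
    {
            unsigned int shift = block * MT_BITS;

            pageblock_flags &= ~(MT_MASK << shift);   /* clear old bits */
            pageblock_flags |= mt << shift;           /* store new type */
    }

    static unsigned long get_block_mt(unsigned int block)
    {
            return (pageblock_flags >> (block * MT_BITS)) & MT_MASK;
    }

    int main(void)
    {
            set_block_mt(0, 2);               /* e.g. a movable block */
            set_block_mt(1, 4);               /* e.g. an isolated block */
            assert(get_block_mt(0) == 2);
            printf("block 1 type = %lu\n", get_block_mt(1));
            return 0;
    }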
@@ -1389,14 +1384,8 @@ void split_page(struct page *page, unsigned int order)
 		set_page_refcounted(page + i);
 }
 
-/*
- * Similar to the split_page family of functions except that the page
- * required at the given order and being isolated now to prevent races
- * with parallel allocators
- */
-int capture_free_page(struct page *page, int alloc_order, int migratetype)
+static int __isolate_free_page(struct page *page, unsigned int order)
 {
-	unsigned int order;
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -1404,7 +1393,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	BUG_ON(!PageBuddy(page));
 
 	zone = page_zone(page);
-	order = page_order(page);
 	mt = get_pageblock_migratetype(page);
 
 	if (mt != MIGRATE_ISOLATE) {
@@ -1413,7 +1401,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
-		__mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << order), mt);
 	}
 
 	/* Remove page from free list */
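Since the expand() step is gone (next hunk), the helper now always takes the whole buddy page, so the accounting debits the full 1UL << order instead of the old alloc_order. The guard a few lines up only lets this happen when the zone stays above its low watermark; a standalone sketch of that check, assuming from the surrounding kernel code (not this hunk) that watermark = low watermark + 2^order:

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors zone_watermark_ok(zone, 0, watermark, 0, 0) very loosely */
    static bool can_isolate(unsigned long free, unsigned long low_wmark,
                            unsigned int order)
    {
            unsigned long watermark = low_wmark + (1ul << order);

            return free > watermark;      /* zone stays above low wmark */
    }

    int main(void)
    {
            /* 4096 free, low watermark 1024: isolating 512 pages is
             * fine, isolating 4096 pages would sink the zone */
            printf("%d\n", can_isolate(4096, 1024, 9));    /* 1 */
            printf("%d\n", can_isolate(4096, 1024, 12));   /* 0 */
            return 0;
    }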
@@ -1421,11 +1409,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	if (alloc_order != order)
-		expand(zone, page, alloc_order, order,
-			&zone->free_area[order], migratetype);
-
-	/* Set the pageblock if the captured page is at least a pageblock */
+	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
@@ -1436,7 +1420,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << alloc_order;
+	return 1UL << order;
 }
 
 /*
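The return value follows the same all-or-nothing logic: the caller gets all 2^order pages or none. The pageblock loop in the previous hunk deserves a worked example, since an isolated buddy page can straddle pageblock boundaries and every block it touches is re-marked (MIGRATE_MOVABLE in the kernel code). A userspace model of the stride, with pageblock_order assumed to be 10 purely for illustration:

    #include <stdio.h>

    #define PAGEBLOCK_ORDER    10ul    /* assumption for the example */
    #define PAGEBLOCK_NR_PAGES (1ul << PAGEBLOCK_ORDER)

    int main(void)
    {
            unsigned long order = 11;  /* one order-11 buddy page */
            unsigned long pfn, end_pfn = (1ul << order) - 1;

            /* same stride as the kernel loop: one step per pageblock */
            for (pfn = 0; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
                    printf("mark pageblock at pfn %lu\n", pfn);  /* 0, 1024 */
            return 0;
    }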
@@ -1454,10 +1438,9 @@ int split_free_page(struct page *page)
 	unsigned int order;
 	int nr_pages;
 
-	BUG_ON(!PageBuddy(page));
 	order = page_order(page);
 
-	nr_pages = capture_free_page(page, order, 0);
+	nr_pages = __isolate_free_page(page, order);
 	if (!nr_pages)
 		return 0;
 
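split_free_page() keeps its external behaviour but now delegates the heavy lifting; its BUG_ON(!PageBuddy(page)) is dropped because __isolate_free_page() performs the identical check. As a rough userspace sketch with stand-in names:

    #include <stdio.h>

    /* stand-in: pretend the watermark check passed and the buddy page
     * was unlinked; the real helper returns 0 when it refuses */
    static int isolate_free_page(unsigned int order)
    {
            return 1 << order;
    }

    int main(void)
    {
            unsigned int order = 3;
            int i, nr_pages = isolate_free_page(order);

            if (!nr_pages)
                    return 1;                  /* isolation refused */
            for (i = 0; i < nr_pages; i++)     /* split into order-0 */
                    printf("order-0 page %d ready\n", i);
            return 0;
    }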
@@ -1655,20 +1638,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
-#ifdef CONFIG_MEMORY_ISOLATION
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	if (unlikely(zone->nr_pageblock_isolate))
-		return zone->nr_pageblock_isolate * pageblock_nr_pages;
-	return 0;
-}
-#else
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	return 0;
-}
-#endif
-
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
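nr_zone_isolate_freepages() existed to compensate the free counters for isolated pageblocks; now that isolated pages are debited from the counters at isolation time, the estimate (and the zone->nr_pageblock_isolate bookkeeping behind it) can go. For context, a simplified standalone model of the __zone_watermark_ok() loop these callers feed free_pages into, ignoring alloc flags and lowmem reserves:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11

    static bool watermark_ok(long free_pages, long mark, unsigned int order,
                             const unsigned long nr_free[MAX_ORDER])
    {
            long min = mark;
            unsigned int o;

            free_pages -= (1l << order) - 1;   /* the request itself */
            if (free_pages <= min)
                    return false;
            for (o = 0; o < order; o++) {
                    /* lower orders cannot satisfy this allocation */
                    free_pages -= (long)(nr_free[o] << o);
                    min >>= 1;                 /* relax bar per order */
                    if (free_pages <= min)
                            return false;
            }
            return true;
    }

    int main(void)
    {
            unsigned long nr_free[MAX_ORDER] = { 512, 128, 32, 8 };

            printf("%d\n", watermark_ok(2048, 256, 2, nr_free));  /* 1 */
            return 0;
    }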
@@ -1684,14 +1653,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 
-	/*
-	 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
-	 * it. nr_zone_isolate_freepages is never accurate so kswapd might not
-	 * sleep although it could do so. But this is more desirable for memory
-	 * hotplug than sleeping which can cause a livelock in the direct
-	 * reclaim path.
-	 */
-	free_pages -= nr_zone_isolate_freepages(z);
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 								free_pages);
 }
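Only the per-CPU drift correction remains here. A small sketch of what that correction amounts to, mirroring zone_page_state_snapshot(); the CPU count and deltas are invented for the example:

    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
            long cached = 1000;        /* zone's NR_FREE_PAGES counter */
            long drift_mark = 1024;    /* z->percpu_drift_mark */
            long delta[NR_CPUS] = { 7, -3, 12, 5 };
            long free_pages = cached;
            int cpu;

            /* only pay for the summation when the cached value is so
             * close to the watermark that drift could flip the result */
            if (free_pages < drift_mark)
                    for (cpu = 0; cpu < NR_CPUS; cpu++)
                            free_pages += delta[cpu];
            printf("free_pages = %ld\n", free_pages);    /* 1021 */
            return 0;
    }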
@@ -2163,8 +2124,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
-	struct page *page = NULL;
-
 	if (!order)
 		return NULL;
 
@@ -2176,16 +2135,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration,
-						contended_compaction, &page);
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
-	/* If compaction captured a page, prep and use it */
-	if (page) {
-		prep_new_page(page, order, gfp_mask);
-		goto got_page;
-	}
-
 	if (*did_some_progress != COMPACT_SKIPPED) {
+		struct page *page;
+
 		/* Page migration frees to the PCP lists but we want merging */
 		drain_pages(get_cpu());
 		put_cpu();
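With capture gone, compaction only reports progress and the caller retries the normal freelist allocation, which lets `page` shrink into the progress branch and removes the got_page label in the next hunk. The resulting control flow as a userspace sketch, all functions stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    enum { COMPACT_SKIPPED, COMPACT_PARTIAL };

    static int try_to_compact(void)        { return COMPACT_PARTIAL; }
    static void drain_pcp_lists(void)      { /* let buddies merge */ }
    static void *alloc_from_freelist(void) { return malloc(4096); }

    static void *alloc_direct_compact(void)
    {
            if (try_to_compact() != COMPACT_SKIPPED) {
                    void *page;

                    drain_pcp_lists();
                    page = alloc_from_freelist();
                    if (page)               /* would also reset deferral */
                            return page;
            }
            return NULL;                    /* caller falls back to reclaim */
    }

    int main(void)
    {
            void *page = alloc_direct_compact();

            printf("%s\n", page ? "allocated" : "failed");
            free(page);
            return 0;
    }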
@@ -2195,7 +2150,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 						alloc_flags & ~ALLOC_NO_WATERMARKS,
 						preferred_zone, migratetype);
 		if (page) {
-got_page:
 			preferred_zone->compact_blockskip_flush = false;
 			preferred_zone->compact_considered = 0;
 			preferred_zone->compact_defer_shift = 0;
@@ -5631,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 	pfn &= (PAGES_PER_SECTION-1);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-	pfn = pfn - zone->zone_start_pfn;
+	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
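The !SPARSEMEM bitmap is indexed in whole pageblocks relative to the zone start, so a zone_start_pfn that is not pageblock-aligned used to skew every index and could make two different pageblocks share bits; rounding the start down restores alignment. A standalone arithmetic check, with 4K pages and pageblock_order = 10 assumed for the numbers:

    #include <stdio.h>

    #define PAGEBLOCK_ORDER   10ul
    #define NR_PAGEBLOCK_BITS 4ul
    #define ROUND_DOWN(x, a)  ((x) & ~((a) - 1))   /* a: power of two */

    int main(void)
    {
            unsigned long zone_start_pfn = 1536;   /* not block aligned */
            unsigned long pfn = 2048;              /* next pageblock */
            unsigned long aligned =
                    ROUND_DOWN(zone_start_pfn, 1ul << PAGEBLOCK_ORDER);

            unsigned long old = ((pfn - zone_start_pfn) >> PAGEBLOCK_ORDER)
                    * NR_PAGEBLOCK_BITS;
            unsigned long fixed = ((pfn - aligned) >> PAGEBLOCK_ORDER)
                    * NR_PAGEBLOCK_BITS;

            /* old = 0: pfn 2048 collides with pfn 1536's pageblock;
             * fixed = 4: pfn 2048 gets its own 4-bit slot */
            printf("old bitidx %lu, fixed bitidx %lu\n", old, fixed);
            return 0;
    }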