Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 63
 1 file changed, 47 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5e92698e5395..cfd565dbe124 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1380,16 +1380,11 @@ void split_page(struct page *page, unsigned int order)
 }
 
 /*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
+ * Similar to the split_page family of functions except that the page
+ * required at the given order and being isolated now to prevent races
+ * with parallel allocators
  */
-int split_free_page(struct page *page)
+int capture_free_page(struct page *page, int alloc_order, int migratetype)
 {
 	unsigned int order;
 	unsigned long watermark;
@@ -1411,10 +1406,11 @@ int split_free_page(struct page *page)
 	rmv_page_order(page);
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
 
-	/* Split into individual pages */
-	set_page_refcounted(page);
-	split_page(page, order);
+	if (alloc_order != order)
+		expand(zone, page, alloc_order, order,
+			&zone->free_area[order], migratetype);
 
+	/* Set the pageblock if the captured page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
@@ -1425,7 +1421,35 @@ int split_free_page(struct page *page)
 		}
 	}
 
-	return 1 << order;
+	return 1UL << order;
+}
+
+/*
+ * Similar to split_page except the page is already free. As this is only
+ * being used for migration, the migratetype of the block also changes.
+ * As this is called with interrupts disabled, the caller is responsible
+ * for calling arch_alloc_page() and kernel_map_page() after interrupts
+ * are enabled.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+int split_free_page(struct page *page)
+{
+	unsigned int order;
+	int nr_pages;
+
+	BUG_ON(!PageBuddy(page));
+	order = page_order(page);
+
+	nr_pages = capture_free_page(page, order, 0);
+	if (!nr_pages)
+		return 0;
+
+	/* Split into individual pages */
+	set_page_refcounted(page);
+	split_page(page, order);
+	return nr_pages;
 }
 
 /*
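
The key step in the new capture_free_page() above is that expand() is only called when the caller asked for a lower order than the captured block: the unused upper buddies go back onto the per-order free lists instead of the whole block being split into individual pages. Below is a rough, standalone model of that splitting step, assuming nothing beyond the ordinary buddy rules; free_count and expand_model are illustrative stand-ins, not kernel symbols, and the real code operates on struct page and zone->free_area under zone->lock.

#include <stdio.h>

#define MAX_ORDER 11

/* Per-order count of free blocks; a stand-in for zone->free_area[]. */
static unsigned long free_count[MAX_ORDER];

/*
 * Model of the splitting done by expand(): a captured block of 2^high
 * pages is cut down to 2^low pages, and each unused upper half is
 * returned to the free list of its order.
 */
static void expand_model(unsigned long pfn, int low, int high)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* The upper buddy at pfn + size goes back to the free list. */
		free_count[high]++;
		printf("returned buddy at pfn %lu, order %d\n", pfn + size, high);
	}
}

int main(void)
{
	/* Capture an order-5 block but keep only an order-2 chunk of it. */
	expand_model(1024, 2, 5);
	return 0;
}

With these arguments the block at pfn 1024 stays allocated at order 2, while order-4, order-3 and order-2 buddies are handed back, which mirrors why split_free_page() can pass alloc_order == order and skip expand() entirely.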
@@ -2105,7 +2129,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
-	struct page *page;
+	struct page *page = NULL;
 
 	if (!order)
 		return NULL;
@@ -2118,10 +2142,16 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration,
-						contended_compaction);
+						contended_compaction, &page);
 	current->flags &= ~PF_MEMALLOC;
-	if (*did_some_progress != COMPACT_SKIPPED) {
 
+	/* If compaction captured a page, prep and use it */
+	if (page) {
+		prep_new_page(page, order, gfp_mask);
+		goto got_page;
+	}
+
+	if (*did_some_progress != COMPACT_SKIPPED) {
 		/* Page migration frees to the PCP lists but we want merging */
 		drain_pages(get_cpu());
 		put_cpu();
@@ -2131,6 +2161,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS,
 				preferred_zone, migratetype);
 		if (page) {
+got_page:
 			preferred_zone->compact_considered = 0;
 			preferred_zone->compact_defer_shift = 0;
 			if (order >= preferred_zone->compact_order_failed)
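
One construct in the last hunk worth noting is the got_page: label placed inside the if (page) body: the capture path jumps straight into that block, so the compaction-deferral bookkeeping runs for both a captured page and one allocated from the free lists without being duplicated. A minimal standalone illustration of jumping into a conditional body (plain C, nothing kernel-specific; the variable names are invented for the example):

#include <stdio.h>

int main(void)
{
	int have_captured = 1;	/* pretend compaction captured a page */
	int used_fallback = 0;

	if (have_captured)
		goto got_page;

	/* Fallback path: pretend we allocated from the free lists. */
	used_fallback = 1;
	if (used_fallback) {
got_page:
		/* Shared bookkeeping runs on both paths; the condition
		 * above is simply not evaluated when entered via goto. */
		printf("bookkeeping ran, used_fallback=%d\n", used_fallback);
	}
	return 0;
}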