Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 99
1 file changed, 72 insertions(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a362c52fdf4..07a654486f75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -531,7 +531,7 @@ static inline void __free_one_page(struct page *page,
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
 		combined_idx = __find_combined_index(page_idx, order);
 		higher_page = page + combined_idx - page_idx;
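The guard drops from MAX_ORDER-1 to MAX_ORDER-2 because the tail-placement heuristic peeks at the buddy of the order+1 "combined" block: for order == MAX_ORDER-2 that buddy lies a full MAX_ORDER-1 block away, outside the region being freed into, where a valid struct page is not guaranteed. The standalone sketch below walks through the index arithmetic; the mask/XOR forms of the helpers and MAX_ORDER == 11 are assumptions of the sketch, not taken from the patch.

#include <stdio.h>

#define MAX_ORDER 11	/* common kernel default; an assumption for this sketch */

/* Mirrors __find_combined_index(): index of the order+1 block holding page_idx. */
static unsigned long combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);
}

/* Buddy of a block of the given order (the classic XOR trick). */
static unsigned long buddy_index(unsigned long idx, unsigned int order)
{
	return idx ^ (1UL << order);
}

int main(void)
{
	unsigned int order = MAX_ORDER - 2;	/* the case the old guard let through */
	unsigned long page_idx = 0;
	unsigned long combined = combined_index(page_idx, order);
	unsigned long higher_buddy = buddy_index(combined, order + 1);

	/* The peeked buddy sits an entire MAX_ORDER-1 block away from the freed page. */
	printf("combined=%lu, higher_buddy=%lu (offset %lu pages)\n",
	       combined, higher_buddy, higher_buddy - combined);
	return 0;
}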
@@ -1907,7 +1907,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 			preferred_zone, migratetype);
 
 		if (!page && gfp_mask & __GFP_NOFAIL)
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 	} while (!page && (gfp_mask & __GFP_NOFAIL));
 
 	return page;
@@ -1932,7 +1932,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 
 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
-	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
+	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
 
 	/*
 	 * The caller may dip into page reserves a bit more if the caller
@@ -1940,7 +1940,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
 	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
 	 */
-	alloc_flags |= (gfp_mask & __GFP_HIGH);
+	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
 	if (!wait) {
 		alloc_flags |= ALLOC_HARDER;
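The two gfp_to_alloc_flags() hunks are sparse fixes: gfp_t is a __bitwise type, so comparing it against, or OR-ing it into, a plain int draws "restricted gfp_t" warnings unless the conversion is spelled out with __force. Below is a self-contained sketch of the mechanism; the macro stand-ins and the 0x20 flag values are assumptions chosen to mirror this era of the tree, not copied from it.

#include <stdio.h>

/* Simplified stand-ins for the kernel's sparse annotations. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define __GFP_HIGH	((__force gfp_t)0x20u)	/* bitwise-typed gfp flag */
#define ALLOC_HIGH	0x20			/* plain-int allocator-internal flag */

/* Mirrors the BUILD_BUG_ON() hunk: the two flag spaces must line up. */
_Static_assert((__force int)__GFP_HIGH == ALLOC_HIGH, "__GFP_HIGH must equal ALLOC_HIGH");

static int gfp_to_alloc_flags_sketch(gfp_t gfp_mask)
{
	int alloc_flags = 0;

	/* Without the __force cast, sparse warns about mixing gfp_t and int. */
	alloc_flags |= (__force int)(gfp_mask & __GFP_HIGH);
	return alloc_flags;
}

int main(void)
{
	printf("alloc_flags = %#x\n", gfp_to_alloc_flags_sketch(__GFP_HIGH));
	return 0;
}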
@@ -2095,7 +2095,7 @@ rebalance:
 		pages_reclaimed += did_some_progress;
 		if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
 			/* Wait for some write requests to complete then retry */
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 			goto rebalance;
 		}
 
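Both retry paths above stop sleeping unconditionally: wait_iff_congested() is intended to back off only when the preferred zone is actually flagged as congested, and merely yield otherwise, so these callers no longer stall for HZ/50 while writeback is idle. The toy userspace model below illustrates the behavioural difference only; nothing in it is the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

struct toy_zone {
	bool congested;		/* models the per-zone congestion flag */
};

static long ticks_slept;	/* stand-in for time spent blocked */

static void toy_congestion_wait(long timeout)
{
	ticks_slept += timeout;			/* unconditional stall */
}

static void toy_wait_iff_congested(const struct toy_zone *zone, long timeout)
{
	if (!zone->congested)
		return;				/* writeback not backed up: retry right away */
	toy_congestion_wait(timeout);		/* genuine congestion: really sleep */
}

int main(void)
{
	struct toy_zone zone = { .congested = false };

	for (int i = 0; i < 10; i++)
		toy_wait_iff_congested(&zone, 2);	/* 2 ticks ~ HZ/50 with HZ=100 */
	printf("ticks slept against an uncongested zone: %ld\n", ticks_slept);
	return 0;
}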
@@ -5297,12 +5297,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * page allocater never alloc memory from ISOLATE block.
  */
 
+static int
+__count_immobile_pages(struct zone *zone, struct page *page, int count)
+{
+	unsigned long pfn, iter, found;
+	/*
+	 * For avoiding noise data, lru_add_drain_all() should be called
+	 * If ZONE_MOVABLE, the zone never contains immobile pages
+	 */
+	if (zone_idx(zone) == ZONE_MOVABLE)
+		return true;
+
+	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+		return true;
+
+	pfn = page_to_pfn(page);
+	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+		unsigned long check = pfn + iter;
+
+		if (!pfn_valid_within(check)) {
+			iter++;
+			continue;
+		}
+		page = pfn_to_page(check);
+		if (!page_count(page)) {
+			if (PageBuddy(page))
+				iter += (1 << page_order(page)) - 1;
+			continue;
+		}
+		if (!PageLRU(page))
+			found++;
+		/*
+		 * If there are RECLAIMABLE pages, we need to check it.
+		 * But now, memory offline itself doesn't call shrink_slab()
+		 * and it still to be fixed.
+		 */
+		/*
+		 * If the page is not RAM, page_count()should be 0.
+		 * we don't need more check. This is an _used_ not-movable page.
+		 *
+		 * The problematic thing here is PG_reserved pages. PG_reserved
+		 * is set to both of a memory hole page and a _used_ kernel
+		 * page at boot.
+		 */
+		if (found > count)
+			return false;
+	}
+	return true;
+}
+
+bool is_pageblock_removable_nolock(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	return __count_immobile_pages(zone, page, 0);
+}
+
 int set_migratetype_isolate(struct page *page)
 {
 	struct zone *zone;
-	struct page *curr_page;
-	unsigned long flags, pfn, iter;
-	unsigned long immobile = 0;
+	unsigned long flags, pfn;
 	struct memory_isolate_notify arg;
 	int notifier_ret;
 	int ret = -EBUSY;
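The new __count_immobile_pages() walks one pageblock, strides over free buddy chunks in a single step, and counts pages that are in use but not on the LRU; the block passes while that count stays at or below the caller's allowance (0 for is_pageblock_removable_nolock(), arg.pages_found for isolation). Note in passing that the !pfn_valid_within() branch bumps iter both in the branch and in the loop header, so it steps over two pfns per invalid pfn. The toy model below is purely illustrative of the counting logic; none of the names are kernel symbols.

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	int count;	/* models page_count() */
	bool lru;	/* models PageLRU() */
	bool buddy;	/* models PageBuddy() */
	int order;	/* models page_order() when buddy is set */
};

static bool toy_count_immobile(const struct toy_page *block, int nr_pages, int allowance)
{
	int found = 0;

	for (int i = 0; i < nr_pages; i++) {
		const struct toy_page *p = &block[i];

		if (!p->count) {
			if (p->buddy)
				i += (1 << p->order) - 1;	/* skip the whole free chunk */
			continue;
		}
		if (!p->lru)
			found++;	/* in use and not reclaimable via the LRU */
		if (found > allowance)
			return false;
	}
	return true;
}

int main(void)
{
	struct toy_page block[8] = {
		{ .count = 0, .buddy = true, .order = 2 },	/* free order-2 chunk: indices 0-3 skipped */
		{ 0 }, { 0 }, { 0 },
		{ .count = 1, .lru = true },			/* movable LRU page: not counted */
		{ .count = 1, .lru = false },			/* pinned kernel page: counted */
		{ 0 }, { 0 },
	};

	printf("removable with allowance 1: %d\n", toy_count_immobile(block, 8, 1));
	printf("removable with allowance 0: %d\n", toy_count_immobile(block, 8, 0));
	return 0;
}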
@@ -5312,11 +5365,6 @@ int set_migratetype_isolate(struct page *page)
 	zone_idx = zone_idx(zone);
 
 	spin_lock_irqsave(&zone->lock, flags);
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
-	    zone_idx == ZONE_MOVABLE) {
-		ret = 0;
-		goto out;
-	}
 
 	pfn = page_to_pfn(page);
 	arg.start_pfn = pfn;
@@ -5336,23 +5384,20 @@ int set_migratetype_isolate(struct page *page) | |||
5336 | */ | 5384 | */ |
5337 | notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); | 5385 | notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); |
5338 | notifier_ret = notifier_to_errno(notifier_ret); | 5386 | notifier_ret = notifier_to_errno(notifier_ret); |
5339 | if (notifier_ret || !arg.pages_found) | 5387 | if (notifier_ret) |
5340 | goto out; | 5388 | goto out; |
5341 | 5389 | /* | |
5342 | for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) { | 5390 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. |
5343 | if (!pfn_valid_within(pfn)) | 5391 | * We just check MOVABLE pages. |
5344 | continue; | 5392 | */ |
5345 | 5393 | if (__count_immobile_pages(zone, page, arg.pages_found)) | |
5346 | curr_page = pfn_to_page(iter); | ||
5347 | if (!page_count(curr_page) || PageLRU(curr_page)) | ||
5348 | continue; | ||
5349 | |||
5350 | immobile++; | ||
5351 | } | ||
5352 | |||
5353 | if (arg.pages_found == immobile) | ||
5354 | ret = 0; | 5394 | ret = 0; |
5355 | 5395 | ||
5396 | /* | ||
5397 | * immobile means "not-on-lru" paes. If immobile is larger than | ||
5398 | * removable-by-driver pages reported by notifier, we'll fail. | ||
5399 | */ | ||
5400 | |||
5356 | out: | 5401 | out: |
5357 | if (!ret) { | 5402 | if (!ret) { |
5358 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); | 5403 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); |
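With the scan factored out, set_migratetype_isolate() now succeeds when the immobile count stays within the allowance a driver reported through the isolation notifier, and the lock-free is_pageblock_removable_nolock() entry point becomes available to other callers. The wrapper below is a hypothetical illustration of such a caller: only is_pageblock_removable_nolock() and pageblock_nr_pages come from the kernel, and region_looks_removable() with its parameters is invented for the sketch.

/*
 * Hypothetical caller, for illustration only: walk the pageblocks that
 * make up a region and ask the new helper whether each one still looks
 * removable, without taking zone->lock.
 */
static bool region_looks_removable(struct page *first_page,
				   unsigned long nr_pageblocks)
{
	unsigned long i;

	for (i = 0; i < nr_pageblocks; i++) {
		struct page *block = first_page + i * pageblock_nr_pages;

		if (!is_pageblock_removable_nolock(block))
			return false;
	}
	return true;
}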