Diffstat (limited to 'mm/page_alloc.c')

-rw-r--r--	mm/page_alloc.c	56
1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f12ad1836abe..985e072a3dd9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,19 +103,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  * only be modified with pm_mutex held, unless the suspend/hibernate code is
  * guaranteed not to run in parallel with that modification).
  */
-void set_gfp_allowed_mask(gfp_t mask)
+
+static gfp_t saved_gfp_mask;
+
+void pm_restore_gfp_mask(void)
 {
 	WARN_ON(!mutex_is_locked(&pm_mutex));
-	gfp_allowed_mask = mask;
+	if (saved_gfp_mask) {
+		gfp_allowed_mask = saved_gfp_mask;
+		saved_gfp_mask = 0;
+	}
 }
 
-gfp_t clear_gfp_allowed_mask(gfp_t mask)
+void pm_restrict_gfp_mask(void)
 {
-	gfp_t ret = gfp_allowed_mask;
-
 	WARN_ON(!mutex_is_locked(&pm_mutex));
-	gfp_allowed_mask &= ~mask;
-	return ret;
+	WARN_ON(saved_gfp_mask);
+	saved_gfp_mask = gfp_allowed_mask;
+	gfp_allowed_mask &= ~GFP_IOFS;
 }
 #endif /* CONFIG_PM_SLEEP */
 
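The replacement pair keeps the saved mask in the file-local saved_gfp_mask instead of making every caller carry the old value, and the WARN_ON(saved_gfp_mask) in pm_restrict_gfp_mask() catches unbalanced restrict/restore calls. A minimal sketch of the intended usage, assuming a hypothetical enter_sleep() wrapper (the real call sites live in kernel/power/; suspend_devices_and_enter() is the actual device-quiescing entry point):

/* Illustrative only: roughly how a sleep path would use the helpers. */
static int enter_sleep(suspend_state_t state)
{
	int error;

	mutex_lock(&pm_mutex);		/* both helpers WARN_ON() when it is not held */
	pm_restrict_gfp_mask();		/* mask out __GFP_IO | __GFP_FS (GFP_IOFS) */
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();		/* put the saved mask back, exactly once */
	mutex_unlock(&pm_mutex);
	return error;
}

With I/O-capable allocations masked off, page allocations made while devices are suspended cannot start new block I/O.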
@@ -530,7 +535,7 @@ static inline void __free_one_page(struct page *page,
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
 		combined_idx = __find_combined_index(page_idx, order);
 		higher_page = page + combined_idx - page_idx;
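Tightening the bound from MAX_ORDER-1 to MAX_ORDER-2 matters because the heuristic peeks one order higher: for a page freed at order it checks whether the buddy of the containing order+1 block is free, and the buddy of a MAX_ORDER-1 block is not guaranteed to be valid memory. The index arithmetic behind it, as a simplified sketch (the kernel's __find_* helpers are equivalent up to masking; these names are illustrative):

/* The buddy of a block is found by flipping bit 'order' of its index;
 * the merged block starts at the lower of the two, i.e. with that bit
 * cleared. */
static inline unsigned long find_buddy_index(unsigned long page_idx,
					     unsigned int order)
{
	return page_idx ^ (1UL << order);
}

static inline unsigned long find_combined_index(unsigned long page_idx,
						unsigned int order)
{
	return page_idx & ~(1UL << order);
}

If the next-higher-order block containing the just-freed page is itself about to merge, the page is placed at the tail of its free list so it survives longer and can coalesce instead of being handed out again immediately.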
@@ -1454,24 +1459,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
 /*
- * Return 1 if free pages are above 'mark'. This takes into account the order
+ * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags, long free_pages)
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
+	free_pages -= (1 << order) + 1;
 	if (alloc_flags & ALLOC_HIGH)
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
 
 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
-		return 0;
+		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */
 		free_pages -= z->free_area[o].nr_free << o;
@@ -1480,9 +1485,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		min >>= 1;
 
 		if (free_pages <= min)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
+}
+
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+					zone_page_state(z, NR_FREE_PAGES));
+}
+
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
+		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+								free_pages);
 }
 
 #ifdef CONFIG_NUMA
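The refactoring splits the watermark check so the caller chooses how much precision to pay for. zone_watermark_ok() still reads the cheap per-zone NR_FREE_PAGES counter, which can over-report free memory because each CPU batches its updates in a per-CPU delta before folding it into the zone counter. zone_watermark_ok_safe() falls back to an exact count, but only once the cheap reading drops below percpu_drift_mark, i.e. close enough to the watermark that the drift could change the answer. Roughly what that snapshot does, as a sketch (zone_page_state_snapshot() lives in the vmstat headers; this is a simplified rendering, not the exact kernel code):

/* Fold the not-yet-flushed per-CPU deltas into the global counter, so
 * the result cannot over-report free pages by up to
 * nr_online_cpus * stat_threshold. */
static long snapshot_free_pages(struct zone *zone)
{
	long x = atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[NR_FREE_PAGES];

	return x < 0 ? 0 : x;
}

This keeps the hot allocation path free of the per-CPU walk while still preventing a zone from being deemed allocatable on stale counters when memory is nearly exhausted.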
@@ -2436,7 +2460,7 @@ void show_free_areas(void)
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_nr_free_pages(zone)),
+			K(zone_page_state(zone, NR_FREE_PAGES)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
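The show_free_areas() hunk swaps a zone_nr_free_pages() reader for a direct zone_page_state() lookup, consistent with the watermark change above. The printed values go through the file's K() macro, which converts a page count to kibibytes:

#define K(x) ((x) << (PAGE_SHIFT - 10))	/* e.g. with 4 KiB pages: x << 2 */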