Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  40
1 file changed, 23 insertions, 17 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cde5dac6229a..0b9f577b1a2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
 
         max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
                         watermark_boost_factor, 10000);
+
+        /*
+         * high watermark may be uninitialised if fragmentation occurs
+         * very early in boot so do not boost. We do not fall
+         * through and boost by pageblock_nr_pages as failing
+         * allocations that early means that reclaim is not going
+         * to help and it may even be impossible to reclaim the
+         * boosted watermark resulting in a hang.
+         */
+        if (!max_boost)
+                return;
+
         max_boost = max(pageblock_nr_pages, max_boost);
 
         zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
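The hunk above makes boost_watermark() bail out when the computed ceiling is zero: if the high watermark is still uninitialised at this point (fragmentation very early in boot), mult_frac() returns 0, and without the check the next statement would raise the ceiling to pageblock_nr_pages and boost anyway, which is exactly what the new comment warns against. A rough userspace model of that arithmetic is sketched below; mult_frac() mirrors the kernel macro, while boosted(), PAGEBLOCK_NR_PAGES and the sample values are illustrative stand-ins rather than kernel code.

/* Userspace model of the boost_watermark() arithmetic above.
 * Names and values are stand-ins, not the kernel's fields. */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL   /* e.g. a 2MB pageblock with 4K pages */

/* same rounding idea as the kernel's mult_frac(x, numer, denom) macro */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        unsigned long quot = x / denom;
        unsigned long rem  = x % denom;

        return quot * numer + (rem * numer) / denom;
}

static unsigned long boosted(unsigned long wmark_high, unsigned long factor,
                             unsigned long cur_boost)
{
        unsigned long max_boost = mult_frac(wmark_high, factor, 10000);

        /* the patched behaviour: do not boost while the watermark is unset */
        if (!max_boost)
                return cur_boost;

        if (max_boost < PAGEBLOCK_NR_PAGES)
                max_boost = PAGEBLOCK_NR_PAGES;

        cur_boost += PAGEBLOCK_NR_PAGES;
        return cur_boost < max_boost ? cur_boost : max_boost;
}

int main(void)
{
        /* early boot: high watermark not initialised yet, stays 0 */
        printf("early boot boost: %lu\n", boosted(0, 15000, 0));
        /* normal case: high watermark of 12800 pages, factor 15000 (150%) */
        printf("normal boost:     %lu\n", boosted(12800, 15000, 0));
        return 0;
}

In the sketch, a zero watermark leaves the boost untouched, while each later call adds one pageblock worth of pages up to a ceiling of watermark_boost_factor/10000 times the high watermark.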
@@ -2214,7 +2226,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
         */
        boost_watermark(zone);
        if (alloc_flags & ALLOC_KSWAPD)
-               wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+               set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 
        /* We are not allowed to try stealing from the whole block */
        if (!whole_block)
@@ -3102,6 +3114,12 @@ struct page *rmqueue(struct zone *preferred_zone,
        local_irq_restore(flags);
 
 out:
+       /* Separate test+clear to avoid unnecessary atomics */
+       if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+               clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+               wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+       }
+
        VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
        return page;
 
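Together with the steal_suitable_fallback() hunk above, this defers the kswapd wakeup: the fallback path, which runs with the zone lock held, now only sets ZONE_BOOSTED_WATERMARK, and rmqueue() issues the actual wakeup_kswapd() after local_irq_restore(). The "separate test+clear" comment is about keeping the common case cheap: test_bit() is a plain read, so the atomic clear_bit() only runs when the flag was actually set. A minimal userspace sketch of that test-then-clear pattern follows, using C11 atomics instead of the kernel's bitops; boosted_watermark, note_boost() and wakeup_reclaimer() are made-up names.

/* Sketch of the "test before clear" pattern used in rmqueue() above,
 * modelled with C11 atomics rather than set_bit/test_bit/clear_bit. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool boosted_watermark;   /* stands in for ZONE_BOOSTED_WATERMARK */

static void wakeup_reclaimer(void)
{
        printf("waking background reclaim\n");
}

/* producer side: called with a lock held, so only record the request */
static void note_boost(void)
{
        atomic_store(&boosted_watermark, true);
}

/* consumer side: runs after the lock is dropped, on every allocation */
static void maybe_wake_reclaimer(void)
{
        /* cheap read first; skip the atomic write in the common case */
        if (atomic_load(&boosted_watermark)) {
                atomic_store(&boosted_watermark, false);
                wakeup_reclaimer();
        }
}

int main(void)
{
        maybe_wake_reclaimer();   /* flag clear: nothing happens */
        note_boost();
        maybe_wake_reclaimer();   /* flag set: cleared, reclaimer woken */
        return 0;
}

As in the kernel code, the test and the clear are not a single atomic step; the worst case is an extra, harmless wakeup, a reasonable trade-off against an atomic read-modify-write on every call.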
@@ -4669,11 +4687,11 @@ refill:
        /* Even if we own the page, we do not use atomic_set().
         * This would break get_page_unless_zero() users.
         */
-       page_ref_add(page, size - 1);
+       page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
 
        /* reset page count bias and offset to start of new frag */
        nc->pfmemalloc = page_is_pfmemalloc(page);
-       nc->pagecnt_bias = size;
+       nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
        nc->offset = size;
 }
 
@@ -4689,10 +4707,10 @@ refill:
                size = nc->size;
 #endif
                /* OK, page count is 0, we can safely set it */
-               set_page_count(page, size);
+               set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
                /* reset page count bias and offset to start of new frag */
-               nc->pagecnt_bias = size;
+               nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                offset = size - fragsz;
        }
 
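The two page_frag hunks replace the variable size with the constant PAGE_FRAG_CACHE_MAX_SIZE when pre-charging the page refcount and resetting pagecnt_bias. The idea behind the bias is that the cache pre-charges enough references to cover every fragment it might hand out plus the one reference it keeps for itself so the page can be recycled in the offset < 0 path; with 1-byte fragments that is up to size fragments, so the bias needs to be size + 1, which the old size / size - 1 accounting left one short. Using PAGE_FRAG_CACHE_MAX_SIZE + 1 also keeps the accounting the same whether the high-order allocation succeeded or the cache fell back to a single page. A stripped-down userspace model of this accounting is sketched below; struct frag_cache, CACHE_BYTES, frag_alloc() and frag_free() are illustrative names, not the kernel API.

/* Toy model of the pagecnt_bias accounting in page_frag_alloc() above.
 * The real code works on struct page and the atomic page refcount. */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_BYTES 16u     /* stands in for the page / cache size */

struct frag_cache {
        unsigned int refcount;  /* models the atomic page refcount */
        unsigned int bias;      /* references pre-charged but not yet handed out */
        int offset;             /* bump allocator: counts down from CACHE_BYTES */
};

static void cache_refill(struct frag_cache *c)
{
        /* pre-charge one reference per fragment we might hand out
         * (CACHE_BYTES one-byte fragments at most) plus one the cache
         * keeps for itself so the buffer can be recycled */
        c->refcount = CACHE_BYTES + 1;
        c->bias = CACHE_BYTES + 1;
        c->offset = CACHE_BYTES;
}

static bool frag_alloc(struct frag_cache *c, unsigned int fragsz)
{
        if (c->offset < (int)fragsz) {
                /* slow path: recycle the buffer only if every handed-out
                 * fragment has been freed, i.e. only the cache's own
                 * pre-charged references remain */
                if (c->refcount != c->bias)
                        return false;           /* would allocate a fresh page */
                cache_refill(c);
        }
        c->offset -= fragsz;
        c->bias--;              /* hand one pre-charged reference to the caller */
        return true;
}

static void frag_free(struct frag_cache *c)
{
        c->refcount--;          /* caller drops the reference it was handed */
}

int main(void)
{
        struct frag_cache c;
        unsigned int i;

        cache_refill(&c);
        for (i = 0; i < CACHE_BYTES; i++) {   /* exhaust with 1-byte fragments */
                frag_alloc(&c, 1);
                frag_free(&c);
        }
        /* with the "+ 1" headroom the cache can tell the buffer is idle */
        printf("reusable: %s\n", frag_alloc(&c, 1) ? "yes" : "no");
        return 0;
}

In the sketch the buffer is recycled only when the refcount has fallen back to exactly the pre-charged bias; without the "+ 1" headroom, a buffer fully carved into 1-byte fragments could drop to a zero reference count while the cache still held a pointer to it, which is the situation the constant bias avoids.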
@@ -5695,18 +5713,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                        cond_resched();
                }
        }
-#ifdef CONFIG_SPARSEMEM
-       /*
-        * If the zone does not span the rest of the section then
-        * we should at least initialize those pages. Otherwise we
-        * could blow up on a poisoned page in some paths which depend
-        * on full sections being initialized (e.g. memory hotplug).
-        */
-       while (end_pfn % PAGES_PER_SECTION) {
-               __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
-               end_pfn++;
-       }
-#endif
 }
 
 #ifdef CONFIG_ZONE_DEVICE