Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 43 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 41 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32b3e121a388..80373eca453d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -262,6 +262,7 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+int watermark_boost_factor __read_mostly = 15000;
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __meminitdata;
@@ -2129,6 +2130,21 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 	return false;
 }
 
+static inline void boost_watermark(struct zone *zone)
+{
+	unsigned long max_boost;
+
+	if (!watermark_boost_factor)
+		return;
+
+	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
+			watermark_boost_factor, 10000);
+	max_boost = max(pageblock_nr_pages, max_boost);
+
+	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
+		max_boost);
+}
+
 /*
  * This function implements actual steal behaviour. If order is large enough,
  * we can steal whole pageblock. If not, we first move freepages in this
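
Note: watermark_boost_factor is applied as a fraction of 10000, so the default of 15000 caps the accumulated boost at 150% of the zone's high watermark, while each fragmentation event adds one pageblock worth of pages. The kernel's mult_frac() computes x * numer / denom without overflowing the intermediate product. A minimal userspace sketch of the same clamping arithmetic follows; the sample values are illustrative stand-ins for real zone state, not kernel constants:

	#include <stdio.h>

	/* Userspace stand-in for the kernel's mult_frac(): computes
	 * x * numer / denom without overflowing the intermediate product. */
	static unsigned long mult_frac_sketch(unsigned long x, unsigned long numer,
					      unsigned long denom)
	{
		return (x / denom) * numer + (x % denom) * numer / denom;
	}

	int main(void)
	{
		/* Illustrative values: 512-page pageblocks (2MB with 4KB pages
		 * on x86-64) and a high watermark of 32768 pages (128MB). */
		unsigned long pageblock_nr_pages = 512;
		unsigned long wmark_high = 32768;
		unsigned long boost_factor = 15000;	/* default: 150% */
		unsigned long boost = 0, max_boost;
		int i;

		max_boost = mult_frac_sketch(wmark_high, boost_factor, 10000);
		if (max_boost < pageblock_nr_pages)
			max_boost = pageblock_nr_pages;

		/* Each external fragmentation event adds one pageblock,
		 * clamped to max_boost, mirroring boost_watermark() above. */
		for (i = 0; i < 120; i++) {
			boost += pageblock_nr_pages;
			if (boost > max_boost)
				boost = max_boost;
		}

		printf("boost=%lu max_boost=%lu\n", boost, max_boost);
		return 0;
	}

With these sample numbers the boost saturates at 49152 pages, i.e. exactly 150% of the assumed high watermark.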
@@ -2138,7 +2154,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
  * itself, so pages freed in the future will be put on the correct free list.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
-					int start_type, bool whole_block)
+		unsigned int alloc_flags, int start_type, bool whole_block)
 {
 	unsigned int current_order = page_order(page);
 	struct free_area *area;
@@ -2160,6 +2176,15 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 		goto single_page;
 	}
 
+	/*
+	 * Boost watermarks to increase reclaim pressure to reduce the
+	 * likelihood of future fallbacks. Wake kswapd now as the node
+	 * may be balanced overall and kswapd will not wake naturally.
+	 */
+	boost_watermark(zone);
+	if (alloc_flags & ALLOC_KSWAPD)
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+
 	/* We are not allowed to try stealing from the whole block */
 	if (!whole_block)
 		goto single_page;
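
Note: the ALLOC_KSWAPD test keeps the wakeup out of allocation contexts that must not wake kswapd. Elsewhere in this series the flag is derived from the gfp mask; a hedged sketch of that derivation is below (the identifiers and bit values are illustrative, not the kernel's actual definitions, which live in gfp.h and mm/internal.h):

	/* Illustrative sketch: ALLOC_KSWAPD is set only when the gfp mask
	 * permits waking kswapd. Bit values here are made up. */
	#define GFP_KSWAPD_RECLAIM_SKETCH	0x400u
	#define ALLOC_KSWAPD_SKETCH		0x800u

	static unsigned int gfp_to_alloc_flags_sketch(unsigned int gfp_mask)
	{
		unsigned int alloc_flags = 0;

		if (gfp_mask & GFP_KSWAPD_RECLAIM_SKETCH)
			alloc_flags |= ALLOC_KSWAPD_SKETCH;

		return alloc_flags;
	}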
@@ -2443,7 +2468,8 @@ do_steal:
 	page = list_first_entry(&area->free_list[fallback_mt],
 							struct page, lru);
 
-	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
+	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
+								can_steal);
 
 	trace_mm_page_alloc_extfrag(page, order, current_order,
 			start_migratetype, fallback_mt);
@@ -7454,6 +7480,7 @@ static void __setup_per_zone_wmarks(void)
 
 		zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+		zone->watermark_boost = 0;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
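
Note: clearing watermark_boost here means a retune of the watermarks (e.g. writing min_free_kbytes or watermark_scale_factor) also drops any accumulated boost rather than carrying it over. The boost takes effect by inflating the watermark a zone is checked against; a toy model of that relationship, with names mirroring but not identical to the kernel's accessors:

	/* Toy model of how the boost inflates the effective watermark;
	 * the kernel folds watermark_boost into its wmark_pages() accessor. */
	enum { WMARK_MIN_SK, WMARK_LOW_SK, WMARK_HIGH_SK, NR_WMARK_SK };

	struct zone_sketch {
		unsigned long _watermark[NR_WMARK_SK];
		unsigned long watermark_boost;
	};

	static unsigned long wmark_pages_sketch(const struct zone_sketch *z, int i)
	{
		/* A nonzero boost raises every watermark, so kswapd keeps
		 * reclaiming a little longer after a fragmentation event. */
		return z->_watermark[i] + z->watermark_boost;
	}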
@@ -7554,6 +7581,18 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
+int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
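
Note: the new handler defers validation to proc_dointvec_minmax(), with the bounds coming from the ctl_table entry registered in kernel/sysctl.c as part of this patch; the tunable is then exposed as /proc/sys/vm/watermark_boost_factor, and writing 0 disables boosting entirely. A small userspace sketch that reads the current value, assuming that procfs path:

	#include <stdio.h>

	/* Sketch: read the current boost factor from procfs. Assumes the
	 * sysctl is exposed as vm.watermark_boost_factor, as in this series. */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/watermark_boost_factor", "r");
		int factor;

		if (!f || fscanf(f, "%d", &factor) != 1) {
			perror("watermark_boost_factor");
			return 1;
		}
		fclose(f);

		printf("boost factor = %d (max boost = %d%% of high watermark)\n",
		       factor, factor / 100);
		return 0;
	}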