author	Michal Hocko <mhocko@suse.cz>	2013-07-08 19:00:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-09 13:33:25 -0400
commit	5f12733e9d976132e6cbbae9d08f71406fdacdfb (patch)
tree	144cda4e157dcbedd28757796f40c2a4cb52c92f /mm
parent	465939a1fa283cf2a5194362c5accf4429c99c42 (diff)
mm: honor min_free_kbytes set by user
min_free_kbytes is currently updated during memory hotplug (by
init_per_zone_wmark_min), which is the right thing to do in most cases,
but it can be unexpected if the admin has increased the value to prevent
allocation failures, because the recalculation would then decrease
min_free_kbytes again as a result of the memory hotadd.
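To make the problem concrete, here is a small userspace model of the boot-time
heuristic (the compute_min_free_kbytes() helper and the 4 GiB/8 GiB figures are
illustrative, not part of the kernel or of this patch): min_free_kbytes is
recomputed as int_sqrt(lowmem_kbytes * 16), clamped to [128, 65536], and before
this patch the recomputed value simply overwrote whatever the admin had written.

#include <stdio.h>

/* Mirrors the kernel's int_sqrt(): floor of the square root. */
static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

/* Illustrative helper: the pre-patch heuristic from init_per_zone_wmark_min(). */
static int compute_min_free_kbytes(unsigned long lowmem_kbytes)
{
	int val = int_sqrt(lowmem_kbytes * 16);

	if (val < 128)
		val = 128;
	if (val > 65536)
		val = 65536;
	return val;
}

int main(void)
{
	/* Boot with ~4 GiB of lowmem: the heuristic picks 8192 kB. */
	int min_free_kbytes = compute_min_free_kbytes(4UL << 20);

	printf("boot-time value : %d kB\n", min_free_kbytes);

	/* The admin raises the value via /proc/sys/vm/min_free_kbytes. */
	min_free_kbytes = 65536;

	/* Hotadd to ~8 GiB re-runs the heuristic and clobbers the admin's 65536. */
	min_free_kbytes = compute_min_free_kbytes(8UL << 20);
	printf("after hotadd    : %d kB\n", min_free_kbytes);	/* 11585 */
	return 0;
}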
This patch saves the user-defined value and allows updating
min_free_kbytes only if the newly computed value is higher than the saved
one. A warning is printed when the new value is ignored.
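In the same userspace model (reusing int_sqrt() from the sketch above;
hotplug_recompute() and sysctl_write_min_free_kbytes() are made-up names
standing in for init_per_zone_wmark_min() and min_free_kbytes_sysctl_handler()),
the patched flow looks roughly like this: the sysctl write path records the
admin's value, and the hotplug recalculation only takes effect when it exceeds
that value.

static int min_free_kbytes = 8192;
static int user_min_free_kbytes;	/* last value explicitly written by the admin */

/* Stand-in for min_free_kbytes_sysctl_handler(): remember the user's value. */
static void sysctl_write_min_free_kbytes(int value)
{
	min_free_kbytes = value;
	user_min_free_kbytes = min_free_kbytes;
}

/* Stand-in for init_per_zone_wmark_min() after this patch. */
static void hotplug_recompute(unsigned long lowmem_kbytes)
{
	int new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		printf("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
		       new_min_free_kbytes, user_min_free_kbytes);
	}
}

With the values from the previous example, the admin writes 65536, a later
hotadd to ~8 GiB computes 11585, and the admin's 65536 is now kept (with a
warning). On an untuned system user_min_free_kbytes stays at its default of 0,
so the automatic recalculation keeps working exactly as before.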
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b5855e545eec..b100255dedda 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -204,6 +204,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
 };
 
 int min_free_kbytes = 1024;
+int user_min_free_kbytes;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -5589,14 +5590,21 @@ static void __meminit setup_per_zone_inactive_ratio(void)
 int __meminit init_per_zone_wmark_min(void)
 {
 	unsigned long lowmem_kbytes;
+	int new_min_free_kbytes;
 
 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
-
-	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
-	if (min_free_kbytes < 128)
-		min_free_kbytes = 128;
-	if (min_free_kbytes > 65536)
-		min_free_kbytes = 65536;
+	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
+
+	if (new_min_free_kbytes > user_min_free_kbytes) {
+		min_free_kbytes = new_min_free_kbytes;
+		if (min_free_kbytes < 128)
+			min_free_kbytes = 128;
+		if (min_free_kbytes > 65536)
+			min_free_kbytes = 65536;
+	} else {
+		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
+			new_min_free_kbytes, user_min_free_kbytes);
+	}
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();
@@ -5614,8 +5622,10 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
-	if (write)
+	if (write) {
+		user_min_free_kbytes = min_free_kbytes;
 		setup_per_zone_wmarks();
+	}
 	return 0;
 }
 