author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-08-10 19:27:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-10 19:40:56 -0400
commit		6423aa8192c596848e1b23bd4193dc0924e7274d (patch)
tree		e3e3282feeca26ae275a2a46f887d71542a06bc8 /mm
parent		81cbcbc2d810c0ce49fba81f864302e1afe5ff27 (diff)
mm/page_alloc.c: recalculate some of node threshold when on/offline memory
Some of the node thresholds depend on the number of managed pages in the
node.  When memory is onlined or offlined, that number changes and the
thresholds need to be adjusted accordingly.

Add the recalculation at the appropriate places and clean up the related
functions for easier maintenance.
Link: http://lkml.kernel.org/r/1470724248-26780-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
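For illustration, here is a minimal user-space model of the recalculation this patch factors into setup_min_unmapped_ratio() and setup_min_slab_ratio(): reset each node's totals, then re-accumulate a percentage of every zone's managed pages into its owning node, so the same helper can be rerun whenever managed_pages changes.  The struct names, sample data, and recalc_node_thresholds() below are invented for the sketch; only the managed_pages * ratio / 100 accumulation mirrors the kernel code.

#include <stdio.h>

/* Simplified stand-ins for the kernel's pg_data_t and struct zone;
 * these names and fields are invented for the sketch. */
struct node_stub {
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
};

struct zone_stub {
        int nid;                     /* owning node id */
        unsigned long managed_pages; /* pages managed by the page allocator */
};

/* Kernel defaults for the two sysctls (percent). */
static int sysctl_min_unmapped_ratio = 1;
static int sysctl_min_slab_ratio = 5;

/*
 * Models what setup_min_unmapped_ratio()/setup_min_slab_ratio() do:
 * zero the per-node totals, then accumulate a percentage of each
 * zone's managed pages into the zone's node.  Rebuilding from scratch
 * is what makes the helper safe to call at boot, on a sysctl write,
 * or after memory online/offline changes managed_pages.
 */
static void recalc_node_thresholds(struct node_stub *nodes, int nr_nodes,
                                   struct zone_stub *zones, int nr_zones)
{
        for (int n = 0; n < nr_nodes; n++) {
                nodes[n].min_unmapped_pages = 0;
                nodes[n].min_slab_pages = 0;
        }

        for (int z = 0; z < nr_zones; z++) {
                struct node_stub *node = &nodes[zones[z].nid];

                node->min_unmapped_pages +=
                        zones[z].managed_pages * sysctl_min_unmapped_ratio / 100;
                node->min_slab_pages +=
                        zones[z].managed_pages * sysctl_min_slab_ratio / 100;
        }
}

static void dump(const struct node_stub *nodes, int nr_nodes)
{
        for (int n = 0; n < nr_nodes; n++)
                printf("node %d: min_unmapped_pages=%lu min_slab_pages=%lu\n",
                       n, nodes[n].min_unmapped_pages, nodes[n].min_slab_pages);
}

int main(void)
{
        struct node_stub nodes[2] = { { 0, 0 }, { 0, 0 } };
        struct zone_stub zones[] = {
                { .nid = 0, .managed_pages = 1000000 },
                { .nid = 1, .managed_pages =  500000 },
        };

        recalc_node_thresholds(nodes, 2, zones, 2);
        dump(nodes, 2);

        /* Simulate onlining memory on node 1, then recalculate. */
        zones[1].managed_pages += 250000;
        recalc_node_thresholds(nodes, 2, zones, 2);
        dump(nodes, 2);

        return 0;
}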
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	50
1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9a92718b1103..ab2c0ff8c2e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4757,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else	/* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
-		pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-						/ 100;
-		pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
 		zone->zone_pgdat = pgdat;
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+	setup_min_unmapped_ratio();
+	setup_min_slab_ratio();
+#endif
+
 	return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6842,16 +6847,10 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+static void setup_min_unmapped_ratio(void)
 {
-	struct pglist_data *pgdat;
+	pg_data_t *pgdat;
 	struct zone *zone;
-	int rc;
-
-	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (rc)
-		return rc;
 
 	for_each_online_pgdat(pgdat)
 		pgdat->min_unmapped_pages = 0;
@@ -6859,26 +6858,47 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	for_each_zone(zone)
 		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
-	return 0;
 }
 
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
-	struct pglist_data *pgdat;
-	struct zone *zone;
 	int rc;
 
 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 	if (rc)
 		return rc;
 
+	setup_min_unmapped_ratio();
+
+	return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+
 	for_each_online_pgdat(pgdat)
 		pgdat->min_slab_pages = 0;
 
 	for_each_zone(zone)
 		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
+}
+
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	setup_min_slab_ratio();
+
 	return 0;
 }
 #endif
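A brief note on the shape of the refactoring: the per-node totals are now rebuilt from scratch by setup_min_unmapped_ratio() and setup_min_slab_ratio(), and the two sysctl handlers shrink to a proc_dointvec_minmax() call plus a call to the matching helper.  The forward declarations added near local_memory_node() are needed because init_per_zone_wmark_min() calls the helpers before their definitions later in the file.  Hooking the recalculation into init_per_zone_wmark_min() is presumably what covers the on/offline case mentioned in the changelog, since the memory hotplug code re-runs that initializer when the amount of managed memory changes; that call path lies outside this diff.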