author    Mel Gorman <mgorman@techsingularity.net>    2016-07-28 18:46:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 19:07:41 -0400
commit    a5f5f91da6ad647fb0cc7fce0e17343c0d1c5a9a
tree      0249dc4f9dd74daebc1d9aceac95992368834386 /mm/page_alloc.c
parent    52e9f87ae8be96a863e44c7d8d7f482fb279dddd
mm: convert zone_reclaim to node_reclaim
As reclaim is now per-node based, convert zone_reclaim to be node_reclaim.
It is possible that a node will be reclaimed multiple times if it has
multiple zones, but this is unavoidable without caching all nodes traversed
so far. The documentation and interface to userspace are the same from a
configuration perspective, and behaviour will be similar unless node-local
allocation requests were also limited to lower zones.

Link: http://lkml.kernel.org/r/1467970510-21195-24-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
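The caveat above, that a node with multiple zones may be reclaimed more than once, follows from the allocator walking a zonelist of zones rather than nodes. Below is a toy userspace sketch of why the duplication arises; the struct definitions and node_reclaim_stub() are illustrative stand-ins, not kernel code.

    #include <stdio.h>

    struct pgdat { int nid; };
    struct zone { struct pgdat *zone_pgdat; };

    /* Stand-in for node_reclaim(); only the call pattern matters here. */
    static void node_reclaim_stub(struct pgdat *pgdat)
    {
    	printf("node_reclaim() on node %d\n", pgdat->nid);
    }

    int main(void)
    {
    	struct pgdat node0 = { 0 };
    	/* Two zones (say, DMA32 and Normal) sharing one node. */
    	struct zone zonelist[2] = { { &node0 }, { &node0 } };

    	/* The zonelist walk visits zones, not nodes, so node 0 is
    	 * reclaimed twice unless visited nodes were cached. */
    	for (int i = 0; i < 2; i++)
    		node_reclaim_stub(zonelist[i].zone_pgdat);
    	return 0;
    }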
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f2c56a13b065..c9d1720c58a3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2942,16 +2942,16 @@ zonelist_scan:
 		if (alloc_flags & ALLOC_NO_WATERMARKS)
 			goto try_this_zone;
 
-		if (zone_reclaim_mode == 0 ||
+		if (node_reclaim_mode == 0 ||
 		    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
 			continue;
 
-		ret = zone_reclaim(zone, gfp_mask, order);
+		ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
 		switch (ret) {
-		case ZONE_RECLAIM_NOSCAN:
+		case NODE_RECLAIM_NOSCAN:
 			/* did not scan */
 			continue;
-		case ZONE_RECLAIM_FULL:
+		case NODE_RECLAIM_FULL:
 			/* scanned but unreclaimable */
 			continue;
 		default:
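For orientation, the NODE_RECLAIM_* codes handled in this switch carry over the values of the old ZONE_RECLAIM_* constants. At the time of this series they were defined in mm/internal.h roughly as follows (a reference sketch; consult the tree for the exact definitions):

    /* node_reclaim() return codes, per mm/internal.h around this series. */
    #define NODE_RECLAIM_NOSCAN	-2	/* did not scan */
    #define NODE_RECLAIM_FULL	-1	/* scanned but unreclaimable */
    #define NODE_RECLAIM_SOME	0	/* reclaimed some pages, not enough */
    #define NODE_RECLAIM_SUCCESS	1	/* reclaimed enough pages */

For the two remaining codes the default branch rechecks the zone watermark before falling through to try_this_zone.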
@@ -5948,9 +5948,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
-		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
-						/ 100;
-		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
+		pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
+						/ 100;
+		pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
 		zone->zone_pgdat = pgdat;
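Note the change from = to +=: every zone on a node now contributes to a single pgdat-wide threshold. A minimal standalone sketch of the resulting arithmetic, assuming a hypothetical node with a 4 GB DMA32 zone and a 12 GB Normal zone (4 KiB pages) and the default min_unmapped_ratio of 1:

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical node: 4 GB DMA32 + 12 GB Normal, in 4 KiB pages. */
    	unsigned long freesize[2] = { 1048576UL, 3145728UL };
    	int sysctl_min_unmapped_ratio = 1;	/* default: 1 percent */
    	unsigned long min_unmapped_pages = 0;

    	/* Mirrors the += accumulation in free_area_init_core(). */
    	for (int j = 0; j < 2; j++)
    		min_unmapped_pages += (freesize[j] *
    				sysctl_min_unmapped_ratio) / 100;

    	/* 1% of 16 GB: 41942 pages (~164 MiB) of the node must be
    	 * unmapped before node_reclaim() will scan. */
    	printf("pgdat->min_unmapped_pages = %lu\n", min_unmapped_pages);
    	return 0;
    }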
@@ -6922,6 +6922,7 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
+	struct pglist_data *pgdat;
 	struct zone *zone;
 	int rc;
 
@@ -6929,8 +6930,11 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	if (rc)
 		return rc;
 
+	for_each_online_pgdat(pgdat)
+		pgdat->min_slab_pages = 0;
+
 	for_each_zone(zone)
-		zone->min_unmapped_pages = (zone->managed_pages *
+		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
@@ -6938,6 +6942,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
+	struct pglist_data *pgdat;
 	struct zone *zone;
 	int rc;
 
@@ -6945,8 +6950,11 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
 	if (rc)
 		return rc;
 
+	for_each_online_pgdat(pgdat)
+		pgdat->min_slab_pages = 0;
+
 	for_each_zone(zone)
-		zone->min_slab_pages = (zone->managed_pages *
+		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
 			sysctl_min_slab_ratio) / 100;
 	return 0;
 }
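From a configuration perspective nothing moves: the same /proc/sys/vm knobs feed the handlers above, which now zero and re-accumulate per-node rather than per-zone thresholds. A small illustrative example of exercising the handler from userspace (requires root and a CONFIG_NUMA kernel; the value written here is just the default):

    #include <stdio.h>

    int main(void)
    {
    	/* Writing the ratio invokes sysctl_min_unmapped_ratio_sysctl_handler(),
    	 * which recomputes pgdat->min_unmapped_pages for every online node. */
    	FILE *f = fopen("/proc/sys/vm/min_unmapped_ratio", "w");
    	if (!f) {
    		perror("min_unmapped_ratio");
    		return 1;
    	}
    	fprintf(f, "1\n");	/* 1% of node pages must be unmapped */
    	return fclose(f) ? 1 : 0;
    }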