author		Wei Yang <richardw.yang@linux.intel.com>	2019-09-23 18:35:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 18:54:09 -0400
commit		33fce0113da2f8a5c1bbce0c46a8b131500f1677 (patch)
tree		dff6b88fa98c4478dc2480f64c1764838d9ada14
parent		b6c88d3b9d38f9448e0fcf44847a075ea81d5ca2 (diff)
mm/memory_hotplug.c: prevent memory leak when reusing pgdat
When offlining a node in try_offline_node(), the pgdat is not released, so it
can be reused later by hotadd_new_pgdat(). But hotadd_new_pgdat()
unconditionally reallocates pgdat->per_cpu_nodestats, leaking the previous
per-cpu allocation every time a pgdat is reused.

Prevent the leak by allocating per_cpu_nodestats only when the pgdat is newly
allocated, and by resetting the existing per-cpu stats when a pgdat is reused.

Link: http://lkml.kernel.org/r/20190813020608.10194-1-richardw.yang@linux.intel.com
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <OSalvador@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
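To make the leak pattern concrete, here is a minimal userspace C sketch of the
same allocate-versus-reuse logic. The names (pgdat_like, per_cpu_stats, NCPUS,
hotadd_node) are illustrative stand-ins for the kernel structures, not real
kernel API; malloc/calloc stand in for arch_alloc_nodedata()/alloc_percpu().

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NCPUS 4

	struct pgdat_like {
		long *per_cpu_stats;	/* stands in for pgdat->per_cpu_nodestats */
	};

	/* Survives "offlining", just as the pgdat survives try_offline_node(). */
	static struct pgdat_like *node_data;

	static struct pgdat_like *hotadd_node(void)
	{
		if (!node_data) {
			/* New node: allocate the descriptor and its stats once. */
			node_data = calloc(1, sizeof(*node_data));
			if (!node_data)
				return NULL;
			node_data->per_cpu_stats = calloc(NCPUS, sizeof(long));
			if (!node_data->per_cpu_stats) {
				free(node_data);
				node_data = NULL;
				return NULL;
			}
		} else {
			/*
			 * Reused node: the buggy version called calloc() again
			 * here, dropping the old buffer and leaking it.  The
			 * fixed version only resets the existing allocation.
			 */
			memset(node_data->per_cpu_stats, 0,
			       NCPUS * sizeof(long));
		}
		return node_data;
	}

	int main(void)
	{
		hotadd_node();			/* first online: allocates */
		node_data->per_cpu_stats[0] = 42;
		/* "offline" keeps node_data around, as try_offline_node() does */
		hotadd_node();			/* re-online: resets, no leak */
		printf("stat[0] after reuse: %ld\n",
		       node_data->per_cpu_stats[0]);
		free(node_data->per_cpu_stats);
		free(node_data);
		return 0;
	}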
-rw-r--r--	mm/memory_hotplug.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3706a137d880..c28e5dd017ba 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -925,8 +925,11 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 		if (!pgdat)
 			return NULL;
 
+		pgdat->per_cpu_nodestats =
+			alloc_percpu(struct per_cpu_nodestat);
 		arch_refresh_nodedata(nid, pgdat);
 	} else {
+		int cpu;
 		/*
 		 * Reset the nr_zones, order and classzone_idx before reuse.
 		 * Note that kswapd will init kswapd_classzone_idx properly
@@ -935,6 +938,12 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 		pgdat->nr_zones = 0;
 		pgdat->kswapd_order = 0;
 		pgdat->kswapd_classzone_idx = 0;
+		for_each_online_cpu(cpu) {
+			struct per_cpu_nodestat *p;
+
+			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
+			memset(p, 0, sizeof(*p));
+		}
 	}
 
 	/* we can use NODE_DATA(nid) from here */
@@ -944,7 +953,6 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
 	/* init node's zones as empty zones, we don't have any present pages.*/
 	free_area_init_core_hotplug(nid);
-	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
 	/*
 	 * The node we allocated has no zone fallback lists. For avoiding