author		zijun_hu <zijun_hu@htc.com>	2016-10-05 09:30:24 -0400
committer	Tejun Heo <tj@kernel.org>	2016-10-05 11:52:55 -0400
commit		9b7396624a7b503220d85428654634b60762f2b0 (patch)
tree		fde5873a7ebf27bd57db430f3077d7e323eadd5f /mm
parent		93c76b6b2faaad7bfbc0cda840763aa4819ef26e (diff)
mm/percpu.c: fix potential memory leakage for pcpu_embed_first_chunk()
In order to ensure that the percpu group areas within a chunk aren't distributed too sparsely, pcpu_embed_first_chunk() goes to the error handling path when a chunk spans more than 3/4 of the vmalloc area. However, during the error handling it forgets to free the memory allocated for all percpu groups, because it jumps to label @out_free instead of @out_free_areas. This causes a memory leak if that rare case ever really happens.

Fix the issue by checking the spanned area immediately after memory allocation for all percpu groups has completed; if the check fails, go to label @out_free_areas to free the memory, then return.

To verify the approach, all allocated memory was dumped, the jump was then enforced, and all freed memory was dumped; checking that everything allocated in this function is freed again confirms the result is okay.

BTW, the approach was chosen after thinking over the scenes below:
- we don't go to label @out_free directly to fix this issue, since that may free several allocated memory blocks twice;
- the aim of the jump after pcpu_setup_first_chunk() is to bypass freeing memory that is now usable, not to handle an error; moreover, that function does not return an error code in any case, it either panics due to a BUG_ON() or returns 0.

Signed-off-by: zijun_hu <zijun_hu@htc.com>
Tested-by: zijun_hu <zijun_hu@htc.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
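The control flow at issue is easier to see in isolation. Below is a minimal userspace sketch of the two-label cleanup pattern that pcpu_embed_first_chunk() uses; every name in it (setup_first_chunk, NGROUPS, span_too_wide) is invented for illustration, and only the goto structure mirrors the kernel function:

	/* toy_leak.c - build with: cc -Wall toy_leak.c */
	#include <errno.h>
	#include <stdlib.h>

	#define NGROUPS 4

	static int setup_first_chunk(int span_too_wide)
	{
		int *ai;			/* stand-in for the alloc_info metadata */
		void **areas;
		int g, rc = 0;

		ai = malloc(sizeof(*ai));	/* like pcpu_build_alloc_info() */
		if (!ai)
			return -ENOMEM;

		areas = calloc(NGROUPS, sizeof(*areas));
		if (!areas) {
			rc = -ENOMEM;
			goto out_free;		/* no group areas exist yet */
		}

		/* one allocation per percpu group, as in the group loop */
		for (g = 0; g < NGROUPS; g++) {
			areas[g] = malloc(4096);
			if (!areas[g]) {
				rc = -ENOMEM;
				goto out_free_areas;
			}
		}

		/*
		 * The patch performs the 3/4-of-vmalloc span check here,
		 * while areas[] still owns every group allocation.  The old
		 * code checked later and bailed out with "goto out_free",
		 * which skips the per-group frees below, so every areas[g]
		 * leaked.
		 */
		if (span_too_wide) {
			rc = -EINVAL;
			goto out_free_areas;
		}

		free(ai);	/* success: the group areas stay allocated */
		free(areas);
		return 0;

	out_free_areas:
		for (g = 0; g < NGROUPS; g++)
			free(areas[g]);		/* free(NULL) is a no-op */
		/* fall through: the metadata must be released on error too */
	out_free:
		free(ai);
		free(areas);
		return rc;
	}

	int main(void)
	{
		/* force the failing span check; valgrind reports no leaks */
		return setup_first_chunk(1) == -EINVAL ? 0 : 1;
	}

Changing the second goto back to out_free and re-running under valgrind reproduces, in miniature, the leak that this patch fixes in the real function.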
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	36
1 file changed, 18 insertions, 18 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index e2737e56b017..255714302394 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1963,7 +1963,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	struct pcpu_alloc_info *ai;
 	size_t size_sum, areas_size;
 	unsigned long max_distance;
-	int group, i, rc;
+	int group, i, highest_group, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
 				   cpu_distance_fn);
@@ -1979,7 +1979,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		goto out_free;
 	}
 
-	/* allocate, copy and determine base address */
+	/* allocate, copy and determine base address & max_distance */
+	highest_group = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		struct pcpu_group_info *gi = &ai->groups[group];
 		unsigned int cpu = NR_CPUS;
@@ -2000,6 +2001,21 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+		if (ptr > areas[highest_group])
+			highest_group = group;
+	}
+	max_distance = areas[highest_group] - base;
+	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
+		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
+			max_distance, VMALLOC_TOTAL);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free_areas;
+#endif
 	}
 
 	/*
@@ -2024,24 +2040,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	}
 
 	/* base address is now known, determine group base offsets */
-	i = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
-		if (areas[group] > areas[i])
-			i = group;
-	}
-	max_distance = ai->groups[i].base_offset +
-		       ai->unit_size * ai->groups[i].nr_units;
-
-	/* warn if maximum distance is further than 75% of vmalloc space */
-	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
-		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
-			max_distance, VMALLOC_TOTAL);
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-		/* and fail if we have fallback */
-		rc = -EINVAL;
-		goto out_free;
-#endif
 	}
 
 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
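For context, the cleanup labels at the tail of pcpu_embed_first_chunk() are untouched by this patch; reproduced approximately below, they show why the label choice matters: @out_free_areas releases the per-group areas and then falls through into @out_free, which only releases the bookkeeping allocations.

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;	/* not an error path: the group areas are now in use */

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;

Jumping from the failed span check to @out_free skipped the free_fn() loop, leaving every group area allocated; @out_free_areas is the label that owns those allocations.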