diff options
author | Gavin Shan <shangw@linux.vnet.ibm.com> | 2012-05-29 18:06:50 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 19:22:24 -0400 |
commit | 181eb39425f2b9275afcb015eaa547d11f71a02f (patch) | |
tree | 0ce0f09b5a86931e580a0a6b88e546831c7318ed /mm/memblock.c | |
parent | 4e2f07750d9a94e8f23e86408df5ab95be88bf11 (diff) |
mm/memblock: fix memory leak on extending regions
The overall memblock has been organized into the memory regions and
reserved regions. Initially, the memory regions and reserved regions are
stored in the predetermined arrays of "struct memblock_region". It's
possible for the arrays to be enlarged when we have newly added regions,
but no free space left there. The policy here is to create double-sized
array either by slab allocator or memblock allocator. Unfortunately, we
didn't free the old array, which might be allocated through slab allocator
before. That would cause a memory leak.
The patch introduces 2 variables to track where (slab or memblock) the
memory and reserved regions come from. The memory for the memory or
reserved regions will be deallocated by kfree() if that was allocated by
slab allocator, thus fixing the memory leak issue.
Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 37 |
1 files changed, 24 insertions, 13 deletions
diff --git a/mm/memblock.c b/mm/memblock.c index eae06ea3aa50..952123eba433 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = { | |||
37 | 37 | ||
38 | int memblock_debug __initdata_memblock; | 38 | int memblock_debug __initdata_memblock; |
39 | static int memblock_can_resize __initdata_memblock; | 39 | static int memblock_can_resize __initdata_memblock; |
40 | static int memblock_memory_in_slab __initdata_memblock = 0; | ||
41 | static int memblock_reserved_in_slab __initdata_memblock = 0; | ||
40 | 42 | ||
41 | /* inline so we don't get a warning when pr_debug is compiled out */ | 43 | /* inline so we don't get a warning when pr_debug is compiled out */ |
42 | static inline const char *memblock_type_name(struct memblock_type *type) | 44 | static inline const char *memblock_type_name(struct memblock_type *type) |
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
187 | struct memblock_region *new_array, *old_array; | 189 | struct memblock_region *new_array, *old_array; |
188 | phys_addr_t old_size, new_size, addr; | 190 | phys_addr_t old_size, new_size, addr; |
189 | int use_slab = slab_is_available(); | 191 | int use_slab = slab_is_available(); |
192 | int *in_slab; | ||
190 | 193 | ||
191 | /* We don't allow resizing until we know about the reserved regions | 194 | /* We don't allow resizing until we know about the reserved regions |
192 | * of memory that aren't suitable for allocation | 195 | * of memory that aren't suitable for allocation |
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
198 | old_size = type->max * sizeof(struct memblock_region); | 201 | old_size = type->max * sizeof(struct memblock_region); |
199 | new_size = old_size << 1; | 202 | new_size = old_size << 1; |
200 | 203 | ||
204 | /* Retrieve the slab flag */ | ||
205 | if (type == &memblock.memory) | ||
206 | in_slab = &memblock_memory_in_slab; | ||
207 | else | ||
208 | in_slab = &memblock_reserved_in_slab; | ||
209 | |||
201 | /* Try to find some space for it. | 210 | /* Try to find some space for it. |
202 | * | 211 | * |
203 | * WARNING: We assume that either slab_is_available() and we use it or | 212 | * WARNING: We assume that either slab_is_available() and we use it or |
@@ -235,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
235 | type->regions = new_array; | 244 | type->regions = new_array; |
236 | type->max <<= 1; | 245 | type->max <<= 1; |
237 | 246 | ||
238 | /* If we use SLAB that's it, we are done */ | 247 | /* Free old array. We needn't free it if the array is the |
239 | if (use_slab) | 248 | * static one |
240 | return 0; | ||
241 | |||
242 | /* Add the new reserved region now. Should not fail ! */ | ||
243 | BUG_ON(memblock_reserve(addr, new_size)); | ||
244 | |||
245 | /* If the array wasn't our static init one, then free it. We only do | ||
246 | * that before SLAB is available as later on, we don't know whether | ||
247 | * to use kfree or free_bootmem_pages(). Shouldn't be a big deal | ||
248 | * anyways | ||
249 | */ | 249 | */ |
250 | if (old_array != memblock_memory_init_regions && | 250 | if (*in_slab) |
251 | old_array != memblock_reserved_init_regions) | 251 | kfree(old_array); |
252 | else if (old_array != memblock_memory_init_regions && | ||
253 | old_array != memblock_reserved_init_regions) | ||
252 | memblock_free(__pa(old_array), old_size); | 254 | memblock_free(__pa(old_array), old_size); |
253 | 255 | ||
256 | /* Reserve the new array if that comes from the memblock. | ||
257 | * Otherwise, we needn't do it | ||
258 | */ | ||
259 | if (!use_slab) | ||
260 | BUG_ON(memblock_reserve(addr, new_size)); | ||
261 | |||
262 | /* Update slab flag */ | ||
263 | *in_slab = use_slab; | ||
264 | |||
254 | return 0; | 265 | return 0; |
255 | } | 266 | } |
256 | 267 | ||