Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	155
1 file changed, 105 insertions(+), 50 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index a44eab3157f8..5cc6731b00cc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
 
 int memblock_debug __initdata_memblock;
 static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
 static inline const char *memblock_type_name(struct memblock_type *type)
@@ -141,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					   MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_free(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_reserve(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	type->total_size -= type->regions[r].size;
@@ -182,11 +160,42 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 	}
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+					phys_addr_t *addr)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	*addr = __pa(memblock.reserved.regions);
+
+	return PAGE_ALIGN(sizeof(struct memblock_region) *
+			  memblock.reserved.max);
+}
+
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+						phys_addr_t new_area_start,
+						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
+	int *in_slab;
 
 	/* We don't allow resizing until we know about the reserved regions
 	 * of memory that aren't suitable for allocation
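
The helper added above replaces the memblock_free_reserved_regions()/memblock_reserve_reserved_regions() pair deleted earlier: rather than memblock freeing its own reserved.regions array, it reports the array's physical address and page-aligned size so the caller can release it through whatever page-freeing path it already uses. A minimal caller-side sketch, assuming a hypothetical free_pages_range() helper (the real callers live outside this diff):

	static void __init free_memblock_reserved_array(void)
	{
		phys_addr_t start, size;

		/* returns 0 while the static init array is still in use */
		size = get_allocated_memblock_reserved_regions_info(&start);
		if (size)
			free_pages_range(start, start + size);	/* hypothetical */
	}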
@@ -197,6 +206,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
+	/*
+	 * We need to allocate the new one aligned to PAGE_SIZE,
+	 * so we can free it completely later.
+	 */
+	old_alloc_size = PAGE_ALIGN(old_size);
+	new_alloc_size = PAGE_ALIGN(new_size);
+
+	/* Retrieve the slab flag */
+	if (type == &memblock.memory)
+		in_slab = &memblock_memory_in_slab;
+	else
+		in_slab = &memblock_reserved_in_slab;
 
 	/* Try to find some space for it.
 	 *
@@ -212,14 +233,26 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
 		addr = new_array ? __pa(new_array) : 0;
-	} else
-		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+	} else {
+		/* only exclude range when trying to double reserved.regions */
+		if (type != &memblock.reserved)
+			new_area_start = new_area_size = 0;
+
+		addr = memblock_find_in_range(new_area_start + new_area_size,
+						memblock.current_limit,
+						new_alloc_size, PAGE_SIZE);
+		if (!addr && new_area_size)
+			addr = memblock_find_in_range(0,
+					min(new_area_start, memblock.current_limit),
+					new_alloc_size, PAGE_SIZE);
+
+		new_array = addr ? __va(addr) : 0;
+	}
 	if (!addr) {
 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 		       memblock_type_name(type), type->max, type->max * 2);
 		return -1;
 	}
-	new_array = __va(addr);
 
 	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
 		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
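
The two-step search above is the point of the new parameters: doubling reserved.regions can happen while the caller is midway through reserving a range, and without the exclusion the new array could be allocated inside that not-yet-recorded range. An illustrative trace with made-up addresses:

	memblock_reserve(0x10000000, 0x04000000);	/* needs one more entry */
	  -> memblock_double_array(&memblock.reserved, 0x10000000, 0x04000000)
		first try: search [0x14000000, memblock.current_limit)
		fallback : search [0, min(0x10000000, memblock.current_limit))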
@@ -234,21 +267,23 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	type->regions = new_array;
 	type->max <<= 1;
 
-	/* If we use SLAB that's it, we are done */
-	if (use_slab)
-		return 0;
-
-	/* Add the new reserved region now. Should not fail ! */
-	BUG_ON(memblock_reserve(addr, new_size));
-
-	/* If the array wasn't our static init one, then free it. We only do
-	 * that before SLAB is available as later on, we don't know whether
-	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
-	 * anyways
+	/* Free old array. We needn't free it if the array is the
+	 * static one
 	 */
-	if (old_array != memblock_memory_init_regions &&
-	    old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_size);
+	if (*in_slab)
+		kfree(old_array);
+	else if (old_array != memblock_memory_init_regions &&
+		 old_array != memblock_reserved_init_regions)
+		memblock_free(__pa(old_array), old_alloc_size);
+
+	/* Reserve the new array if that comes from the memblock.
+	 * Otherwise, we needn't do it
+	 */
+	if (!use_slab)
+		BUG_ON(memblock_reserve(addr, new_alloc_size));
+
+	/* Update slab flag */
+	*in_slab = use_slab;
 
 	return 0;
 }
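
The in_slab flags introduced at the top of the file make this free path unambiguous across the bootmem-to-slab transition: the old array must be released by the allocator that produced it, which the flag records per type. A summary of the pairing (an illustration, not code from the patch):

	/* source of current array        how the old one is released      */
	/* kmalloc() (slab available)  -> kfree(old_array)                 */
	/* memblock_find_in_range()    -> memblock_free(__pa(old_array),   */
	/*                                 old_alloc_size)                  */
	/* static __initdata array     -> nothing to free                  */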
@@ -387,7 +422,7 @@ repeat:
 	 */
 	if (!insert) {
 		while (type->cnt + nr_new > type->max)
-			if (memblock_double_array(type) < 0)
+			if (memblock_double_array(type, obase, size) < 0)
 				return -ENOMEM;
 		insert = true;
 		goto repeat;
@@ -438,7 +473,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
-		if (memblock_double_array(type) < 0)
+		if (memblock_double_array(type, base, size) < 0)
 			return -ENOMEM;
 
 	for (i = 0; i < type->cnt; i++) {
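
The type->cnt + 2 bound above holds because isolating a range can split at most the two regions containing its endpoints. A worked example with made-up addresses: isolating [0x1400, 0x1600) out of a single region [0x1000, 0x2000) leaves

	[0x1000, 0x1400)  [0x1400, 0x1600)  [0x1600, 0x2000)

two more entries than before, so doubling until two slots are free is sufficient.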
@@ -528,9 +563,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration. The lower 32bit of
@@ -604,9 +639,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
@@ -855,6 +890,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.memory, base);
@@ -867,6 +912,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
 		memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	memblock_cap_size(base, &size);
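
The two predicates documented above differ in strictness: memblock_is_region_memory() requires the region to be fully contained in a single memory block, while memblock_is_region_reserved() fires on any overlap with a reserved block. A hedged sketch with a made-up layout, memory = [0x1000, 0x2000) and reserved = [0x1800, 0x1900):

	memblock_is_region_memory(0x1200, 0x100);	/* non-zero: inside memory */
	memblock_is_region_memory(0x1f00, 0x200);	/* 0: runs past the block */
	memblock_is_region_reserved(0x1700, 0x200);	/* non-zero: overlaps reserved */
	memblock_is_region_reserved(0x1000, 0x100);	/* 0: no overlap */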