Diffstat (limited to 'mm/memblock.c')

 mm/memblock.c | 115 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 79 insertions(+), 36 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index 952123eba433..5cc6731b00cc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_free(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_reserve(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	type->total_size -= type->regions[r].size;
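Review note: the dropped pair freed and re-reserved memblock's own bookkeeping from inside memblock, which is unsafe while the reserved array may still be walked. The replacement accessor (added in the next hunk) only reports where the dynamically allocated reserved.regions array lives and how large it is, leaving the actual freeing to the teardown path. The caller side would look roughly like the sketch below; __free_memory_core() and the call-site shape are assumptions based on mm/nobootmem.c of this era, not part of this diff:

/*
 * Hypothetical caller-side sketch (cf. free_low_memory_core_early() in
 * mm/nobootmem.c); only get_allocated_memblock_reserved_regions_info()
 * comes from this patch, the rest is assumed context.
 */
static unsigned long __init free_reserved_regions_array(void)
{
	phys_addr_t start, size;

	/* Returns 0 while the static init array is still in use. */
	size = get_allocated_memblock_reserved_regions_info(&start);
	if (!size)
		return 0;

	/* The array was allocated page-aligned, so the whole range
	 * [start, start + size) can be released to the page allocator. */
	return __free_memory_core(start, start + size);
}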
@@ -184,9 +160,39 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 	}
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+					phys_addr_t *addr)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	*addr = __pa(memblock.reserved.regions);
+
+	return PAGE_ALIGN(sizeof(struct memblock_region) *
+			  memblock.reserved.max);
+}
+
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+						phys_addr_t new_area_start,
+						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
 	int *in_slab;
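Review note: the accessor's contract is easy to model: it returns 0 (and leaves *addr untouched) while the static init array is in use, otherwise the page-aligned byte count plus the array's physical address. A userspace toy of the same contract, with all names and sizes illustrative rather than kernel API:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct region { unsigned long base, size; };

/* Stand-ins for memblock_reserved_init_regions / memblock.reserved. */
static struct region static_init[128];
static struct region *regions = static_init;
static unsigned long regions_max = 128;
static unsigned long regions_pa;	/* physical address once relocated */

static unsigned long get_allocated_info(unsigned long *addr)
{
	if (regions == static_init)
		return 0;	/* still on the static array: nothing to free */
	*addr = regions_pa;
	return PAGE_ALIGN(sizeof(struct region) * regions_max);
}

int main(void)
{
	unsigned long addr = 0, sz;

	sz = get_allocated_info(&addr);
	assert(sz == 0);	/* static array in use: caller must not free */

	/* Pretend the array was doubled and moved to physical 0x100000. */
	regions = (struct region *)0x100000;
	regions_pa = 0x100000;
	regions_max = 256;

	sz = get_allocated_info(&addr);
	printf("free [%#lx, %#lx)\n", addr, addr + sz);	/* one whole page */
	return 0;
}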
@@ -200,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
+	/*
+	 * We need to allocate the new array aligned to PAGE_SIZE,
+	 * so that we can free it completely later.
+	 */
+	old_alloc_size = PAGE_ALIGN(old_size);
+	new_alloc_size = PAGE_ALIGN(new_size);
 
 	/* Retrieve the slab flag */
 	if (type == &memblock.memory)
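Review note: rounding both sizes up to PAGE_SIZE is what makes the later free safe: memory can only be returned to the page allocator in whole pages, so the amount reserved for the array and the amount eventually freed must cover the same page-aligned extent. A quick userspace illustration of the arithmetic, where the 24-byte region size and the 128-entry initial array are assumptions for the demo:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Assumed: 24-byte regions, INIT_MEMBLOCK_REGIONS == 128. */
	unsigned long old_size = 128 * 24;		/* 3072 */
	unsigned long new_size = old_size << 1;		/* 6144 */

	printf("old: %lu -> alloc %lu\n", old_size, PAGE_ALIGN(old_size));
	printf("new: %lu -> alloc %lu\n", new_size, PAGE_ALIGN(new_size));
	/* old: 3072 -> alloc 4096; new: 6144 -> alloc 8192 */
	return 0;
}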
@@ -222,7 +234,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 		new_array = kmalloc(new_size, GFP_KERNEL);
 		addr = new_array ? __pa(new_array) : 0;
 	} else {
-		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+		/* only exclude range when trying to double reserved.regions */
+		if (type != &memblock.reserved)
+			new_area_start = new_area_size = 0;
+
+		addr = memblock_find_in_range(new_area_start + new_area_size,
+						memblock.current_limit,
+						new_alloc_size, PAGE_SIZE);
+		if (!addr && new_area_size)
+			addr = memblock_find_in_range(0,
+				min(new_area_start, memblock.current_limit),
+				new_alloc_size, PAGE_SIZE);
+
 		new_array = addr ? __va(addr) : 0;
 	}
 	if (!addr) {
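Review note: this is the heart of the fix. While reserved.regions is being doubled on behalf of a memblock_reserve(new_area_start, new_area_size) that has not landed in the table yet, memblock_find_in_range() still sees that range as free and could hand it out for the new array. The two-pass search therefore looks above the pending range first and only then falls back to below it. The control flow, modeled as a runnable toy where the single free span and all addresses are made up:

#include <stdio.h>

typedef unsigned long phys_addr_t;

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Toy stand-in for memblock_find_in_range(): first fit of 'size' bytes
 * inside [start, end) within one assumed free span, 0 on failure. */
static phys_addr_t find_in_range(phys_addr_t start, phys_addr_t end,
				 phys_addr_t size)
{
	phys_addr_t free_start = 0x1000, free_end = 0x9000;	/* assumed */

	if (start < free_start)
		start = free_start;
	if (end > free_end)
		end = free_end;
	return (start + size <= end) ? start : 0;
}

int main(void)
{
	phys_addr_t limit = 0x9000;		/* memblock.current_limit */
	phys_addr_t pend_start = 0x7000;	/* range pending reservation */
	phys_addr_t pend_size = 0x2000;
	phys_addr_t alloc = 0x1000;		/* new_alloc_size */
	phys_addr_t addr;

	/* Pass 1: search strictly above the pending range. */
	addr = find_in_range(pend_start + pend_size, limit, alloc);
	/* Pass 2: fall back to the area below the pending range. */
	if (!addr && pend_size)
		addr = find_in_range(0, MIN(pend_start, limit), alloc);

	printf("picked %#lx (avoids [%#lx, %#lx))\n",
	       addr, pend_start, pend_start + pend_size);
	return 0;
}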
@@ -251,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 		kfree(old_array);
 	else if (old_array != memblock_memory_init_regions &&
 		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_size);
+		memblock_free(__pa(old_array), old_alloc_size);
 
 	/* Reserve the new array if that comes from the memblock.
 	 * Otherwise, we needn't do it
 	 */
 	if (!use_slab)
-		BUG_ON(memblock_reserve(addr, new_size));
+		BUG_ON(memblock_reserve(addr, new_alloc_size));
 
 	/* Update slab flag */
 	*in_slab = use_slab;
@@ -399,7 +422,7 @@ repeat:
 	 */
 	if (!insert) {
 		while (type->cnt + nr_new > type->max)
-			if (memblock_double_array(type) < 0)
+			if (memblock_double_array(type, obase, size) < 0)
 				return -ENOMEM;
 		insert = true;
 		goto repeat;
@@ -450,7 +473,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
-		if (memblock_double_array(type, base, size) < 0)
+		if (memblock_double_array(type, base, size) < 0)
 			return -ENOMEM;
 
 	for (i = 0; i < type->cnt; i++) {
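Review note: both call sites follow memblock's count-then-insert pattern: a first pass computes how many entries an operation needs, the array is doubled until it fits, and only then does the mutating pass run. memblock_add_region() passes obase, the saved original base, because the counting pass advances base as it walks; memblock_isolate_range() can split at most two regions, hence base/size directly. A runnable toy of the grow-before-insert shape, where the rtype container and all names are illustrative rather than kernel API:

#include <stdio.h>
#include <stdlib.h>

struct region { unsigned long base, size; };

struct rtype {
	struct region *regions;
	unsigned long cnt, max;
};

/*
 * Double the backing array.  In memblock this step additionally receives
 * the caller's pending [base, base+size) range so the relocated array is
 * never placed inside memory that is about to be reserved.
 */
static int double_array(struct rtype *t)
{
	struct region *n = realloc(t->regions, 2 * t->max * sizeof(*n));

	if (!n)
		return -1;
	t->regions = n;
	t->max *= 2;
	return 0;
}

static int add_region(struct rtype *t, unsigned long base, unsigned long size)
{
	unsigned long nr_new = 1;	/* a real first pass may split ranges */

	/* Make room before mutating, exactly like the kernel loop. */
	while (t->cnt + nr_new > t->max)
		if (double_array(t) < 0)
			return -1;

	t->regions[t->cnt++] = (struct region){ base, size };
	return 0;
}

int main(void)
{
	struct rtype t = { malloc(2 * sizeof(struct region)), 0, 2 };

	for (unsigned long i = 0; i < 5; i++)
		add_region(&t, i * 0x1000, 0x1000);
	printf("cnt=%lu max=%lu\n", t.cnt, t.max);	/* cnt=5 max=8 */
	free(t.regions);
	return 0;
}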
@@ -540,9 +563,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration. The lower 32bit of
@@ -616,9 +639,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
@@ -867,6 +890,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.memory, base);
@@ -879,6 +912,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
 		 memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	memblock_cap_size(base, &size);
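Review note: the two new kernel-doc blocks pin down an easy-to-miss asymmetry: memblock_is_region_memory() succeeds only if [base, base+size) is fully contained in a single memory region, while memblock_is_region_reserved() fires on any overlap with any reserved region. A self-contained model of the two predicates, using a plain linear search over a made-up region table:

#include <stdio.h>

typedef unsigned long phys_addr_t;
struct region { phys_addr_t base, size; };

/* Subset test: the whole [base, base+size) must sit inside one region. */
static int is_region_memory(const struct region *r, int n,
			    phys_addr_t base, phys_addr_t size)
{
	for (int i = 0; i < n; i++)
		if (base >= r[i].base &&
		    base + size <= r[i].base + r[i].size)
			return 1;
	return 0;
}

/* Intersection test: any byte shared with any region is enough. */
static int is_region_reserved(const struct region *r, int n,
			      phys_addr_t base, phys_addr_t size)
{
	for (int i = 0; i < n; i++)
		if (base < r[i].base + r[i].size && r[i].base < base + size)
			return 1;
	return 0;
}

int main(void)
{
	struct region mem[] = { { 0x0, 0x8000 } };
	struct region rsv[] = { { 0x2000, 0x1000 } };

	printf("%d\n", is_region_memory(mem, 1, 0x1000, 0x1000));   /* 1 */
	printf("%d\n", is_region_reserved(rsv, 1, 0x2800, 0x1000)); /* 1: overlap */
	printf("%d\n", is_region_reserved(rsv, 1, 0x3000, 0x1000)); /* 0: disjoint */
	return 0;
}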
