diff options
Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 83 |
1 file changed, 80 insertions, 3 deletions
diff --git a/mm/memblock.c b/mm/memblock.c index accff1087137..53e477bb5558 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
22 | 22 | ||
23 | #include <asm-generic/sections.h> | ||
24 | |||
23 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 25 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
24 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 26 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
25 | 27 | ||
@@ -32,6 +34,7 @@ struct memblock memblock __initdata_memblock = { | |||
32 | .reserved.cnt = 1, /* empty dummy entry */ | 34 | .reserved.cnt = 1, /* empty dummy entry */ |
33 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 35 | .reserved.max = INIT_MEMBLOCK_REGIONS, |
34 | 36 | ||
37 | .bottom_up = false, | ||
35 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, | 38 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, |
36 | }; | 39 | }; |
37 | 40 | ||
@@ -82,6 +85,38 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, | |||
82 | return (i < type->cnt) ? i : -1; | 85 | return (i < type->cnt) ? i : -1; |
83 | } | 86 | } |
84 | 87 | ||
88 | /* | ||
89 | * __memblock_find_range_bottom_up - find free area utility in bottom-up | ||
90 | * @start: start of candidate range | ||
91 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} | ||
92 | * @size: size of free area to find | ||
93 | * @align: alignment of free area to find | ||
94 | * @nid: nid of the free area to find, %MAX_NUMNODES for any node | ||
95 | * | ||
96 | * Utility called from memblock_find_in_range_node(), find free area bottom-up. | ||
97 | * | ||
98 | * RETURNS: | ||
99 | * Found address on success, 0 on failure. | ||
100 | */ | ||
101 | static phys_addr_t __init_memblock | ||
102 | __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, | ||
103 | phys_addr_t size, phys_addr_t align, int nid) | ||
104 | { | ||
105 | phys_addr_t this_start, this_end, cand; | ||
106 | u64 i; | ||
107 | |||
108 | for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) { | ||
109 | this_start = clamp(this_start, start, end); | ||
110 | this_end = clamp(this_end, start, end); | ||
111 | |||
112 | cand = round_up(this_start, align); | ||
113 | if (cand < this_end && this_end - cand >= size) | ||
114 | return cand; | ||
115 | } | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
85 | /** | 120 | /** |
86 | * __memblock_find_range_top_down - find free area utility, in top-down | 121 | * __memblock_find_range_top_down - find free area utility, in top-down |
87 | * @start: start of candidate range | 122 | * @start: start of candidate range |
@@ -93,7 +128,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, | |||
93 | * Utility called from memblock_find_in_range_node(), find free area top-down. | 128 | * Utility called from memblock_find_in_range_node(), find free area top-down. |
94 | * | 129 | * |
95 | * RETURNS: | 130 | * RETURNS: |
96 | * Found address on success, %0 on failure. | 131 | * Found address on success, 0 on failure. |
97 | */ | 132 | */ |
98 | static phys_addr_t __init_memblock | 133 | static phys_addr_t __init_memblock |
99 | __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, | 134 | __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, |
@@ -127,13 +162,24 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, | |||
127 | * | 162 | * |
128 | * Find @size free area aligned to @align in the specified range and node. | 163 | * Find @size free area aligned to @align in the specified range and node. |
129 | * | 164 | * |
165 | * When allocation direction is bottom-up, the @start should be greater | ||
166 | * than the end of the kernel image. Otherwise, it will be trimmed. The | ||
167 | * reason is that we want the bottom-up allocation just near the kernel | ||
168 | * image so it is highly likely that the allocated memory and the kernel | ||
169 | * will reside in the same node. | ||
170 | * | ||
171 | * If bottom-up allocation failed, will try to allocate memory top-down. | ||
172 | * | ||
130 | * RETURNS: | 173 | * RETURNS: |
131 | * Found address on success, %0 on failure. | 174 | * Found address on success, 0 on failure. |
132 | */ | 175 | */ |
133 | phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, | 176 | phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, |
134 | phys_addr_t end, phys_addr_t size, | 177 | phys_addr_t end, phys_addr_t size, |
135 | phys_addr_t align, int nid) | 178 | phys_addr_t align, int nid) |
136 | { | 179 | { |
180 | int ret; | ||
181 | phys_addr_t kernel_end; | ||
182 | |||
137 | /* pump up @end */ | 183 | /* pump up @end */ |
138 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | 184 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) |
139 | end = memblock.current_limit; | 185 | end = memblock.current_limit; |
@@ -141,6 +187,37 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, | |||
141 | /* avoid allocating the first page */ | 187 | /* avoid allocating the first page */ |
142 | start = max_t(phys_addr_t, start, PAGE_SIZE); | 188 | start = max_t(phys_addr_t, start, PAGE_SIZE); |
143 | end = max(start, end); | 189 | end = max(start, end); |
190 | kernel_end = __pa_symbol(_end); | ||
191 | |||
192 | /* | ||
193 | * try bottom-up allocation only when bottom-up mode | ||
194 | * is set and @end is above the kernel image. | ||
195 | */ | ||
196 | if (memblock_bottom_up() && end > kernel_end) { | ||
197 | phys_addr_t bottom_up_start; | ||
198 | |||
199 | /* make sure we will allocate above the kernel */ | ||
200 | bottom_up_start = max(start, kernel_end); | ||
201 | |||
202 | /* ok, try bottom-up allocation first */ | ||
203 | ret = __memblock_find_range_bottom_up(bottom_up_start, end, | ||
204 | size, align, nid); | ||
205 | if (ret) | ||
206 | return ret; | ||
207 | |||
208 | /* | ||
209 | * we always limit bottom-up allocation above the kernel, | ||
210 | * but top-down allocation doesn't have the limit, so | ||
211 | * retrying top-down allocation may succeed when bottom-up | ||
212 | * allocation failed. | ||
213 | * | ||
214 | * bottom-up allocation is expected to fail very rarely, | ||
215 | * so we use WARN_ONCE() here to see the stack trace if a | ||
216 | * failure happens. | ||
217 | */ | ||
218 | WARN_ONCE(1, "memblock: bottom-up allocation failed, " | ||
219 | "memory hotunplug may be affected\n"); | ||
220 | } | ||
144 | 221 | ||
145 | return __memblock_find_range_top_down(start, end, size, align, nid); | 222 | return __memblock_find_range_top_down(start, end, size, align, nid); |
146 | } | 223 | } |
@@ -155,7 +232,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, | |||
155 | * Find @size free area aligned to @align in the specified range. | 232 | * Find @size free area aligned to @align in the specified range. |
156 | * | 233 | * |
157 | * RETURNS: | 234 | * RETURNS: |
158 | * Found address on success, %0 on failure. | 235 | * Found address on success, 0 on failure. |
159 | */ | 236 | */ |
160 | phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, | 237 | phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, |
161 | phys_addr_t end, phys_addr_t size, | 238 | phys_addr_t end, phys_addr_t size, |