Diffstat (limited to 'lib/lmb.c')
-rw-r--r--	lib/lmb.c	86
1 file changed, 76 insertions, 10 deletions
@@ -232,6 +232,82 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+	return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+					   u64 size, u64 align)
+{
+	u64 base;
+	long j;
+
+	base = lmb_align_down((end - size), align);
+	while (start <= base &&
+	       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
+		base = lmb_align_down(lmb.reserved.region[j].base - size,
+				      align);
+
+	if (base != 0 && start <= base) {
+		if (lmb_add_region(&lmb.reserved, base,
+				   lmb_align_up(size, align)) < 0)
+			base = ~(u64)0;
+		return base;
+	}
+
+	return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+				       u64 (*nid_range)(u64, u64, int *),
+				       u64 size, u64 align, int nid)
+{
+	u64 start, end;
+
+	start = mp->base;
+	end = start + mp->size;
+
+	start = lmb_align_up(start, align);
+	while (start < end) {
+		u64 this_end;
+		int this_nid;
+
+		this_end = nid_range(start, end, &this_nid);
+		if (this_nid == nid) {
+			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+							   size, align);
+			if (ret != ~(u64)0)
+				return ret;
+		}
+		start = this_end;
+	}
+
+	return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+			 u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+	struct lmb_region *mem = &lmb.memory;
+	int i;
+
+	for (i = 0; i < mem->cnt; i++) {
+		u64 ret = lmb_alloc_nid_region(&mem->region[i],
+					       nid_range,
+					       size, align, nid);
+		if (ret != ~(u64)0)
+			return ret;
+	}
+
+	return lmb_alloc(size, align);
+}
+
 u64 __init lmb_alloc(u64 size, u64 align)
 {
 	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
@@ -250,16 +326,6 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;
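
Usage note (not part of the commit): the new lmb_alloc_nid() walks each lmb.memory region, uses a caller-supplied nid_range() callback to split the region into per-node sub-ranges, tries a top-down reserved allocation inside sub-ranges on the requested node, and falls back to a plain lmb_alloc() if nothing node-local fits. A minimal caller sketch follows; the toy callback, node layout, and sizes are hypothetical illustrations, not code from this change.

#include <linux/lmb.h>

/*
 * Hypothetical nid_range() callback (illustration only): for the range
 * [start, end), report which node 'start' lives on and how far the range
 * stays on that node.  Toy layout: two nodes interleaved at 1 GB.
 */
static u64 __init toy_nid_range(u64 start, u64 end, int *nid)
{
	u64 node_span = 1ULL << 30;	/* 1 GB per node slice */
	u64 this_end = (start & ~(node_span - 1)) + node_span;

	*nid = (int)((start >> 30) & 1);	/* alternate nodes 0/1 */
	return this_end < end ? this_end : end;
}

static void __init toy_node_local_alloc(void)
{
	/* 64 KB, 4 KB aligned, preferably from node 1's memory. */
	u64 paddr = lmb_alloc_nid(64 * 1024, 4096, 1, toy_nid_range);

	/*
	 * Per the diff above, lmb_alloc_nid() falls back to lmb_alloc()
	 * when no node-local fit is found, so the caller still gets a
	 * usable region rather than a node-specific failure.
	 */
	(void)paddr;
}

The callback-based design keeps lib/lmb.c free of any architecture-specific notion of node boundaries; each platform that wants node-local early allocations only has to describe how physical addresses map to nodes.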