author	David S. Miller <davem@davemloft.net>	2008-03-24 05:50:48 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-04-15 07:22:17 -0400
commit	c50f68c8aea421267ba7995b1c485c281b28add6
tree	38d72f3d6c9e43a4653cc7e330af0aa0dfca3dd5 /lib
parent	4b1d99b37f608b8cc03550033b16212ca9362efd
[LMB] Add lmb_alloc_nid()
A variant of lmb_alloc() that tries to allocate memory on a specified NUMA node 'nid' but falls back to normal lmb_alloc() if that fails.

The caller provides a 'nid_range' function pointer which assists the allocator. It is given the args 'start', 'end', and a pointer to an integer, 'this_nid'. It places at 'this_nid' the NUMA node id that corresponds to 'start', and returns the end address within 'start' to 'end' at which the memory associated with 'nid' ends.

This callback allows a platform to use lmb_alloc_nid() in just about any context, even ones in which early_pfn_to_nid() might not be working yet.

This function will be used by the NUMA setup code on sparc64, and it can also be used by powerpc, replacing its hand-crafted "careful_allocation()" function in arch/powerpc/mm/numa.c.

If x86 ever converts its NUMA support over to using the LMB helpers, it can use this too, as it has something entirely similar.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
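For illustration only, below is a minimal sketch of what a platform's 'nid_range' callback and a call to the new lmb_alloc_nid() might look like. The names example_nid_range() and example_early_alloc(), and the one-1GB-stripe-per-node layout, are assumptions invented for this sketch and are not part of the patch; a real platform (for instance the sparc64 NUMA setup code mentioned above) would derive its node boundaries from firmware or device-tree information.

/* Sketch only: assumes a made-up layout in which each NUMA node owns
 * one contiguous 1 GB stripe of physical memory.  A real platform
 * would consult firmware/device-tree tables instead. */
static u64 __init example_nid_range(u64 start, u64 end, int *this_nid)
{
	u64 node_end;

	/* Report which node 'start' belongs to (1 GB per node, assumed). */
	*this_nid = (int)(start >> 30);

	/* Return where that node's memory ends, clamped to 'end'. */
	node_end = ((u64)*this_nid + 1) << 30;
	return node_end < end ? node_end : end;
}

static void __init example_early_alloc(void)
{
	/* Ask for 64 KB, aligned to 64 KB, preferably on node 2; as the
	 * commit message says, the allocator falls back to plain
	 * lmb_alloc() if nothing suitable is free on that node. */
	u64 paddr = lmb_alloc_nid(64 * 1024, 64 * 1024, 2, example_nid_range);
	(void)paddr;
}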
Diffstat (limited to 'lib')
-rw-r--r--	lib/lmb.c	86
1 file changed, 76 insertions(+), 10 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index 3c43b95fef4a..549fbb3d70cf 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -232,6 +232,82 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+	return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+					   u64 size, u64 align)
+{
+	u64 base;
+	long j;
+
+	base = lmb_align_down((end - size), align);
+	while (start <= base &&
+	       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
+		base = lmb_align_down(lmb.reserved.region[j].base - size,
+				      align);
+
+	if (base != 0 && start <= base) {
+		if (lmb_add_region(&lmb.reserved, base,
+				   lmb_align_up(size, align)) < 0)
+			base = ~(u64)0;
+		return base;
+	}
+
+	return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+				       u64 (*nid_range)(u64, u64, int *),
+				       u64 size, u64 align, int nid)
+{
+	u64 start, end;
+
+	start = mp->base;
+	end = start + mp->size;
+
+	start = lmb_align_up(start, align);
+	while (start < end) {
+		u64 this_end;
+		int this_nid;
+
+		this_end = nid_range(start, end, &this_nid);
+		if (this_nid == nid) {
+			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+							   size, align);
+			if (ret != ~(u64)0)
+				return ret;
+		}
+		start = this_end;
+	}
+
+	return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+			 u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+	struct lmb_region *mem = &lmb.memory;
+	int i;
+
+	for (i = 0; i < mem->cnt; i++) {
+		u64 ret = lmb_alloc_nid_region(&mem->region[i],
+					       nid_range,
+					       size, align, nid);
+		if (ret != ~(u64)0)
+			return ret;
+	}
+
+	return lmb_alloc(size, align);
+}
+
 u64 __init lmb_alloc(u64 size, u64 align)
 {
 	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
@@ -250,16 +326,6 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;