author     David S. Miller <davem@davemloft.net>     2008-03-24 05:50:48 -0400
committer  Paul Mackerras <paulus@samba.org>         2008-04-15 07:22:17 -0400
commit     c50f68c8aea421267ba7995b1c485c281b28add6
tree       38d72f3d6c9e43a4653cc7e330af0aa0dfca3dd5
parent     4b1d99b37f608b8cc03550033b16212ca9362efd
[LMB] Add lmb_alloc_nid()
A variant of lmb_alloc() that tries to allocate memory on a specified
NUMA node 'nid' but falls back to normal lmb_alloc() if that fails.
The caller provides a 'nid_range' function pointer that assists the
allocator.  It is given the arguments 'start', 'end', and a pointer to
an integer, 'this_nid'.
It places at 'this_nid' the NUMA node id that corresponds to 'start',
and returns the end address, within the range 'start' to 'end', at
which the memory associated with that node ends.
This callback allows a platform to use lmb_alloc_nid() in just
about any context, even ones in which early_pfn_to_nid() might
not be working yet.
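For illustration only (this sketch is not part of the patch): a platform's
'nid_range' callback might look roughly like the code below.  The
node_ranges[] table, my_nid_range(), and every other identifier in it are
invented stand-ins for whatever early memory-to-node map a platform keeps.

/*
 * Hypothetical sketch of a 'nid_range' callback.  All names here are
 * made up for illustration; only the callback's contract -- store the
 * node of 'start' in *this_nid and return where that node's memory
 * ends, clamped to 'end' -- comes from the patch.
 */
#define MAX_NODE_RANGES	64

struct node_range {
	u64	start;
	u64	end;		/* exclusive */
	int	nid;
};

static struct node_range node_ranges[MAX_NODE_RANGES] __initdata;
static int num_node_ranges __initdata;

static u64 __init my_nid_range(u64 start, u64 end, int *this_nid)
{
	int i;

	for (i = 0; i < num_node_ranges; i++) {
		struct node_range *r = &node_ranges[i];

		if (start >= r->start && start < r->end) {
			/* Node that 'start' lives on ...           */
			*this_nid = r->nid;
			/* ... and where that node's memory ends,
			 * clamped to the caller's 'end'.           */
			return r->end < end ? r->end : end;
		}
	}

	/* Address not in the map: report node 0 and let the allocator
	 * skip the rest of this region in one step.                  */
	*this_nid = 0;
	return end;
}

/*
 * A caller in early NUMA setup would then do something like:
 *
 *	paddr = lmb_alloc_nid(size, PAGE_SIZE, nid, my_nid_range);
 */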
This function will be used by the NUMA setup code on sparc64, and it
can also be used by powerpc, replacing the hand-crafted
"careful_allocation()" function in arch/powerpc/mm/numa.c.
If x86 ever converts its NUMA support over to using the LMB helpers,
it can use this too, as it has something very similar.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
 -rw-r--r--  include/linux/lmb.h |  2
 -rw-r--r--  lib/lmb.c           | 86
 2 files changed, 78 insertions(+), 10 deletions(-)
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 632717c6a2ba..271153d27fba 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -42,6 +42,8 @@ extern void __init lmb_init(void);
 extern void __init lmb_analyze(void);
 extern long __init lmb_add(u64 base, u64 size);
 extern long __init lmb_reserve(u64 base, u64 size);
+extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+				u64 (*nid_range)(u64, u64, int *));
 extern u64 __init lmb_alloc(u64 size, u64 align);
 extern u64 __init lmb_alloc_base(u64 size,
 		u64, u64 max_addr);
diff --git a/lib/lmb.c b/lib/lmb.c
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -232,6 +232,82 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+	return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+					   u64 size, u64 align)
+{
+	u64 base;
+	long j;
+
+	base = lmb_align_down((end - size), align);
+	while (start <= base &&
+	       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
+		base = lmb_align_down(lmb.reserved.region[j].base - size,
+				      align);
+
+	if (base != 0 && start <= base) {
+		if (lmb_add_region(&lmb.reserved, base,
+				   lmb_align_up(size, align)) < 0)
+			base = ~(u64)0;
+		return base;
+	}
+
+	return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+				       u64 (*nid_range)(u64, u64, int *),
+				       u64 size, u64 align, int nid)
+{
+	u64 start, end;
+
+	start = mp->base;
+	end = start + mp->size;
+
+	start = lmb_align_up(start, align);
+	while (start < end) {
+		u64 this_end;
+		int this_nid;
+
+		this_end = nid_range(start, end, &this_nid);
+		if (this_nid == nid) {
+			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+							   size, align);
+			if (ret != ~(u64)0)
+				return ret;
+		}
+		start = this_end;
+	}
+
+	return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+			 u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+	struct lmb_region *mem = &lmb.memory;
+	int i;
+
+	for (i = 0; i < mem->cnt; i++) {
+		u64 ret = lmb_alloc_nid_region(&mem->region[i],
+					       nid_range,
+					       size, align, nid);
+		if (ret != ~(u64)0)
+			return ret;
+	}
+
+	return lmb_alloc(size, align);
+}
+
 u64 __init lmb_alloc(u64 size, u64 align)
 {
 	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
@@ -250,16 +326,6 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;
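An aside, not part of the patch: lmb_align_down() and lmb_align_up(), which
the patch simply moves earlier in lib/lmb.c so the new helpers can use them,
assume the 'size' argument (the alignment) is a power of two.  With
illustrative values:

	lmb_align_down(0x12345, 0x1000);	/* == 0x12000, round down to 4 KiB */
	lmb_align_up(0x12345, 0x1000);		/* == 0x13000, round up to 4 KiB   */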
