author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-08-03 23:34:42 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-08-04 22:56:09 -0400
commit     2898cc4cdf208f15246b7a1c6951d2b126a70fd6
tree       9ab5e803751ee7b8288248796339a0f17617ca29 /mm/memblock.c
parent     cd3db0c4ca3d237e7ad20f7107216e575705d2b0
memblock: Change u64 to phys_addr_t
Let's not waste space and cycles on archs that don't support >32-bit
physical address space.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
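
The rationale behind the commit message: `phys_addr_t` is only 64-bit where the architecture actually supports a wide physical address space, so plain 32-bit archs get single-word arithmetic and half-size region fields. A minimal sketch of the conditional typedef as it appeared in include/linux/types.h around this era (the exact config symbol spelling should be checked against the tree):

```c
/* include/linux/types.h (sketch, not verbatim) */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;	/* 64-bit archs, or 32-bit archs with PAE/LPAE */
#else
typedef u32 phys_addr_t;	/* plain 32-bit physical addressing */
#endif
```

On a 32-bit arch without the 64-bit option, each base/size pair in a memblock region shrinks from 16 to 8 bytes, and the overlap/adjacency arithmetic below compiles to single-word operations.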
Diffstat (limited to 'mm/memblock.c')
 mm/memblock.c | 118 +-
 1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 73d903ebf3d4..81da63592a68 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -55,13 +55,14 @@ void memblock_dump_all(void)
 	memblock_dump(&memblock.reserved, "reserved");
 }
 
-static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
-					u64 size2)
+static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+					phys_addr_t base2, phys_addr_t size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
+				    phys_addr_t base2, phys_addr_t size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -72,12 +73,12 @@ static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 }
 
 static long memblock_regions_adjacent(struct memblock_type *type,
 				      unsigned long r1, unsigned long r2)
 {
-	u64 base1 = type->regions[r1].base;
-	u64 size1 = type->regions[r1].size;
-	u64 base2 = type->regions[r2].base;
-	u64 size2 = type->regions[r2].size;
+	phys_addr_t base1 = type->regions[r1].base;
+	phys_addr_t size1 = type->regions[r1].size;
+	phys_addr_t base2 = type->regions[r2].base;
+	phys_addr_t size2 = type->regions[r2].size;
 
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,7 +129,7 @@ void __init memblock_analyze(void)
 		memblock.memory.size += memblock.memory.regions[i].size;
 }
 
-static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -141,8 +142,8 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 
 	/* First try and coalesce this MEMBLOCK with another. */
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -192,16 +193,16 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 	return 0;
 }
 
-long memblock_add(u64 base, u64 size)
+long memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_add_region(&memblock.memory, base, size);
 
 }
 
-static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
+static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
-	u64 rgnbegin, rgnend;
-	u64 end = base + size;
+	phys_addr_t rgnbegin, rgnend;
+	phys_addr_t end = base + size;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
@@ -246,17 +247,17 @@ static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
 	return memblock_add_region(type, end, rgnend - end);
 }
 
-long memblock_remove(u64 base, u64 size)
+long memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.memory, base, size);
 }
 
-long __init memblock_free(u64 base, u64 size)
+long __init memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.reserved, base, size);
 }
 
-long __init memblock_reserve(u64 base, u64 size)
+long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
 
@@ -265,13 +266,13 @@ long __init memblock_reserve(u64 base, u64 size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long i;
 
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
@@ -279,20 +280,20 @@ long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
 	return (i < type->cnt) ? i : -1;
 }
 
-static u64 memblock_align_down(u64 addr, u64 size)
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
 {
 	return addr & ~(size - 1);
 }
 
-static u64 memblock_align_up(u64 addr, u64 size)
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 {
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static u64 __init memblock_alloc_region(u64 start, u64 end,
-					u64 size, u64 align)
+static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t end,
+						phys_addr_t size, phys_addr_t align)
 {
-	u64 base, res_base;
+	phys_addr_t base, res_base;
 	long j;
 
 	base = memblock_align_down((end - size), align);
@@ -301,7 +302,7 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 		if (j < 0) {
 			/* this area isn't reserved, take it */
 			if (memblock_add_region(&memblock.reserved, base, size) < 0)
-				base = ~(u64)0;
+				base = ~(phys_addr_t)0;
 			return base;
 		}
 		res_base = memblock.reserved.regions[j].base;
@@ -310,42 +311,43 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 		base = memblock_align_down(res_base - size, align);
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
+phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
 	*nid = 0;
 
 	return end;
 }
 
-static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
-					u64 size, u64 align, int nid)
+static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+						    phys_addr_t size,
+						    phys_addr_t align, int nid)
 {
-	u64 start, end;
+	phys_addr_t start, end;
 
 	start = mp->base;
 	end = start + mp->size;
 
 	start = memblock_align_up(start, align);
 	while (start < end) {
-		u64 this_end;
+		phys_addr_t this_end;
 		int this_nid;
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			u64 ret = memblock_alloc_region(start, this_end, size, align);
-			if (ret != ~(u64)0)
+			phys_addr_t ret = memblock_alloc_region(start, this_end, size, align);
+			if (ret != ~(phys_addr_t)0)
 				return ret;
 		}
 		start = this_end;
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
+phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	struct memblock_type *mem = &memblock.memory;
 	int i;
@@ -359,23 +361,23 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
+		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
-		if (ret != ~(u64)0)
+		if (ret != ~(phys_addr_t)0)
 			return ret;
 	}
 
 	return memblock_alloc(size, align);
 }
 
-u64 __init memblock_alloc(u64 size, u64 align)
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	u64 alloc;
+	phys_addr_t alloc;
 
 	alloc = __memblock_alloc_base(size, align, max_addr);
 
@@ -386,11 +388,11 @@ u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
-	u64 base = 0;
-	u64 res_base;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
 
 	BUG_ON(0 == size);
 
@@ -405,26 +407,26 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	 * top of memory
 	 */
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		u64 memblockbase = memblock.memory.regions[i].base;
-		u64 memblocksize = memblock.memory.regions[i].size;
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_alloc_region(memblockbase, base, size, align);
-		if (res_base != ~(u64)0)
+		if (res_base != ~(phys_addr_t)0)
 			return res_base;
 	}
 	return 0;
 }
 
 /* You must call memblock_analyze() before this. */
-u64 __init memblock_phys_mem_size(void)
+phys_addr_t __init memblock_phys_mem_size(void)
 {
 	return memblock.memory.size;
 }
 
-u64 memblock_end_of_DRAM(void)
+phys_addr_t memblock_end_of_DRAM(void)
 {
 	int idx = memblock.memory.cnt - 1;
 
@@ -432,10 +434,10 @@ u64 memblock_end_of_DRAM(void)
 }
 
 /* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(u64 memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
 {
 	unsigned long i;
-	u64 limit;
+	phys_addr_t limit;
 	struct memblock_region *p;
 
 	if (!memory_limit)
@@ -472,7 +474,7 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	}
 }
 
-static int memblock_search(struct memblock_type *type, u64 addr)
+static int memblock_search(struct memblock_type *type, phys_addr_t addr)
 {
 	unsigned int left = 0, right = type->cnt;
 
@@ -490,17 +492,17 @@ static int memblock_search(struct memblock_type *type, u64 addr)
 	return -1;
 }
 
-int __init memblock_is_reserved(u64 addr)
+int __init memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
 }
 
-int memblock_is_memory(u64 addr)
+int memblock_is_memory(phys_addr_t addr)
 {
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
-int memblock_is_region_memory(u64 base, u64 size)
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.reserved, base);
 
@@ -511,13 +513,13 @@ int memblock_is_region_memory(u64 base, u64 size)
 		memblock.reserved.regions[idx].size) >= (base + size);
 }
 
-int memblock_is_region_reserved(u64 base, u64 size)
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
 
-void __init memblock_set_current_limit(u64 limit)
+void __init memblock_set_current_limit(phys_addr_t limit)
 {
 	memblock.current_limit = limit;
 }
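
A detail worth noting in the diff above: every failure sentinel changes from `~(u64)0` to `~(phys_addr_t)0`, so "allocation failed" is the all-ones value at whatever width the arch uses (0xffffffff on 32-bit, 0xffffffffffffffff on 64-bit) rather than a hardcoded 64-bit pattern. A standalone userspace sketch of that sentinel idiom and of the power-of-two mask trick behind memblock_align_down/up; the names here (ALLOC_FAILED, align_down, align_up) are illustrative, not kernel API:

```c
#include <stdio.h>
#include <stdint.h>

typedef uint32_t phys_addr_t;	/* pretend we're a plain 32-bit arch */

/* All-ones failure sentinel, automatically the right width for the type */
#define ALLOC_FAILED	(~(phys_addr_t)0)

/* Same mask arithmetic as memblock_align_down/up; valid only when
 * align is a power of two, so (align - 1) is a contiguous low-bit mask. */
static phys_addr_t align_down(phys_addr_t addr, phys_addr_t align)
{
	return addr & ~(align - 1);
}

static phys_addr_t align_up(phys_addr_t addr, phys_addr_t align)
{
	return (addr + (align - 1)) & ~(align - 1);
}

int main(void)
{
	phys_addr_t ret = ALLOC_FAILED;	/* what a failed allocation returns */

	printf("sentinel = 0x%x\n", (unsigned)ret);
	printf("align_down(0x12345, 0x1000) = 0x%x\n",
	       (unsigned)align_down(0x12345, 0x1000));	/* 0x12000 */
	printf("align_up(0x12345, 0x1000)   = 0x%x\n",
	       (unsigned)align_up(0x12345, 0x1000));	/* 0x13000 */
	return 0;
}
```

The sentinel comparison works identically at either width, which is what lets the allocation paths above switch types without touching their control flow.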