 include/linux/memblock.h |  48
 mm/memblock.c            | 118
 2 files changed, 84 insertions(+), 82 deletions(-)
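Note (a sketch for context, not part of the patch below): the patch replaces raw u64 physical addresses with phys_addr_t, which is sized to the platform's physical address width. The typedef below mirrors the kernel's CONFIG_PHYS_ADDR_T_64BIT convention from include/linux/types.h, with <stdint.h> types substituted so it compiles standalone.

    #include <stdint.h>

    /* Sketch only: phys_addr_t follows the platform's physical address
     * width, while u64 is always 64 bits wide. */
    #ifdef CONFIG_PHYS_ADDR_T_64BIT
    typedef uint64_t phys_addr_t;   /* platforms with 64-bit physical addressing */
    #else
    typedef uint32_t phys_addr_t;   /* plain 32-bit platforms */
    #endif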
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 71b8edc6ede8..b65045a4ed08 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,19 +21,19 @@
 #define MAX_MEMBLOCK_REGIONS 128
 
 struct memblock_region {
-	u64 base;
-	u64 size;
+	phys_addr_t base;
+	phys_addr_t size;
 };
 
 struct memblock_type {
 	unsigned long cnt;
-	u64 size;
+	phys_addr_t size;
 	struct memblock_region regions[MAX_MEMBLOCK_REGIONS+1];
 };
 
 struct memblock {
 	unsigned long debug;
-	u64 current_limit;
+	phys_addr_t current_limit;
 	struct memblock_type memory;
 	struct memblock_type reserved;
 };
@@ -42,34 +42,34 @@ extern struct memblock memblock;
 
 extern void __init memblock_init(void);
 extern void __init memblock_analyze(void);
-extern long memblock_add(u64 base, u64 size);
-extern long memblock_remove(u64 base, u64 size);
-extern long __init memblock_free(u64 base, u64 size);
-extern long __init memblock_reserve(u64 base, u64 size);
+extern long memblock_add(phys_addr_t base, phys_addr_t size);
+extern long memblock_remove(phys_addr_t base, phys_addr_t size);
+extern long __init memblock_free(phys_addr_t base, phys_addr_t size);
+extern long __init memblock_reserve(phys_addr_t base, phys_addr_t size);
 
-extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid);
-extern u64 __init memblock_alloc(u64 size, u64 align);
+extern phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+extern phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align);
 
 /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE	(~(u64)0)
+#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
-extern u64 __init memblock_alloc_base(u64 size,
-		u64, u64 max_addr);
-extern u64 __init __memblock_alloc_base(u64 size,
-		u64 align, u64 max_addr);
-extern u64 __init memblock_phys_mem_size(void);
-extern u64 memblock_end_of_DRAM(void);
-extern void __init memblock_enforce_memory_limit(u64 memory_limit);
-extern int memblock_is_memory(u64 addr);
-extern int memblock_is_region_memory(u64 base, u64 size);
-extern int __init memblock_is_reserved(u64 addr);
-extern int memblock_is_region_reserved(u64 base, u64 size);
+extern phys_addr_t __init memblock_alloc_base(phys_addr_t size,
+		phys_addr_t, phys_addr_t max_addr);
+extern phys_addr_t __init __memblock_alloc_base(phys_addr_t size,
+		phys_addr_t align, phys_addr_t max_addr);
+extern phys_addr_t __init memblock_phys_mem_size(void);
+extern phys_addr_t memblock_end_of_DRAM(void);
+extern void __init memblock_enforce_memory_limit(phys_addr_t memory_limit);
+extern int memblock_is_memory(phys_addr_t addr);
+extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+extern int __init memblock_is_reserved(phys_addr_t addr);
+extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
 extern void memblock_dump_all(void);
 
 /* Provided by the architecture */
-extern u64 memblock_nid_range(u64 start, u64 end, int *nid);
+extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
 
 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -77,7 +77,7 @@ extern u64 memblock_nid_range(u64 start, u64 end, int *nid);
  *                         accessible during boot
  * @limit: New limit value (physical address)
  */
-extern void memblock_set_current_limit(u64 limit);
+extern void memblock_set_current_limit(phys_addr_t limit);
 
 
 /*
diff --git a/mm/memblock.c b/mm/memblock.c
index 73d903ebf3d4..81da63592a68 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -55,13 +55,14 @@ void memblock_dump_all(void)
 	memblock_dump(&memblock.reserved, "reserved");
 }
 
-static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
-					u64 size2)
+static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+					phys_addr_t base2, phys_addr_t size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
+				    phys_addr_t base2, phys_addr_t size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -72,12 +73,12 @@ static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 }
 
 static long memblock_regions_adjacent(struct memblock_type *type,
 				unsigned long r1, unsigned long r2)
 {
-	u64 base1 = type->regions[r1].base;
-	u64 size1 = type->regions[r1].size;
-	u64 base2 = type->regions[r2].base;
-	u64 size2 = type->regions[r2].size;
+	phys_addr_t base1 = type->regions[r1].base;
+	phys_addr_t size1 = type->regions[r1].size;
+	phys_addr_t base2 = type->regions[r2].base;
+	phys_addr_t size2 = type->regions[r2].size;
 
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,7 +129,7 @@ void __init memblock_analyze(void)
 		memblock.memory.size += memblock.memory.regions[i].size;
 }
 
-static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -141,8 +142,8 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 
 	/* First try and coalesce this MEMBLOCK with another. */
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -192,16 +193,16 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 	return 0;
 }
 
-long memblock_add(u64 base, u64 size)
+long memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_add_region(&memblock.memory, base, size);
 
 }
 
-static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
+static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
-	u64 rgnbegin, rgnend;
-	u64 end = base + size;
+	phys_addr_t rgnbegin, rgnend;
+	phys_addr_t end = base + size;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
@@ -246,17 +247,17 @@ static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
 	return memblock_add_region(type, end, rgnend - end);
 }
 
-long memblock_remove(u64 base, u64 size)
+long memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.memory, base, size);
 }
 
-long __init memblock_free(u64 base, u64 size)
+long __init memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.reserved, base, size);
 }
 
-long __init memblock_reserve(u64 base, u64 size)
+long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
 
@@ -265,13 +266,13 @@ long __init memblock_reserve(u64 base, u64 size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long i;
 
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
@@ -279,20 +280,20 @@ long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
 	return (i < type->cnt) ? i : -1;
 }
 
-static u64 memblock_align_down(u64 addr, u64 size)
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
 {
 	return addr & ~(size - 1);
 }
 
-static u64 memblock_align_up(u64 addr, u64 size)
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 {
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static u64 __init memblock_alloc_region(u64 start, u64 end,
-					u64 size, u64 align)
+static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t end,
+						phys_addr_t size, phys_addr_t align)
 {
-	u64 base, res_base;
+	phys_addr_t base, res_base;
 	long j;
 
 	base = memblock_align_down((end - size), align);
@@ -301,7 +302,7 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 	if (j < 0) {
 		/* this area isn't reserved, take it */
 		if (memblock_add_region(&memblock.reserved, base, size) < 0)
-			base = ~(u64)0;
+			base = ~(phys_addr_t)0;
 		return base;
 	}
 	res_base = memblock.reserved.regions[j].base;
@@ -310,42 +311,43 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 		base = memblock_align_down(res_base - size, align);
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
+phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
 	*nid = 0;
 
 	return end;
 }
 
-static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
-					u64 size, u64 align, int nid)
+static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+						    phys_addr_t size,
+						    phys_addr_t align, int nid)
 {
-	u64 start, end;
+	phys_addr_t start, end;
 
 	start = mp->base;
 	end = start + mp->size;
 
 	start = memblock_align_up(start, align);
 	while (start < end) {
-		u64 this_end;
+		phys_addr_t this_end;
 		int this_nid;
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			u64 ret = memblock_alloc_region(start, this_end, size, align);
-			if (ret != ~(u64)0)
+			phys_addr_t ret = memblock_alloc_region(start, this_end, size, align);
+			if (ret != ~(phys_addr_t)0)
 				return ret;
 		}
 		start = this_end;
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
+phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	struct memblock_type *mem = &memblock.memory;
 	int i;
@@ -359,23 +361,23 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
+		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
-		if (ret != ~(u64)0)
+		if (ret != ~(phys_addr_t)0)
 			return ret;
 	}
 
 	return memblock_alloc(size, align);
 }
 
-u64 __init memblock_alloc(u64 size, u64 align)
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	u64 alloc;
+	phys_addr_t alloc;
 
 	alloc = __memblock_alloc_base(size, align, max_addr);
 
@@ -386,11 +388,11 @@ u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
-	u64 base = 0;
-	u64 res_base;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
 
 	BUG_ON(0 == size);
 
@@ -405,26 +407,26 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	 * top of memory
 	 */
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		u64 memblockbase = memblock.memory.regions[i].base;
-		u64 memblocksize = memblock.memory.regions[i].size;
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_alloc_region(memblockbase, base, size, align);
-		if (res_base != ~(u64)0)
+		if (res_base != ~(phys_addr_t)0)
 			return res_base;
 	}
 	return 0;
 }
 
 /* You must call memblock_analyze() before this. */
-u64 __init memblock_phys_mem_size(void)
+phys_addr_t __init memblock_phys_mem_size(void)
 {
 	return memblock.memory.size;
 }
 
-u64 memblock_end_of_DRAM(void)
+phys_addr_t memblock_end_of_DRAM(void)
 {
 	int idx = memblock.memory.cnt - 1;
 
@@ -432,10 +434,10 @@ u64 memblock_end_of_DRAM(void)
 }
 
 /* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(u64 memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
 {
 	unsigned long i;
-	u64 limit;
+	phys_addr_t limit;
 	struct memblock_region *p;
 
 	if (!memory_limit)
@@ -472,7 +474,7 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	}
 }
 
-static int memblock_search(struct memblock_type *type, u64 addr)
+static int memblock_search(struct memblock_type *type, phys_addr_t addr)
 {
 	unsigned int left = 0, right = type->cnt;
 
@@ -490,17 +492,17 @@ static int memblock_search(struct memblock_type *type, u64 addr)
 	return -1;
 }
 
-int __init memblock_is_reserved(u64 addr)
+int __init memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
 }
 
-int memblock_is_memory(u64 addr)
+int memblock_is_memory(phys_addr_t addr)
 {
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
-int memblock_is_region_memory(u64 base, u64 size)
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.reserved, base);
 
@@ -511,13 +513,13 @@ int memblock_is_region_memory(u64 base, u64 size)
 		 memblock.reserved.regions[idx].size) >= (base + size);
 }
 
-int memblock_is_region_reserved(u64 base, u64 size)
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
 
-void __init memblock_set_current_limit(u64 limit)
+void __init memblock_set_current_limit(phys_addr_t limit)
 {
 	memblock.current_limit = limit;
 }