author		Becky Bruce <becky.bruce@freescale.com>	2008-02-13 19:58:39 -0500
committer	David S. Miller <davem@davemloft.net>	2008-02-13 19:58:39 -0500
commit		e5f270954364a4add74e8445b1db925ac534fcfb (patch)
tree		00c0f7de7c9a6e076d02f6824d5d201fae6d3805 /lib/lmb.c
parent		27e6672bb9912d3e3a41cf88e6142d3ae5e534b3 (diff)
[LMB]: Make lmb support large physical addressing
Convert the lmb code to use u64 instead of unsigned long for physical
addresses and sizes. This is needed to support large amounts of RAM
on 32-bit systems that support 36-bit physical addressing.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
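The pattern repeated throughout the diff below deserves a note: printk-style format strings have no conversion that matches u64 on every architecture, since the kernel typedefs u64 as unsigned long on most 64-bit platforms and unsigned long long on 32-bit ones. The patch therefore casts each u64 argument to unsigned long long and prints it with %llx. A minimal userspace sketch of the idiom (the typedef and the sample value are illustrative stand-ins, not taken from the patch):

#include <stdio.h>

/* Illustrative stand-in: the kernel's u64 definition varies by
 * architecture, which is exactly why the explicit cast below is needed. */
typedef unsigned long long u64;

int main(void)
{
	u64 base = 0x123456789ULL;	/* a 36-bit physical address */

	/* Cast so the argument always matches the %llx conversion. */
	printf("base = 0x%llx\n", (unsigned long long)base);
	return 0;
}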
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--	lib/lmb.c	93
1 file changed, 46 insertions, 47 deletions
diff --git a/lib/lmb.c b/lib/lmb.c
index e34a9e586c42..e3c8dcb04b46 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -34,33 +34,34 @@ void lmb_dump_all(void)
 
 	DBG("lmb_dump_all:\n");
 	DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
-	DBG(" memory.size = 0x%lx\n", lmb.memory.size);
+	DBG(" memory.size = 0x%llx\n",
+	    (unsigned long long)lmb.memory.size);
 	for (i=0; i < lmb.memory.cnt ;i++) {
-		DBG(" memory.region[0x%x].base = 0x%lx\n",
-		    i, lmb.memory.region[i].base);
-		DBG(" .size = 0x%lx\n",
-		    lmb.memory.region[i].size);
+		DBG(" memory.region[0x%x].base = 0x%llx\n",
+		    i, (unsigned long long)lmb.memory.region[i].base);
+		DBG(" .size = 0x%llx\n",
+		    (unsigned long long)lmb.memory.region[i].size);
 	}
 
 	DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
 	DBG(" reserved.size = 0x%lx\n", lmb.reserved.size);
 	for (i=0; i < lmb.reserved.cnt ;i++) {
-		DBG(" reserved.region[0x%x].base = 0x%lx\n",
-		    i, lmb.reserved.region[i].base);
-		DBG(" .size = 0x%lx\n",
-		    lmb.reserved.region[i].size);
+		DBG(" reserved.region[0x%x].base = 0x%llx\n",
+		    i, (unsigned long long)lmb.reserved.region[i].base);
+		DBG(" .size = 0x%llx\n",
+		    (unsigned long long)lmb.reserved.region[i].size);
 	}
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(unsigned long base1,
-		unsigned long size1, unsigned long base2, unsigned long size2)
+static unsigned long __init lmb_addrs_overlap(u64 base1,
+		u64 size1, u64 base2, u64 size2)
 {
 	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
-		unsigned long base2, unsigned long size2)
+static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
+		u64 base2, u64 size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -73,10 +74,10 @@ static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
 static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 		unsigned long r1, unsigned long r2)
 {
-	unsigned long base1 = rgn->region[r1].base;
-	unsigned long size1 = rgn->region[r1].size;
-	unsigned long base2 = rgn->region[r2].base;
-	unsigned long size2 = rgn->region[r2].size;
+	u64 base1 = rgn->region[r1].base;
+	u64 size1 = rgn->region[r1].size;
+	u64 base2 = rgn->region[r2].base;
+	u64 size2 = rgn->region[r2].size;
 
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,8 +129,7 @@ void __init lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
-		unsigned long size)
+static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -142,8 +142,8 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 
 	/* First try and coalesce this LMB with another. */
 	for (i=0; i < rgn->cnt; i++) {
-		unsigned long rgnbase = rgn->region[i].base;
-		unsigned long rgnsize = rgn->region[i].size;
+		u64 rgnbase = rgn->region[i].base;
+		u64 rgnsize = rgn->region[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -190,7 +190,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 }
 
 /* This routine may be called with relocation disabled. */
-long __init lmb_add(unsigned long base, unsigned long size)
+long __init lmb_add(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &(lmb.memory);
 
@@ -202,7 +202,7 @@ long __init lmb_add(unsigned long base, unsigned long size)
 
 }
 
-long __init lmb_reserve(unsigned long base, unsigned long size)
+long __init lmb_reserve(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &(lmb.reserved);
 
@@ -211,14 +211,14 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
 	return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
-				unsigned long size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
+				u64 size)
 {
 	unsigned long i;
 
 	for (i=0; i < rgn->cnt; i++) {
-		unsigned long rgnbase = rgn->region[i].base;
-		unsigned long rgnsize = rgn->region[i].size;
+		u64 rgnbase = rgn->region[i].base;
+		u64 rgnsize = rgn->region[i].size;
 		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
 			break;
 		}
@@ -227,40 +227,38 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
 	return (i < rgn->cnt) ? i : -1;
 }
 
-unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+u64 __init lmb_alloc(u64 size, u64 align)
 {
 	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
 }
 
-unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
-		unsigned long max_addr)
+u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
-	unsigned long alloc;
+	u64 alloc;
 
 	alloc = __lmb_alloc_base(size, align, max_addr);
 
 	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
-				size, max_addr);
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+			(unsigned long long) size, (unsigned long long) max_addr);
 
 	return alloc;
 }
 
-static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
+static u64 lmb_align_down(u64 addr, u64 size)
 {
 	return addr & ~(size - 1);
 }
 
-static unsigned long lmb_align_up(unsigned long addr, unsigned long size)
+static u64 lmb_align_up(u64 addr, u64 size)
 {
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
-		unsigned long max_addr)
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;
-	unsigned long base = 0;
+	u64 base = 0;
 
 	BUG_ON(0 == size);
 
@@ -269,8 +267,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
 		max_addr = LMB_REAL_LIMIT;
 
 	for (i = lmb.memory.cnt-1; i >= 0; i--) {
-		unsigned long lmbbase = lmb.memory.region[i].base;
-		unsigned long lmbsize = lmb.memory.region[i].size;
+		u64 lmbbase = lmb.memory.region[i].base;
+		u64 lmbsize = lmb.memory.region[i].size;
 
 		if (max_addr == LMB_ALLOC_ANYWHERE)
 			base = lmb_align_down(lmbbase + lmbsize - size, align);
@@ -299,12 +297,12 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
 }
 
 /* You must call lmb_analyze() before this. */
-unsigned long __init lmb_phys_mem_size(void)
+u64 __init lmb_phys_mem_size(void)
 {
 	return lmb.memory.size;
 }
 
-unsigned long __init lmb_end_of_DRAM(void)
+u64 __init lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
@@ -312,9 +310,10 @@ unsigned long __init lmb_end_of_DRAM(void)
 }
 
 /* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+void __init lmb_enforce_memory_limit(u64 memory_limit)
 {
-	unsigned long i, limit;
+	unsigned long i;
+	u64 limit;
 	struct lmb_property *p;
 
 	if (! memory_limit)
@@ -352,13 +351,13 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 	}
 }
 
-int __init lmb_is_reserved(unsigned long addr)
+int __init lmb_is_reserved(u64 addr)
 {
 	int i;
 
 	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long upper = lmb.reserved.region[i].base +
+		u64 upper = lmb.reserved.region[i].base +
 			lmb.reserved.region[i].size - 1;
 		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
 			return 1;
 	}
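
As an aside, the lmb_align_down()/lmb_align_up() helpers converted above rely on the alignment being a power of two, so the mask arithmetic carries over to u64 unchanged. A minimal standalone sketch with a worked 36-bit example (the userspace typedef and the test values are assumptions for illustration, not part of the patch):

#include <assert.h>

typedef unsigned long long u64;	/* illustrative stand-in for the kernel typedef */

/* Round addr down to a multiple of size (size must be a power of two):
 * the mask ~(size - 1) clears the low bits, discarding the remainder. */
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

/* Round addr up: first add size - 1, then round down. */
static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

int main(void)
{
	/* 0x123456789 needs 36 bits: exactly the case this patch enables on
	 * 32-bit systems, where unsigned long is only 32 bits wide. */
	assert(lmb_align_down(0x123456789ULL, 0x1000) == 0x123456000ULL);
	assert(lmb_align_up(0x123456789ULL, 0x1000) == 0x123457000ULL);
	return 0;
}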