-rw-r--r--   include/linux/lmb.h | 38
-rw-r--r--   lib/lmb.c           | 93
2 files changed, 65 insertions(+), 66 deletions(-)
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 8b93f63407e9..632717c6a2ba 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -19,19 +19,19 @@
 #define MAX_LMB_REGIONS 128
 
 struct lmb_property {
-        unsigned long base;
-        unsigned long size;
+        u64 base;
+        u64 size;
 };
 
 struct lmb_region {
         unsigned long cnt;
-        unsigned long size;
+        u64 size;
         struct lmb_property region[MAX_LMB_REGIONS+1];
 };
 
 struct lmb {
         unsigned long debug;
-        unsigned long rmo_size;
+        u64 rmo_size;
         struct lmb_region memory;
         struct lmb_region reserved;
 };
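
Why the widening matters: on a 32-bit kernel, "unsigned long" is 32 bits, so base/size arithmetic on a region that crosses the 4GB boundary silently wraps. A minimal userspace sketch of the failure mode (using <stdint.h> stand-ins, not code from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t base32 = 0xf0000000u;  /* region base just below 4GB */
        uint32_t size32 = 0x20000000u;  /* 512MB region */
        uint32_t end32  = base32 + size32;              /* wraps */
        uint64_t end64  = (uint64_t)base32 + size32;    /* does not */

        /* the 32-bit "end" lands below the base of the region */
        printf("32-bit end: 0x%lx\n",  (unsigned long)end32);      /* 0x10000000 */
        printf("64-bit end: 0x%llx\n", (unsigned long long)end64); /* 0x110000000 */
        return 0;
}

Note that per-region "cnt" and "debug" stay "unsigned long": they count things, they never hold a physical address.
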
@@ -40,36 +40,36 @@ extern struct lmb lmb;
 
 extern void __init lmb_init(void);
 extern void __init lmb_analyze(void);
-extern long __init lmb_add(unsigned long base, unsigned long size);
-extern long __init lmb_reserve(unsigned long base, unsigned long size);
-extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
-extern unsigned long __init lmb_alloc_base(unsigned long size,
-                unsigned long align, unsigned long max_addr);
-extern unsigned long __init __lmb_alloc_base(unsigned long size,
-                unsigned long align, unsigned long max_addr);
-extern unsigned long __init lmb_phys_mem_size(void);
-extern unsigned long __init lmb_end_of_DRAM(void);
-extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
-extern int __init lmb_is_reserved(unsigned long addr);
+extern long __init lmb_add(u64 base, u64 size);
+extern long __init lmb_reserve(u64 base, u64 size);
+extern u64 __init lmb_alloc(u64 size, u64 align);
+extern u64 __init lmb_alloc_base(u64 size,
+                u64, u64 max_addr);
+extern u64 __init __lmb_alloc_base(u64 size,
+                u64 align, u64 max_addr);
+extern u64 __init lmb_phys_mem_size(void);
+extern u64 __init lmb_end_of_DRAM(void);
+extern void __init lmb_enforce_memory_limit(u64 memory_limit);
+extern int __init lmb_is_reserved(u64 addr);
 
 extern void lmb_dump_all(void);
 
-static inline unsigned long
+static inline u64
 lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
 {
         return type->region[region_nr].size;
 }
-static inline unsigned long
+static inline u64
 lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
 {
         return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
 }
-static inline unsigned long
+static inline u64
 lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
 {
         return type->region[region_nr].base >> PAGE_SHIFT;
 }
-static inline unsigned long
+static inline u64
 lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
 {
         return lmb_start_pfn(type, region_nr) +
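
The four inline helpers now return u64, so PFN and size math performed by callers inherits the wide type. A hypothetical caller (walk_lmb_memory is illustrative only, not part of the patch) would print the results with the same cast-to-unsigned-long-long idiom the lmb.c side of this patch adopts:

static void __init walk_lmb_memory(void)
{
        unsigned long i;

        for (i = 0; i < lmb.memory.cnt; i++) {
                u64 spfn = lmb_start_pfn(&lmb.memory, i);
                u64 epfn = lmb_end_pfn(&lmb.memory, i);

                /* u64 is "unsigned long" on some arches and
                 * "unsigned long long" on others, so cast for %llx */
                printk("region %lu: pfns 0x%llx-0x%llx (0x%llx bytes)\n",
                       i, (unsigned long long)spfn,
                       (unsigned long long)epfn,
                       (unsigned long long)lmb_size_bytes(&lmb.memory, i));
        }
}
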
diff --git a/lib/lmb.c b/lib/lmb.c
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -34,33 +34,34 @@ void lmb_dump_all(void)
 
         DBG("lmb_dump_all:\n");
         DBG("    memory.cnt    = 0x%lx\n", lmb.memory.cnt);
-        DBG("    memory.size   = 0x%lx\n", lmb.memory.size);
+        DBG("    memory.size   = 0x%llx\n",
+            (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.memory.cnt ;i++) {
-                DBG("    memory.region[0x%x].base = 0x%lx\n",
-                    i, lmb.memory.region[i].base);
-                DBG("                      .size = 0x%lx\n",
-                    lmb.memory.region[i].size);
+                DBG("    memory.region[0x%x].base = 0x%llx\n",
+                    i, (unsigned long long)lmb.memory.region[i].base);
+                DBG("                      .size = 0x%llx\n",
+                    (unsigned long long)lmb.memory.region[i].size);
         }
 
         DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
         DBG("    reserved.size   = 0x%lx\n", lmb.reserved.size);
         for (i=0; i < lmb.reserved.cnt ;i++) {
-                DBG("    reserved.region[0x%x].base = 0x%lx\n",
-                    i, lmb.reserved.region[i].base);
-                DBG("                      .size = 0x%lx\n",
-                    lmb.reserved.region[i].size);
+                DBG("    reserved.region[0x%x].base = 0x%llx\n",
+                    i, (unsigned long long)lmb.reserved.region[i].base);
+                DBG("                      .size = 0x%llx\n",
+                    (unsigned long long)lmb.reserved.region[i].size);
         }
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(unsigned long base1,
-                unsigned long size1, unsigned long base2, unsigned long size2)
+static unsigned long __init lmb_addrs_overlap(u64 base1,
+                u64 size1, u64 base2, u64 size2)
 {
         return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
-                unsigned long base2, unsigned long size2)
+static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
+                u64 base2, u64 size2)
 {
         if (base2 == base1 + size1)
                 return 1;
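
The overlap predicate the patch preserves treats each region as a half-open interval [base, base + size): two regions that merely touch do not overlap, they are adjacent. A standalone sketch of that rule (u64 here is a local stand-in, not the kernel header):

#include <stdio.h>

typedef unsigned long long u64;     /* stand-in for the kernel type */

static int overlap(u64 base1, u64 size1, u64 base2, u64 size2)
{
        return (base1 < base2 + size2) && (base2 < base1 + size1);
}

int main(void)
{
        printf("%d\n", overlap(0x1000, 0x1000, 0x1800, 0x1000)); /* 1: intersect */
        printf("%d\n", overlap(0x1000, 0x1000, 0x2000, 0x1000)); /* 0: only touch */
        return 0;
}
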
@@ -73,10 +74,10 @@ static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
 static long __init lmb_regions_adjacent(struct lmb_region *rgn,
                 unsigned long r1, unsigned long r2)
 {
-        unsigned long base1 = rgn->region[r1].base;
-        unsigned long size1 = rgn->region[r1].size;
-        unsigned long base2 = rgn->region[r2].base;
-        unsigned long size2 = rgn->region[r2].size;
+        u64 base1 = rgn->region[r1].base;
+        u64 size1 = rgn->region[r1].size;
+        u64 base2 = rgn->region[r2].base;
+        u64 size2 = rgn->region[r2].size;
 
         return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,8 +129,7 @@ void __init lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
-                unsigned long size)
+static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
         unsigned long coalesced = 0;
         long adjacent, i;
@@ -142,8 +142,8 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 
         /* First try and coalesce this LMB with another. */
         for (i=0; i < rgn->cnt; i++) {
-                unsigned long rgnbase = rgn->region[i].base;
-                unsigned long rgnsize = rgn->region[i].size;
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
 
                 if ((rgnbase == base) && (rgnsize == size))
                         /* Already have this region, so we're done */
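
The coalescing pass relies on the sign convention of lmb_addrs_adjacent(): +1 means the new region starts exactly where an existing one ends, -1 the reverse, 0 means not adjacent. A standalone sketch of that convention (same logic, stand-in type):

#include <stdio.h>

typedef unsigned long long u64;     /* stand-in for the kernel type */

/* mirrors lmb_addrs_adjacent() */
static long adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
        if (base2 == base1 + size1)
                return 1;           /* region 2 starts where region 1 ends */
        if (base1 == base2 + size2)
                return -1;          /* region 1 starts where region 2 ends */
        return 0;
}

int main(void)
{
        printf("%ld\n", adjacent(0x1000, 0x1000, 0x2000, 0x1000)); /*  1 */
        printf("%ld\n", adjacent(0x2000, 0x1000, 0x1000, 0x1000)); /* -1 */
        printf("%ld\n", adjacent(0x1000, 0x1000, 0x3000, 0x1000)); /*  0 */
        return 0;
}
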
@@ -190,7 +190,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 }
 
 /* This routine may be called with relocation disabled. */
-long __init lmb_add(unsigned long base, unsigned long size)
+long __init lmb_add(u64 base, u64 size)
 {
         struct lmb_region *_rgn = &(lmb.memory);
 
@@ -202,7 +202,7 @@ long __init lmb_add(unsigned long base, unsigned long size)
 
 }
 
-long __init lmb_reserve(unsigned long base, unsigned long size)
+long __init lmb_reserve(u64 base, u64 size)
 {
         struct lmb_region *_rgn = &(lmb.reserved);
 
@@ -211,14 +211,14 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
         return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
-                                unsigned long size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
+                                u64 size)
 {
         unsigned long i;
 
         for (i=0; i < rgn->cnt; i++) {
-                unsigned long rgnbase = rgn->region[i].base;
-                unsigned long rgnsize = rgn->region[i].size;
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
                 if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
                         break;
                 }
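
lmb_overlaps_region() returns the index of the first region intersecting [base, base + size), or -1 if none does. A hypothetical caller, not in the patch (claim_range and its error convention are illustrative only), might probe the reserved list before committing a reservation:

/* hypothetical helper: refuse a range that hits any reservation */
static long __init claim_range(u64 base, u64 size)
{
        long idx = lmb_overlaps_region(&lmb.reserved, base, size);

        if (idx >= 0)
                return -1;      /* collides with reserved region "idx" */
        return lmb_reserve(base, size);
}
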
@@ -227,40 +227,38 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
         return (i < rgn->cnt) ? i : -1;
 }
 
-unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+u64 __init lmb_alloc(u64 size, u64 align)
 {
         return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
 }
 
-unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
-                unsigned long max_addr)
+u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
-        unsigned long alloc;
+        u64 alloc;
 
         alloc = __lmb_alloc_base(size, align, max_addr);
 
         if (alloc == 0)
-                panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
-                                size, max_addr);
+                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+                      (unsigned long long) size, (unsigned long long) max_addr);
 
         return alloc;
 }
 
-static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
+static u64 lmb_align_down(u64 addr, u64 size)
 {
         return addr & ~(size - 1);
 }
 
-static unsigned long lmb_align_up(unsigned long addr, unsigned long size)
+static u64 lmb_align_up(u64 addr, u64 size)
 {
         return (addr + (size - 1)) & ~(size - 1);
 }
 
-unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
-                unsigned long max_addr)
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
         long i, j;
-        unsigned long base = 0;
+        u64 base = 0;
 
         BUG_ON(0 == size);
 
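
The two alignment helpers use the classic mask trick, which assumes the alignment is a power of two; with u64 operands the masks now work above 4GB as well. A worked standalone example for a 4KB boundary (stand-in type, not the kernel header):

#include <stdio.h>

typedef unsigned long long u64;     /* stand-in for the kernel type */

static u64 align_down(u64 addr, u64 size) { return addr & ~(size - 1); }
static u64 align_up(u64 addr, u64 size)   { return (addr + (size - 1)) & ~(size - 1); }

int main(void)
{
        /* rounding 0x12345 to a 4KB (0x1000) boundary */
        printf("down: 0x%llx\n", align_down(0x12345ULL, 0x1000ULL)); /* 0x12000 */
        printf("up:   0x%llx\n", align_up(0x12345ULL, 0x1000ULL));   /* 0x13000 */
        return 0;
}
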
@@ -269,8 +267,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
                 max_addr = LMB_REAL_LIMIT;
 
         for (i = lmb.memory.cnt-1; i >= 0; i--) {
-                unsigned long lmbbase = lmb.memory.region[i].base;
-                unsigned long lmbsize = lmb.memory.region[i].size;
+                u64 lmbbase = lmb.memory.region[i].base;
+                u64 lmbsize = lmb.memory.region[i].size;
 
                 if (max_addr == LMB_ALLOC_ANYWHERE)
                         base = lmb_align_down(lmbbase + lmbsize - size, align);
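
The allocator walks the memory regions from the last (highest) one downward and places each candidate at the highest aligned address that still fits. A simplified sketch of the per-region placement step, under the assumption of power-of-two alignment; the real loop additionally steps the candidate down past any reserved regions it collides with:

typedef unsigned long long u64;     /* stand-in for the kernel type */

static u64 align_down(u64 addr, u64 align) { return addr & ~(align - 1); }

/* simplified: returns 0 when the region cannot hold the request */
static u64 candidate_base(u64 rbase, u64 rsize, u64 size, u64 align, u64 max_addr)
{
        u64 top = rbase + rsize;

        if (top > max_addr)
                top = max_addr;     /* honor the caller's ceiling */
        if (top < rbase + size)
                return 0;           /* not enough room in this region */
        return align_down(top - size, align);
}
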
@@ -299,12 +297,12 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
 }
 
 /* You must call lmb_analyze() before this. */
-unsigned long __init lmb_phys_mem_size(void)
+u64 __init lmb_phys_mem_size(void)
 {
         return lmb.memory.size;
 }
 
-unsigned long __init lmb_end_of_DRAM(void)
+u64 __init lmb_end_of_DRAM(void)
 {
         int idx = lmb.memory.cnt - 1;
 
@@ -312,9 +310,10 @@ unsigned long __init lmb_end_of_DRAM(void)
 }
 
 /* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+void __init lmb_enforce_memory_limit(u64 memory_limit)
 {
-        unsigned long i, limit;
+        unsigned long i;
+        u64 limit;
         struct lmb_property *p;
 
         if (! memory_limit)
@@ -352,13 +351,13 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
         }
 }
 
-int __init lmb_is_reserved(unsigned long addr)
+int __init lmb_is_reserved(u64 addr)
 {
         int i;
 
         for (i = 0; i < lmb.reserved.cnt; i++) {
-                unsigned long upper = lmb.reserved.region[i].base +
-                        lmb.reserved.region[i].size - 1;
+                u64 upper = lmb.reserved.region[i].base +
+                        lmb.reserved.region[i].size - 1;
                 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
                         return 1;
         }
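
The inclusive upper bound here is deliberate: "base + size - 1" cannot wrap for a region that ends exactly at the top of the address space, whereas an exclusive "addr < base + size" test would. A standalone sketch of the same check (u64 is a local stand-in):

#include <stdio.h>

typedef unsigned long long u64;     /* stand-in for the kernel type */

static int in_region(u64 addr, u64 base, u64 size)
{
        u64 upper = base + size - 1;    /* inclusive last byte */

        return addr >= base && addr <= upper;
}

int main(void)
{
        /* region ending at the very top of the address space:
         * "base + size" would wrap to 0, "base + size - 1" does not */
        printf("%d\n", in_region(~0ULL, 0xffffffff00000000ULL,
                                 0x100000000ULL));      /* 1 */
        return 0;
}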