Diffstat (limited to 'lib/lmb.c')
-rw-r--r-- | lib/lmb.c | 370
1 files changed, 370 insertions, 0 deletions
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..3c43b95fef4a
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,370 @@
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) LMB_DBG(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

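/* Dump the memory and reserved region tables; a no-op unless DEBUG is defined. */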
void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
	DBG(" memory.size = 0x%llx\n",
	    (unsigned long long)lmb.memory.size);
	for (i=0; i < lmb.memory.cnt ;i++) {
		DBG(" memory.region[0x%x].base = 0x%llx\n",
		    i, (unsigned long long)lmb.memory.region[i].base);
		DBG(" .size = 0x%llx\n",
		    (unsigned long long)lmb.memory.region[i].size);
	}

	DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
	DBG(" reserved.size = 0x%lx\n", lmb.reserved.size);
	for (i=0; i < lmb.reserved.cnt ;i++) {
		DBG(" reserved.region[0x%x].base = 0x%llx\n",
		    i, (unsigned long long)lmb.reserved.region[i].base);
		DBG(" .size = 0x%llx\n",
		    (unsigned long long)lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

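/* True if [base1, base1+size1) and [base2, base2+size2) intersect. */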
static unsigned long __init lmb_addrs_overlap(u64 base1,
		u64 size1, u64 base2, u64 size2)
{
	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
}

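/*
 * Return 1 if the second range starts exactly where the first ends,
 * -1 if the first range starts exactly where the second ends,
 * and 0 if the two ranges are not adjacent.
 */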
static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
		u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

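/* Delete entry r, sliding every later entry down one slot. */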
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

/* This routine called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

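/*
 * Add [base, base+size) to the given region table, merging it with
 * existing entries where possible and keeping the table sorted by
 * base address.  Returns a negative value if the table is already
 * full, otherwise zero or the number of merges performed.
 */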
/* This routine called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i=0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
		if ( adjacent > 0 ) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
		else if ( adjacent < 0 ) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

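	/* If the new block exactly filled the gap between two existing
	 * regions, merge those regions as well. */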
	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt-1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long __init lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);

}

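/* Mark [base, base+size) as reserved so the allocator will not hand it out. */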
long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

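/* Return the index of the first region in rgn that overlaps
 * [base, base+size), or -1 if there is no overlap. */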
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
				u64 size)
{
	unsigned long i;

	for (i=0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
			break;
		}
	}

	return (i < rgn->cnt) ? i : -1;
}

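/* Allocate size bytes with the given alignment, anywhere in memory. */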
u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

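/* As __lmb_alloc_base(), but panic if the request cannot be satisfied. */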
u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

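/* Alignment helpers: size must be a power of two. */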
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

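/*
 * Allocate size bytes with the requested alignment, placing the block
 * at the highest usable address below max_addr.  Memory regions are
 * scanned from the top down, stepping below any reserved ranges, and
 * the chosen block is itself added to the reserved table.  Returns the
 * base address, or 0 on failure.
 */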
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;

	BUG_ON(0 == size);

	/* On some platforms, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt-1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

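		/* If the candidate range overlaps a reservation, retry
		 * just below that reserved region. */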
		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
			base = lmb_align_down(lmb.reserved.region[j].base - size,
					      align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
		return 0;

	return base;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

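/* Return the first address past the end of the highest memory region. */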
u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

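/* Trim the memory table down to memory_limit bytes and shrink or drop
 * any reservations that extend above the limit. */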
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (! memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

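	/* The RMO can be no larger than the (possibly truncated) first region. */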
	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

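/* Return 1 if addr falls inside any reserved region, 0 otherwise. */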
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}