author    David S. Miller <davem@davemloft.net>    2008-02-13 19:56:49 -0500
committer David S. Miller <davem@davemloft.net>    2008-02-13 19:56:49 -0500
commit    d9b2b2a277219d4812311d995054ce4f95067725 (patch)
tree      63af21df6686dd2e867015fdf9f0cb798d3ca348 /lib/lmb.c
parent    e760e716d47b48caf98da348368fd41b4a9b9e7e (diff)
[LIB]: Make PowerPC LMB code generic so sparc64 can use it too.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--   lib/lmb.c   354
1 file changed, 354 insertions(+), 0 deletions(-)
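
The new file exposes a small early-boot interface: lmb_init() seeds the region tables, lmb_add() registers RAM, lmb_analyze() totals it, and lmb_reserve()/lmb_alloc() carve pieces out. A minimal caller sketch follows (the platform code is hypothetical; platform_setup_memory(), kernel_start, kernel_size, and early_pgtable are illustrative stand-ins, while the lmb_* functions are the ones this patch adds):

	#include <linux/lmb.h>

	extern unsigned long kernel_start, kernel_size;	/* hypothetical */
	static unsigned long early_pgtable;		/* hypothetical */

	void __init platform_setup_memory(void)
	{
		lmb_init();

		/* Register each RAM range reported by the firmware. */
		lmb_add(0x00000000UL, 0x40000000UL);	/* e.g. 1GB at 0 */
		lmb_analyze();

		/* Keep the kernel image away from later allocations. */
		lmb_reserve(kernel_start, kernel_size);

		/* One aligned page; lmb_alloc() panics via
		 * lmb_alloc_base() if nothing suitable fits. */
		early_pgtable = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
	}
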
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..98078b4ec20e
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,354 @@
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) LMB_DBG(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size = 0x%lx\n", lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base = 0x%lx\n",
		    i, lmb.memory.region[i].base);
		DBG("                      .size = 0x%lx\n",
		    lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base = 0x%lx\n",
		    i, lmb.reserved.region[i].base);
		DBG("                        .size = 0x%lx\n",
		    lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

static unsigned long __init lmb_addrs_overlap(unsigned long base1,
		unsigned long size1, unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
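
/*
 * Example: base1 = 0x1000/size1 = 0x1000 abuts base2 = 0x2000 exactly,
 * so this returns 1; with the two ranges swapped it returns -1; ranges
 * that do not touch end-to-start in either order return 0.
 */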

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	unsigned long base1 = rgn->region[r1].base;
	unsigned long size1 = rgn->region[r1].size;
	unsigned long base2 = rgn->region[r2].base;
	unsigned long size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero-size LMB which will get coalesced away
	 * later. This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
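
/*
 * Example: after lmb_init(), lmb_add(0, size) takes the "adjacent < 0"
 * path in lmb_add_region() (new base 0 == dummy base 0 + dummy size 0),
 * so the dummy entry simply grows into the real region instead of
 * occupying its own slot.
 */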

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	/* First try to coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done. */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}
	rgn->cnt++;

	return 0;
}
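
/*
 * Example of the coalescing above: with an existing entry covering
 * [0x1000, 0x2000), adding [0x2000, 0x3000) extends that entry in place
 * to [0x1000, 0x3000) instead of consuming one of the MAX_LMB_REGIONS
 * table slots.
 */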

/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
		unsigned long max_addr)
{
	unsigned long alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
			size, max_addr);

	return alloc;
}

static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
{
	return addr & ~(size - 1);
}
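
/*
 * The mask trick above assumes a power-of-two "size", e.g.
 * lmb_align_down(0x12345, 0x1000) == 0x12000.
 */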

unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
		unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

	BUG_ON(0 == size);

	/* On some platforms, make sure we allocate lowmem. */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = lmb_align_down(lmb.reserved.region[j].base - size,
					      align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}
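
/*
 * The search above is top-down: each region proposes its highest
 * aligned base that fits below max_addr, then slides the candidate
 * beneath any reserved block it overlaps.  E.g. with a single 16MB
 * region whose top 1MB is reserved, a 1MB request aligned to 1MB
 * (and no lower max_addr cap) lands at 14MB rather than 15MB.
 */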

/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

unsigned long __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
	unsigned long i, limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
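
/*
 * Example: with two 512MB regions and memory_limit = 768MB, the first
 * region survives intact, the second is truncated to 256MB, and any
 * reservation extending past 768MB is clipped or dropped outright.
 */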

int __init lmb_is_reserved(unsigned long addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long upper = lmb.reserved.region[i].base +
				      lmb.reserved.region[i].size - 1;

		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}
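
Once the reserve map is final, lmb_is_reserved() gives architecture code a per-page test, for instance when releasing boot memory to the page allocator. A minimal sketch, where start_pfn, end_pfn, and free_boot_page() are hypothetical stand-ins for the platform's own loop:

	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/* Skip anything an earlier lmb_reserve() claimed. */
		if (!lmb_is_reserved(pfn << PAGE_SHIFT))
			free_boot_page(pfn);	/* hypothetical */
	}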