Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig	  3
-rw-r--r--	lib/Makefile	  2
-rw-r--r--	lib/lmb.c	428
3 files changed, 433 insertions, 0 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d9..2d53dc092e8b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -141,4 +141,7 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
+config HAVE_LMB
+	boolean
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 4d7649c326f6..bf8000fc7d48 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -68,6 +68,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..896e2832099e
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,428 @@
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

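/*
 * Typical boot-time usage (a sketch, not part of this patch; the real
 * call sites live in architecture code):
 *
 *	lmb_init();                      set up the empty region tables
 *	lmb_add(base, size);             register each memory bank
 *	lmb_reserve(kbase, ksize);       carve out the kernel, initrd, ...
 *	lmb_analyze();                   compute lmb.memory.size
 *	paddr = lmb_alloc(size, align);  allocate early boot memory
 */
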
void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	pr_debug("lmb_dump_all:\n");
	pr_debug("    memory.cnt          = 0x%lx\n", lmb.memory.cnt);
	pr_debug("    memory.size         = 0x%llx\n",
		 (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		pr_debug("    memory.region[0x%lx].base = 0x%llx\n",
			 i, (unsigned long long)lmb.memory.region[i].base);
		pr_debug("                     .size = 0x%llx\n",
			 (unsigned long long)lmb.memory.region[i].size);
	}

	pr_debug("    reserved.cnt        = 0x%lx\n", lmb.reserved.cnt);
	pr_debug("    reserved.size       = 0x%llx\n",
		 (unsigned long long)lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		pr_debug("    reserved.region[0x%lx].base = 0x%llx\n",
			 i, (unsigned long long)lmb.reserved.region[i].base);
		pr_debug("                       .size = 0x%llx\n",
			 (unsigned long long)lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

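/* True if [base1, base1 + size1) and [base2, base2 + size2) intersect. */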
static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
					      u64 base2, u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

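/*
 * Returns 1 if region 2 begins exactly where region 1 ends, -1 for the
 * mirrored case, and 0 when the regions are not adjacent.
 */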
static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
				      u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

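/* Close the gap at index r by sliding every later entry down one slot. */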
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away
	 * later.  This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

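/*
 * Insert (base, size) into the sorted region table.  If the new block is
 * adjacent to an existing region, that region is simply extended, and the
 * grown region is then checked against its next neighbour in case the
 * insertion closed a gap.  Returns a positive merge count on coalescing,
 * 0 on a plain sorted insertion, or -1 when the fixed-size table is full.
 */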
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try to coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

long __init lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

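/*
 * Return the index of the first region in rgn that intersects
 * [base, base + size), or -1 when nothing overlaps.
 */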
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

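/* Both helpers rely on 'size' being a power of two. */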
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

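/*
 * Search [start, end) from the top down for an unreserved block of
 * 'size' bytes, stepping below each reserved region that gets in the
 * way.  On success the block is reserved and its base returned;
 * ~(u64)0 signals failure.
 */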
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base,
					   lmb_align_up(size, align)) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

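/*
 * Try to allocate from a single memory region, restricted to those
 * parts of it that nid_range() reports as belonging to node 'nid'.
 */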
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

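/*
 * Node-affine allocation: prefer memory that nid_range() maps to node
 * 'nid', and fall back to a plain lmb_alloc() anywhere when that node
 * has no suitable free range.
 */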
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return lmb_alloc(size, align);
}

u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long)size, (unsigned long long)max_addr);

	return alloc;
}

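/*
 * The core allocator: walk the memory regions from the top down and,
 * within each one, search below max_addr for an aligned block that does
 * not collide with an existing reservation.  The winning block is added
 * to lmb.reserved and its base returned; 0 means failure.
 */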
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

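/* Return 1 if addr falls inside any reserved region, 0 otherwise. */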
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}