Diffstat (limited to 'lib')
-rw-r--r--	lib/lmb.c | 72
1 file changed, 30 insertions, 42 deletions
@@ -15,14 +15,6 @@
 #include <linux/bitops.h>
 #include <linux/lmb.h>
 
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt...) LMB_DBG(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
 #define LMB_ALLOC_ANYWHERE	0
 
 struct lmb lmb;
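The deleted DBG() wrapper duplicated, per file, what the generic kernel pr_debug() already provides: a print call that compiles away unless DEBUG is defined (or dynamic debug enables it on later kernels). A minimal userspace sketch of that contract; the fprintf() stand-in is an assumption for illustration, not the kernel's definition:

#include <stdio.h>

/* Sketch of the pr_debug() contract: compiled in only with -DDEBUG,
 * otherwise the call expands to nothing and costs nothing at runtime. */
#ifdef DEBUG
#define pr_debug(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
	pr_debug("memory.cnt = 0x%lx\n", 4UL);	/* printed only with -DDEBUG */
	return 0;
}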
@@ -32,32 +24,32 @@ void lmb_dump_all(void)
 #ifdef DEBUG
 	unsigned long i;
 
-	DBG("lmb_dump_all:\n");
-	DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
-	DBG(" memory.size = 0x%llx\n",
+	pr_debug("lmb_dump_all:\n");
+	pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+	pr_debug(" memory.size = 0x%llx\n",
 	    (unsigned long long)lmb.memory.size);
 	for (i=0; i < lmb.memory.cnt ;i++) {
-		DBG(" memory.region[0x%x].base = 0x%llx\n",
+		pr_debug(" memory.region[0x%x].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.memory.region[i].base);
-		DBG(" .size = 0x%llx\n",
+		pr_debug(" .size = 0x%llx\n",
 		    (unsigned long long)lmb.memory.region[i].size);
 	}
 
-	DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-	DBG(" reserved.size = 0x%lx\n", lmb.reserved.size);
+	pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+	pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
 	for (i=0; i < lmb.reserved.cnt ;i++) {
-		DBG(" reserved.region[0x%x].base = 0x%llx\n",
+		pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.reserved.region[i].base);
-		DBG(" .size = 0x%llx\n",
+		pr_debug(" .size = 0x%llx\n",
 		    (unsigned long long)lmb.reserved.region[i].size);
 	}
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1,
-		u64 size1, u64 base2, u64 size2)
+static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
+		u64 base2, u64 size2)
 {
-	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
 static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
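The reflowed lmb_addrs_overlap() is the standard test for half-open intervals: [base1, base1 + size1) and [base2, base2 + size2) overlap exactly when each range starts before the other ends. A self-contained userspace check of the same predicate:

#include <assert.h>

typedef unsigned long long u64;

/* Same predicate as lmb_addrs_overlap(): half-open ranges overlap
 * iff each one starts strictly before the other one ends. */
static int addrs_overlap(u64 base1, u64 size1, u64 base2, u64 size2)
{
	return (base1 < base2 + size2) && (base2 < base1 + size1);
}

int main(void)
{
	assert(addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000));	/* partial overlap */
	assert(!addrs_overlap(0x1000, 0x1000, 0x2000, 0x100));	/* merely adjacent */
	return 0;
}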
@@ -101,7 +93,6 @@ static void __init lmb_coalesce_regions(struct lmb_region *rgn,
 	lmb_remove_region(rgn, r2);
 }
 
-/* This routine called with relocation disabled. */
 void __init lmb_init(void)
 {
 	/* Create a dummy zero size LMB which will get coalesced away later.
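The surviving comment documents the trick that keeps the add path simple: lmb_init() seeds each table with a zero-size sentinel region, so lmb_add_region() never has to special-case an empty table. A sketch of that invariant with illustrative names (struct range stands in for the real lmb_property):

typedef unsigned long long u64;

struct range { u64 base, size; };	/* stand-in for struct lmb_property */

/* Seed the table with one dummy zero-size entry at base 0.  Every later
 * insert then runs against a non-empty, sorted table: the sentinel
 * either coalesces into a real region or sits harmlessly at the front. */
static void table_init(struct range *tbl, unsigned long *cnt)
{
	tbl[0].base = 0;
	tbl[0].size = 0;
	*cnt = 1;
}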
@@ -117,7 +108,6 @@ void __init lmb_init(void)
 	lmb.reserved.cnt = 1;
 }
 
-/* This routine may be called with relocation disabled. */
 void __init lmb_analyze(void)
 {
 	int i;
@@ -128,7 +118,6 @@ void __init lmb_analyze(void)
 		lmb.memory.size += lmb.memory.region[i].size;
 }
 
-/* This routine called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
@@ -141,7 +130,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 	}
 
 	/* First try and coalesce this LMB with another. */
-	for (i=0; i < rgn->cnt; i++) {
+	for (i = 0; i < rgn->cnt; i++) {
 		u64 rgnbase = rgn->region[i].base;
 		u64 rgnsize = rgn->region[i].size;
 
@@ -149,21 +138,20 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 			/* Already have this region, so we're done */
 			return 0;
 
-		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
-		if ( adjacent > 0 ) {
+		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+		if (adjacent > 0) {
 			rgn->region[i].base -= size;
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
-		}
-		else if ( adjacent < 0 ) {
+		} else if (adjacent < 0) {
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
 		}
 	}
 
-	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
 		lmb_coalesce_regions(rgn, i, i+1);
 		coalesced++;
 	}
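The restyled branch pair is the coalescing core. lmb_addrs_adjacent() itself is outside this hunk, so the convention below is inferred from the callers: a positive result means the new block ends exactly where region i begins (the region grows downward via its base), a negative result means region i ends exactly where the new block begins (it grows upward via its size). A standalone sketch of that assumed convention:

typedef unsigned long long u64;

/* Assumed convention (inferred from the callers in this hunk):
 *   +1 : range 1 ends exactly where range 2 begins,
 *   -1 : range 2 ends exactly where range 1 begins,
 *    0 : not adjacent. */
static long addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base1 + size1 == base2)
		return 1;
	if (base2 + size2 == base1)
		return -1;
	return 0;
}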
@@ -174,7 +162,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 		return -1;
 
 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i = rgn->cnt-1; i >= 0; i--) {
+	for (i = rgn->cnt - 1; i >= 0; i--) {
 		if (base < rgn->region[i].base) {
 			rgn->region[i+1].base = rgn->region[i].base;
 			rgn->region[i+1].size = rgn->region[i].size;
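This loop (its tail continues in the next hunk) keeps the region table sorted by base: walk from the highest entry down, shift every region with a larger base one slot right, and drop the new range into the gap. A self-contained sketch that fills in the lines elided between the hunks by assumption, with illustrative names throughout:

typedef unsigned long long u64;

struct range { u64 base, size; };

/* Insert (base, size) into a table kept sorted by ascending base.
 * Mirrors the loop shape above; bounds checking is the caller's job. */
static void sorted_insert(struct range *tbl, unsigned long *cnt,
			  u64 base, u64 size)
{
	long i;

	for (i = *cnt - 1; i >= 0; i--) {
		if (base < tbl[i].base) {
			tbl[i + 1] = tbl[i];		/* shift right */
		} else {
			tbl[i + 1].base = base;		/* slot found */
			tbl[i + 1].size = size;
			break;
		}
	}
	if (i < 0) {	/* smallest base of all: slot 0 */
		tbl[0].base = base;
		tbl[0].size = size;
	}
	(*cnt)++;
}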
@@ -194,10 +182,9 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 	return 0;
 }
 
-/* This routine may be called with relocation disabled. */
 long __init lmb_add(u64 base, u64 size)
 {
-	struct lmb_region *_rgn = &(lmb.memory);
+	struct lmb_region *_rgn = &lmb.memory;
 
 	/* On pSeries LPAR systems, the first LMB is our RMO region. */
 	if (base == 0)
@@ -209,24 +196,22 @@ long __init lmb_add(u64 base, u64 size)
 
 long __init lmb_reserve(u64 base, u64 size)
 {
-	struct lmb_region *_rgn = &(lmb.reserved);
+	struct lmb_region *_rgn = &lmb.reserved;
 
 	BUG_ON(0 == size);
 
 	return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
-		u64 size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long i;
 
-	for (i=0; i < rgn->cnt; i++) {
+	for (i = 0; i < rgn->cnt; i++) {
 		u64 rgnbase = rgn->region[i].base;
 		u64 rgnsize = rgn->region[i].size;
-		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
-		}
 	}
 
 	return (i < rgn->cnt) ? i : -1;
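With the redundant braces gone, lmb_overlaps_region() reads as a plain linear scan whose contract lives in the final ternary: return the index of the first region overlapping [base, base + size), or -1 when the range is clear. A compact userspace mock of that contract:

#include <assert.h>

typedef unsigned long long u64;

struct range { u64 base, size; };

/* Mock of lmb_overlaps_region()'s contract: index of the first entry
 * overlapping [base, base + size), or -1 when the range is clear. */
static long find_overlap(const struct range *tbl, unsigned long cnt,
			 u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < cnt; i++)
		if (base < tbl[i].base + tbl[i].size &&
		    tbl[i].base < base + size)
			break;

	return (i < cnt) ? (long)i : -1;
}

int main(void)
{
	struct range rsv[] = { { 0x0, 0x1000 }, { 0x8000, 0x2000 } };

	assert(find_overlap(rsv, 2, 0x9000, 0x100) == 1);	/* hits entry 1 */
	assert(find_overlap(rsv, 2, 0x4000, 0x100) == -1);	/* clear */
	return 0;
}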
@@ -337,7 +322,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 	if (max_addr == LMB_ALLOC_ANYWHERE)
 		max_addr = LMB_REAL_LIMIT;
 
-	for (i = lmb.memory.cnt-1; i >= 0; i--) {
+	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
 		u64 lmbbase = lmb.memory.region[i].base;
 		u64 lmbsize = lmb.memory.region[i].size;
 
@@ -349,10 +334,13 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		} else
 			continue;
 
-		while ((lmbbase <= base) &&
-		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+		while (lmbbase <= base) {
+			j = lmb_overlaps_region(&lmb.reserved, base, size);
+			if (j < 0)
+				break;
 			base = lmb_align_down(lmb.reserved.region[j].base - size,
 					      align);
+		}
 
 		if ((base != 0) && (lmbbase <= base))
 			break;
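This is the most substantial rewrite in the patch, though the logic is unchanged: the old while condition buried an assignment and an overlap test in a single expression. The new loop spells the search out: while the candidate base still lies inside this memory region, find any reserved region overlapping it; if none does, the candidate stands, otherwise drop the candidate to just below the offending reservation and retry. A standalone sketch of that top-down walk (illustrative names, overlap test inlined, wrap-around edge cases ignored):

typedef unsigned long long u64;

struct range { u64 base, size; };

static u64 align_down(u64 addr, u64 align)
{
	return addr & ~(align - 1);	/* align must be a power of two */
}

/* Top-down allocation within one memory region: start at the highest
 * aligned base that fits, hop below each overlapping reservation,
 * and give up (return 0) once the candidate falls out of the region. */
static u64 alloc_top_down(u64 lmbbase, u64 lmbsize, u64 size, u64 align,
			  const struct range *rsv, unsigned long nr_rsv)
{
	u64 base = align_down(lmbbase + lmbsize - size, align);

	while (lmbbase <= base) {
		unsigned long j;

		for (j = 0; j < nr_rsv; j++)	/* same test as lmb_addrs_overlap() */
			if (base < rsv[j].base + rsv[j].size &&
			    rsv[j].base < base + size)
				break;
		if (j == nr_rsv)
			return base;		/* no reservation in the way */
		base = align_down(rsv[j].base - size, align);
	}
	return 0;
}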
@@ -387,7 +375,7 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
 	u64 limit;
 	struct lmb_property *p;
 
-	if (! memory_limit)
+	if (!memory_limit)
 		return;
 
 	/* Truncate the lmb regions to satisfy the memory limit. */