author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-04 00:06:41 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-04 00:21:49 -0400
commit		e3239ff92a17976ac5d26fa0fe40ef3a9daf2523 (patch)
tree		da3c493196811ccae1b79c3c94234f5d481c8221 /mm/memblock.c
parent		f1c2c19c498e27de48bf0dc4221e6e31b1823169 (diff)
memblock: Rename memblock_region to memblock_type and memblock_property to memblock_region
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
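
[Editor's note: for orientation, a minimal sketch of the two structures as they stand after this rename, reconstructed from the usage visible in the patch below. The authoritative definitions live in include/linux/memblock.h; anything not exercised by this patch is omitted here.]

/* Sketch only -- reconstructed from usage in this patch, not copied
 * from include/linux/memblock.h. */
struct memblock_region {	/* one contiguous span; was: struct memblock_property */
	u64 base;
	u64 size;
};

struct memblock_type {		/* a table of regions; was: struct memblock_region */
	unsigned long cnt;	/* number of regions in use */
	u64 size;		/* total bytes, computed by memblock_analyze() */
	struct memblock_region regions[MAX_MEMBLOCK_REGIONS + 1];
};

struct memblock {
	u64 rmo_size;		/* pSeries LPAR RMO region size */
	struct memblock_type memory;
	struct memblock_type reserved;
};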
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	168
1 file changed, 83 insertions(+), 85 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 43840b305ecb..6f407ccf604e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -29,7 +29,7 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
-static void memblock_dump(struct memblock_region *region, char *name)
+static void memblock_dump(struct memblock_type *region, char *name)
 {
 	unsigned long long base, size;
 	int i;
@@ -37,8 +37,8 @@ static void memblock_dump(struct memblock_region *region, char *name)
 	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
 
 	for (i = 0; i < region->cnt; i++) {
-		base = region->region[i].base;
-		size = region->region[i].size;
+		base = region->regions[i].base;
+		size = region->regions[i].size;
 
 		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
 		    name, i, base, base + size - 1, size);
@@ -74,34 +74,34 @@ static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 	return 0;
 }
 
-static long memblock_regions_adjacent(struct memblock_region *rgn,
+static long memblock_regions_adjacent(struct memblock_type *type,
 		unsigned long r1, unsigned long r2)
 {
-	u64 base1 = rgn->region[r1].base;
-	u64 size1 = rgn->region[r1].size;
-	u64 base2 = rgn->region[r2].base;
-	u64 size2 = rgn->region[r2].size;
+	u64 base1 = type->regions[r1].base;
+	u64 size1 = type->regions[r1].size;
+	u64 base2 = type->regions[r2].base;
+	u64 size2 = type->regions[r2].size;
 
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
+static void memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	unsigned long i;
 
-	for (i = r; i < rgn->cnt - 1; i++) {
-		rgn->region[i].base = rgn->region[i + 1].base;
-		rgn->region[i].size = rgn->region[i + 1].size;
+	for (i = r; i < type->cnt - 1; i++) {
+		type->regions[i].base = type->regions[i + 1].base;
+		type->regions[i].size = type->regions[i + 1].size;
 	}
-	rgn->cnt--;
+	type->cnt--;
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void memblock_coalesce_regions(struct memblock_region *rgn,
+static void memblock_coalesce_regions(struct memblock_type *type,
 		unsigned long r1, unsigned long r2)
 {
-	rgn->region[r1].size += rgn->region[r2].size;
-	memblock_remove_region(rgn, r2);
+	type->regions[r1].size += type->regions[r2].size;
+	memblock_remove_region(type, r2);
 }
 
 void __init memblock_init(void)
@@ -109,13 +109,13 @@ void __init memblock_init(void)
 	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
 	 * This simplifies the memblock_add() code below...
 	 */
-	memblock.memory.region[0].base = 0;
-	memblock.memory.region[0].size = 0;
+	memblock.memory.regions[0].base = 0;
+	memblock.memory.regions[0].size = 0;
 	memblock.memory.cnt = 1;
 
 	/* Ditto. */
-	memblock.reserved.region[0].base = 0;
-	memblock.reserved.region[0].size = 0;
+	memblock.reserved.regions[0].base = 0;
+	memblock.reserved.regions[0].size = 0;
 	memblock.reserved.cnt = 1;
 }
 
@@ -126,24 +126,24 @@ void __init memblock_analyze(void)
 	memblock.memory.size = 0;
 
 	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory.size += memblock.memory.region[i].size;
+		memblock.memory.size += memblock.memory.regions[i].size;
 }
 
-static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
+static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
 
-	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
+	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
+		type->regions[0].base = base;
+		type->regions[0].size = size;
 		return 0;
 	}
 
 	/* First try and coalesce this MEMBLOCK with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
+	for (i = 0; i < type->cnt; i++) {
+		u64 rgnbase = type->regions[i].base;
+		u64 rgnsize = type->regions[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -151,61 +151,59 @@ static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
 
 		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
 		if (adjacent > 0) {
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
+			type->regions[i].base -= size;
+			type->regions[i].size += size;
 			coalesced++;
 			break;
 		} else if (adjacent < 0) {
-			rgn->region[i].size += size;
+			type->regions[i].size += size;
 			coalesced++;
 			break;
 		}
 	}
 
-	if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
-		memblock_coalesce_regions(rgn, i, i+1);
+	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
+		memblock_coalesce_regions(type, i, i+1);
 		coalesced++;
 	}
 
 	if (coalesced)
 		return coalesced;
-	if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
+	if (type->cnt >= MAX_MEMBLOCK_REGIONS)
 		return -1;
 
 	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
-	for (i = rgn->cnt - 1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i+1].base = rgn->region[i].base;
-			rgn->region[i+1].size = rgn->region[i].size;
+	for (i = type->cnt - 1; i >= 0; i--) {
+		if (base < type->regions[i].base) {
+			type->regions[i+1].base = type->regions[i].base;
+			type->regions[i+1].size = type->regions[i].size;
 		} else {
-			rgn->region[i+1].base = base;
-			rgn->region[i+1].size = size;
+			type->regions[i+1].base = base;
+			type->regions[i+1].size = size;
 			break;
 		}
 	}
 
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
+	if (base < type->regions[0].base) {
+		type->regions[0].base = base;
+		type->regions[0].size = size;
 	}
-	rgn->cnt++;
+	type->cnt++;
 
 	return 0;
 }
 
 long memblock_add(u64 base, u64 size)
 {
-	struct memblock_region *_rgn = &memblock.memory;
-
 	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
 	if (base == 0)
 		memblock.rmo_size = size;
 
-	return memblock_add_region(_rgn, base, size);
+	return memblock_add_region(&memblock.memory, base, size);
 
 }
 
-static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
+static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
 {
 	u64 rgnbegin, rgnend;
 	u64 end = base + size;
@@ -214,34 +212,34 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
 
 	/* Find the region where (base, size) belongs to */
-	for (i=0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size;
+	for (i=0; i < type->cnt; i++) {
+		rgnbegin = type->regions[i].base;
+		rgnend = rgnbegin + type->regions[i].size;
 
 		if ((rgnbegin <= base) && (end <= rgnend))
 			break;
 	}
 
 	/* Didn't find the region */
-	if (i == rgn->cnt)
+	if (i == type->cnt)
 		return -1;
 
 	/* Check to see if we are removing entire region */
 	if ((rgnbegin == base) && (rgnend == end)) {
-		memblock_remove_region(rgn, i);
+		memblock_remove_region(type, i);
 		return 0;
 	}
 
 	/* Check to see if region is matching at the front */
 	if (rgnbegin == base) {
-		rgn->region[i].base = end;
-		rgn->region[i].size -= size;
+		type->regions[i].base = end;
+		type->regions[i].size -= size;
 		return 0;
 	}
 
 	/* Check to see if the region is matching at the end */
 	if (rgnend == end) {
-		rgn->region[i].size -= size;
+		type->regions[i].size -= size;
 		return 0;
 	}
 
@@ -249,8 +247,8 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
 	 * We need to split the entry - adjust the current one to the
 	 * beginging of the hole and add the region after hole.
 	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return memblock_add_region(rgn, end, rgnend - end);
+	type->regions[i].size = base - type->regions[i].base;
+	return memblock_add_region(type, end, rgnend - end);
 }
 
 long memblock_remove(u64 base, u64 size)
@@ -265,25 +263,25 @@ long __init memblock_free(u64 base, u64 size)
 
 long __init memblock_reserve(u64 base, u64 size)
 {
-	struct memblock_region *_rgn = &memblock.reserved;
+	struct memblock_type *_rgn = &memblock.reserved;
 
 	BUG_ON(0 == size);
 
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
+long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
 {
 	unsigned long i;
 
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
+	for (i = 0; i < type->cnt; i++) {
+		u64 rgnbase = type->regions[i].base;
+		u64 rgnsize = type->regions[i].size;
 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
 
-	return (i < rgn->cnt) ? i : -1;
+	return (i < type->cnt) ? i : -1;
 }
 
 static u64 memblock_align_down(u64 addr, u64 size)
@@ -311,7 +309,7 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 			base = ~(u64)0;
 			return base;
 		}
-		res_base = memblock.reserved.region[j].base;
+		res_base = memblock.reserved.regions[j].base;
 		if (res_base < size)
 			break;
 		base = memblock_align_down(res_base - size, align);
@@ -320,7 +318,7 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 	return ~(u64)0;
 }
 
-static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
+static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 				       u64 (*nid_range)(u64, u64, int *),
 				       u64 size, u64 align, int nid)
 {
@@ -350,7 +348,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
 u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
 			 u64 (*nid_range)(u64 start, u64 end, int *nid))
 {
-	struct memblock_region *mem = &memblock.memory;
+	struct memblock_type *mem = &memblock.memory;
 	int i;
 
 	BUG_ON(0 == size);
@@ -358,7 +356,7 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = memblock_alloc_nid_region(&mem->region[i],
+		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
 					       nid_range,
 					       size, align, nid);
 		if (ret != ~(u64)0)
@@ -402,8 +400,8 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 		max_addr = MEMBLOCK_REAL_LIMIT;
 
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		u64 memblockbase = memblock.memory.region[i].base;
-		u64 memblocksize = memblock.memory.region[i].size;
+		u64 memblockbase = memblock.memory.regions[i].base;
+		u64 memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
@@ -423,7 +421,7 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 				return 0;
 			return base;
 		}
-		res_base = memblock.reserved.region[j].base;
+		res_base = memblock.reserved.regions[j].base;
 		if (res_base < size)
 			break;
 		base = memblock_align_down(res_base - size, align);
@@ -442,7 +440,7 @@ u64 memblock_end_of_DRAM(void)
 {
 	int idx = memblock.memory.cnt - 1;
 
-	return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
+	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
 
 /* You must call memblock_analyze() after this. */
@@ -450,7 +448,7 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 {
 	unsigned long i;
 	u64 limit;
-	struct memblock_property *p;
+	struct memblock_region *p;
 
 	if (!memory_limit)
 		return;
@@ -458,24 +456,24 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	/* Truncate the memblock regions to satisfy the memory limit. */
 	limit = memory_limit;
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		if (limit > memblock.memory.region[i].size) {
-			limit -= memblock.memory.region[i].size;
+		if (limit > memblock.memory.regions[i].size) {
+			limit -= memblock.memory.regions[i].size;
 			continue;
 		}
 
-		memblock.memory.region[i].size = limit;
+		memblock.memory.regions[i].size = limit;
 		memblock.memory.cnt = i + 1;
 		break;
 	}
 
-	if (memblock.memory.region[0].size < memblock.rmo_size)
-		memblock.rmo_size = memblock.memory.region[0].size;
+	if (memblock.memory.regions[0].size < memblock.rmo_size)
+		memblock.rmo_size = memblock.memory.regions[0].size;
 
 	memory_limit = memblock_end_of_DRAM();
 
 	/* And truncate any reserves above the limit also. */
 	for (i = 0; i < memblock.reserved.cnt; i++) {
-		p = &memblock.reserved.region[i];
+		p = &memblock.reserved.regions[i];
 
 		if (p->base > memory_limit)
 			p->size = 0;
@@ -494,9 +492,9 @@ int __init memblock_is_reserved(u64 addr)
 	int i;
 
 	for (i = 0; i < memblock.reserved.cnt; i++) {
-		u64 upper = memblock.reserved.region[i].base +
-			memblock.reserved.region[i].size - 1;
-		if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
+		u64 upper = memblock.reserved.regions[i].base +
+			memblock.reserved.regions[i].size - 1;
+		if ((addr >= memblock.reserved.regions[i].base) && (addr <= upper))
 			return 1;
 	}
 	return 0;
@@ -511,7 +509,7 @@ int memblock_is_region_reserved(u64 base, u64 size)
  * Given a <base, len>, find which memory regions belong to this range.
  * Adjust the request and return a contiguous chunk.
  */
-int memblock_find(struct memblock_property *res)
+int memblock_find(struct memblock_region *res)
 {
 	int i;
 	u64 rstart, rend;
@@ -520,8 +518,8 @@ int memblock_find(struct memblock_property *res)
 	rend = rstart + res->size - 1;
 
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		u64 start = memblock.memory.region[i].base;
-		u64 end = start + memblock.memory.region[i].size - 1;
+		u64 start = memblock.memory.regions[i].base;
+		u64 end = start + memblock.memory.regions[i].size - 1;
 
 		if (start > rend)
 			return -1;
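
[Editor's note: in short, every helper that used to take a struct memblock_region * table now takes a struct memblock_type * and indexes type->regions[i]. A caller written against the new names would look like the fragment below, an illustrative sketch modeled on memblock_analyze() above, not part of the patch.]

/* Illustrative only: sum all memory regions using the post-rename names. */
static u64 total_memory_bytes(void)
{
	struct memblock_type *type = &memblock.memory;	/* was: struct memblock_region * */
	u64 total = 0;
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		total += type->regions[i].size;		/* was: rgn->region[i].size */

	return total;
}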