author      Thomas Gleixner <tglx@linutronix.de>      2008-01-30 07:30:12 -0500
committer   Ingo Molnar <mingo@elte.hu>               2008-01-30 07:30:12 -0500
commit      2f36fa13ce49ffd000249feaedfcbefbcc83a72f (patch)
tree        505162da5b3a9a2b068f46689528b09d3661b1e3 /arch
parent      05fccb0e3840248324a96b320562210410be73dc (diff)
x86: clean up arch/x86/kernel/e820_64.c
White space and coding style cleanup.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/e820_64.c   305
1 file changed, 177 insertions, 128 deletions
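
For orientation, here is a minimal sketch (not part of the commit) of the conventions the cleanup applies in the hunks below: spaces around operators in loop headers, KERN_CONT on printk() continuation lines, a blank line after local declarations, and argument lists broken so lines stay within 80 columns. The helper name is hypothetical, and the snippet assumes the declarations already available in e820_64.c (e820, struct e820entry, E820_RAM).

/*
 * Hypothetical helper, for illustration only: prints the usable RAM
 * ranges from the e820 map in the style the patch enforces -- spaces
 * around operators, a blank line after declarations, KERN_CONT for
 * continuation output, and wrapped printk() arguments.
 */
static void __init e820_print_usable(void)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (ei->type != E820_RAM)
			continue;
		printk(KERN_INFO " usable: %016Lx - %016Lx",
		       (unsigned long long) ei->addr,
		       (unsigned long long) (ei->addr + ei->size));
		printk(KERN_CONT "\n");
	}
}

The same patterns recur throughout the patch, for example in e820_print_map() and sanitize_e820_map().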
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 04698e0b056c..d41cd2f01733 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Handle the memory map. | 2 | * Handle the memory map. |
3 | * The functions here do the job until bootmem takes over. | 3 | * The functions here do the job until bootmem takes over. |
4 | * | 4 | * |
@@ -29,44 +29,44 @@ | |||
29 | 29 | ||
30 | struct e820map e820; | 30 | struct e820map e820; |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * PFN of last memory page. | 33 | * PFN of last memory page. |
34 | */ | 34 | */ |
35 | unsigned long end_pfn; | 35 | unsigned long end_pfn; |
36 | EXPORT_SYMBOL(end_pfn); | 36 | EXPORT_SYMBOL(end_pfn); |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * end_pfn only includes RAM, while end_pfn_map includes all e820 entries. | 39 | * end_pfn only includes RAM, while end_pfn_map includes all e820 entries. |
40 | * The direct mapping extends to end_pfn_map, so that we can directly access | 40 | * The direct mapping extends to end_pfn_map, so that we can directly access |
41 | * apertures, ACPI and other tables without having to play with fixmaps. | 41 | * apertures, ACPI and other tables without having to play with fixmaps. |
42 | */ | 42 | */ |
43 | unsigned long end_pfn_map; | 43 | unsigned long end_pfn_map; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * Last pfn which the user wants to use. | 46 | * Last pfn which the user wants to use. |
47 | */ | 47 | */ |
48 | static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; | 48 | static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; |
49 | 49 | ||
50 | extern struct resource code_resource, data_resource, bss_resource; | 50 | extern struct resource code_resource, data_resource, bss_resource; |
51 | 51 | ||
52 | /* Check for some hardcoded bad areas that early boot is not allowed to touch */ | 52 | /* Check for some hardcoded bad areas that early boot is not allowed to touch */ |
53 | static inline int bad_addr(unsigned long *addrp, unsigned long size) | 53 | static inline int bad_addr(unsigned long *addrp, unsigned long size) |
54 | { | 54 | { |
55 | unsigned long addr = *addrp, last = addr + size; | 55 | unsigned long addr = *addrp, last = addr + size; |
56 | 56 | ||
57 | /* various gunk below that needed for SMP startup */ | 57 | /* various gunk below that needed for SMP startup */ |
58 | if (addr < 0x8000) { | 58 | if (addr < 0x8000) { |
59 | *addrp = PAGE_ALIGN(0x8000); | 59 | *addrp = PAGE_ALIGN(0x8000); |
60 | return 1; | 60 | return 1; |
61 | } | 61 | } |
62 | 62 | ||
63 | /* direct mapping tables of the kernel */ | 63 | /* direct mapping tables of the kernel */ |
64 | if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { | 64 | if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { |
65 | *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT); | 65 | *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT); |
66 | return 1; | 66 | return 1; |
67 | } | 67 | } |
68 | 68 | ||
69 | /* initrd */ | 69 | /* initrd */ |
70 | #ifdef CONFIG_BLK_DEV_INITRD | 70 | #ifdef CONFIG_BLK_DEV_INITRD |
71 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | 71 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { |
72 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | 72 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; |
@@ -77,7 +77,7 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size) | |||
77 | *addrp = PAGE_ALIGN(ramdisk_end); | 77 | *addrp = PAGE_ALIGN(ramdisk_end); |
78 | return 1; | 78 | return 1; |
79 | } | 79 | } |
80 | } | 80 | } |
81 | #endif | 81 | #endif |
82 | /* kernel code */ | 82 | /* kernel code */ |
83 | if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) { | 83 | if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) { |
@@ -97,9 +97,9 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size) | |||
97 | return 1; | 97 | return 1; |
98 | } | 98 | } |
99 | #endif | 99 | #endif |
100 | /* XXX ramdisk image here? */ | 100 | /* XXX ramdisk image here? */ |
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * This function checks if any part of the range <start,end> is mapped | 105 | * This function checks if any part of the range <start,end> is mapped |
@@ -107,16 +107,18 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size) | |||
107 | */ | 107 | */ |
108 | int | 108 | int |
109 | e820_any_mapped(unsigned long start, unsigned long end, unsigned type) | 109 | e820_any_mapped(unsigned long start, unsigned long end, unsigned type) |
110 | { | 110 | { |
111 | int i; | 111 | int i; |
112 | for (i = 0; i < e820.nr_map; i++) { | 112 | |
113 | struct e820entry *ei = &e820.map[i]; | 113 | for (i = 0; i < e820.nr_map; i++) { |
114 | if (type && ei->type != type) | 114 | struct e820entry *ei = &e820.map[i]; |
115 | |||
116 | if (type && ei->type != type) | ||
115 | continue; | 117 | continue; |
116 | if (ei->addr >= end || ei->addr + ei->size <= start) | 118 | if (ei->addr >= end || ei->addr + ei->size <= start) |
117 | continue; | 119 | continue; |
118 | return 1; | 120 | return 1; |
119 | } | 121 | } |
120 | return 0; | 122 | return 0; |
121 | } | 123 | } |
122 | EXPORT_SYMBOL_GPL(e820_any_mapped); | 124 | EXPORT_SYMBOL_GPL(e820_any_mapped); |
@@ -127,11 +129,14 @@ EXPORT_SYMBOL_GPL(e820_any_mapped); | |||
127 | * Note: this function only works correct if the e820 table is sorted and | 129 | * Note: this function only works correct if the e820 table is sorted and |
128 | * not-overlapping, which is the case | 130 | * not-overlapping, which is the case |
129 | */ | 131 | */ |
130 | int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type) | 132 | int __init e820_all_mapped(unsigned long start, unsigned long end, |
133 | unsigned type) | ||
131 | { | 134 | { |
132 | int i; | 135 | int i; |
136 | |||
133 | for (i = 0; i < e820.nr_map; i++) { | 137 | for (i = 0; i < e820.nr_map; i++) { |
134 | struct e820entry *ei = &e820.map[i]; | 138 | struct e820entry *ei = &e820.map[i]; |
139 | |||
135 | if (type && ei->type != type) | 140 | if (type && ei->type != type) |
136 | continue; | 141 | continue; |
137 | /* is the region (part) in overlap with the current region ?*/ | 142 | /* is the region (part) in overlap with the current region ?*/ |
@@ -143,59 +148,66 @@ int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type | |||
143 | */ | 148 | */ |
144 | if (ei->addr <= start) | 149 | if (ei->addr <= start) |
145 | start = ei->addr + ei->size; | 150 | start = ei->addr + ei->size; |
146 | /* if start is now at or beyond end, we're done, full coverage */ | 151 | /* |
152 | * if start is now at or beyond end, we're done, full | ||
153 | * coverage | ||
154 | */ | ||
147 | if (start >= end) | 155 | if (start >= end) |
148 | return 1; /* we're done */ | 156 | return 1; |
149 | } | 157 | } |
150 | return 0; | 158 | return 0; |
151 | } | 159 | } |
152 | 160 | ||
153 | /* | 161 | /* |
154 | * Find a free area in a specific range. | 162 | * Find a free area in a specific range. |
155 | */ | 163 | */ |
156 | unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) | 164 | unsigned long __init find_e820_area(unsigned long start, unsigned long end, |
157 | { | 165 | unsigned size) |
158 | int i; | 166 | { |
159 | for (i = 0; i < e820.nr_map; i++) { | 167 | int i; |
160 | struct e820entry *ei = &e820.map[i]; | 168 | |
161 | unsigned long addr = ei->addr, last; | 169 | for (i = 0; i < e820.nr_map; i++) { |
162 | if (ei->type != E820_RAM) | 170 | struct e820entry *ei = &e820.map[i]; |
163 | continue; | 171 | unsigned long addr = ei->addr, last; |
164 | if (addr < start) | 172 | |
173 | if (ei->type != E820_RAM) | ||
174 | continue; | ||
175 | if (addr < start) | ||
165 | addr = start; | 176 | addr = start; |
166 | if (addr > ei->addr + ei->size) | 177 | if (addr > ei->addr + ei->size) |
167 | continue; | 178 | continue; |
168 | while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) | 179 | while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) |
169 | ; | 180 | ; |
170 | last = PAGE_ALIGN(addr) + size; | 181 | last = PAGE_ALIGN(addr) + size; |
171 | if (last > ei->addr + ei->size) | 182 | if (last > ei->addr + ei->size) |
172 | continue; | 183 | continue; |
173 | if (last > end) | 184 | if (last > end) |
174 | continue; | 185 | continue; |
175 | return addr; | 186 | return addr; |
176 | } | 187 | } |
177 | return -1UL; | 188 | return -1UL; |
178 | } | 189 | } |
179 | 190 | ||
180 | /* | 191 | /* |
181 | * Find the highest page frame number we have available | 192 | * Find the highest page frame number we have available |
182 | */ | 193 | */ |
183 | unsigned long __init e820_end_of_ram(void) | 194 | unsigned long __init e820_end_of_ram(void) |
184 | { | 195 | { |
185 | unsigned long end_pfn = 0; | 196 | unsigned long end_pfn; |
197 | |||
186 | end_pfn = find_max_pfn_with_active_regions(); | 198 | end_pfn = find_max_pfn_with_active_regions(); |
187 | 199 | ||
188 | if (end_pfn > end_pfn_map) | 200 | if (end_pfn > end_pfn_map) |
189 | end_pfn_map = end_pfn; | 201 | end_pfn_map = end_pfn; |
190 | if (end_pfn_map > MAXMEM>>PAGE_SHIFT) | 202 | if (end_pfn_map > MAXMEM>>PAGE_SHIFT) |
191 | end_pfn_map = MAXMEM>>PAGE_SHIFT; | 203 | end_pfn_map = MAXMEM>>PAGE_SHIFT; |
192 | if (end_pfn > end_user_pfn) | 204 | if (end_pfn > end_user_pfn) |
193 | end_pfn = end_user_pfn; | 205 | end_pfn = end_user_pfn; |
194 | if (end_pfn > end_pfn_map) | 206 | if (end_pfn > end_pfn_map) |
195 | end_pfn = end_pfn_map; | 207 | end_pfn = end_pfn_map; |
196 | 208 | ||
197 | printk("end_pfn_map = %lu\n", end_pfn_map); | 209 | printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map); |
198 | return end_pfn; | 210 | return end_pfn; |
199 | } | 211 | } |
200 | 212 | ||
201 | /* | 213 | /* |
@@ -219,9 +231,9 @@ void __init e820_reserve_resources(void) | |||
219 | request_resource(&iomem_resource, res); | 231 | request_resource(&iomem_resource, res); |
220 | if (e820.map[i].type == E820_RAM) { | 232 | if (e820.map[i].type == E820_RAM) { |
221 | /* | 233 | /* |
222 | * We don't know which RAM region contains kernel data, | 234 | * We don't know which RAM region contains kernel data, |
223 | * so we try it repeatedly and let the resource manager | 235 | * so we try it repeatedly and let the resource manager |
224 | * test it. | 236 | * test it. |
225 | */ | 237 | */ |
226 | request_resource(res, &code_resource); | 238 | request_resource(res, &code_resource); |
227 | request_resource(res, &data_resource); | 239 | request_resource(res, &data_resource); |
@@ -322,9 +334,9 @@ e820_register_active_regions(int nid, unsigned long start_pfn, | |||
322 | add_active_range(nid, ei_startpfn, ei_endpfn); | 334 | add_active_range(nid, ei_startpfn, ei_endpfn); |
323 | } | 335 | } |
324 | 336 | ||
325 | /* | 337 | /* |
326 | * Add a memory region to the kernel e820 map. | 338 | * Add a memory region to the kernel e820 map. |
327 | */ | 339 | */ |
328 | void __init add_memory_region(unsigned long start, unsigned long size, int type) | 340 | void __init add_memory_region(unsigned long start, unsigned long size, int type) |
329 | { | 341 | { |
330 | int x = e820.nr_map; | 342 | int x = e820.nr_map; |
@@ -349,9 +361,7 @@ unsigned long __init e820_hole_size(unsigned long start, unsigned long end) | |||
349 | { | 361 | { |
350 | unsigned long start_pfn = start >> PAGE_SHIFT; | 362 | unsigned long start_pfn = start >> PAGE_SHIFT; |
351 | unsigned long end_pfn = end >> PAGE_SHIFT; | 363 | unsigned long end_pfn = end >> PAGE_SHIFT; |
352 | unsigned long ei_startpfn; | 364 | unsigned long ei_startpfn, ei_endpfn, ram = 0; |
353 | unsigned long ei_endpfn; | ||
354 | unsigned long ram = 0; | ||
355 | int i; | 365 | int i; |
356 | 366 | ||
357 | for (i = 0; i < e820.nr_map; i++) { | 367 | for (i = 0; i < e820.nr_map; i++) { |
@@ -369,22 +379,25 @@ void __init e820_print_map(char *who) | |||
369 | 379 | ||
370 | for (i = 0; i < e820.nr_map; i++) { | 380 | for (i = 0; i < e820.nr_map; i++) { |
371 | printk(KERN_INFO " %s: %016Lx - %016Lx ", who, | 381 | printk(KERN_INFO " %s: %016Lx - %016Lx ", who, |
372 | (unsigned long long) e820.map[i].addr, | 382 | (unsigned long long) e820.map[i].addr, |
373 | (unsigned long long) (e820.map[i].addr + e820.map[i].size)); | 383 | (unsigned long long) |
384 | (e820.map[i].addr + e820.map[i].size)); | ||
374 | switch (e820.map[i].type) { | 385 | switch (e820.map[i].type) { |
375 | case E820_RAM: printk("(usable)\n"); | 386 | case E820_RAM: |
376 | break; | 387 | printk(KERN_CONT "(usable)\n"); |
388 | break; | ||
377 | case E820_RESERVED: | 389 | case E820_RESERVED: |
378 | printk("(reserved)\n"); | 390 | printk(KERN_CONT "(reserved)\n"); |
379 | break; | 391 | break; |
380 | case E820_ACPI: | 392 | case E820_ACPI: |
381 | printk("(ACPI data)\n"); | 393 | printk(KERN_CONT "(ACPI data)\n"); |
382 | break; | 394 | break; |
383 | case E820_NVS: | 395 | case E820_NVS: |
384 | printk("(ACPI NVS)\n"); | 396 | printk(KERN_CONT "(ACPI NVS)\n"); |
385 | break; | 397 | break; |
386 | default: printk("type %u\n", e820.map[i].type); | 398 | default: |
387 | break; | 399 | printk(KERN_CONT "type %u\n", e820.map[i].type); |
400 | break; | ||
388 | } | 401 | } |
389 | } | 402 | } |
390 | } | 403 | } |
@@ -392,11 +405,11 @@ void __init e820_print_map(char *who) | |||
392 | /* | 405 | /* |
393 | * Sanitize the BIOS e820 map. | 406 | * Sanitize the BIOS e820 map. |
394 | * | 407 | * |
395 | * Some e820 responses include overlapping entries. The following | 408 | * Some e820 responses include overlapping entries. The following |
396 | * replaces the original e820 map with a new one, removing overlaps. | 409 | * replaces the original e820 map with a new one, removing overlaps. |
397 | * | 410 | * |
398 | */ | 411 | */ |
399 | static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | 412 | static int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map) |
400 | { | 413 | { |
401 | struct change_member { | 414 | struct change_member { |
402 | struct e820entry *pbios; /* pointer to original bios entry */ | 415 | struct e820entry *pbios; /* pointer to original bios entry */ |
@@ -416,7 +429,8 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
416 | int i; | 429 | int i; |
417 | 430 | ||
418 | /* | 431 | /* |
419 | Visually we're performing the following (1,2,3,4 = memory types)... | 432 | Visually we're performing the following |
433 | (1,2,3,4 = memory types)... | ||
420 | 434 | ||
421 | Sample memory map (w/overlaps): | 435 | Sample memory map (w/overlaps): |
422 | ____22__________________ | 436 | ____22__________________ |
@@ -458,22 +472,23 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
458 | old_nr = *pnr_map; | 472 | old_nr = *pnr_map; |
459 | 473 | ||
460 | /* bail out if we find any unreasonable addresses in bios map */ | 474 | /* bail out if we find any unreasonable addresses in bios map */ |
461 | for (i=0; i<old_nr; i++) | 475 | for (i = 0; i < old_nr; i++) |
462 | if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) | 476 | if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) |
463 | return -1; | 477 | return -1; |
464 | 478 | ||
465 | /* create pointers for initial change-point information (for sorting) */ | 479 | /* create pointers for initial change-point information (for sorting) */ |
466 | for (i=0; i < 2*old_nr; i++) | 480 | for (i = 0; i < 2 * old_nr; i++) |
467 | change_point[i] = &change_point_list[i]; | 481 | change_point[i] = &change_point_list[i]; |
468 | 482 | ||
469 | /* record all known change-points (starting and ending addresses), | 483 | /* record all known change-points (starting and ending addresses), |
470 | omitting those that are for empty memory regions */ | 484 | omitting those that are for empty memory regions */ |
471 | chgidx = 0; | 485 | chgidx = 0; |
472 | for (i=0; i < old_nr; i++) { | 486 | for (i = 0; i < old_nr; i++) { |
473 | if (biosmap[i].size != 0) { | 487 | if (biosmap[i].size != 0) { |
474 | change_point[chgidx]->addr = biosmap[i].addr; | 488 | change_point[chgidx]->addr = biosmap[i].addr; |
475 | change_point[chgidx++]->pbios = &biosmap[i]; | 489 | change_point[chgidx++]->pbios = &biosmap[i]; |
476 | change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; | 490 | change_point[chgidx]->addr = biosmap[i].addr + |
491 | biosmap[i].size; | ||
477 | change_point[chgidx++]->pbios = &biosmap[i]; | 492 | change_point[chgidx++]->pbios = &biosmap[i]; |
478 | } | 493 | } |
479 | } | 494 | } |
@@ -483,75 +498,106 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
483 | still_changing = 1; | 498 | still_changing = 1; |
484 | while (still_changing) { | 499 | while (still_changing) { |
485 | still_changing = 0; | 500 | still_changing = 0; |
486 | for (i=1; i < chg_nr; i++) { | 501 | for (i = 1; i < chg_nr; i++) { |
487 | /* if <current_addr> > <last_addr>, swap */ | 502 | unsigned long long curaddr, lastaddr; |
488 | /* or, if current=<start_addr> & last=<end_addr>, swap */ | 503 | unsigned long long curpbaddr, lastpbaddr; |
489 | if ((change_point[i]->addr < change_point[i-1]->addr) || | 504 | |
490 | ((change_point[i]->addr == change_point[i-1]->addr) && | 505 | curaddr = change_point[i]->addr; |
491 | (change_point[i]->addr == change_point[i]->pbios->addr) && | 506 | lastaddr = change_point[i - 1]->addr; |
492 | (change_point[i-1]->addr != change_point[i-1]->pbios->addr)) | 507 | curpbaddr = change_point[i]->pbios->addr; |
493 | ) | 508 | lastpbaddr = change_point[i - 1]->pbios->addr; |
494 | { | 509 | |
510 | /* | ||
511 | * swap entries, when: | ||
512 | * | ||
513 | * curaddr > lastaddr or | ||
514 | * curaddr == lastaddr and curaddr == curpbaddr and | ||
515 | * lastaddr != lastpbaddr | ||
516 | */ | ||
517 | if (curaddr < lastaddr || | ||
518 | (curaddr == lastaddr && curaddr == curpbaddr && | ||
519 | lastaddr != lastpbaddr)) { | ||
495 | change_tmp = change_point[i]; | 520 | change_tmp = change_point[i]; |
496 | change_point[i] = change_point[i-1]; | 521 | change_point[i] = change_point[i-1]; |
497 | change_point[i-1] = change_tmp; | 522 | change_point[i-1] = change_tmp; |
498 | still_changing=1; | 523 | still_changing = 1; |
499 | } | 524 | } |
500 | } | 525 | } |
501 | } | 526 | } |
502 | 527 | ||
503 | /* create a new bios memory map, removing overlaps */ | 528 | /* create a new bios memory map, removing overlaps */ |
504 | overlap_entries=0; /* number of entries in the overlap table */ | 529 | overlap_entries = 0; /* number of entries in the overlap table */ |
505 | new_bios_entry=0; /* index for creating new bios map entries */ | 530 | new_bios_entry = 0; /* index for creating new bios map entries */ |
506 | last_type = 0; /* start with undefined memory type */ | 531 | last_type = 0; /* start with undefined memory type */ |
507 | last_addr = 0; /* start with 0 as last starting address */ | 532 | last_addr = 0; /* start with 0 as last starting address */ |
533 | |||
508 | /* loop through change-points, determining affect on the new bios map */ | 534 | /* loop through change-points, determining affect on the new bios map */ |
509 | for (chgidx=0; chgidx < chg_nr; chgidx++) | 535 | for (chgidx = 0; chgidx < chg_nr; chgidx++) { |
510 | { | ||
511 | /* keep track of all overlapping bios entries */ | 536 | /* keep track of all overlapping bios entries */ |
512 | if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) | 537 | if (change_point[chgidx]->addr == |
513 | { | 538 | change_point[chgidx]->pbios->addr) { |
514 | /* add map entry to overlap list (> 1 entry implies an overlap) */ | 539 | /* |
515 | overlap_list[overlap_entries++]=change_point[chgidx]->pbios; | 540 | * add map entry to overlap list (> 1 entry |
516 | } | 541 | * implies an overlap) |
517 | else | 542 | */ |
518 | { | 543 | overlap_list[overlap_entries++] = |
519 | /* remove entry from list (order independent, so swap with last) */ | 544 | change_point[chgidx]->pbios; |
520 | for (i=0; i<overlap_entries; i++) | 545 | } else { |
521 | { | 546 | /* |
522 | if (overlap_list[i] == change_point[chgidx]->pbios) | 547 | * remove entry from list (order independent, |
523 | overlap_list[i] = overlap_list[overlap_entries-1]; | 548 | * so swap with last) |
549 | */ | ||
550 | for (i = 0; i < overlap_entries; i++) { | ||
551 | if (overlap_list[i] == | ||
552 | change_point[chgidx]->pbios) | ||
553 | overlap_list[i] = | ||
554 | overlap_list[overlap_entries-1]; | ||
524 | } | 555 | } |
525 | overlap_entries--; | 556 | overlap_entries--; |
526 | } | 557 | } |
527 | /* if there are overlapping entries, decide which "type" to use */ | 558 | /* |
528 | /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */ | 559 | * if there are overlapping entries, decide which |
560 | * "type" to use (larger value takes precedence -- | ||
561 | * 1=usable, 2,3,4,4+=unusable) | ||
562 | */ | ||
529 | current_type = 0; | 563 | current_type = 0; |
530 | for (i=0; i<overlap_entries; i++) | 564 | for (i = 0; i < overlap_entries; i++) |
531 | if (overlap_list[i]->type > current_type) | 565 | if (overlap_list[i]->type > current_type) |
532 | current_type = overlap_list[i]->type; | 566 | current_type = overlap_list[i]->type; |
533 | /* continue building up new bios map based on this information */ | 567 | /* |
568 | * continue building up new bios map based on this | ||
569 | * information | ||
570 | */ | ||
534 | if (current_type != last_type) { | 571 | if (current_type != last_type) { |
535 | if (last_type != 0) { | 572 | if (last_type != 0) { |
536 | new_bios[new_bios_entry].size = | 573 | new_bios[new_bios_entry].size = |
537 | change_point[chgidx]->addr - last_addr; | 574 | change_point[chgidx]->addr - last_addr; |
538 | /* move forward only if the new size was non-zero */ | 575 | /* |
576 | * move forward only if the new size | ||
577 | * was non-zero | ||
578 | */ | ||
539 | if (new_bios[new_bios_entry].size != 0) | 579 | if (new_bios[new_bios_entry].size != 0) |
580 | /* | ||
581 | * no more space left for new | ||
582 | * bios entries ? | ||
583 | */ | ||
540 | if (++new_bios_entry >= E820MAX) | 584 | if (++new_bios_entry >= E820MAX) |
541 | break; /* no more space left for new bios entries */ | 585 | break; |
542 | } | 586 | } |
543 | if (current_type != 0) { | 587 | if (current_type != 0) { |
544 | new_bios[new_bios_entry].addr = change_point[chgidx]->addr; | 588 | new_bios[new_bios_entry].addr = |
589 | change_point[chgidx]->addr; | ||
545 | new_bios[new_bios_entry].type = current_type; | 590 | new_bios[new_bios_entry].type = current_type; |
546 | last_addr=change_point[chgidx]->addr; | 591 | last_addr = change_point[chgidx]->addr; |
547 | } | 592 | } |
548 | last_type = current_type; | 593 | last_type = current_type; |
549 | } | 594 | } |
550 | } | 595 | } |
551 | new_nr = new_bios_entry; /* retain count for new bios entries */ | 596 | /* retain count for new bios entries */ |
597 | new_nr = new_bios_entry; | ||
552 | 598 | ||
553 | /* copy new bios mapping into original location */ | 599 | /* copy new bios mapping into original location */ |
554 | memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry)); | 600 | memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry)); |
555 | *pnr_map = new_nr; | 601 | *pnr_map = new_nr; |
556 | 602 | ||
557 | return 0; | 603 | return 0; |
@@ -566,7 +612,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
566 | * will have given us a memory map that we can use to properly | 612 | * will have given us a memory map that we can use to properly |
567 | * set up memory. If we aren't, we'll fake a memory map. | 613 | * set up memory. If we aren't, we'll fake a memory map. |
568 | */ | 614 | */ |
569 | static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) | 615 | static int __init copy_e820_map(struct e820entry *biosmap, int nr_map) |
570 | { | 616 | { |
571 | /* Only one memory region (or negative)? Ignore it */ | 617 | /* Only one memory region (or negative)? Ignore it */ |
572 | if (nr_map < 2) | 618 | if (nr_map < 2) |
@@ -583,7 +629,7 @@ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) | |||
583 | return -1; | 629 | return -1; |
584 | 630 | ||
585 | add_memory_region(start, size, type); | 631 | add_memory_region(start, size, type); |
586 | } while (biosmap++,--nr_map); | 632 | } while (biosmap++, --nr_map); |
587 | return 0; | 633 | return 0; |
588 | } | 634 | } |
589 | 635 | ||
@@ -613,9 +659,9 @@ static int __init parse_memopt(char *p) | |||
613 | if (!p) | 659 | if (!p) |
614 | return -EINVAL; | 660 | return -EINVAL; |
615 | end_user_pfn = memparse(p, &p); | 661 | end_user_pfn = memparse(p, &p); |
616 | end_user_pfn >>= PAGE_SHIFT; | 662 | end_user_pfn >>= PAGE_SHIFT; |
617 | return 0; | 663 | return 0; |
618 | } | 664 | } |
619 | early_param("mem", parse_memopt); | 665 | early_param("mem", parse_memopt); |
620 | 666 | ||
621 | static int userdef __initdata; | 667 | static int userdef __initdata; |
@@ -627,9 +673,9 @@ static int __init parse_memmap_opt(char *p) | |||
627 | 673 | ||
628 | if (!strcmp(p, "exactmap")) { | 674 | if (!strcmp(p, "exactmap")) { |
629 | #ifdef CONFIG_CRASH_DUMP | 675 | #ifdef CONFIG_CRASH_DUMP |
630 | /* If we are doing a crash dump, we | 676 | /* |
631 | * still need to know the real mem | 677 | * If we are doing a crash dump, we still need to know |
632 | * size before original memory map is | 678 | * the real mem size before original memory map is |
633 | * reset. | 679 | * reset. |
634 | */ | 680 | */ |
635 | e820_register_active_regions(0, 0, -1UL); | 681 | e820_register_active_regions(0, 0, -1UL); |
@@ -713,8 +759,10 @@ __init void e820_setup_gap(void) | |||
713 | 759 | ||
714 | if (!found) { | 760 | if (!found) { |
715 | gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024; | 761 | gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024; |
716 | printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n" | 762 | printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit " |
717 | KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n"); | 763 | "address range\n" |
764 | KERN_ERR "PCI: Unassigned devices with 32bit resource " | ||
765 | "registers may break!\n"); | ||
718 | } | 766 | } |
719 | 767 | ||
720 | /* | 768 | /* |
@@ -727,8 +775,9 @@ __init void e820_setup_gap(void) | |||
727 | /* Fun with two's complement */ | 775 | /* Fun with two's complement */ |
728 | pci_mem_start = (gapstart + round) & -round; | 776 | pci_mem_start = (gapstart + round) & -round; |
729 | 777 | ||
730 | printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", | 778 | printk(KERN_INFO |
731 | pci_mem_start, gapstart, gapsize); | 779 | "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", |
780 | pci_mem_start, gapstart, gapsize); | ||
732 | } | 781 | } |
733 | 782 | ||
734 | int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) | 783 | int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) |