Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/e820_64.c |  70
-rw-r--r--  arch/x86/mm/init_64.c     | 106
2 files changed, 174 insertions, 2 deletions
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 4a0953857cb..4509757844e 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -114,6 +114,40 @@ again:
 	return changed;
 }
 
+/* Check for already reserved areas */
+static inline int
+bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align)
+{
+	int i;
+	unsigned long addr = *addrp, last;
+	unsigned long size = *sizep;
+	int changed = 0;
+again:
+	last = addr + size;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		if (last > r->start && addr < r->start) {
+			size = r->start - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last > r->end && addr < r->end) {
+			addr = round_up(r->end, align);
+			size = last - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last <= r->end && addr >= r->start) {
+			(*sizep)++;
+			return 0;
+		}
+	}
+	if (changed) {
+		*addrp = addr;
+		*sizep = size;
+	}
+	return changed;
+}
 /*
  * This function checks if any part of the range <start,end> is mapped
  * with type.
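Unlike bad_addr() above it, which keeps the requested size fixed and only pushes the start address past a reservation, bad_addr_size() may also shrink the candidate range so that it stops at the next entry in early_res[]. Below is a minimal stand-alone userspace sketch of one trimming step; the reserved window, addresses, and alignment are fabricated values, not kernel data:

#include <stdio.h>

struct res { unsigned long start, end; };

/* one made-up "early reservation" covering 0x9000 - 0xd000 */
static struct res reserved = { 0x9000, 0xd000 };

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);	/* 'a' must be a power of two */
}

int main(void)
{
	unsigned long addr = 0x8000, size = 0x8000, align = 0x1000;
	unsigned long last = addr + size;

	if (last > reserved.start && addr < reserved.start) {
		/* candidate starts before the reservation: keep only the head */
		size = reserved.start - addr;
	} else if (last > reserved.end && addr < reserved.end) {
		/* candidate starts inside the reservation: skip past it, aligned */
		addr = round_up_to(reserved.end, align);
		size = last - addr;
	}
	printf("usable range: %#lx - %#lx\n", addr, addr + size);
	return 0;
}

With these inputs the candidate [0x8000, 0x10000) collides with the head of the reservation, so only [0x8000, 0x9000) survives; in the kernel function the goto again loop repeats this until the range no longer overlaps any reservation.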
@@ -190,7 +224,7 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end,
 		ei_last = ei->addr + ei->size;
 		if (addr < start)
 			addr = round_up(start, align);
-		if (addr > ei_last)
+		if (addr >= ei_last)
 			continue;
 		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
 			;
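The relaxed comparison matters when round_up() lands addr exactly on ei_last: such an e820 entry has no usable bytes left and should be skipped rather than handed to bad_addr(). As a made-up example, an entry with ei->addr = 0x1000 and ei->size = 0x1000 probed with align = 0x2000 rounds addr up to 0x2000, which equals ei_last; with the old test (addr > ei_last) the entry would still be searched for a window that cannot exist.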
@@ -205,6 +239,40 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end,
 }
 
 /*
+ * Find next free range after *start
+ */
+unsigned long __init find_e820_area_size(unsigned long start, unsigned long *sizep, unsigned long align)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		unsigned long addr, last;
+		unsigned long ei_last;
+
+		if (ei->type != E820_RAM)
+			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
+//		printk(KERN_DEBUG "find_e820_area_size : e820 %d [%llx, %lx]\n", i, ei->addr, ei_last);
+		if (addr < start)
+			addr = round_up(start, align);
+//		printk(KERN_DEBUG "find_e820_area_size : 0 [%lx, %lx]\n", addr, ei_last);
+		if (addr >= ei_last)
+			continue;
+		*sizep = ei_last - addr;
+		while (bad_addr_size(&addr, sizep, align) && addr+ *sizep <= ei_last)
+			;
+		last = addr + *sizep;
+//		printk(KERN_DEBUG "find_e820_area_size : 1 [%lx, %lx]\n", addr, last);
+		if (last > ei_last)
+			continue;
+		return addr;
+	}
+	return -1UL;
+
+}
+/*
  * Find the highest page frame number we have available
  */
 unsigned long __init e820_end_of_ram(void)
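find_e820_area_size() differs from find_e820_area() in that the caller does not request a size: the function reports through *sizep how much free RAM follows the returned address, and returns -1UL once no suitable E820_RAM entry remains at or above start. A sketch of the intended call pattern follows; it is essentially the loop that early_memtest() in the second file builds around this helper, and 'start' and 'end' stand for whatever bounds the caller enforces:

	unsigned long addr = start, size = 0;

	while (addr < end) {
		addr = find_e820_area_size(addr, &size, 1);
		if (addr >= end)		/* also catches the -1UL "nothing left" return */
			break;
		if (addr + size > end)
			size = end - addr;	/* clamp the final chunk to the caller's limit */

		/* ... operate on the free range [addr, addr + size) ... */

		addr += size;
	}

Because -1UL is the largest unsigned long value, a single addr >= end check is enough to terminate the loop on both the "past the limit" and the "no more RAM" cases.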
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 255e51feb15..52f54ee4559 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -427,6 +427,106 @@ static void __init init_gbpages(void)
 	direct_gbpages = 0;
 }
 
+static void __init memtest(unsigned long start_phys, unsigned long size, unsigned pattern)
+{
+	unsigned long i;
+	unsigned long *start;
+	unsigned long start_bad;
+	unsigned long last_bad;
+	unsigned long val;
+	unsigned long start_phys_aligned;
+	unsigned long count;
+	unsigned long incr;
+
+	switch (pattern) {
+	case 0:
+		val = 0UL;
+		break;
+	case 1:
+		val = -1UL;
+		break;
+	case 2:
+		val = 0x5555555555555555UL;
+		break;
+	case 3:
+		val = 0xaaaaaaaaaaaaaaaaUL;
+		break;
+	default:
+		return;
+	}
+
+	incr = sizeof(unsigned long);
+	start_phys_aligned = ALIGN(start_phys, incr);
+	count = (size - (start_phys_aligned - start_phys))/incr;
+	start = __va(start_phys_aligned);
+	start_bad = 0;
+	last_bad = 0;
+
+	for (i = 0; i < count; i++)
+		start[i] = val;
+	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
+		if (*start != val) {
+			if (start_phys_aligned == last_bad + incr) {
+				last_bad += incr;
+			} else {
+				if (start_bad) {
+					printk(KERN_INFO " %016lx bad mem addr %016lx - %016lx reserved\n",
+						val, start_bad, last_bad + incr);
+					reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
+				}
+				start_bad = last_bad = start_phys_aligned;
+			}
+		}
+	}
+	if (start_bad) {
+		printk(KERN_INFO " %016lx bad mem addr %016lx - %016lx reserved\n",
+			val, start_bad, last_bad + incr);
+		reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
+	}
+
+}
+
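memtest() fills the range with one of four 64-bit patterns (all zeros, all ones, 0x5555..., 0xaaaa...), reads every word back, and folds consecutive failing words into a single BAD RAM reservation instead of reserving word by word. Below is a stand-alone userspace sketch of just that coalescing step; the buffer and the "corrupted" offsets are fabricated for the demo, and it assumes a 64-bit build so unsigned long is 8 bytes as in the kernel code:

#include <stdio.h>

#define WORDS 64

int main(void)
{
	unsigned long buf[WORDS], val = 0x5555555555555555UL;
	unsigned long start_bad = 0, last_bad = 0, incr = sizeof(unsigned long);
	unsigned long addr = 0;
	int i;

	for (i = 0; i < WORDS; i++)
		buf[i] = val;
	buf[10] = buf[11] = buf[40] = 0;	/* fabricated "bad" words */

	for (i = 0; i < WORDS; i++, addr += incr) {
		if (buf[i] == val)
			continue;
		if (start_bad && addr == last_bad + incr) {
			last_bad = addr;		/* extend the current bad range */
		} else {
			if (start_bad)
				printf("bad range %#lx - %#lx\n", start_bad, last_bad + incr);
			start_bad = last_bad = addr;	/* open a new bad range */
		}
	}
	if (start_bad)
		printf("bad range %#lx - %#lx\n", start_bad, last_bad + incr);
	return 0;
}

With those offsets the sketch reports two ranges, 0x50 - 0x60 and 0x140 - 0x148, mirroring how the kernel function would emit two separate reserve_early() calls for two clusters of bad words.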
+static int __initdata memtest_pattern;
+static int __init parse_memtest(char *arg)
+{
+	if (arg)
+		memtest_pattern = simple_strtoul(arg, NULL, 0) + 1;
+	return 0;
+}
+
+early_param("memtest", parse_memtest);
+
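With this patch, passing memtest=N on the kernel command line stores N + 1 in memtest_pattern, so early_memtest() below runs patterns 0 through N: memtest=0 writes and verifies zeros only, while memtest=3 runs all four patterns (zeros, ones, 0x55.., 0xaa..). Values above 3 add passes that hit the default: return; branch in memtest() and therefore test nothing extra, and booting without the option leaves memtest_pattern at 0, skipping the test entirely.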
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+	unsigned long t_start, t_size;
+	unsigned pattern;
+
+	if (memtest_pattern)
+		printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
+	for (pattern = 0; pattern < memtest_pattern; pattern++) {
+		t_start = start;
+		t_size = 0;
+		while (t_start < end) {
+			t_start = find_e820_area_size(t_start, &t_size, 1);
+
+			/* done ? */
+			if (t_start >= end)
+				break;
+			if (t_start + t_size > end)
+				t_size = end - t_start;
+
+			printk(KERN_CONT "\n %016lx - %016lx pattern %d",
+				t_start, t_start + t_size, pattern);
+
+			memtest(t_start, t_size, pattern);
+
+			t_start += t_size;
+		}
+	}
+	if (memtest_pattern)
+		printk(KERN_CONT "\n");
+}
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -435,8 +535,9 @@ static void __init init_gbpages(void)
 void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
 	unsigned long next;
+	unsigned long start_phys = start, end_phys = end;
 
-	pr_debug("init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping\n");
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -479,6 +580,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	if (!after_bootmem)
 		reserve_early(table_start << PAGE_SHIFT,
 				table_end << PAGE_SHIFT, "PGTABLE");
+
+	if (!after_bootmem)
+		early_memtest(start_phys, end_phys);
 }
 
 #ifndef CONFIG_NUMA
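The placement of the hook is what makes testing safe this early: the direct mapping and the PGTABLE reservation are established first, so when early_memtest() walks free RAM through find_e820_area_size(), the bad_addr_size() check steers it around everything already recorded in early_res[], the just-reserved page tables included, and any BAD RAM ranges it finds are themselves reserved before bootmem takes over.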