author     Yinghai Lu <yhlu.kernel@gmail.com>   2008-06-16 19:11:08 -0400
committer  Ingo Molnar <mingo@elte.hu>          2008-07-08 04:38:19 -0400
commit     cc9f7a0ccf000d4db5fbdc7b0ae48eefea102f69 (patch)
tree       582125558bf4975446ae76f35b297bf4ce864bc1 /arch
parent     41c094fd3ca54f1a71233049cf136ff94c91f4ae (diff)
x86: kill bad_ppro
Handle the Pentium Pro RAM erratum once in setup_arch(): when ppro_with_ram_bug() reports an affected CPU, reserve the bad physical range in the e820 map up front, so we don't punish all the other CPUs without that problem with a per-page check when initializing highmem.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/setup_32.c  |  9
-rw-r--r--  arch/x86/mm/discontig_32.c  |  4
-rw-r--r--  arch/x86/mm/init_32.c       | 43
3 files changed, 23 insertions, 33 deletions
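The interesting hunk is the one in setup_32.c below: when ppro_with_ram_bug() reports an affected Pentium Pro, the physical range starting at 0x70000000 (0x40000 bytes long) is marked reserved in the e820 map once at boot, instead of having the highmem code test every single page against page_kills_ppro(). As a rough standalone illustration of that "fix the map once, then trust it" idea (this is not the kernel's e820 API; the struct, helpers and toy memory layout below are invented for the sketch, only the reserved range comes from the patch):

#include <stdio.h>
#include <stdint.h>

#define MAX_RANGES 16

struct range {
        uint64_t start, end;    /* [start, end), physical addresses */
        int usable;             /* 1 = RAM we may hand to the allocator */
};

static struct range map[MAX_RANGES];
static int nr_ranges;

static void add_range(uint64_t start, uint64_t end, int usable)
{
        if (start < end && nr_ranges < MAX_RANGES)
                map[nr_ranges++] = (struct range){ start, end, usable };
}

/*
 * One-time fixup: carve [bad_start, bad_end) out of any usable range that
 * contains it.  Afterwards the page-freeing loop can trust the map and
 * needs no per-page erratum check.
 */
static void reserve_bad_window(uint64_t bad_start, uint64_t bad_end)
{
        int i, n = nr_ranges;

        for (i = 0; i < n; i++) {
                struct range r = map[i];

                if (!r.usable || bad_start < r.start || bad_end > r.end)
                        continue;

                map[i].end = bad_start;           /* usable head, may be empty */
                add_range(bad_start, bad_end, 0); /* the reserved window */
                add_range(bad_end, r.end, 1);     /* usable tail, if any */
        }
}

int main(void)
{
        int i;

        /* Toy RAM map: 0-640K and 1M-2G usable, with a hole in between. */
        add_range(0x00000000ULL, 0x000a0000ULL, 1);
        add_range(0x00100000ULL, 0x80000000ULL, 1);

        /* The window the patch reserves: 0x70000000, length 0x40000. */
        reserve_bad_window(0x70000000ULL, 0x70000000ULL + 0x40000ULL);

        for (i = 0; i < nr_ranges; i++)
                printf("%#010llx - %#010llx  %s\n",
                       (unsigned long long)map[i].start,
                       (unsigned long long)map[i].end,
                       map[i].usable ? "usable" : "reserved");
        return 0;
}

In the actual patch the same role is played by e820_update_range() plus sanitize_e820_map(), which re-sorts and merges the entries before e820_print_map() dumps the fixed layout; everything later in boot, including set_highmem_pages_init(), then simply never sees the dangerous pages.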
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index f3ddba5ed9a7..9692aeb8ecae 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -68,6 +68,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/efi.h>
+#include <asm/bugs.h>
 
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables. It contains a *physical*
@@ -764,6 +765,14 @@ void __init setup_arch(char **cmdline_p)
         if (efi_enabled)
                 efi_init();
 
+        if (ppro_with_ram_bug()) {
+                e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
+                                  E820_RESERVED);
+                sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+                printk(KERN_INFO "fixed physical RAM map:\n");
+                e820_print_map("bad_ppro");
+        }
+
         e820_register_active_regions(0, 0, -1UL);
         /*
          * partially used pages are not usable - thus
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 7c4d0255f8d8..6216e43b6e95 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -427,7 +427,7 @@ void __init zone_sizes_init(void)
         return;
 }
 
-void __init set_highmem_pages_init(int bad_ppro)
+void __init set_highmem_pages_init(void)
 {
 #ifdef CONFIG_HIGHMEM
         struct zone *zone;
@@ -447,7 +447,7 @@ void __init set_highmem_pages_init(int bad_ppro)
                 zone->name, nid, zone_start_pfn, zone_end_pfn);
 
                 add_highpages_with_active_regions(nid, zone_start_pfn,
-                        zone_end_pfn, bad_ppro);
+                        zone_end_pfn);
         }
         totalram_pages += totalhigh_pages;
 #endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ba07a489230e..fb5694d788bf 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -220,13 +220,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
         }
 }
 
-static inline int page_kills_ppro(unsigned long pagenr)
-{
-        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-                return 1;
-        return 0;
-}
-
 /*
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
@@ -287,22 +280,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
         pkmap_page_table = pte;
 }
 
-static void __init
-add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init add_one_highpage_init(struct page *page, int pfn)
 {
-        if (!(bad_ppro && page_kills_ppro(pfn))) {
-                ClearPageReserved(page);
-                init_page_count(page);
-                __free_page(page);
-                totalhigh_pages++;
-        } else
-                SetPageReserved(page);
+        ClearPageReserved(page);
+        init_page_count(page);
+        __free_page(page);
+        totalhigh_pages++;
 }
 
 struct add_highpages_data {
         unsigned long start_pfn;
         unsigned long end_pfn;
-        int bad_ppro;
 };
 
 static void __init add_highpages_work_fn(unsigned long start_pfn,
@@ -312,10 +300,8 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
         struct page *page;
         unsigned long final_start_pfn, final_end_pfn;
         struct add_highpages_data *data;
-        int bad_ppro;
 
         data = (struct add_highpages_data *)datax;
-        bad_ppro = data->bad_ppro;
 
         final_start_pfn = max(start_pfn, data->start_pfn);
         final_end_pfn = min(end_pfn, data->end_pfn);
@@ -327,29 +313,26 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
                 if (!pfn_valid(node_pfn))
                         continue;
                 page = pfn_to_page(node_pfn);
-                add_one_highpage_init(page, node_pfn, bad_ppro);
+                add_one_highpage_init(page, node_pfn);
         }
 
 }
 
 void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-                                              unsigned long end_pfn,
-                                              int bad_ppro)
+                                              unsigned long end_pfn)
 {
         struct add_highpages_data data;
 
         data.start_pfn = start_pfn;
         data.end_pfn = end_pfn;
-        data.bad_ppro = bad_ppro;
 
         work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
 #ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(int bad_ppro)
+static void __init set_highmem_pages_init(void)
 {
-        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
-                        bad_ppro);
+        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
 
         totalram_pages += totalhigh_pages;
 }
@@ -358,7 +341,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 #else
 # define kmap_init() do { } while (0)
 # define permanent_kmaps_init(pgd_base) do { } while (0)
-# define set_highmem_pages_init(bad_ppro) do { } while (0)
+# define set_highmem_pages_init() do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
@@ -605,13 +588,11 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 void __init mem_init(void)
 {
         int codesize, reservedpages, datasize, initsize;
-        int tmp, bad_ppro;
+        int tmp;
 
 #ifdef CONFIG_FLATMEM
         BUG_ON(!mem_map);
 #endif
-        bad_ppro = ppro_with_ram_bug();
-
 #ifdef CONFIG_HIGHMEM
         /* check that fixmap and pkmap do not overlap */
         if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
@@ -634,7 +615,7 @@ void __init mem_init(void)
         if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                 reservedpages++;
 
-        set_highmem_pages_init(bad_ppro);
+        set_highmem_pages_init();
 
         codesize = (unsigned long) &_etext - (unsigned long) &_text;
         datasize = (unsigned long) &_edata - (unsigned long) &_etext;