author    Yinghai Lu <Yinghai.Lu@Sun.COM>  2008-01-30 07:33:32 -0500
committer Ingo Molnar <mingo@elte.hu>      2008-01-30 07:33:32 -0500
commit    093af8d7f0ba3c6be1485973508584ef081e9f93 (patch)
tree      0a2db2401e09764e654efafbea60f6d5d6894dcd /arch/x86/kernel
parent    11201e603d28a1cb7a4bb1d65f39e61629c97a28 (diff)
x86_32: trim memory by updating e820

When the MTRRs do not cover the whole e820 table, we need to trim the
RAM and update e820 accordingly, reusing some of the existing 64-bit
code. This requires adding early_get_cap(), calling it from
early_cpu_detect(), and moving mtrr_bp_init() earlier in boot.

The code successfully trimmed the memory map on Justin's system:

from:
 [    0.000000]  BIOS-e820: 0000000100000000 - 000000022c000000 (usable)
to:
 [    0.000000]   modified: 0000000100000000 - 0000000228000000 (usable)
 [    0.000000]   modified: 0000000228000000 - 000000022c000000 (reserved)

According to Justin it makes quite a difference:

| When I boot the box without any trimming it acts like a 286 or 386,
| takes about 10 minutes to boot (using raptor disks).

Signed-off-by: Yinghai Lu <yinghai.lu@sun.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/common.c     | 30
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c  | 22
-rw-r--r--  arch/x86/kernel/e820_32.c        | 11
-rw-r--r--  arch/x86/kernel/setup_32.c       |  6
4 files changed, 60 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 56cc341cc586..bba850b05d0e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -278,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 	}
 }
+static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+{
+	u32 tfms, xlvl;
+	int ebx;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+	if (have_cpuid_p()) {
+		/* Intel-defined flags: level 0x00000001 */
+		if (c->cpuid_level >= 0x00000001) {
+			u32 capability, excap;
+			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+			c->x86_capability[0] = capability;
+			c->x86_capability[4] = excap;
+		}
+
+		/* AMD-defined flags: level 0x80000001 */
+		xlvl = cpuid_eax(0x80000000);
+		if ((xlvl & 0xffff0000) == 0x80000000) {
+			if (xlvl >= 0x80000001) {
+				c->x86_capability[1] = cpuid_edx(0x80000001);
+				c->x86_capability[6] = cpuid_ecx(0x80000001);
+			}
+		}
+
+	}
+
+}
 
 /* Do minimum CPU detection early.
    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
@@ -306,6 +333,8 @@ static void __init early_cpu_detect(void)
 		early_init_intel(c);
 		break;
 	}
+
+	early_get_cap(c);
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
@@ -485,7 +514,6 @@ void __init identify_boot_cpu(void)
 	identify_cpu(&boot_cpu_data);
 	sysenter_setup();
 	enable_sep_cpu();
-	mtrr_bp_init();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
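The new early_get_cap() simply snapshots the CPUID capability words early. For reference, a user-space analog using GCC's <cpuid.h> (illustrative only, not kernel code): word 0 of x86_capability is EDX of leaf 1, word 4 is ECX of leaf 1, and words 1 and 6 come from EDX/ECX of extended leaf 0x80000001.

/* User-space analog of the reads in early_get_cap(); illustrative
 * only, not kernel code. Requires GCC/clang's <cpuid.h>. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* leaf 1: EDX -> x86_capability[0], ECX -> x86_capability[4] */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		printf("leaf 1:          edx=%08x ecx=%08x\n", edx, ecx);

	/* extended leaves are valid only if the max extended leaf allows
	 * it, mirroring the xlvl check in the kernel code above */
	if (__get_cpuid_max(0x80000000, 0) >= 0x80000001 &&
	    __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		printf("leaf 0x80000001: edx=%08x ecx=%08x\n", edx, ecx);

	return 0;
}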
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index ccd36ed2187b..ac4b6338f3f4 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -624,7 +624,6 @@ static struct sysdev_driver mtrr_sysdev_driver = {
 	.resume		= mtrr_restore,
 };
 
-#ifdef CONFIG_X86_64
 static int disable_mtrr_trim;
 
 static int __init disable_mtrr_trim_setup(char *str)
@@ -643,13 +642,10 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
 #define Tom2Enabled (1U << 21)
 #define Tom2ForceMemTypeWB (1U << 22)
 
-static __init int amd_special_default_mtrr(unsigned long end_pfn)
+static __init int amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	/* Doesn't apply to memory < 4GB */
-	if (end_pfn <= (0xffffffff >> PAGE_SHIFT))
-		return 0;
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
@@ -687,9 +683,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	 * Make sure we only trim uncachable memory on machines that
 	 * support the Intel MTRR architecture:
 	 */
+	if (!is_cpu(INTEL) || disable_mtrr_trim)
+		return 0;
 	rdmsr(MTRRdefType_MSR, def, dummy);
 	def &= 0xff;
-	if (!is_cpu(INTEL) || disable_mtrr_trim || def != MTRR_TYPE_UNCACHABLE)
+	if (def != MTRR_TYPE_UNCACHABLE)
+		return 0;
+
+	if (amd_special_default_mtrr())
 		return 0;
 
 	/* Find highest cached pfn */
@@ -703,8 +704,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 			highest_addr = base + size;
 	}
 
-	if (amd_special_default_mtrr(end_pfn))
+	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
+	if (!highest_addr) {
+		printk(KERN_WARNING "***************\n");
+		printk(KERN_WARNING "**** WARNING: likely strange cpu\n");
+		printk(KERN_WARNING "**** MTRRs all blank, cpu in qemu?\n");
+		printk(KERN_WARNING "***************\n");
 		return 0;
+	}
 
 	if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
 		printk(KERN_WARNING "***************\n");
@@ -726,7 +733,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	return 0;
 }
-#endif
 
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
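Two guards above are worth noting: trimming only matters when the MTRR default type is UC (then any RAM not covered by a WB variable range runs uncached), and a machine whose variable MTRRs are all blank, such as kvm/qemu at the time, must not be trimmed at all or the whole map would be dropped. A tiny sketch of the default-type decode (the MSR value is a made-up example, not read from hardware):

/* Sketch of the MTRRdefType decode; the MSR value is a made-up
 * example, not read from hardware. Bits 7:0 hold the default
 * memory type (bit 11 is the MTRR enable bit). */
#include <stdio.h>
#include <stdint.h>

#define MTRR_TYPE_UNCACHABLE 0

int main(void)
{
	uint64_t msr = 0xc00;            /* MTRRs enabled, default type UC */
	unsigned int def = msr & 0xff;   /* same mask as def &= 0xff above */

	if (def != MTRR_TYPE_UNCACHABLE)
		printf("default type %u covers uncovered RAM, no trim\n", def);
	else
		printf("default UC: RAM past the last WB range needs a trim\n");
	return 0;
}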
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 931934a7b353..4e16ef4a2659 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -749,3 +749,14 @@ static int __init parse_memmap(char *arg)
 	return 0;
 }
 early_param("memmap", parse_memmap);
+void __init update_e820(void)
+{
+	u8 nr_map;
+
+	nr_map = e820.nr_map;
+	if (sanitize_e820_map(e820.map, &nr_map))
+		return;
+	e820.nr_map = nr_map;
+	printk(KERN_INFO "modified physical RAM map:\n");
+	print_memory_map("modified");
+}
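update_e820() leans on sanitize_e820_map() to resolve the overlap the trim creates when it appends a reserved entry on top of an existing usable one. A simplified illustration for the exact ranges in the changelog, assuming reserved always wins over usable (the kernel routine handles the general case):

/* Simplified overlap resolution for the changelog's ranges, assuming
 * reserved wins over usable; sanitize_e820_map() handles the general
 * case. Only the tail overlap produced by the trim is handled here. */
#include <stdio.h>
#include <stdint.h>

struct entry {
	uint64_t addr, size;
};

/* clip a usable range so it stops where an overlapping reserved
 * range begins */
static void clip_usable(struct entry *usable, const struct entry *resv)
{
	uint64_t uend = usable->addr + usable->size;

	if (resv->addr < uend && resv->addr + resv->size >= uend)
		usable->size = resv->addr - usable->addr;
}

int main(void)
{
	struct entry ram  = { 0x100000000ULL, 0x12c000000ULL };  /* usable   */
	struct entry trim = { 0x228000000ULL, 0x004000000ULL };  /* reserved */

	clip_usable(&ram, &trim);
	printf("modified: %016llx - %016llx (usable)\n",
	       (unsigned long long)ram.addr,
	       (unsigned long long)(ram.addr + ram.size));
	printf("modified: %016llx - %016llx (reserved)\n",
	       (unsigned long long)trim.addr,
	       (unsigned long long)(trim.addr + trim.size));
	return 0;
}

Run on the changelog's ranges this prints the same "modified" lines the patch logs at boot, with the usable region clipped to end where the reserved one starts.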
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 26a56f714d34..83ba3ca5f431 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -48,6 +48,7 @@
 
 #include <video/edid.h>
 
+#include <asm/mtrr.h>
 #include <asm/apic.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
@@ -758,6 +759,11 @@ void __init setup_arch(char **cmdline_p)
 
 	max_low_pfn = setup_memory();
 
+	/* update e820 for memory not covered by WB MTRRs */
+	mtrr_bp_init();
+	if (mtrr_trim_uncached_memory(max_pfn))
+		max_low_pfn = setup_memory();
+
 #ifdef CONFIG_VMI
 	/*
 	 * Must be after max_low_pfn is determined, and before kernel