Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |  17
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c    | 109
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h    |   3
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                  |  18
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c           |  20
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c              |   5
6 files changed, 71 insertions, 101 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 84a8220a607..a6ef672adbb 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+		/*
+		 * There is a known erratum on Pentium III and Core Solo
+		 * and Core Duo CPUs.
+		 * " Page with PAT set to WC while associated MTRR is UC
+		 *   may consolidate to UC "
+		 * Because of this erratum, it is better to stick with
+		 * setting WC in MTRR rather than using PAT on these CPUs.
+		 *
+		 * Enable PAT WC only on P4, Core 2 or later CPUs.
+		 */
+		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
 			return;
-		break;
+
+		pat_disable("PAT WC disabled due to known CPU erratum.");
+		return;
+
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_CENTAUR:
 	case X86_VENDOR_TRANSMETA:
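
The rewritten Intel branch keeps PAT only on family 0xF (Pentium 4) or on family 6 with model >= 15 (Core 2 and later); older family-6 parts such as Pentium III, Pentium M, Core Solo and Core Duo now fall through to pat_disable() because of the WC/UC consolidation erratum quoted in the comment. A minimal userspace sketch of that predicate, where the helper name and the sample family/model pairs are purely illustrative:

#include <stdio.h>

/* Mirrors the new gate: enable PAT WC only on P4 (family > 6)
 * or on family 6, model >= 15 (Core 2 and later). */
static int intel_pat_usable(unsigned int family, unsigned int model)
{
	return family > 0x6 || (family == 6 && model >= 15);
}

int main(void)
{
	/* illustrative family/model pairs, not part of the patch */
	static const struct { const char *name; unsigned int family, model; } cpus[] = {
		{ "Pentium 4 (family 0xF)",         0xF, 2  },
		{ "Core 2 (family 6, model 15)",    6,   15 },
		{ "Core Duo (family 6, model 14)",  6,   14 },
		{ "Pentium M (family 6, model 13)", 6,   13 },
	};

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%-32s PAT %s\n", cpus[i].name,
		       intel_pat_usable(cpus[i].family, cpus[i].model) ? "kept" : "disabled");
	return 0;
}
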
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4e7271999a7..84bb395038d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -737,63 +737,44 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-	if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
 		return;
 
-	data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
-	data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
-	data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-	data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
-	data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
-	data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
-}
-
-
-static struct acpi_processor_performance *acpi_perf_data;
-static int preregister_valid;
-
-static int powernow_k8_cpu_preinit_acpi(void)
-{
-	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
-	if (!acpi_perf_data)
-		return -ENODEV;
-
-	if (acpi_processor_preregister_performance(acpi_perf_data))
-		return -ENODEV;
-	else
-		preregister_valid = 1;
-	return 0;
+	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
 }
 
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
 	int ret_val;
-	int cpu = 0;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
-	if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
+	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
 		return -EIO;
 	}
 
 	/* verify the data contained in the ACPI structures */
-	if (data->acpi_data->state_count <= 1) {
+	if (data->acpi_data.state_count <= 1) {
 		dprintk("No ACPI P-States\n");
 		goto err_out;
 	}
 
-	if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-	    (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			data->acpi_data->control_register.space_id,
-			data->acpi_data->status_register.space_id);
+			data->acpi_data.control_register.space_id,
+			data->acpi_data.status_register.space_id);
 		goto err_out;
 	}
 
 	/* fill in data->powernow_table */
 	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
-		* (data->acpi_data->state_count + 1)), GFP_KERNEL);
+		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		dprintk("powernow_table memory alloc failure\n");
 		goto err_out;
@@ -806,12 +787,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	if (ret_val)
 		goto err_out_mem;
 
-	powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
-	powernow_table[data->acpi_data->state_count].index = 0;
+	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
 	/* fill in data */
-	data->numps = data->acpi_data->state_count;
+	data->numps = data->acpi_data.state_count;
 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
@@ -819,31 +800,16 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	/* determine affinity, from ACPI if available */
-	if (preregister_valid) {
-		if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
-			(data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
-			data->starting_core_affinity = data->acpi_data->shared_cpu_map;
-		else
-			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
-	} else {
-		/* best guess from family if not */
-		if (cpu_family == CPU_HW_PSTATE)
-			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
-		else
-			data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
-	}
-
 	return 0;
 
 err_out_mem:
 	kfree(powernow_table);
 
 err_out:
-	acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
 
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
-	data->acpi_data->state_count = 0;
+	data->acpi_data.state_count = 0;
 
 	return -ENODEV;
 }
@@ -855,10 +821,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
 	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
-	for (i = 0; i < data->acpi_data->state_count; i++) {
+	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 index;
 
-		index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
+		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
 		if (index > data->max_hw_pstate) {
 			printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
@@ -874,7 +840,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
+		powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -883,16 +849,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 {
 	int i;
 	int cntlofreq = 0;
-	for (i = 0; i < data->acpi_data->state_count; i++) {
+	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
 		u32 vid;
 
 		if (data->exttype) {
-			fid = data->acpi_data->states[i].status & EXT_FID_MASK;
-			vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
 		} else {
-			fid = data->acpi_data->states[i].control & FID_MASK;
-			vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
+			fid = data->acpi_data.states[i].control & FID_MASK;
+			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
 		}
 
 		dprintk("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
@@ -933,10 +899,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 			cntlofreq = i;
 		}
 
-		if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
+		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
 				powernow_table[i].frequency,
-				(unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
+				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
 		}
@@ -946,12 +912,11 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
-	if (data->acpi_data->state_count)
-		acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+	if (data->acpi_data.state_count)
+		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
 }
 
 #else
-static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
@@ -1136,7 +1101,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1209,7 +1174,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* run on any CPU again */
 	set_cpus_allowed_ptr(current, &oldmask);
 
-	pol->cpus = data->starting_core_affinity;
+	if (cpu_family == CPU_HW_PSTATE)
+		pol->cpus = cpumask_of_cpu(pol->cpu);
+	else
+		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
 
 	/* Take a crude guess here.
@@ -1332,7 +1300,6 @@ static int __cpuinit powernowk8_init(void)
 	}
 
 	if (supported_cpus == num_online_cpus()) {
-		powernow_k8_cpu_preinit_acpi();
 		printk(KERN_INFO PFX "Found %d %s "
 			"processors (%d cpu cores) (" VERSION ")\n",
 			num_online_nodes(),
@@ -1349,10 +1316,6 @@ static void __exit powernowk8_exit(void)
 	dprintk("exit\n");
 
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
-
-#ifdef CONFIG_X86_POWERNOW_K8_ACPI
-	free_percpu(acpi_perf_data);
-#endif
 }
 
 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index a62612cd4be..ab48cfed4d9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -33,13 +33,12 @@ struct powernow_k8_data {
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 	/* the acpi table needs to be kept. it's only available if ACPI was
 	 * used to determine valid frequency/vid/fid states */
-	struct acpi_processor_performance *acpi_data;
+	struct acpi_processor_performance acpi_data;
 #endif
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
 	cpumask_t *available_cores;
-	cpumask_t starting_core_affinity;
 };
 
 
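
Taken together, the powernow-k8.c and powernow-k8.h hunks drop the shared per-cpu acpi_perf_data allocation and go back to embedding a private acpi_processor_performance in each powernow_k8_data, so member access changes from data->acpi_data->field to data->acpi_data.field and the register/unregister calls pass &data->acpi_data. A stripped-down sketch of that ownership change, using simplified stand-in types and function names rather than the real ACPI interfaces:

#include <stdio.h>

/* simplified stand-in for struct acpi_processor_performance */
struct perf_data {
	unsigned int state_count;
};

/* after the patch: the ACPI data is embedded, not pointed to */
struct powernow_data {
	struct perf_data acpi_data;
	unsigned int cpu;
};

/* stand-in for acpi_processor_register_performance(): fills a
 * caller-owned structure through a pointer */
static int register_performance(struct perf_data *perf, unsigned int cpu)
{
	(void)cpu;
	perf->state_count = 4;	/* pretend the BIOS reports four P-states */
	return 0;
}

int main(void)
{
	struct powernow_data data = { .cpu = 0 };

	/* take the address of the embedded member where a pointer is needed... */
	if (register_performance(&data.acpi_data, data.cpu))
		return 1;

	/* ...and use plain member access everywhere else */
	printf("cpu %u: %u P-states\n", data.cpu, data.acpi_data.state_count);
	return 0;
}
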
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 3fd7a67bb06..e710a21bb6e 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
-static void __cpuinit set_cx86_inc(void)
-{
-	unsigned char ccr3;
-
-	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
-	ccr3 = getCx86(CX86_CCR3);
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
-	/* PCR1 -- Performance Control */
-	/* Incrementor on, whatever that is */
-	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
-	/* PCR0 -- Performance Control */
-	/* Incrementor Margin 10 */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
-	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
-}
-
 /*
  * Configure later MediaGX and/or Geode processor.
  */
@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
 
 	set_cx86_memwb();
 	set_cx86_reorder();
-	set_cx86_inc();
 
 	local_irq_restore(flags);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 509bd3d9eac..cb7d3b6a80e 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			     unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
+	unsigned int tmp, hi;
 
 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 	if ((mask_lo & 0x800) == 0) {
@@ -392,8 +393,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 
 	/* Work out the shifted address mask. */
-	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
-		  | mask_lo >> PAGE_SHIFT;
+	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+	mask_lo = size_or_mask | tmp;
+	/* Expand tmp with high bits to all 1s*/
+	hi = fls(tmp);
+	if (hi > 0) {
+		tmp |= ~((1<<(hi - 1)) - 1);
+
+		if (tmp != mask_lo) {
+			static int once = 1;
+
+			if (once) {
+				printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
+				once = 0;
+			}
+			mask_lo = tmp;
+		}
+	}
 
 	/* This works correctly if size is a power of two, i.e. a
 	   contiguous range. */
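
The generic_get_mtrr() addition works around BIOSes that program an MTRR mask with the uppermost physical-address bits cleared: fls() finds the highest set bit of the shifted mask and every bit above it is forced to 1, so the size calculation that follows sees a contiguous mask. A standalone sketch of the same fix-up, assuming a hypothetical 36-bit physical address width, 4K pages and a made-up BIOS mask value:

#include <stdio.h>

/* 1-based index of the highest set bit, 0 if none
 * (userspace stand-in for the kernel's fls()) */
static int fls_u32(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* assume 36 physical address bits and PAGE_SHIFT == 12 */
	unsigned int size_or_mask = ~((1u << (36 - 12)) - 1);	/* 0xff000000 */
	/* shifted mask as a BIOS might program it, top address bits left clear */
	unsigned int tmp = 0xf0000;
	unsigned int mask_lo = size_or_mask | tmp;
	int hi = fls_u32(tmp);

	if (hi > 0) {
		tmp |= ~((1u << (hi - 1)) - 1);
		if (tmp != mask_lo) {
			printf("fixing mask 0x%08x -> 0x%08x\n", mask_lo, tmp);
			mask_lo = tmp;
		}
	}
	printf("resulting mask 0x%08x\n", mask_lo);
	return 0;
}

With these made-up inputs the sketch widens the mask from 0xff0f0000 to 0xffff0000, which is the situation the one-shot "fixing it up" message above reports.
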
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6f23969c8fa..b117d7f8a56 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		if (!kvm_para_available()) {
-			printk(KERN_WARNING
+		WARN(!kvm_para_available(), KERN_WARNING
 				"WARNING: strange, CPU MTRRs all blank?\n");
-			WARN_ON(1);
-		}
 		return 0;
 	}
 
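
The mtrr/main.c hunk folds the open-coded if/printk/WARN_ON(1) sequence into a single WARN(), which prints the message and a stack trace only when its condition is true while the early return stays unconditional, so behaviour is unchanged for the kvm/qemu case. A rough userspace approximation of that pattern, purely for illustration; the kernel's WARN() additionally dumps a backtrace and taints the kernel:

#include <stdio.h>

/* crude stand-in for the kernel's WARN(cond, fmt, ...):
 * print the message when cond is true and evaluate to cond */
#define WARN(cond, fmt, ...)						\
	({								\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
		__ret;							\
	})

static int kvm_para_available(void)
{
	return 0;	/* pretend we run on bare metal */
}

int main(void)
{
	unsigned long highest_pfn = 0;	/* pretend the MTRRs were all blank */

	if (!highest_pfn) {
		WARN(!kvm_para_available(),
		     "WARNING: strange, CPU MTRRs all blank?\n");
		return 0;
	}
	return 1;
}
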