Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/relocs.c           7
-rw-r--r--  arch/x86/boot/memory.c                     29
-rw-r--r--  arch/x86/kernel/cpu/common.c                7
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c   1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  42
-rw-r--r--  arch/x86/kernel/reboot.c                    8
-rw-r--r--  arch/x86/kernel/setup_percpu.c              4
-rw-r--r--  arch/x86/kvm/mmu.c                          3
-rw-r--r--  arch/x86/kvm/x86.c                          6
-rw-r--r--  arch/x86/mm/pageattr.c                     13
11 files changed, 75 insertions, 47 deletions
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 857e492c571..bbeb0c3fbd9 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
 			if (sym->st_shndx == SHN_ABS) {
 				continue;
 			}
-			if (r_type == R_386_PC32) {
-				/* PC relative relocations don't need to be adjusted */
+			if (r_type == R_386_NONE || r_type == R_386_PC32) {
+				/*
+				 * NONE can be ignored and PC relative
+				 * relocations don't need to be adjusted.
+				 */
 			}
 			else if (r_type == R_386_32) {
 				/* Visit relocations that need to be adjusted */
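
The hunk above teaches the boot-time relocation walker to skip R_386_NONE entries just as it skips PC-relative ones. For context, the type being tested is the low byte of r_info in each Elf32_Rel entry; the stand-alone sketch below (using the standard <elf.h> macros and constants, not the kernel build tool itself) shows the same classification in isolation.

#include <elf.h>
#include <stdio.h>

/* Sketch only: mirrors the filtering in walk_relocs(), outside the kernel tree. */
static void classify_reloc(const Elf32_Rel *rel)
{
	unsigned int r_type = ELF32_R_TYPE(rel->r_info);	/* low 8 bits of r_info */

	if (r_type == R_386_NONE || r_type == R_386_PC32) {
		/* NONE is ignored; PC-relative entries need no adjustment */
	} else if (r_type == R_386_32) {
		/* absolute 32-bit relocation: the kind that gets adjusted */
		printf("adjust relocation at offset 0x%lx\n",
		       (unsigned long)rel->r_offset);
	} else {
		fprintf(stderr, "unsupported relocation type %u\n", r_type);
	}
}

int main(void)
{
	Elf32_Rel rel = { .r_offset = 0x1000, .r_info = ELF32_R_INFO(1, R_386_32) };

	classify_reloc(&rel);
	return 0;
}
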
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 5054c2ddd1a..74b3d2ba84e 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -17,11 +17,6 @@
 
 #define SMAP	0x534d4150	/* ASCII "SMAP" */
 
-struct e820_ext_entry {
-	struct e820entry std;
-	u32 ext_flags;
-} __attribute__((packed));
-
 static int detect_memory_e820(void)
 {
 	int count = 0;
@@ -29,13 +24,21 @@ static int detect_memory_e820(void)
 	u32 size, id, edi;
 	u8 err;
 	struct e820entry *desc = boot_params.e820_map;
-	static struct e820_ext_entry buf; /* static so it is zeroed */
+	static struct e820entry buf; /* static so it is zeroed */
 
 	/*
-	 * Set this here so that if the BIOS doesn't change this field
-	 * but still doesn't change %ecx, we're still okay...
+	 * Note: at least one BIOS is known which assumes that the
+	 * buffer pointed to by one e820 call is the same one as
+	 * the previous call, and only changes modified fields. Therefore,
+	 * we use a temporary buffer and copy the results entry by entry.
+	 *
+	 * This routine deliberately does not try to account for
+	 * ACPI 3+ extended attributes. This is because there are
+	 * BIOSes in the field which report zero for the valid bit for
+	 * all ranges, and we don't currently make any use of the
+	 * other attribute bits. Revisit this if we see the extended
+	 * attribute bits deployed in a meaningful way in the future.
 	 */
-	buf.ext_flags = 1;
 
 	do {
 		size = sizeof buf;
@@ -66,13 +69,7 @@ static int detect_memory_e820(void)
 			break;
 		}
 
-		/* ACPI 3.0 added the extended flags support. If bit 0
-		   in the extended flags is zero, we're supposed to simply
-		   ignore the entry -- a backwards incompatible change! */
-		if (size > 20 && !(buf.ext_flags & 1))
-			continue;
-
-		*desc++ = buf.std;
+		*desc++ = buf;
 		count++;
 	} while (next && count < ARRAY_SIZE(boot_params.e820_map));
 
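
The new comment in the hunk above describes two workarounds: results are read into one static scratch buffer and copied out entry by entry (because some BIOSes assume the same buffer is passed on every E820 call), and the ACPI 3 extended attribute word is no longer consulted at all. The pattern is easier to see outside the real-mode boot code; here is a minimal user-space sketch of it, where struct range and firmware_next_range() are made-up stand-ins for struct e820entry and the INT 0x15, AX=0xE820 call.

#include <stdint.h>
#include <stdio.h>

struct range {			/* same shape as struct e820entry */
	uint64_t addr;
	uint64_t size;
	uint32_t type;
};

/* Made-up stand-in for the firmware call; *cont plays the role of %ebx. */
static int firmware_next_range(struct range *buf, uint32_t *cont)
{
	static const struct range fake[] = {
		{ 0x0000000000000000ULL, 0x000000000009fc00ULL, 1 },
		{ 0x0000000000100000ULL, 0x000000003fe00000ULL, 1 },
	};
	uint32_t i = *cont;

	if (i >= 2)
		return -1;			/* carry set: error */
	*buf = fake[i];
	*cont = (i + 1 < 2) ? i + 1 : 0;	/* 0 means "last entry returned" */
	return 0;
}

static int collect_ranges(struct range *out, int max)
{
	static struct range buf;	/* static so it is zeroed; reused for every call */
	uint32_t cont = 0;
	int count = 0;

	do {
		if (firmware_next_range(&buf, &cont) != 0)
			break;
		out[count++] = buf;	/* copy entry by entry; never hand out[] to firmware */
	} while (cont && count < max);

	return count;
}

int main(void)
{
	struct range map[8];
	int i, n = collect_ranges(map, 8);

	for (i = 0; i < n; i++)
		printf("%016llx + %016llx type %u\n",
		       (unsigned long long)map[i].addr,
		       (unsigned long long)map[i].size, map[i].type);
	return 0;
}
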
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1caefc82e6..77848d9fca6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+static int __init x86_xsave_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+	return 1;
+}
+__setup("noxsave", x86_xsave_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
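
The common.c hunk adds a "noxsave" kernel command-line option: __setup() registers the handler, early parameter parsing calls it if the string is present, and setup_clear_cpu_cap() masks the feature bit so later code treats the CPU as if it lacked XSAVE. A sketch of the same pattern for a hypothetical option follows; X86_FEATURE_FOO and the "nofoo" string are invented for illustration, while __setup() and setup_clear_cpu_cap() are the real interfaces used above.

/* Belongs in an early-boot x86 file such as arch/x86/kernel/cpu/common.c. */
static int __init x86_foo_setup(char *s)
{
	/* Clear the capability before feature-dependent init code looks at it. */
	setup_clear_cpu_cap(X86_FEATURE_FOO);
	return 1;	/* non-zero: option recognized and consumed */
}
__setup("nofoo", x86_foo_setup);

With the real hunk applied, booting with noxsave on the command line disables XSAVE use even on hardware that advertises it.
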
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 6ac55bd341a..86961519372 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 	case 0x0E: /* Core */
 	case 0x0F: /* Core Duo */
 	case 0x16: /* Celeron Core */
+	case 0x1C: /* Atom */
 		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 	case 0x0D: /* Pentium M (Dothan) */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 3c28ccd4974..a8363e5be4e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -168,10 +168,12 @@ static int check_powernow(void)
 	return 1;
 }
 
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 static void invalidate_entry(unsigned int entry)
 {
 	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
+#endif
 
 static int get_ranges(unsigned char *pst)
 {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4709ead2db5..f6b32d11235 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data)
 			data->batps);
 }
 
+static u32 freq_from_fid_did(u32 fid, u32 did)
+{
+	u32 mhz = 0;
+
+	if (boot_cpu_data.x86 == 0x10)
+		mhz = (100 * (fid + 0x10)) >> did;
+	else if (boot_cpu_data.x86 == 0x11)
+		mhz = (100 * (fid + 8)) >> did;
+	else
+		BUG();
+
+	return mhz * 1000;
+}
+
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
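
freq_from_fid_did() above encodes the family 0x10/0x11 P-state frequency formula: 100 MHz times (FID plus an offset of 0x10 or 8), halved once per DID step, returned in kHz. A quick stand-alone check of the family 0x10 branch follows; the example FID/DID values are arbitrary.

#include <stdio.h>

/* Stand-alone copy of the family 0x10 branch, for a sanity check only. */
static unsigned int khz_from_fid_did_fam10(unsigned int fid, unsigned int did)
{
	return ((100 * (fid + 0x10)) >> did) * 1000;
}

int main(void)
{
	/* In the next hunk, fid is bits 5:0 and did bits 8:6 of the P-state MSR word. */
	printf("%u kHz\n", khz_from_fid_did_fam10(0x0a, 0));	/* 100 * (0x0a + 0x10) = 2600 MHz */
	printf("%u kHz\n", khz_from_fid_did_fam10(0x0a, 1));	/* one DID step halves it: 1300 MHz */
	return 0;
}
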
@@ -923,8 +937,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency =
-			data->acpi_data.states[i].core_frequency * 1000;
+		/* Frequency may be rounded for these */
+		if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+			powernow_table[i].frequency =
+				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
+		} else
+			powernow_table[i].frequency =
+				data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -1215,13 +1234,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
+static const char ACPI_PSS_BIOS_BUG_MSG[] =
+	KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
 	int rc;
-	static int print_once;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1244,19 +1266,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		/*
-		 * Replace this one with print_once as soon as such a
-		 * thing gets introduced
-		 */
-		if (!print_once) {
-			WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
-				"does not provide ACPI _PSS objects "
-				"in a way that Linux understands. "
-				"Please report this to the Linux ACPI"
-				" maintainers and complain to your "
-				"BIOS vendor.\n");
-			print_once++;
-		}
+		printk_once(ACPI_PSS_BIOS_BUG_MSG);
 		goto err_out;
 	}
 	if (pol->cpu != 0) {
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1340dad417f..667188e0b5a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
 		},
 	},
+	{	/* Handle problems with rebooting on Sony VGN-Z540N */
+		.callback = set_bios_reboot,
+		.ident = "Sony VGN-Z540N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+		},
+	},
 	{ }
 };
 
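
The new table entry above is a data-only quirk: at boot, reboot.c hands reboot_dmi_table to dmi_check_system(), which compares each entry's DMI_MATCH strings against the machine's DMI data and runs the entry's callback on a match. The callback named here, set_bios_reboot, already exists in reboot.c; roughly, it does something like the sketch below (simplified, not a verbatim copy).

/* Simplified sketch of a set_bios_reboot-style DMI callback. */
static int __init set_bios_reboot_sketch(const struct dmi_system_id *d)
{
	if (reboot_type != BOOT_BIOS) {
		reboot_type = BOOT_BIOS;
		printk(KERN_INFO "%s detected: using the BIOS reboot method.\n",
		       d->ident);
	}
	return 0;
}
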
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 3a97a4cf187..8f0e13be36b 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -160,8 +160,10 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	/*
 	 * If large page isn't supported, there's no benefit in doing
 	 * this. Also, on non-NUMA, embedding is better.
+	 *
+	 * NOTE: disabled for now.
 	 */
-	if (!cpu_has_pse || !pcpu_need_numa())
+	if (true || !cpu_has_pse || !pcpu_need_numa())
 		return -EINVAL;
 
 	/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b6caf1329b1..32cf11e5728 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2897,8 +2897,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->tlb_flush(vcpu);
-	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+	kvm_set_cr3(vcpu, vcpu->arch.cr3);
 	return 1;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 49079a46687..3944e917e79 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -338,6 +338,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
@@ -351,7 +354,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
+	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
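
The kvm_set_cr4() change above avoids re-reading the guest's PDPTEs unless a CR4 bit that actually affects them (PGE, PSE or PAE) is being toggled. The (cr4 ^ old_cr4) & pdptr_bits expression is the usual "did any of these bits change" test; below is a small stand-alone illustration, with the CR4 bit values restated locally so the snippet compiles on its own.

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_PSE 0x00000010UL	/* bit 4 */
#define X86_CR4_PAE 0x00000020UL	/* bit 5 */
#define X86_CR4_PGE 0x00000080UL	/* bit 7 */

static bool pdptr_bits_changed(unsigned long old_cr4, unsigned long new_cr4)
{
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	/* XOR leaves a 1 wherever the values differ; the mask keeps only these bits. */
	return ((new_cr4 ^ old_cr4) & pdptr_bits) != 0;
}

int main(void)
{
	unsigned long old_cr4 = X86_CR4_PAE | X86_CR4_PGE;

	printf("%d\n", pdptr_bits_changed(old_cr4, old_cr4));			/* 0: no reload needed */
	printf("%d\n", pdptr_bits_changed(old_cr4, old_cr4 & ~X86_CR4_PGE));	/* 1: PGE toggled */
	return 0;
}
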
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 797f9f107cb..e17efed088c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg)
 	 */
 	__flush_tlb_all();
 
-	if (cache && boot_cpu_data.x86_model >= 4)
+	if (cache && boot_cpu_data.x86 >= 4)
 		wbinvd();
 }
 
@@ -208,20 +208,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86_model >= 4)
-			wbinvd();
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
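
Two things happen in the pageattr.c hunks. The first hunk fixes the wbinvd gate to test the CPU family (boot_cpu_data.x86) rather than the model, since WBINVD exists on family 4 (486) and later. The second precomputes the flush decision: with 4 KiB base pages, 1024 pages is 4 MiB, and past that point one cache-wide wbinvd on each CPU is assumed cheaper than clflushing every page, so the per-page loop is skipped. A trivial stand-alone restatement of that threshold arithmetic:

#include <stdio.h>

/* The "4M threshold" spelled out: 1024 base pages of 4 KiB each. */
int main(void)
{
	unsigned long numpages = 1024;
	unsigned long bytes = numpages * 4096;	/* 4,194,304 bytes = 4 MiB */
	int cache = 1;
	unsigned long do_wbinvd = cache && numpages >= 1024;

	printf("%lu bytes -> do_wbinvd = %lu\n", bytes, do_wbinvd);
	return 0;
}
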