Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/apic.c                 |  8
-rw-r--r--  arch/x86/kernel/apic/io_apic.c              |  3
-rw-r--r--  arch/x86/kernel/apic/probe_64.c             |  7
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |  3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |  1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c          |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c             | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c        |  4
-rw-r--r--  arch/x86/kernel/crash_dump_64.c             |  3
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c             |  4
-rw-r--r--  arch/x86/kernel/microcode_intel.c           | 16
-rw-r--r--  arch/x86/kernel/olpc.c                      |  5
-rw-r--r--  arch/x86/kernel/reboot.c                    |  2
-rw-r--r--  arch/x86/kernel/smp.c                       | 15
-rw-r--r--  arch/x86/kernel/smpboot.c                   | 85
-rw-r--r--  arch/x86/kernel/traps.c                     |  1
-rw-r--r--  arch/x86/kernel/vm86_32.c                   | 10
-rw-r--r--  arch/x86/kernel/xsave.c                     |  3
18 files changed, 145 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e3b534cda49a..e0f220e158c1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void)
 
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
+
+	/*
+	 * Now that local APIC setup is completed for BP, configure the fault
+	 * handling for interrupt remapping.
+	 */
+	if (!smp_processor_id() && intr_remapping_enabled)
+		enable_drhd_fault_handling();
+
 }
 
 #ifdef CONFIG_X86_X2APIC
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5c5b8f3dddb5..4d90327853b7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq,
 		irte.dlvry_mode = apic->irq_delivery_mode;
 		irte.vector = vector;
 		irte.dest_id = IRTE_DEST(destination);
+		irte.redir_hint = 1;
 
 		/* Set source-id of interrupt request */
 		set_ioapic_sid(&irte, apic_id);
@@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 		irte.dlvry_mode = apic->irq_delivery_mode;
 		irte.vector = cfg->vector;
 		irte.dest_id = IRTE_DEST(dest);
+		irte.redir_hint = 1;
 
 		/* Set source-id of interrupt request */
 		if (pdev)
@@ -3624,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
 
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 83e9be4778e2..fac49a845064 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void)
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
 	}
-
-	/*
-	 * Now that apic routing model is selected, configure the
-	 * fault handling for intr remapping.
-	 */
-	if (intr_remapping_enabled)
-		enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..81fa3cb12f39 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 	/* fixup topology information on multi-node processors */
-	if ((c->x86 == 0x10) && (c->x86_model == 9))
-		amd_fixup_dcm(c);
+	amd_fixup_dcm(c);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index cd8da247dda1..a2baafb2fe6d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
+		kfree(data->freq_table);
 		kfree(data);
 	}
 
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d071425..ac140c7be396 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
-	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+	if (boot_cpu_data.x86 < 0xf)
 		return 0;
 	/* In case some hypervisor doesn't pass SYSCFG through: */
 	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c2897b7b4a3b..46d58448c3af 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+		[ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(ITLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
-		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+		[ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = -1,
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 045b36cada65..994828899e09 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!csize)
 		return 0;
 
-	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!vaddr)
 		return -ENOMEM;
 
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	} else
 		memcpy(buf, vaddr + offset, csize);
 
+	set_iounmap_nonlazy();
 	iounmap(vaddr);
 	return csize;
 }
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
+	/* If it's a single step, TRAP bits are random */
+	if (dr6 & DR_STEP)
+		return NOTIFY_DONE;
+
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 356170262a93..2573689bda77 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 		/* For performance reasons, reuse mc area when possible */
 		if (!mc || mc_size > curr_mc_size) {
-			if (mc)
-				vfree(mc);
+			vfree(mc);
 			mc = vmalloc(mc_size);
 			if (!mc)
 				break;
@@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
 		    microcode_sanity_check(mc) < 0) {
-			vfree(mc);
 			break;
 		}
 
 		if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
-			if (new_mc)
-				vfree(new_mc);
+			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc = mc;
 			mc = NULL;	/* trigger new vmalloc */
@@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 		leftover -= mc_size;
 	}
 
-	if (mc)
-		vfree(mc);
+	vfree(mc);
 
 	if (leftover) {
-		if (new_mc)
-			vfree(new_mc);
+		vfree(new_mc);
 		state = UCODE_ERROR;
 		goto out;
 	}
@@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 		goto out;
 	}
 
-	if (uci->mc)
-		vfree(uci->mc);
+	vfree(uci->mc);
 	uci->mc = (struct microcode_intel *)new_mc;
 
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 0e0cdde519be..a2bd899b2b83 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -114,6 +114,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
 	unsigned long flags;
 	int ret = -EIO;
 	int i;
+	int restarts = 0;
 
 	spin_lock_irqsave(&ec_lock, flags);
 
@@ -169,7 +170,9 @@ restart:
 		if (wait_on_obf(0x6c, 1)) {
 			printk(KERN_ERR "olpc-ec: timeout waiting for"
 					" EC to provide data!\n");
-			goto restart;
+			if (restarts++ < 10)
+				goto restart;
+			goto err;
 		}
 		outbuf[i] = inb(0x68);
 		pr_devel("olpc-ec: received 0x%x\n", outbuf[i]);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e3af342fe83a..76a0d715a031 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -641,7 +641,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 74cca6014c0e..96af3a8e7326 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -174,10 +174,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -194,9 +194,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -254,7 +257,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus = native_smp_prepare_cpus,
 	.smp_cpus_done = native_smp_cpus_done,
 
-	.smp_send_stop = native_smp_send_stop,
+	.stop_other_cpus = native_stop_other_cpus,
 	.smp_send_reschedule = native_smp_send_reschedule,
 
 	.cpu_up = native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4dd708..016179e5ba09 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1383,11 +1383,94 @@ void play_dead_common(void)
 	local_irq_disable();
 }
 
+#define MWAIT_SUBSTATE_MASK		0xf
+#define MWAIT_SUBSTATE_SIZE		4
+
+#define CPUID_MWAIT_LEAF		5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1
+
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	unsigned int highest_cstate = 0;
+	unsigned int highest_subcstate = 0;
+	int i;
+	void *mwait_ptr;
+
+	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+		return;
+	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+		return;
+	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+		return;
+
+	eax = CPUID_MWAIT_LEAF;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/*
+	 * eax will be 0 if EDX enumeration is not valid.
+	 * Initialized below to cstate, sub_cstate value when EDX is valid.
+	 */
+	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+		eax = 0;
+	} else {
+		edx >>= MWAIT_SUBSTATE_SIZE;
+		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+			if (edx & MWAIT_SUBSTATE_MASK) {
+				highest_cstate = i;
+				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+			}
+		}
+		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+			(highest_subcstate - 1);
+	}
+
+	/*
+	 * This should be a memory location in a cache line which is
+	 * unlikely to be touched by other processors. The actual
+	 * content is immaterial as it is not actually modified in any way.
+	 */
+	mwait_ptr = &current_thread_info()->flags;
+
+	wbinvd();
+
+	while (1) {
+		/*
+		 * The CLFLUSH is a workaround for erratum AAI65 for
+		 * the Xeon 7400 series. It's not clear it is actually
+		 * needed, but it should be harmless in either case.
+		 * The WBINVD is insufficient due to the spurious-wakeup
+		 * case where we return around the loop.
+		 */
+		clflush(mwait_ptr);
+		__monitor(mwait_ptr, 0, 0);
+		mb();
+		__mwait(eax, 0);
+	}
+}
+
+static inline void hlt_play_dead(void)
+{
+	if (current_cpu_data.x86 >= 4)
+		wbinvd();
+
+	while (1) {
+		native_halt();
+	}
+}
+
 void native_play_dead(void)
 {
 	play_dead_common();
 	tboot_shutdown(TB_SHUTDOWN_WFS);
-	wbinvd_halt();
+
+	mwait_play_dead();	/* Only returns on failure */
+	hlt_play_dead();
 }
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 60788dee0f8a..9f4edeb21323 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs,
 				error_code, 1);
+		preempt_conditional_cli(regs);
 		return;
 	}
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5ffb5622f793..61fb98519622 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -551,8 +551,14 @@ cannot_handle:
 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
 	if (VMPI.is_vm86pus) {
-		if ((trapno == 3) || (trapno == 1))
-			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+		if ((trapno == 3) || (trapno == 1)) {
+			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+			/* setting this flag forces the code in entry_32.S to
+			   call save_v86_state() and change the stack pointer
+			   to KVM86->regs32 */
+			set_thread_flag(TIF_IRET);
+			return 0;
+		}
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 9c253bd65e24..547128546cc3 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
 	 * Setup init_xstate_buf to represent the init state of
 	 * all the features managed by the xsave
 	 */
-	init_xstate_buf = alloc_bootmem(xstate_size);
+	init_xstate_buf = alloc_bootmem_align(xstate_size,
+					      __alignof__(struct xsave_struct));
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
 	clts();