author     Jeremy Erickson <jerickso@cs.unc.edu>   2014-04-18 17:06:00 -0400
committer  Jeremy Erickson <jerickso@cs.unc.edu>   2014-04-18 17:06:00 -0400
commit     a215aa7b9ab3759c047201199fba64d3042d7f13 (patch)
tree       bca37493d9b2233450e6d3ffced1261d0e4f71fe /arch/x86
parent     d31199a77ef606f1d06894385f1852181ba6136b (diff)

Update 2.6.36 to 2.6.36.4 (branch wip-dissipation2-jerickso)
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cpufeature.h           |  2
-rw-r--r--  arch/x86/include/asm/io.h                   |  1
-rw-r--r--  arch/x86/include/asm/kvm_host.h             |  2
-rw-r--r--  arch/x86/include/asm/mmu_context.h          |  5
-rw-r--r--  arch/x86/include/asm/mrst.h                 |  2
-rw-r--r--  arch/x86/include/asm/processor.h            | 23
-rw-r--r--  arch/x86/include/asm/smp.h                  |  9
-rw-r--r--  arch/x86/kernel/apic/apic.c                 |  8
-rw-r--r--  arch/x86/kernel/apic/io_apic.c              |  3
-rw-r--r--  arch/x86/kernel/apic/probe_64.c             |  7
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |  3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |  1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c          |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c             | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c        |  4
-rw-r--r--  arch/x86/kernel/crash_dump_64.c             |  3
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c             |  4
-rw-r--r--  arch/x86/kernel/microcode_intel.c           | 16
-rw-r--r--  arch/x86/kernel/olpc.c                      |  5
-rw-r--r--  arch/x86/kernel/reboot.c                    |  2
-rw-r--r--  arch/x86/kernel/smp.c                       | 15
-rw-r--r--  arch/x86/kernel/smpboot.c                   | 85
-rw-r--r--  arch/x86/kernel/traps.c                     |  1
-rw-r--r--  arch/x86/kernel/vm86_32.c                   | 10
-rw-r--r--  arch/x86/kernel/xsave.c                     |  3
-rw-r--r--  arch/x86/kvm/i8259.c                        |  2
-rw-r--r--  arch/x86/kvm/mmu.c                          | 10
-rw-r--r--  arch/x86/kvm/svm.c                          | 43
-rw-r--r--  arch/x86/kvm/vmx.c                          | 24
-rw-r--r--  arch/x86/kvm/x86.c                          | 25
-rw-r--r--  arch/x86/kvm/x86.h                          |  5
-rw-r--r--  arch/x86/oprofile/op_model_amd.c            | 24
-rw-r--r--  arch/x86/vdso/Makefile                      |  4
-rw-r--r--  arch/x86/xen/enlighten.c                    |  4
-rw-r--r--  arch/x86/xen/smp.c                          |  6
35 files changed, 257 insertions, 116 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3f76523589af..f857bd39cdfb 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -152,7 +152,7 @@
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW        (6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS         (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5        (6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP         (6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT      (6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT         (6*32+13) /* Watchdog timer */
 #define X86_FEATURE_NODEID_MSR  (6*32+19) /* NodeId MSR */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3e9776123..6a45ec41ec26 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c52e2eb40a1e..6986312bb670 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -79,7 +79,7 @@
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         unsigned cpu = smp_processor_id();
 
         if (likely(prev != next)) {
-                /* stop flush ipis for the previous mm */
-                cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
                 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                 percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 /* Re-load page tables */
                 load_cr3(next->pgd);
 
+                /* stop flush ipis for the previous mm */
+                cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
                 /*
                  * load the LDT, if the LDT is different:
                  */
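A minimal sketch (not part of the patch; the helper name is hypothetical) of the ordering this hunk enforces: the CPU must stay in mm_cpumask(prev) until the new CR3 has been loaded, so a concurrent flush_tlb_mm(prev) on another CPU still sends this CPU a flush IPI while prev's translations may still be live in its TLB, instead of leaving stale entries behind.

/* Illustrative only -- shows the safe ordering, not the full switch_mm(). */
static inline void switch_mm_ordering_sketch(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             unsigned int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(next));   /* 1: join next's flush set */
        load_cr3(next->pgd);                      /* 2: switch page tables */
        cpumask_clear_cpu(cpu, mm_cpumask(prev)); /* 3: only now leave prev's set */
}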
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 16350740edf6..33fc2966beb7 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -26,7 +26,7 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
-static enum mrst_cpu_type mrst_identify_cpu(void)
+static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
         return __mrst_cpu_chip;
 }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ebaa04a8d3af..37ea41c63b49 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -768,29 +768,6 @@ extern unsigned long idle_halt;
 extern unsigned long idle_nomwait;
 extern bool c1e_detected;
 
-/*
- * on systems with caches, caches must be flashed as the absolute
- * last instruction before going into a suspended halt. Otherwise,
- * dirty data can linger in the cache and become stale on resume,
- * leading to strange errors.
- *
- * perform a variety of operations to guarantee that the compiler
- * will not reorder instructions. wbinvd itself is serializing
- * so the processor will not reorder.
- *
- * Systems without cache can just go into halt.
- */
-static inline void wbinvd_halt(void)
-{
-        mb();
-        /* check for clflush to determine if wbinvd is legal */
-        if (cpu_has_clflush)
-                asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
-        else
-                while (1)
-                        halt();
-}
-
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..4c2f63c7fc1b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
         void (*smp_prepare_cpus)(unsigned max_cpus);
         void (*smp_cpus_done)(unsigned max_cpus);
 
-        void (*smp_send_stop)(void);
+        void (*stop_other_cpus)(int wait);
         void (*smp_send_reschedule)(int cpu);
 
         int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-        smp_ops.smp_send_stop();
+        smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+        smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
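For orientation, a hedged sketch of how the two wrappers added above differ for callers: smp_send_stop() keeps the old best-effort behaviour (the native implementation below gives up after roughly a second), while stop_other_cpus() asks the implementation to wait until the other CPUs are actually offline. The example function names are hypothetical; within this patch only native_machine_shutdown() in reboot.c is switched to the waiting variant.

/* Hypothetical callers, illustrating the wait semantics only. */
static void example_panic_stop(void)
{
        smp_send_stop();        /* smp_ops.stop_other_cpus(0): send IPI, give up after ~1s */
}

static void example_orderly_shutdown(void)
{
        stop_other_cpus();      /* smp_ops.stop_other_cpus(1): wait until others are offline */
}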
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e3b534cda49a..e0f220e158c1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void)
 
         setup_apic_nmi_watchdog(NULL);
         apic_pm_activate();
+
+        /*
+         * Now that local APIC setup is completed for BP, configure the fault
+         * handling for interrupt remapping.
+         */
+        if (!smp_processor_id() && intr_remapping_enabled)
+                enable_drhd_fault_handling();
+
 }
 
 #ifdef CONFIG_X86_X2APIC
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5c5b8f3dddb5..4d90327853b7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq,
                 irte.dlvry_mode = apic->irq_delivery_mode;
                 irte.vector = vector;
                 irte.dest_id = IRTE_DEST(destination);
+                irte.redir_hint = 1;
 
                 /* Set source-id of interrupt request */
                 set_ioapic_sid(&irte, apic_id);
@@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
                 irte.dlvry_mode = apic->irq_delivery_mode;
                 irte.vector = cfg->vector;
                 irte.dest_id = IRTE_DEST(dest);
+                irte.redir_hint = 1;
 
                 /* Set source-id of interrupt request */
                 if (pdev)
@@ -3624,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
         msg.data |= MSI_DATA_VECTOR(cfg->vector);
         msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+        msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
         dmar_msi_write(irq, &msg);
 
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 83e9be4778e2..fac49a845064 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void)
                 /* need to update phys_pkg_id */
                 apic->phys_pkg_id = apicid_phys_pkg_id;
         }
-
-        /*
-         * Now that apic routing model is selected, configure the
-         * fault handling for intr remapping.
-         */
-        if (intr_remapping_enabled)
-                enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..81fa3cb12f39 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
         /* use socket ID also for last level cache */
         per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
         /* fixup topology information on multi-node processors */
-        if ((c->x86 == 0x10) && (c->x86_model == 9))
-                amd_fixup_dcm(c);
+        amd_fixup_dcm(c);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index cd8da247dda1..a2baafb2fe6d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
         per_cpu(acfreq_data, policy->cpu) = NULL;
         acpi_processor_unregister_performance(data->acpi_data,
                                               policy->cpu);
+        kfree(data->freq_table);
         kfree(data);
         }
 
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d071425..ac140c7be396 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                 return 0;
-        if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+        if (boot_cpu_data.x86 < 0xf)
                 return 0;
         /* In case some hypervisor doesn't pass SYSCFG through: */
         if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
         if (!use_intel())
                 return;
 
+        /*
+         * Check if someone has requested the delay of AP MTRR initialization,
+         * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+         * then we are done.
+         */
+        if (!mtrr_aps_delayed_init)
+                return;
+
         set_mtrr(~0U, 0, 0, 0);
         mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c2897b7b4a3b..46d58448c3af 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 [ C(DTLB) ] = {
         [ C(OP_READ) ] = {
                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-                [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+                [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
         },
         [ C(OP_WRITE) ] = {
                 [ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 [ C(ITLB) ] = {
         [ C(OP_READ) ] = {
                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
-                [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+                [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
         },
         [ C(OP_WRITE) ] = {
                 [ C(RESULT_ACCESS) ] = -1,
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 045b36cada65..994828899e09 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
         if (!csize)
                 return 0;
 
-        vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+        vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
         if (!vaddr)
                 return -ENOMEM;
 
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
         } else
                 memcpy(buf, vaddr + offset, csize);
 
+        set_iounmap_nonlazy();
         iounmap(vaddr);
         return csize;
 }
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
         dr6_p = (unsigned long *)ERR_PTR(args->err);
         dr6 = *dr6_p;
 
+        /* If it's a single step, TRAP bits are random */
+        if (dr6 & DR_STEP)
+                return NOTIFY_DONE;
+
         /* Do an early return if no trap bits are set in DR6 */
         if ((dr6 & DR_TRAP_BITS) == 0)
                 return NOTIFY_DONE;
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 356170262a93..2573689bda77 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
         /* For performance reasons, reuse mc area when possible */
         if (!mc || mc_size > curr_mc_size) {
-                if (mc)
-                        vfree(mc);
+                vfree(mc);
                 mc = vmalloc(mc_size);
                 if (!mc)
                         break;
@@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
         if (get_ucode_data(mc, ucode_ptr, mc_size) ||
             microcode_sanity_check(mc) < 0) {
-                vfree(mc);
                 break;
         }
 
         if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
-                if (new_mc)
-                        vfree(new_mc);
+                vfree(new_mc);
                 new_rev = mc_header.rev;
                 new_mc = mc;
                 mc = NULL;      /* trigger new vmalloc */
@@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
         leftover -= mc_size;
         }
 
-        if (mc)
-                vfree(mc);
+        vfree(mc);
 
         if (leftover) {
-                if (new_mc)
-                        vfree(new_mc);
+                vfree(new_mc);
                 state = UCODE_ERROR;
                 goto out;
         }
@@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                 goto out;
         }
 
-        if (uci->mc)
-                vfree(uci->mc);
+        vfree(uci->mc);
         uci->mc = (struct microcode_intel *)new_mc;
 
         pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 0e0cdde519be..a2bd899b2b83 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -114,6 +114,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
         unsigned long flags;
         int ret = -EIO;
         int i;
+        int restarts = 0;
 
         spin_lock_irqsave(&ec_lock, flags);
 
@@ -169,7 +170,9 @@ restart:
                 if (wait_on_obf(0x6c, 1)) {
                         printk(KERN_ERR "olpc-ec: timeout waiting for"
                                         " EC to provide data!\n");
-                        goto restart;
+                        if (restarts++ < 10)
+                                goto restart;
+                        goto err;
                 }
                 outbuf[i] = inb(0x68);
                 pr_devel("olpc-ec: received 0x%x\n", outbuf[i]);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e3af342fe83a..76a0d715a031 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -641,7 +641,7 @@ void native_machine_shutdown(void)
         /* O.K Now that I'm on the appropriate processor,
          * stop all of the others.
          */
-        smp_send_stop();
+        stop_other_cpus();
 #endif
 
         lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 74cca6014c0e..96af3a8e7326 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -174,10 +174,10 @@ asmlinkage void smp_reboot_interrupt(void)
         irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
         unsigned long flags;
-        unsigned long wait;
+        unsigned long timeout;
 
         if (reboot_force)
                 return;
@@ -194,9 +194,12 @@ static void native_smp_send_stop(void)
         if (num_online_cpus() > 1) {
                 apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-                /* Don't wait longer than a second */
-                wait = USEC_PER_SEC;
-                while (num_online_cpus() > 1 && wait--)
+                /*
+                 * Don't wait longer than a second if the caller
+                 * didn't ask us to wait.
+                 */
+                timeout = USEC_PER_SEC;
+                while (num_online_cpus() > 1 && (wait || timeout--))
                         udelay(1);
         }
 
@@ -254,7 +257,7 @@ struct smp_ops smp_ops = {
         .smp_prepare_cpus = native_smp_prepare_cpus,
         .smp_cpus_done = native_smp_cpus_done,
 
-        .smp_send_stop = native_smp_send_stop,
+        .stop_other_cpus = native_stop_other_cpus,
         .smp_send_reschedule = native_smp_send_reschedule,
 
         .cpu_up = native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4dd708..016179e5ba09 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1383,11 +1383,94 @@ void play_dead_common(void)
         local_irq_disable();
 }
 
+#define MWAIT_SUBSTATE_MASK             0xf
+#define MWAIT_SUBSTATE_SIZE             4
+
+#define CPUID_MWAIT_LEAF                5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead(void)
+{
+        unsigned int eax, ebx, ecx, edx;
+        unsigned int highest_cstate = 0;
+        unsigned int highest_subcstate = 0;
+        int i;
+        void *mwait_ptr;
+
+        if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+                return;
+        if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+                return;
+        if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+                return;
+
+        eax = CPUID_MWAIT_LEAF;
+        ecx = 0;
+        native_cpuid(&eax, &ebx, &ecx, &edx);
+
+        /*
+         * eax will be 0 if EDX enumeration is not valid.
+         * Initialized below to cstate, sub_cstate value when EDX is valid.
+         */
+        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+                eax = 0;
+        } else {
+                edx >>= MWAIT_SUBSTATE_SIZE;
+                for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+                        if (edx & MWAIT_SUBSTATE_MASK) {
+                                highest_cstate = i;
+                                highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+                        }
+                }
+                eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+                        (highest_subcstate - 1);
+        }
+
+        /*
+         * This should be a memory location in a cache line which is
+         * unlikely to be touched by other processors. The actual
+         * content is immaterial as it is not actually modified in any way.
+         */
+        mwait_ptr = &current_thread_info()->flags;
+
+        wbinvd();
+
+        while (1) {
+                /*
+                 * The CLFLUSH is a workaround for erratum AAI65 for
+                 * the Xeon 7400 series. It's not clear it is actually
+                 * needed, but it should be harmless in either case.
+                 * The WBINVD is insufficient due to the spurious-wakeup
+                 * case where we return around the loop.
+                 */
+                clflush(mwait_ptr);
+                __monitor(mwait_ptr, 0, 0);
+                mb();
+                __mwait(eax, 0);
+        }
+}
+
+static inline void hlt_play_dead(void)
+{
+        if (current_cpu_data.x86 >= 4)
+                wbinvd();
+
+        while (1) {
+                native_halt();
+        }
+}
+
 void native_play_dead(void)
 {
         play_dead_common();
         tboot_shutdown(TB_SHUTDOWN_WFS);
-        wbinvd_halt();
+
+        mwait_play_dead();      /* Only returns on failure */
+        hlt_play_dead();
 }
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 60788dee0f8a..9f4edeb21323 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
         if (regs->flags & X86_VM_MASK) {
                 handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                 error_code, 1);
+                preempt_conditional_cli(regs);
                 return;
         }
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5ffb5622f793..61fb98519622 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -551,8 +551,14 @@ cannot_handle:
 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
         if (VMPI.is_vm86pus) {
-                if ((trapno == 3) || (trapno == 1))
-                        return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+                if ((trapno == 3) || (trapno == 1)) {
+                        KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+                        /* setting this flag forces the code in entry_32.S to
+                           call save_v86_state() and change the stack pointer
+                           to KVM86->regs32 */
+                        set_thread_flag(TIF_IRET);
+                        return 0;
+                }
                 do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                 return 0;
         }
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 9c253bd65e24..547128546cc3 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
          * Setup init_xstate_buf to represent the init state of
          * all the features managed by the xsave
          */
-        init_xstate_buf = alloc_bootmem(xstate_size);
+        init_xstate_buf = alloc_bootmem_align(xstate_size,
+                                              __alignof__(struct xsave_struct));
         init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
         clts();
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 4b7b73ce2098..9f163e61283c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -570,6 +570,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
         s->pics[1].elcr_mask = 0xde;
         s->pics[0].pics_state = s;
         s->pics[1].pics_state = s;
+        s->pics[0].isr_ack = 0xff;
+        s->pics[1].isr_ack = 0xff;
 
         /*
          * Initialize PIO device
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 311f6dad8951..7fed5b793faf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2254,6 +2254,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                 return 0;
         }
         direct = !is_paging(vcpu);
+
+        if (mmu_check_root(vcpu, root_gfn))
+                return 1;
+
         for (i = 0; i < 4; ++i) {
                 hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2265,13 +2269,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                                 continue;
                         }
                         root_gfn = pdptr >> PAGE_SHIFT;
+                        if (mmu_check_root(vcpu, root_gfn))
+                                return 1;
                 } else if (vcpu->arch.mmu.root_level == 0)
                         root_gfn = 0;
-                if (mmu_check_root(vcpu, root_gfn))
-                        return 1;
                 if (tdp_enabled) {
                         direct = 1;
-                        root_gfn = i << 30;
+                        root_gfn = i << (30 - PAGE_SHIFT);
                 }
                 spin_lock(&vcpu->kvm->mmu_lock);
                 kvm_mmu_free_some_pages(vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8a3f9f64f86f..e7c3f3bd08fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -88,6 +88,14 @@ struct nested_state {
         /* A VMEXIT is required but not yet emulated */
         bool exit_required;
 
+        /*
+         * If we vmexit during an instruction emulation we need this to restore
+         * the l1 guest rip after the emulation
+         */
+        unsigned long vmexit_rip;
+        unsigned long vmexit_rsp;
+        unsigned long vmexit_rax;
+
         /* cache for intercepts of the guest */
         u16 intercept_cr_read;
         u16 intercept_cr_write;
@@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                 if (old == new) {
                         /* cr0 write with ts and mp unchanged */
                         svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-                        if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+                        if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
+                                svm->nested.vmexit_rip = kvm_rip_read(vcpu);
+                                svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+                                svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
                                 return;
+                        }
                 }
         }
 
@@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
         return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr0_write_interception(struct vcpu_svm *svm)
+{
+        struct kvm_vcpu *vcpu = &svm->vcpu;
+        int r;
+
+        r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+
+        if (svm->nested.vmexit_rip) {
+                kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
+                kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
+                kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
+                svm->nested.vmexit_rip = 0;
+        }
+
+        return r == EMULATE_DONE;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
         struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
         [SVM_EXIT_READ_CR4] = emulate_on_interception,
         [SVM_EXIT_READ_CR8] = emulate_on_interception,
         [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
-        [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+        [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
         [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
         [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
         [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
@@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
         load_host_msrs(vcpu);
+        kvm_load_ldt(ldt_selector);
         loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
         load_gs_index(gs_selector);
@@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
         loadsegment(gs, gs_selector);
 #endif
-        kvm_load_ldt(ldt_selector);
 
         reload_tss(vcpu);
 
@@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
         switch (func) {
+        case 0x00000001:
+                /* Mask out xsave bit as long as it is not supported by SVM */
+                entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+                break;
+        case 0x80000001:
+                if (nested)
+                        entry->ecx |= (1 << 2); /* Set SVM bit */
+                break;
         case 0x8000000A:
                 entry->eax = 1; /* SVM revision 1 */
                 entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7bddfab12013..b3986fec7e68 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -828,10 +828,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-        if (is_long_mode(&vmx->vcpu)) {
-                rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+        rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+        if (is_long_mode(&vmx->vcpu))
                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-        }
 #endif
         for (i = 0; i < vmx->save_nmsrs; ++i)
                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -846,23 +845,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 
         ++vmx->vcpu.stat.host_state_reload;
         vmx->host_state.loaded = 0;
-        if (vmx->host_state.fs_reload_needed)
-                loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+        if (is_long_mode(&vmx->vcpu))
+                rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
         if (vmx->host_state.gs_ldt_reload_needed) {
                 kvm_load_ldt(vmx->host_state.ldt_sel);
 #ifdef CONFIG_X86_64
                 load_gs_index(vmx->host_state.gs_sel);
-                wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
 #else
                 loadsegment(gs, vmx->host_state.gs_sel);
 #endif
         }
+        if (vmx->host_state.fs_reload_needed)
+                loadsegment(fs, vmx->host_state.fs_sel);
         reload_tss();
 #ifdef CONFIG_X86_64
-        if (is_long_mode(&vmx->vcpu)) {
-                rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-                wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
-        }
+        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
         if (current_thread_info()->status & TS_USEDFPU)
                 clts();
@@ -4249,11 +4248,6 @@ static int vmx_get_lpage_level(void)
         return PT_PDPE_LEVEL;
 }
 
-static inline u32 bit(int bitno)
-{
-        return 1 << (bitno & 31);
-}
-
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3a09c625d526..a5746de6f402 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -153,11 +153,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
-static inline u32 bit(int bitno)
-{
-        return 1 << (bitno & 31);
-}
-
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
         unsigned slot;
@@ -1994,9 +1989,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
         /* cpuid 0x80000001.ecx */
         const u32 kvm_supported_word6_x86_features =
-                F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-                F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
+                F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
                 0 /* SKINIT */ | 0 /* WDT */;
 
         /* all calls to cpuid_count() should be made on the same cpu */
@@ -2305,6 +2300,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
         events->exception.nr = vcpu->arch.exception.nr;
         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+        events->exception.pad = 0;
         events->exception.error_code = vcpu->arch.exception.error_code;
 
         events->interrupt.injected =
@@ -2318,12 +2314,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
         events->nmi.injected = vcpu->arch.nmi_injected;
         events->nmi.pending = vcpu->arch.nmi_pending;
         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+        events->nmi.pad = 0;
 
         events->sipi_vector = vcpu->arch.sipi_vector;
 
         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
                          | KVM_VCPUEVENT_VALID_SHADOW);
+        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2366,6 +2364,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
         dbgregs->dr6 = vcpu->arch.dr6;
         dbgregs->dr7 = vcpu->arch.dr7;
         dbgregs->flags = 0;
+        memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -2849,6 +2848,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
                 sizeof(ps->channels));
         ps->flags = kvm->arch.vpit->pit_state.flags;
         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+        memset(&ps->reserved, 0, sizeof(ps->reserved));
         return r;
 }
 
@@ -2912,10 +2912,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         struct kvm_memslots *slots, *old_slots;
         unsigned long *dirty_bitmap;
 
-        spin_lock(&kvm->mmu_lock);
-        kvm_mmu_slot_remove_write_access(kvm, log->slot);
-        spin_unlock(&kvm->mmu_lock);
-
         r = -ENOMEM;
         dirty_bitmap = vmalloc(n);
         if (!dirty_bitmap)
@@ -2937,6 +2933,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
                 kfree(old_slots);
 
+                spin_lock(&kvm->mmu_lock);
+                kvm_mmu_slot_remove_write_access(kvm, log->slot);
+                spin_unlock(&kvm->mmu_lock);
+
                 r = -EFAULT;
                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
                         vfree(dirty_bitmap);
@@ -3229,6 +3229,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 now_ns = timespec_to_ns(&now);
                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
                 user_ns.flags = 0;
+                memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
                 r = -EFAULT;
                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -5111,6 +5112,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+        if (sregs->cr4 & X86_CR4_OSXSAVE)
+                update_cpuid(vcpu);
         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
                 load_pdptrs(vcpu, vcpu->arch.cr3);
                 mmu_reset_needed = 1;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b7a404722d2b..0bf327453499 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -65,6 +65,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
         return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+static inline u32 bit(int bitno)
+{
+        return 1 << (bitno & 31);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 
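The bit() helper moved into this shared header is the one the svm.c hunk above relies on when masking CPUID feature bits; a minimal usage sketch follows (the surrounding function name is hypothetical, only the call pattern mirrors the patch).

/* Hypothetical caller -- mirrors how svm_set_supported_cpuid() uses bit(). */
static void mask_xsave_example(struct kvm_cpuid_entry2 *entry)
{
        entry->ecx &= ~bit(X86_FEATURE_XSAVE); /* clear the XSAVE feature bit */
}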
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index b67a6b5aa8d4..42623310c968 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -484,21 +484,29 @@ static int __init_ibs_nmi(void)
         return 0;
 }
 
-/* initialize the APIC for the IBS interrupts if available */
+/*
+ * check and reserve APIC extended interrupt LVT offset for IBS if
+ * available
+ *
+ * init_ibs() preforms implicitly cpu-local operations, so pin this
+ * thread to its current CPU
+ */
+
 static void init_ibs(void)
 {
-        ibs_caps = get_ibs_caps();
+        preempt_disable();
 
+        ibs_caps = get_ibs_caps();
         if (!ibs_caps)
-                return;
+                goto out;
 
-        if (__init_ibs_nmi()) {
+        if (__init_ibs_nmi() < 0)
                 ibs_caps = 0;
-                return;
-        }
-
-        printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
-               (unsigned)ibs_caps);
+        else
+                printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+
+out:
+        preempt_enable();
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 4a2afa1bac51..b6552b189bcd 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
                         -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y) += sysenter
 vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7d46c8441418..0f6cd146f1ee 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1017,10 +1017,6 @@ static void xen_reboot(int reason)
 {
         struct sched_shutdown r = { .reason = reason };
 
-#ifdef CONFIG_SMP
-        smp_send_stop();
-#endif
-
         if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                 BUG();
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..f4d010031465 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -400,9 +400,9 @@ static void stop_self(void *v)
         BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-        smp_call_function(stop_self, NULL, 0);
+        smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
         .cpu_disable = xen_cpu_disable,
         .play_dead = xen_play_dead,
 
-        .smp_send_stop = xen_smp_send_stop,
+        .stop_other_cpus = xen_stop_other_cpus,
         .smp_send_reschedule = xen_smp_send_reschedule,
 
         .send_call_func_ipi = xen_smp_send_call_function_ipi,