Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig                              |  7
 arch/x86/include/asm/cpufeature.h             |  6
 arch/x86/include/asm/jump_label.h             |  2
 arch/x86/include/asm/mutex_64.h               |  4
 arch/x86/include/asm/xen/page.h               | 31
 arch/x86/kernel/apic/x2apic_uv_x.c            |  2
 arch/x86/kernel/cpu/perf_event.c              | 21
 arch/x86/kernel/cpu/perf_event_intel.c        |  1
 arch/x86/kernel/cpu/perf_event_intel_uncore.c | 10
 arch/x86/kernel/kvm.c                         | 17
 arch/x86/kernel/microcode_amd.c               |  1
 arch/x86/kernel/reboot.c                      | 26
 arch/x86/kernel/sysfb_simplefb.c              |  4
 arch/x86/kvm/vmx.c                            | 28
 arch/x86/pci/mmconfig-shared.c                |  7
 arch/x86/platform/efi/efi.c                   | 11
 arch/x86/xen/p2m.c                            | 10
 arch/x86/xen/smp.c                            |  9
 arch/x86/xen/spinlock.c                       | 26
 19 files changed, 152 insertions(+), 71 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d37745..f67e839f06c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
-	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+	depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
 	---help---
 	  A local APIC (Advanced Programmable Interrupt Controller) is an
 	  integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
 	def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
 	tristate "CPU microcode loading support"
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL
 	select FW_LOADER
 	---help---
 
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 		 * Catch too early usage of this before alternatives
 		 * have run.
 		 */
-		asm goto("1: jmp %l[t_warn]\n"
+		asm_volatile_goto("1: jmp %l[t_warn]\n"
			 "2:\n"
			 ".section .altinstructions,\"a\"\n"
			 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-		asm goto("1: jmp %l[t_no]\n"
+		asm_volatile_goto("1: jmp %l[t_no]\n"
			 "2:\n"
			 ".section .altinstructions,\"a\"\n"
			 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
	 * have. Thus, we force the jump to the widest, 4-byte, signed relative
	 * offset even though the last would often fit in less bytes.
	 */
-	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+	asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
		 "2:\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"	/* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:"
+	asm_volatile_goto("1:"
		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
		".pushsection __jump_table, \"aw\" \n\t"
		_ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " decl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " decl %0\n"
			  " jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " incl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " incl %0\n"
			  " jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
	unsigned long pfn;
-	int ret = 0;
+	int ret;
 
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;
 
-	if (unlikely(mfn >= machine_to_phys_nr)) {
-		pfn = ~0;
-		goto try_override;
-	}
-	pfn = 0;
+	if (unlikely(mfn >= machine_to_phys_nr))
+		return ~0;
+
	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-	/* ret might be < 0 if there are no entries in the m2p for mfn */
	if (ret < 0)
-		pfn = ~0;
-	else if (get_phys_to_machine(pfn) != mfn)
+		return ~0;
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) != mfn) {
		/*
		 * If this appears to be a foreign mfn (because the pfn
		 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
		 */
		pfn = m2p_find_override_pfn(mfn, ~0);
+	}
 
	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
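
The split above separates the raw m2p[] lookup (mfn_to_pfn_no_overrides) from the override-aware mfn_to_pfn(). Callers that maintain the override table itself, such as m2p_add_override()/m2p_remove_override() in the arch/x86/xen/p2m.c hunks further down, need the raw variant: consulting the override list while it is being updated would hand back the backend's pfn instead of the raw mapping. A sketch of the intended call pattern, mirroring the p2m.c change below:

	/* Raw m2p read only -- no m2p_find_override_pfn() behind our back. */
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (get_phys_to_machine(pfn) == mfn)	/* pfn really maps back to mfn */
		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));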
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d25..a419814cea57 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
		break;
	case UV3_HUB_PART_NUMBER:
	case UV3_HUB_PART_NUMBER_X:
-		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
		break;
	}
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
		err = amd_pmu_init();
		break;
	default:
-		return 0;
+		err = -ENOTSUPP;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
@@ -1883,26 +1883,21 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-	userpg->cap_usr_time = 0;
-	userpg->cap_usr_time_zero = 0;
-	userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->cap_user_time = 0;
+	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return;
-
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+	if (!sched_clock_stable)
		return;
 
-	userpg->cap_usr_time = 1;
+	userpg->cap_user_time = 1;
	userpg->time_mult = this_cpu_read(cyc2ns);
	userpg->time_shift = CYC2NS_SCALE_FACTOR;
	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_usr_time_zero = 1;
-		userpg->time_zero = this_cpu_read(cyc2ns_offset);
-	}
+	userpg->cap_user_time_zero = 1;
+	userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
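
With cap_user_time set, userspace can turn raw TSC readings into nanoseconds from the mmapped page, with no syscall, using the time_shift/time_mult/time_offset fields the hunk above fills in. A sketch of the reader side, following the recipe documented in the comment in include/uapi/linux/perf_event.h:

#include <linux/perf_event.h>

/* Only meaningful while pc->cap_user_time is set. */
static inline __u64 tsc_to_ns(const struct perf_event_mmap_page *pc, __u64 cyc)
{
	__u64 quot = cyc >> pc->time_shift;
	__u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

	return pc->time_offset + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}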
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9db76c31b3c3..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
		break;
 
	case 55: /* Atom 22nm "Silvermont" */
+	case 77: /* Avoton "Silvermont" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
	box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
	struct intel_uncore_box *box;
	int i, size;
 
	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-	box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;
 
-	fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
	}
 
	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-	box = uncore_alloc_box(type, 0);
+	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
		if (pmu->func_id < 0)
			pmu->func_id = j;
 
-		box = uncore_alloc_box(type, cpu);
+		box = uncore_alloc_box(type, cpu_to_node(cpu));
		if (!box)
			return -ENOMEM;
 
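
The signature change pushes the placement decision to the call sites: pass a real NUMA node where one is meaningful (the per-CPU prepare path) and NUMA_NO_NODE where none is (group validation and PCI probe, which previously pinned the allocation to the current CPU's node or node 0). A hypothetical helper, not part of the patch, showing the rule the call sites now follow:

#include <linux/numa.h>
#include <linux/topology.h>

/* cpu < 0 means "no CPU in hand": let kzalloc_node() pick any node. */
static int uncore_box_node(int cpu)
{
	return (cpu < 0) ? NUMA_NO_NODE : cpu_to_node(cpu);
}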
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02dd..a0e2a8a80c94 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;
 
-	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+	if (!kvm_para_available())
+		return 0;
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+		return 0;
 
	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+	return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
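
The split follows the same two-stage pattern as the xen/spinlock.c change at the end of this diff: pv_lock_ops must be patched before alternatives/paravirt patching runs, while the static key may only be flipped once jump-label patching is functional, hence the pre-SMP early_initcall. The general shape, with a hypothetical name standing in for the real availability checks:

/* Sketch: stage two of a paravirt-lock init, run from an early initcall,
 * by which point jump labels work. pv_locks_wanted() is a stand-in,
 * not a real kernel API. */
static __init int pv_lock_activate(void)
{
	if (!pv_locks_wanted())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(pv_lock_activate);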
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
+		uci->cpu_sig.rev = rev;
		return 0;
	}
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..7e920bff99a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
		},
	},
+	{	/* Handle problems with rebooting on the Latitude E5410. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E5410",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+		},
+	},
	{	/* Handle problems with rebooting on the Latitude E5420. */
		.callback = set_pci_reboot,
		.ident = "Dell Latitude E5420",
@@ -352,12 +360,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
	},
	{	/* Handle problems with rebooting on the Precision M6600. */
		.callback = set_pci_reboot,
-		.ident = "Dell OptiPlex 990",
+		.ident = "Dell Precision M6600",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
		},
	},
+	{	/* Handle problems with rebooting on the Dell PowerEdge C6100. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
+	{	/* Some C6100 machines were shipped with vendor being 'Dell'. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
	{ }
 };
 
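
For context: these quirk entries are consumed at boot roughly as below. dmi_check_system() runs the .callback of every entry whose .matches all hit, and set_pci_reboot() switches the machine to the PCI reboot method. A sketch of the consuming side as it exists elsewhere in reboot.c (the initcall level here is illustrative):

static int __init reboot_init(void)
{
	/* Walk the table above; a full .matches hit runs .callback. */
	dmi_check_system(reboot_dmi_table);
	return 0;
}
core_initcall(reboot_init);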
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 22513e96b012..86179d409893 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
	 * the part that is occupied by the framebuffer */
	len = mode->height * mode->stride;
	len = PAGE_ALIGN(len);
-	if (len > si->lfb_size << 16) {
+	if (len > (u64)si->lfb_size << 16) {
		printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
		return -EINVAL;
	}
 
	/* setup IORESOURCE_MEM as framebuffer memory */
	memset(&res, 0, sizeof(res));
-	res.flags = IORESOURCE_MEM;
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	res.name = simplefb_resname;
	res.start = si->lfb_base;
	res.end = si->lfb_base + len - 1;
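
The (u64) cast fixes a 32-bit overflow: screen_info.lfb_size holds the VRAM size in 64 KiB units in a 32-bit field, so shifting it left by 16 in 32-bit arithmetic wraps for framebuffers of 4 GiB and up, making the size check fire spuriously and reject a valid framebuffer. A standalone illustration (the constant is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lfb_size = 0x10000;	/* 4 GiB expressed in 64 KiB units */

	uint64_t wrong = lfb_size << 16;		/* 32-bit shift: wraps to 0 */
	uint64_t right = (uint64_t)lfb_size << 16;	/* 0x100000000 */

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}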
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a1216de9ffda..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;
 
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}
 
	__set_bit(VCPU_EXREG_PDPTR,
@@ -5345,7 +5349,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
	 * There are errata that may cause this bit to not be set:
	 * AAK134, BY25.
	 */
-	if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
+	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+			cpu_has_virtual_nmis() &&
+			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -7775,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_avail);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_dirty);
	}
 
	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5596c7bdd327..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
	if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
		return -ENODEV;
 
-	if (start > end || !addr)
+	if (start > end)
		return -EINVAL;
 
	mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
		return -EEXIST;
	}
 
+	if (!addr) {
+		mutex_unlock(&pci_mmcfg_lock);
+		return -EINVAL;
+	}
+
	rc = -EBUSY;
	cfg = pci_mmconfig_alloc(seg, start, end, addr);
	if (cfg == NULL) {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-		    md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA)
-			continue;
+		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+			if (md->type != EFI_BOOT_SERVICES_CODE &&
+			    md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+				continue;
+		}
 
		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;
-	int ret = 0;
 
	pfn = page_to_pfn(page);
	if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
	 * frontend pages while they are being shared with the backend,
	 * because mfn_to_pfn (that ends up being called by GUPF) will
	 * return the backend pfn rather than the frontend pfn. */
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == mfn)
		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
	return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;
-	int ret = 0;
 
	pfn = page_to_pfn(page);
	mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d1e4777b4e75..31d04758b76f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+	 * expects __USER_DS
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
+
	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split in two init functions due to us
+ * using paravirt patching and jump labels patching and having to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
		return;
	}
 
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * While the jump_label init code needs to happend _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+	if (!xen_pvspin)
+		return 0;
+
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
	xen_pvspin = false;