Diffstat (limited to 'arch/x86')
53 files changed, 443 insertions, 189 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 602f57e590b5..33f71b01fd22 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -79,11 +79,14 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__ -m64
 
+        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
         # Don't autogenerate traditional x87, MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
         # Use -mpreferred-stack-boundary=3 if supported.
         KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
@@ -250,8 +253,8 @@ archclean:
 PHONY += kvmconfig
 kvmconfig:
 	$(if $(wildcard $(objtree)/.config),, $(error You need an existing .config for this target))
-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m -O $(objtree) $(objtree)/.config arch/x86/configs/kvm_guest.config
-	$(Q)yes "" | $(MAKE) oldconfig
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m -O $(objtree) $(objtree)/.config $(srctree)/arch/x86/configs/kvm_guest.config
+	$(Q)yes "" | $(MAKE) -f $(srctree)/Makefile oldconfig
 
 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index abb9eba61b50..dbe8dd2fe247 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -71,7 +71,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
 
 quiet_cmd_voffset = VOFFSET $@
       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -80,7 +80,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
 	$(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 17684615374b..57ab74df7eea 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -354,7 +354,7 @@ static void parse_elf(void *output)
 	free(phdrs);
 }
 
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 				  unsigned char *input_data,
 				  unsigned long input_len,
 				  unsigned char *output,
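A note on the asmlinkage __visible pairing that recurs throughout this series: decompress_kernel() is reached only from assembly, so whole-program LTO sees no C callers and may drop or localize the symbol. A rough sketch of the assumed semantics follows; the macro expansions and the function name are illustrative, not the kernel's exact definitions:

    /* Assumed expansions, for illustration only. */
    #define asmlinkage __attribute__((regparm(0)))        /* x86-32 C ABI  */
    #define __visible  __attribute__((externally_visible))

    /* externally_visible tells the optimizer the symbol is referenced
     * outside the LTO unit (here: from assembly), so it must be kept. */
    asmlinkage __visible void *entry_from_asm(void *arg)
    {
            return arg;
    }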
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index b18df579c0e9..36f7125945e3 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -63,6 +63,7 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index a8091216963b..68c05398bba9 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 					 unsigned long addr, pte_t *ptep)
 {
+	ptep_clear_flush(vma, addr, ptep);
 }
 
 static inline int huge_pte_none(pte_t pte)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fcaf9c961265..7de069afb382 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -60,7 +60,7 @@
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8de6d9cf3b95..678205195ae1 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE_ORDER	2
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
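The order bump doubles the 64-bit kernel stack from two pages to four. A quick worked check of the macros above, assuming the usual 4 KiB x86 page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                 /* assumed x86 page size */
    #define THREAD_SIZE_ORDER 2
    #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)

    int main(void)
    {
            /* order 1: 4096 << 1 = 8192; order 2: 4096 << 2 = 16384 */
            printf("THREAD_SIZE = %lu\n", THREAD_SIZE);  /* 16384 */
            return 0;
    }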
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c827ace3121b..fcf2b3ae1bf0 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -384,7 +384,7 @@
 #define MSR_IA32_MISC_ENABLE_MWAIT_BIT		18
 #define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT	22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT	23
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
 #define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT	34
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 3a2ae4c88948..31368207837c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -31,7 +31,7 @@ static char temp_stack[4096];
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assembly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
 	return acpi_enter_sleep_state(state);
 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6ad4658de705..992060e09897 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 		cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
 
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
 	return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+	return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
 	int nr;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index eeee23ff75ef..68317c80de7f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,7 +598,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	struct mce m;
 	int i;
-	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -618,8 +617,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
-		v = &get_cpu_var(mce_polled_error);
-		set_bit(0, v);
+		this_cpu_write(mce_polled_error, 1);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
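The replacement collapses a pointer-based per-CPU access plus an atomic set_bit() into one preemption-safe store. A hedged sketch of the before/after shapes (names mirror the hunk; this is not a standalone build unit):

    DEFINE_PER_CPU(unsigned long, mce_polled_error);

    static void mark_polled_error_old(void)
    {
            /* get_cpu_var() disables preemption and must be paired
             * with put_cpu_var(); set_bit() is a full atomic RMW. */
            unsigned long *v = &get_cpu_var(mce_polled_error);
            set_bit(0, v);
            put_cpu_var(mce_polled_error);
    }

    static void mark_polled_error_new(void)
    {
            /* One preemption-safe plain store; no atomic op needed. */
            this_cpu_write(mce_polled_error, 1);
    }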
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..9a316b21df8b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+static DEFINE_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 #define CMCI_POLL_INTERVAL	(30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
 	int bank;
 	u64 val;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	owned = __get_cpu_var(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
 		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
 	int i;
 	int bios_wrong_thresh = 0;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 		int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
 		pr_info_once(
 			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++)
 		__cmci_disable_bank(i);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
 	if (!cmci_supported(&banks))
 		return;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	__cmci_disable_bank(bank);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d921b7ee6595..36a1bb6d1ee0 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
 	smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	__smp_thermal_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index fe6b1c86645b..7245980186ee 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
 	mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
 	entering_irq();
 	__smp_threshold_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index aa333d966886..adb02aa62af5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 059218ed5208..619f7699487a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -59,7 +59,7 @@
 #define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
 #define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
 #define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
-#define RAPL_IDX_PP1_NRG_STAT	3	/* DRAM */
+#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
 /* Clients have PP0, PKG */
@@ -72,6 +72,12 @@
 			 1<<RAPL_IDX_PKG_NRG_STAT|\
 			 1<<RAPL_IDX_RAM_NRG_STAT)
 
+/* Servers have PP0, PKG, RAM, PP1 */
+#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
+			 1<<RAPL_IDX_PKG_NRG_STAT|\
+			 1<<RAPL_IDX_RAM_NRG_STAT|\
+			 1<<RAPL_IDX_PP1_NRG_STAT)
+
 /*
  * event code: LSB 8 bits, passed in attr->config
  * any other bit is reserved
@@ -425,6 +431,24 @@ static struct attribute *rapl_events_cln_attr[] = {
 	NULL,
 };
 
+static struct attribute *rapl_events_hsw_attr[] = {
+	EVENT_PTR(rapl_cores),
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_gpu),
+	EVENT_PTR(rapl_ram),
+
+	EVENT_PTR(rapl_cores_unit),
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_gpu_unit),
+	EVENT_PTR(rapl_ram_unit),
+
+	EVENT_PTR(rapl_cores_scale),
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_gpu_scale),
+	EVENT_PTR(rapl_ram_scale),
+	NULL,
+};
+
 static struct attribute_group rapl_pmu_events_group = {
 	.name = "events",
 	.attrs = NULL, /* patched at runtime */
@@ -511,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
 	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
 	int phys_id = topology_physical_package_id(cpu);
 	u64 ms;
+	u64 msr_rapl_power_unit_bits;
 
 	if (pmu)
 		return 0;
@@ -518,6 +543,10 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
+	/* protect rdmsrl() to handle virtualization */
+	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+		return -1;
+
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
@@ -531,8 +560,7 @@ static int rapl_cpu_prepare(int cpu)
 	 *
 	 * we cache in local PMU instance
 	 */
-	rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-	pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+	pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 	pmu->pmu = &rapl_pmu_class;
 
 	/*
@@ -631,11 +659,14 @@ static int __init rapl_pmu_init(void)
 	switch (boot_cpu_data.x86_model) {
 	case 42: /* Sandy Bridge */
 	case 58: /* Ivy Bridge */
-	case 60: /* Haswell */
-	case 69: /* Haswell-Celeron */
 		rapl_cntr_mask = RAPL_IDX_CLN;
 		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
 		break;
+	case 60: /* Haswell */
+	case 69: /* Haswell-Celeron */
+		rapl_cntr_mask = RAPL_IDX_HSW;
+		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
+		break;
 	case 45: /* Sandy Bridge-EP */
 	case 62: /* IvyTown */
 		rapl_cntr_mask = RAPL_IDX_SRV;
@@ -650,7 +681,9 @@ static int __init rapl_pmu_init(void)
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
-		rapl_cpu_prepare(cpu);
+		ret = rapl_cpu_prepare(cpu);
+		if (ret)
+			goto out;
 		rapl_cpu_init(cpu);
 	}
 
@@ -673,6 +706,7 @@ static int __init rapl_pmu_init(void)
 		 hweight32(rapl_cntr_mask),
 		 ktime_to_ms(pmu->timer_interval));
 
+out:
 	cpu_notifier_register_done();
 
 	return 0;
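The prepare path now reads MSR_RAPL_POWER_UNIT with rdmsrl_safe(), which traps the #GP a hypervisor raises for an unimplemented MSR and reports failure instead of oopsing. A hedged sketch of the probe-then-parse pattern, with the bit layout taken from the hunk itself (rapl_probe_hw_unit is an illustrative name):

    static int rapl_probe_hw_unit(u64 *hw_unit)
    {
            u64 bits;

            if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &bits))
                    return -1;      /* RDMSR faulted, e.g. under a VMM */

            *hw_unit = (bits >> 8) & 0x1FULL;  /* energy unit, bits 12:8 */
            return 0;
    }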
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 384df5105fbc..136ac74dee82 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -27,6 +27,7 @@
 static int __init x86_rdrand_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+	setup_clear_cpu_cap(X86_FEATURE_RDSEED);
 	return 1;
 }
 __setup("nordrand", x86_rdrand_setup);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b0cc3809723d..6cda0baeac9d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -17,6 +17,7 @@
 #include <asm/dma.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
@@ -240,7 +241,7 @@ static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
 	return base;
 }
 
-#define KB(x)	((x) * 1024)
+#define KB(x)	((x) * 1024UL)
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
 
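The UL suffix matters once these macros are stacked: GB(4) expands to 4 * 1024 * 1024 * 1024 = 2^32, which overflows 32-bit int arithmetic (undefined behavior) but is computed correctly as unsigned long. A small standalone check:

    #include <stdio.h>

    #define KB(x) ((x) * 1024UL)   /* UL promotes the whole chain */
    #define MB(x) (KB (KB (x)))
    #define GB(x) (MB (KB (x)))

    int main(void)
    {
            /* With a plain 1024 this would overflow a 32-bit int. */
            printf("GB(4) = %lu bytes\n", GB(4));  /* 4294967296 on LP64 */
            return 0;
    }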
@@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 	}
 }
 
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+	boot_hpet_disable = 1;
+	pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
 	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
+	/*
+	 * HPET on current version of Baytrail platform has accuracy
+	 * problems, disable it for now:
+	 */
+	{ PCI_VENDOR_ID_INTEL, 0x0f00,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{}
 };
 
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c61a14a4a310..d6c1b9836995 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
 	reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
 	sanitize_boot_params(&boot_params);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 85126ccbdf6b..068054f4bf20 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
 	}
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 8d80ae011603..4177bfbc80b0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-static int boot_hpet_disable;
+int boot_hpet_disable;
 int hpet_force_user;
 static int hpet_verbose;
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 79a3f9682871..61b17dc2c277 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -897,9 +897,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	switch (kcb->kprobe_status) {
-	case KPROBE_HIT_SS:
-	case KPROBE_REENTER:
+	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
+		/* This must happen on single-stepping */
+		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
+			kcb->kprobe_status != KPROBE_REENTER);
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
@@ -914,9 +915,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		else
 			reset_current_kprobe();
 		preempt_enable_no_resched();
-		break;
-	case KPROBE_HIT_ACTIVE:
-	case KPROBE_HIT_SSDONE:
+	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
+		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 		/*
 		 * We increment the nmissed count for accounting,
 		 * we can also use npre/npostfault count for accounting
@@ -945,10 +945,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * fixup routine could not handle it,
 		 * Let do_page_fault() fix it.
 		 */
-		break;
-	default:
-		break;
 	}
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index af1d14a9ebda..dcbbaa165bde 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	 * IRET leaking the high bits of the kernel stack address.
 	 */
 #ifdef CONFIG_X86_64
-	if (!ldt_info.seg_32bit) {
+	if (!ldt_info.seg_32bit && !sysctl_ldt16) {
 		error = -EINVAL;
 		goto out_unlock;
 	}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9c0280f93d05..898d077617a9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 654b46574b91..52b1157c53eb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -114,8 +114,8 @@ EXPORT_SYMBOL(machine_real_restart);
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
-	if (reboot_type != BOOT_CF9) {
-		reboot_type = BOOT_CF9;
+	if (reboot_type != BOOT_CF9_FORCE) {
+		reboot_type = BOOT_CF9_FORCE;
 		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
 			d->ident, "PCI");
 	}
@@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 		},
 	},
 
+	/* Certec */
+	{	/* Handle problems with rebooting on Certec BPC600 */
+		.callback = set_pci_reboot,
+		.ident = "Certec BPC600",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+		},
+	},
+
 	/* Dell */
 	{	/* Handle problems with rebooting on Dell DXP061 */
 		.callback = set_bios_reboot,
@@ -458,20 +468,23 @@ void __attribute__((weak)) mach_reboot_fixups(void)
 }
 
 /*
- * Windows compatible x86 hardware expects the following on reboot:
+ * To the best of our knowledge Windows compatible x86 hardware expects
+ * the following on reboot:
  *
  * 1) If the FADT has the ACPI reboot register flag set, try it
  * 2) If still alive, write to the keyboard controller
  * 3) If still alive, write to the ACPI reboot register again
  * 4) If still alive, write to the keyboard controller again
  * 5) If still alive, call the EFI runtime service to reboot
- * 6) If still alive, write to the PCI IO port 0xCF9 to reboot
- * 7) If still alive, inform BIOS to do a proper reboot
+ * 6) If no EFI runtime service, call the BIOS to do a reboot
+ *
+ * We default to following the same pattern. We also have
+ * two other reboot methods: 'triple fault' and 'PCI', which
+ * can be triggered via the reboot= kernel boot option or
+ * via quirks.
  *
- * If the machine is still alive at this stage, it gives up. We default to
- * following the same pattern, except that if we're still alive after (7) we'll
- * try to force a triple fault and then cycle between hitting the keyboard
- * controller and doing that
+ * This means that this function can never return; it can misbehave
+ * by not rebooting properly and hanging.
  */
 static void native_machine_emergency_restart(void)
 {
@@ -492,6 +505,11 @@ static void native_machine_emergency_restart(void)
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
 		switch (reboot_type) {
+		case BOOT_ACPI:
+			acpi_reboot();
+			reboot_type = BOOT_KBD;
+			break;
+
 		case BOOT_KBD:
 			mach_reboot_fixups(); /* For board specific fixups */
 
@@ -509,43 +527,29 @@ static void native_machine_emergency_restart(void)
 			}
 			break;
 
-		case BOOT_TRIPLE:
-			load_idt(&no_idt);
-			__asm__ __volatile__("int3");
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_KBD;
-			break;
-
-		case BOOT_BIOS:
-			machine_real_restart(MRR_BIOS);
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_TRIPLE;
-			break;
-
-		case BOOT_ACPI:
-			acpi_reboot();
-			reboot_type = BOOT_KBD;
-			break;
-
 		case BOOT_EFI:
 			if (efi_enabled(EFI_RUNTIME_SERVICES))
 				efi.reset_system(reboot_mode == REBOOT_WARM ?
 						 EFI_RESET_WARM :
 						 EFI_RESET_COLD,
 						 EFI_SUCCESS, 0, NULL);
-			reboot_type = BOOT_CF9_COND;
+			reboot_type = BOOT_BIOS;
+			break;
+
+		case BOOT_BIOS:
+			machine_real_restart(MRR_BIOS);
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_CF9_SAFE;
 			break;
 
-		case BOOT_CF9:
+		case BOOT_CF9_FORCE:
 			port_cf9_safe = true;
 			/* Fall through */
 
-		case BOOT_CF9_COND:
+		case BOOT_CF9_SAFE:
 			if (port_cf9_safe) {
-				u8 reboot_code = reboot_mode == REBOOT_WARM ?
-					0x06 : 0x0E;
+				u8 reboot_code = reboot_mode == REBOOT_WARM ? 0x06 : 0x0E;
 				u8 cf9 = inb(0xcf9) & ~reboot_code;
 				outb(cf9|2, 0xcf9); /* Request hard reset */
 				udelay(50);
@@ -553,7 +557,15 @@ static void native_machine_emergency_restart(void)
 				outb(cf9|reboot_code, 0xcf9);
 				udelay(50);
 			}
-			reboot_type = BOOT_BIOS;
+			reboot_type = BOOT_TRIPLE;
+			break;
+
+		case BOOT_TRIPLE:
+			load_idt(&no_idt);
+			__asm__ __volatile__("int3");
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_KBD;
 			break;
 		}
 	}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 7c3a5a61f2e4..be8e1bde07aa 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
 	ack_APIC_irq();
 	irq_enter();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 57409f6b8c62..f73b5d435bdc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -357,7 +357,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index f6584a90aba3..b99b9ad8540c 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -33,7 +36,7 @@
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -53,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -61,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
 	if (cap & ctl & BIT(8)) {
 		ctl &= ~BIT(8);
+
+		/* Interrupt routing set to ignore */
+		irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
 		/* Don't let users change irq affinity via procfs */
 		no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
 	/* need to update phys_pkg_id */
 	apic->phys_pkg_id = apicid_phys_pkg_id;
-	apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+	if (!irq_routing_comply)
+		apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index f9c6e56e14b5..9531fbb123ba 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdata->monotonic_time_sec	= tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
 	vdata->monotonic_time_snsec	= tk->xtime_nsec
-					+ (tk->wall_to_monotonic.tv_nsec
+					+ ((u64)tk->wall_to_monotonic.tv_nsec
 					<< tk->shift);
 	while (vdata->monotonic_time_snsec >=
 					(((u64)NSEC_PER_SEC) << tk->shift)) {
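The cast forces the shift to happen in 64-bit arithmetic; without it, a 32-bit tv_nsec shifted left by the clocksource shift loses its high bits before the result ever reaches the u64 field. A hedged standalone illustration (the shift value is assumed):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t nsec = 999999999u;  /* stand-in for a 32-bit tv_nsec */
            int shift = 8;               /* assumed clocksource shift     */

            /* Shift in 32 bits, widen after: high bits already gone.   */
            uint64_t narrow = (uint64_t)(nsec << shift);
            /* Widen first, then shift: what the (u64) cast achieves.   */
            uint64_t wide = (uint64_t)nsec << shift;

            printf("%llu vs %llu\n", (unsigned long long)narrow,
                   (unsigned long long)wide);
            return 0;
    }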
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bea60671ef8a..f47a104a749c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -308,7 +308,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	const u32 kvm_supported_word9_x86_features =
 		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
 		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
-		F(ADX);
+		F(ADX) | F(SMAP);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a2a1bb7ed8c1..eeecbed26ac7 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -48,6 +48,14 @@ static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_SMEP));
 }
 
+static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_SMAP));
+}
+
 static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
53 | struct kvm_cpuid_entry2 *best; | 61 | struct kvm_cpuid_entry2 *best; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f5704d9e5ddc..813d31038b93 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3601,20 +3601,27 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, | |||
3601 | } | 3601 | } |
3602 | } | 3602 | } |
3603 | 3603 | ||
3604 | static void update_permission_bitmask(struct kvm_vcpu *vcpu, | 3604 | void update_permission_bitmask(struct kvm_vcpu *vcpu, |
3605 | struct kvm_mmu *mmu, bool ept) | 3605 | struct kvm_mmu *mmu, bool ept) |
3606 | { | 3606 | { |
3607 | unsigned bit, byte, pfec; | 3607 | unsigned bit, byte, pfec; |
3608 | u8 map; | 3608 | u8 map; |
3609 | bool fault, x, w, u, wf, uf, ff, smep; | 3609 | bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0; |
3610 | 3610 | ||
3611 | smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); | 3611 | cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
3612 | cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); | ||
3612 | for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { | 3613 | for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { |
3613 | pfec = byte << 1; | 3614 | pfec = byte << 1; |
3614 | map = 0; | 3615 | map = 0; |
3615 | wf = pfec & PFERR_WRITE_MASK; | 3616 | wf = pfec & PFERR_WRITE_MASK; |
3616 | uf = pfec & PFERR_USER_MASK; | 3617 | uf = pfec & PFERR_USER_MASK; |
3617 | ff = pfec & PFERR_FETCH_MASK; | 3618 | ff = pfec & PFERR_FETCH_MASK; |
3619 | /* | ||
3620 | * PFERR_RSVD_MASK bit is set in PFEC if the access is not | ||
3621 | * subject to SMAP restrictions, and cleared otherwise. The | ||
3622 | * bit is only meaningful if the SMAP bit is set in CR4. | ||
3623 | */ | ||
3624 | smapf = !(pfec & PFERR_RSVD_MASK); | ||
3618 | for (bit = 0; bit < 8; ++bit) { | 3625 | for (bit = 0; bit < 8; ++bit) { |
3619 | x = bit & ACC_EXEC_MASK; | 3626 | x = bit & ACC_EXEC_MASK; |
3620 | w = bit & ACC_WRITE_MASK; | 3627 | w = bit & ACC_WRITE_MASK; |
@@ -3626,12 +3633,33 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, | |||
3626 | /* Allow supervisor writes if !cr0.wp */ | 3633 | /* Allow supervisor writes if !cr0.wp */ |
3627 | w |= !is_write_protection(vcpu) && !uf; | 3634 | w |= !is_write_protection(vcpu) && !uf; |
3628 | /* Disallow supervisor fetches of user code if cr4.smep */ | 3635 | /* Disallow supervisor fetches of user code if cr4.smep */ |
3629 | x &= !(smep && u && !uf); | 3636 | x &= !(cr4_smep && u && !uf); |
3637 | |||
3638 | /* | ||
3639 | * SMAP:kernel-mode data accesses from user-mode | ||
3640 | * mappings should fault. A fault is considered | ||
3641 | * as a SMAP violation if all of the following | ||
3642 | * conditions are ture: | ||
3643 | * - X86_CR4_SMAP is set in CR4 | ||
3644 | * - An user page is accessed | ||
3645 | * - Page fault in kernel mode | ||
3646 | * - if CPL = 3 or X86_EFLAGS_AC is clear | ||
3647 | * | ||
3648 | * Here, we cover the first three conditions. | ||
3649 | * The fourth is computed dynamically in | ||
3650 | * permission_fault() and is in smapf. | ||
3651 | * | ||
3652 | * Also, SMAP does not affect instruction | ||
3653 | * fetches, add the !ff check here to make it | ||
3654 | * clearer. | ||
3655 | */ | ||
3656 | smap = cr4_smap && u && !uf && !ff; | ||
3630 | } else | 3657 | } else |
3631 | /* Not really needed: no U/S accesses on ept */ | 3658 | /* Not really needed: no U/S accesses on ept */ |
3632 | u = 1; | 3659 | u = 1; |
3633 | 3660 | ||
3634 | fault = (ff && !x) || (uf && !u) || (wf && !w); | 3661 | fault = (ff && !x) || (uf && !u) || (wf && !w) || |
3662 | (smapf && smap); | ||
3635 | map |= fault << bit; | 3663 | map |= fault << bit; |
3636 | } | 3664 | } |
3637 | mmu->permissions[byte] = map; | 3665 | mmu->permissions[byte] = map; |
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 292615274358..3842e70bdb7c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,11 +44,17 @@
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
@@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly);
+void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+		bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
  * Will a fault with a given page-fault error code (pfec) cause a permission
  * fault with the given access (in ACC_* format)?
  */
-static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
-				    unsigned pfec)
+static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				    unsigned pte_access, unsigned pfec)
 {
-	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+	int cpl = kvm_x86_ops->get_cpl(vcpu);
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+	/*
+	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
+	 *
+	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
+	 * (these are implicit supervisor accesses) regardless of the value
+	 * of EFLAGS.AC.
+	 *
+	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
+	 * the result in X86_EFLAGS_AC. We then insert it in place of
+	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
+	 * but it will be one in index if SMAP checks are being overridden.
+	 * It is important to keep this branchless.
+	 */
+	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
+	int index = (pfec >> 1) +
+		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
+	return (mmu->permissions[index] >> pte_access) & 1;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
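The arithmetic in permission_fault() is dense, so a worked example may help. With X86_EFLAGS_AC at bit 18 and PFERR_RSVD_BIT at 3, the shift constant is 18 - 3 + 1 = 16, and pfec >> 1 moves the RSVD position down to bit 2, so an active AC override adds 4 to the table index. A hedged standalone check of just that computation:

    #include <stdio.h>

    #define X86_EFLAGS_AC_BIT 18
    #define X86_EFLAGS_AC (1UL << X86_EFLAGS_AC_BIT)
    #define PFERR_RSVD_BIT 3

    static int smap_index(int cpl, unsigned long rflags, unsigned pfec)
    {
            /* cpl - 3 is all ones for CPL 0-2 and zero for CPL 3. */
            unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

            return (int)(pfec >> 1) +
                   (int)(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
    }

    int main(void)
    {
            unsigned pfec = 1U << 1;  /* a write fault */

            printf("%d\n", smap_index(0, X86_EFLAGS_AC, pfec)); /* 5: overridden */
            printf("%d\n", smap_index(0, 0, pfec));             /* 1: enforced   */
            printf("%d\n", smap_index(3, X86_EFLAGS_AC, pfec)); /* 1: CPL 3      */
            return 0;
    }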
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b1e6c1bf68d3..123efd3ec29f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -353,7 +353,7 @@ retry_walk:
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
 
-	if (unlikely(permission_fault(mmu, pte_access, access))) {
+	if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1320e0f8e611..138ceffc6377 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | |||
503 | [number##_HIGH] = VMCS12_OFFSET(name)+4 | 503 | [number##_HIGH] = VMCS12_OFFSET(name)+4 |
504 | 504 | ||
505 | 505 | ||
506 | static const unsigned long shadow_read_only_fields[] = { | 506 | static unsigned long shadow_read_only_fields[] = { |
507 | /* | 507 | /* |
508 | * We do NOT shadow fields that are modified when L0 | 508 | * We do NOT shadow fields that are modified when L0 |
509 | * traps and emulates any vmx instruction (e.g. VMPTRLD, | 509 | * traps and emulates any vmx instruction (e.g. VMPTRLD, |
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = { | |||
526 | GUEST_LINEAR_ADDRESS, | 526 | GUEST_LINEAR_ADDRESS, |
527 | GUEST_PHYSICAL_ADDRESS | 527 | GUEST_PHYSICAL_ADDRESS |
528 | }; | 528 | }; |
529 | static const int max_shadow_read_only_fields = | 529 | static int max_shadow_read_only_fields = |
530 | ARRAY_SIZE(shadow_read_only_fields); | 530 | ARRAY_SIZE(shadow_read_only_fields); |
531 | 531 | ||
532 | static const unsigned long shadow_read_write_fields[] = { | 532 | static unsigned long shadow_read_write_fields[] = { |
533 | GUEST_RIP, | 533 | GUEST_RIP, |
534 | GUEST_RSP, | 534 | GUEST_RSP, |
535 | GUEST_CR0, | 535 | GUEST_CR0, |
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = { | |||
558 | HOST_FS_SELECTOR, | 558 | HOST_FS_SELECTOR, |
559 | HOST_GS_SELECTOR | 559 | HOST_GS_SELECTOR |
560 | }; | 560 | }; |
561 | static const int max_shadow_read_write_fields = | 561 | static int max_shadow_read_write_fields = |
562 | ARRAY_SIZE(shadow_read_write_fields); | 562 | ARRAY_SIZE(shadow_read_write_fields); |
563 | 563 | ||
564 | static const unsigned short vmcs_field_to_offset_table[] = { | 564 | static const unsigned short vmcs_field_to_offset_table[] = { |
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void) | |||
3009 | } | 3009 | } |
3010 | } | 3010 | } |
3011 | 3011 | ||
3012 | static void init_vmcs_shadow_fields(void) | ||
3013 | { | ||
3014 | int i, j; | ||
3015 | |||
3016 | /* No checks for read-only fields yet */ | ||
3017 | |||
3018 | for (i = j = 0; i < max_shadow_read_write_fields; i++) { | ||
3019 | switch (shadow_read_write_fields[i]) { | ||
3020 | case GUEST_BNDCFGS: | ||
3021 | if (!vmx_mpx_supported()) | ||
3022 | continue; | ||
3023 | break; | ||
3024 | default: | ||
3025 | break; | ||
3026 | } | ||
3027 | |||
3028 | if (j < i) | ||
3029 | shadow_read_write_fields[j] = | ||
3030 | shadow_read_write_fields[i]; | ||
3031 | j++; | ||
3032 | } | ||
3033 | max_shadow_read_write_fields = j; | ||
3034 | |||
3035 | /* shadowed fields the guest can access without a VM exit */ | ||
3036 | for (i = 0; i < max_shadow_read_write_fields; i++) { | ||
3037 | clear_bit(shadow_read_write_fields[i], | ||
3038 | vmx_vmwrite_bitmap); | ||
3039 | clear_bit(shadow_read_write_fields[i], | ||
3040 | vmx_vmread_bitmap); | ||
3041 | } | ||
3042 | for (i = 0; i < max_shadow_read_only_fields; i++) | ||
3043 | clear_bit(shadow_read_only_fields[i], | ||
3044 | vmx_vmread_bitmap); | ||
3045 | } | ||
3046 | |||
3012 | static __init int alloc_kvm_area(void) | 3047 | static __init int alloc_kvm_area(void) |
3013 | { | 3048 | { |
3014 | int cpu; | 3049 | int cpu; |
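init_vmcs_shadow_fields() trims the read/write field table with a two-index, in-place compaction loop. The same idiom in isolation (a minimal sketch; compact_fields() and its callback are hypothetical):

	/* Keep only entries accepted by 'supported', packing survivors to
	 * the front of the array; returns the new count. */
	static int compact_fields(unsigned long *fields, int count,
				  int (*supported)(unsigned long))
	{
		int i, j;

		for (i = j = 0; i < count; i++) {
			if (!supported(fields[i]))
				continue;	/* drop this entry */
			if (j < i)		/* only move when a gap exists */
				fields[j] = fields[i];
			j++;
		}
		return j;
	}

This is also why the field tables and their max_* counters lose their const qualifiers in this patch: they are now rewritten at hardware-setup time.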
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void) | |||
3039 | enable_vpid = 0; | 3074 | enable_vpid = 0; |
3040 | if (!cpu_has_vmx_shadow_vmcs()) | 3075 | if (!cpu_has_vmx_shadow_vmcs()) |
3041 | enable_shadow_vmcs = 0; | 3076 | enable_shadow_vmcs = 0; |
3077 | if (enable_shadow_vmcs) | ||
3078 | init_vmcs_shadow_fields(); | ||
3042 | 3079 | ||
3043 | if (!cpu_has_vmx_ept() || | 3080 | if (!cpu_has_vmx_ept() || |
3044 | !cpu_has_vmx_ept_4levels()) { | 3081 | !cpu_has_vmx_ept_4levels()) { |
@@ -3484,13 +3521,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
3484 | hw_cr4 &= ~X86_CR4_PAE; | 3521 | hw_cr4 &= ~X86_CR4_PAE; |
3485 | hw_cr4 |= X86_CR4_PSE; | 3522 | hw_cr4 |= X86_CR4_PSE; |
3486 | /* | 3523 | /* |
3487 | * SMEP is disabled if CPU is in non-paging mode in | 3524 | * SMEP/SMAP is disabled if CPU is in non-paging mode |
3488 | * hardware. However KVM always uses paging mode to | 3525 | * in hardware. However KVM always uses paging mode to |
3489 | * emulate guest non-paging mode with TDP. | 3526 | * emulate guest non-paging mode with TDP. |
3490 | * To emulate this behavior, SMEP needs to be manually | 3527 | * To emulate this behavior, SMEP/SMAP needs to be |
3491 | * disabled when guest switches to non-paging mode. | 3528 | * manually disabled when guest switches to non-paging |
3529 | * mode. | ||
3492 | */ | 3530 | */ |
3493 | hw_cr4 &= ~X86_CR4_SMEP; | 3531 | hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP); |
3494 | } else if (!(cr4 & X86_CR4_PAE)) { | 3532 | } else if (!(cr4 & X86_CR4_PAE)) { |
3495 | hw_cr4 &= ~X86_CR4_PAE; | 3533 | hw_cr4 &= ~X86_CR4_PAE; |
3496 | } | 3534 | } |
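A compact restatement of the rule in that comment, as a sketch (the CR4 bit positions are architectural; tdp_hw_cr4() is a hypothetical helper, not the kernel function):

	#define X86_CR4_SMEP (1UL << 20)
	#define X86_CR4_SMAP (1UL << 21)

	/* Under TDP the hardware always pages, so paging-dependent
	 * protections must be hidden while the guest runs unpaged. */
	static unsigned long tdp_hw_cr4(unsigned long guest_cr4, int guest_paging)
	{
		unsigned long hw_cr4 = guest_cr4;

		if (!guest_paging)
			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
		return hw_cr4;
	}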
@@ -7740,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7740 | 7778 | ||
7741 | exec_control = vmcs12->pin_based_vm_exec_control; | 7779 | exec_control = vmcs12->pin_based_vm_exec_control; |
7742 | exec_control |= vmcs_config.pin_based_exec_ctrl; | 7780 | exec_control |= vmcs_config.pin_based_exec_ctrl; |
7743 | exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; | 7781 | exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER | |
7782 | PIN_BASED_POSTED_INTR); | ||
7744 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); | 7783 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); |
7745 | 7784 | ||
7746 | vmx->nested.preemption_timer_expired = false; | 7785 | vmx->nested.preemption_timer_expired = false; |
@@ -7777,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7777 | if (!vmx->rdtscp_enabled) | 7816 | if (!vmx->rdtscp_enabled) |
7778 | exec_control &= ~SECONDARY_EXEC_RDTSCP; | 7817 | exec_control &= ~SECONDARY_EXEC_RDTSCP; |
7779 | /* Take the following fields only from vmcs12 */ | 7818 | /* Take the following fields only from vmcs12 */ |
7780 | exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | 7819 | exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
7820 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | | ||
7821 | SECONDARY_EXEC_APIC_REGISTER_VIRT); | ||
7781 | if (nested_cpu_has(vmcs12, | 7822 | if (nested_cpu_has(vmcs12, |
7782 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) | 7823 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) |
7783 | exec_control |= vmcs12->secondary_vm_exec_control; | 7824 | exec_control |= vmcs12->secondary_vm_exec_control; |
@@ -8802,14 +8843,6 @@ static int __init vmx_init(void) | |||
8802 | 8843 | ||
8803 | memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); | 8844 | memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); |
8804 | memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); | 8845 | memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); |
8805 | /* shadowed read/write fields */ | ||
8806 | for (i = 0; i < max_shadow_read_write_fields; i++) { | ||
8807 | clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); | ||
8808 | clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); | ||
8809 | } | ||
8810 | /* shadowed read only fields */ | ||
8811 | for (i = 0; i < max_shadow_read_only_fields; i++) | ||
8812 | clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); | ||
8813 | 8846 | ||
8814 | /* | 8847 | /* |
8815 | * Allow direct access to the PC debug port (it is often used for I/O | 8848 | * Allow direct access to the PC debug port (it is often used for I/O |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d1b5cd4d34c..20316c67b824 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); | |||
106 | static u32 tsc_tolerance_ppm = 250; | 106 | static u32 tsc_tolerance_ppm = 250; |
107 | module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); | 107 | module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); |
108 | 108 | ||
109 | static bool backwards_tsc_observed = false; | ||
110 | |||
109 | #define KVM_NR_SHARED_MSRS 16 | 111 | #define KVM_NR_SHARED_MSRS 16 |
110 | 112 | ||
111 | struct kvm_shared_msrs_global { | 113 | struct kvm_shared_msrs_global { |
@@ -280,7 +282,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
280 | } | 282 | } |
281 | EXPORT_SYMBOL_GPL(kvm_set_apic_base); | 283 | EXPORT_SYMBOL_GPL(kvm_set_apic_base); |
282 | 284 | ||
283 | asmlinkage void kvm_spurious_fault(void) | 285 | asmlinkage __visible void kvm_spurious_fault(void) |
284 | { | 286 | { |
285 | /* Fault while not rebooting. We want the trace. */ | 287 | /* Fault while not rebooting. We want the trace. */ |
286 | BUG(); | 288 | BUG(); |
@@ -652,6 +654,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
652 | if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) | 654 | if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) |
653 | return 1; | 655 | return 1; |
654 | 656 | ||
657 | if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) | ||
658 | return 1; | ||
659 | |||
655 | if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) | 660 | if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) |
656 | return 1; | 661 | return 1; |
657 | 662 | ||
@@ -680,6 +685,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
680 | (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) | 685 | (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) |
681 | kvm_mmu_reset_context(vcpu); | 686 | kvm_mmu_reset_context(vcpu); |
682 | 687 | ||
688 | if ((cr4 ^ old_cr4) & X86_CR4_SMAP) | ||
689 | update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); | ||
690 | |||
683 | if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) | 691 | if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) |
684 | kvm_update_cpuid(vcpu); | 692 | kvm_update_cpuid(vcpu); |
685 | 693 | ||
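The new guard relies on the (new ^ old) & bit idiom: XOR keeps exactly the bits that changed, and the AND isolates the one of interest, so the permission bitmask is rebuilt only on a genuine CR4.SMAP toggle. A minimal sketch (hypothetical helper):

	#include <stdbool.h>

	/* True iff 'bit' differs between old_val and new_val. */
	static bool bit_toggled(unsigned long old_val, unsigned long new_val,
				unsigned long bit)
	{
		return ((old_val ^ new_val) & bit) != 0;
	}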
@@ -1117,7 +1125,6 @@ static inline u64 get_kernel_ns(void) | |||
1117 | { | 1125 | { |
1118 | struct timespec ts; | 1126 | struct timespec ts; |
1119 | 1127 | ||
1120 | WARN_ON(preemptible()); | ||
1121 | ktime_get_ts(&ts); | 1128 | ktime_get_ts(&ts); |
1122 | monotonic_to_bootbased(&ts); | 1129 | monotonic_to_bootbased(&ts); |
1123 | return timespec_to_ns(&ts); | 1130 | return timespec_to_ns(&ts); |
@@ -1481,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) | |||
1481 | &ka->master_kernel_ns, | 1488 | &ka->master_kernel_ns, |
1482 | &ka->master_cycle_now); | 1489 | &ka->master_cycle_now); |
1483 | 1490 | ||
1484 | ka->use_master_clock = host_tsc_clocksource & vcpus_matched; | 1491 | ka->use_master_clock = host_tsc_clocksource && vcpus_matched |
1492 | && !backwards_tsc_observed; | ||
1485 | 1493 | ||
1486 | if (ka->use_master_clock) | 1494 | if (ka->use_master_clock) |
1487 | atomic_set(&kvm_guest_has_master_clock, 1); | 1495 | atomic_set(&kvm_guest_has_master_clock, 1); |
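Two things change in that expression: the new backwards_tsc_observed condition is folded in, and logical && replaces bitwise &. With bool operands the latter is defensive, but the distinction is real whenever a flag is not strictly 0 or 1; a standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		int a = 2, b = 1;	/* both logically true */

		printf("%d\n", a & b);	/* 0: no bits in common */
		printf("%d\n", a && b);	/* 1: logical AND of true values */
		return 0;
	}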
@@ -4164,7 +4172,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, | |||
4164 | | (write ? PFERR_WRITE_MASK : 0); | 4172 | | (write ? PFERR_WRITE_MASK : 0); |
4165 | 4173 | ||
4166 | if (vcpu_match_mmio_gva(vcpu, gva) | 4174 | if (vcpu_match_mmio_gva(vcpu, gva) |
4167 | && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { | 4175 | && !permission_fault(vcpu, vcpu->arch.walk_mmu, |
4176 | vcpu->arch.access, access)) { | ||
4168 | *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | | 4177 | *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | |
4169 | (gva & (PAGE_SIZE - 1)); | 4178 | (gva & (PAGE_SIZE - 1)); |
4170 | trace_vcpu_match_mmio(gva, *gpa, write, false); | 4179 | trace_vcpu_match_mmio(gva, *gpa, write, false); |
@@ -6939,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage) | |||
6939 | */ | 6948 | */ |
6940 | if (backwards_tsc) { | 6949 | if (backwards_tsc) { |
6941 | u64 delta_cyc = max_tsc - local_tsc; | 6950 | u64 delta_cyc = max_tsc - local_tsc; |
6951 | backwards_tsc_observed = true; | ||
6942 | list_for_each_entry(kvm, &vm_list, vm_list) { | 6952 | list_for_each_entry(kvm, &vm_list, vm_list) { |
6943 | kvm_for_each_vcpu(i, vcpu, kvm) { | 6953 | kvm_for_each_vcpu(i, vcpu, kvm) { |
6944 | vcpu->arch.tsc_offset_adjustment += delta_cyc; | 6954 | vcpu->arch.tsc_offset_adjustment += delta_cyc; |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index ad1fb5f53925..aae94132bc24 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next) | |||
233 | * flags word contains all kinds of stuff, but in practice Linux only cares | 233 | * flags word contains all kinds of stuff, but in practice Linux only cares |
234 | * about the interrupt flag. Our "save_flags()" just returns that. | 234 | * about the interrupt flag. Our "save_flags()" just returns that. |
235 | */ | 235 | */ |
236 | asmlinkage unsigned long lguest_save_fl(void) | 236 | asmlinkage __visible unsigned long lguest_save_fl(void) |
237 | { | 237 | { |
238 | return lguest_data.irq_enabled; | 238 | return lguest_data.irq_enabled; |
239 | } | 239 | } |
240 | 240 | ||
241 | /* Interrupts go off... */ | 241 | /* Interrupts go off... */ |
242 | asmlinkage void lguest_irq_disable(void) | 242 | asmlinkage __visible void lguest_irq_disable(void) |
243 | { | 243 | { |
244 | lguest_data.irq_enabled = 0; | 244 | lguest_data.irq_enabled = 0; |
245 | } | 245 | } |
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index db9db446b71a..43623739c7cf 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c | |||
@@ -76,7 +76,7 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set) | |||
76 | if (m1.q == m.q) | 76 | if (m1.q == m.q) |
77 | return 0; | 77 | return 0; |
78 | 78 | ||
79 | err = msr_write(msr, &m); | 79 | err = msr_write(msr, &m1); |
80 | if (err) | 80 | if (err) |
81 | return err; | 81 | return err; |
82 | 82 | ||
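The one-character fix above is easy to misread: __flip_bit() computed the modified value in m1 but wrote the untouched m back, so the flip never took effect. The intended read-modify-write shape, sketched with hypothetical rd/wr callbacks in place of the MSR accessors:

	#include <stdbool.h>
	#include <stdint.h>

	static int flip_bit(uint64_t (*rd)(uint32_t),
			    int (*wr)(uint32_t, uint64_t),
			    uint32_t msr, uint8_t bit, bool set)
	{
		uint64_t m = rd(msr);
		uint64_t m1 = set ? (m | (1ULL << bit)) : (m & ~(1ULL << bit));

		if (m1 == m)
			return 0;	/* already in the requested state */
		return wr(msr, m1);	/* write the modified copy, not the original */
	}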
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c index a5449089cd9f..9e6545f269e5 100644 --- a/arch/x86/math-emu/errors.c +++ b/arch/x86/math-emu/errors.c | |||
@@ -302,7 +302,7 @@ static struct { | |||
302 | 0x242 in div_Xsig.S | 302 | 0x242 in div_Xsig.S |
303 | */ | 303 | */ |
304 | 304 | ||
305 | asmlinkage void FPU_exception(int n) | 305 | asmlinkage __visible void FPU_exception(int n) |
306 | { | 306 | { |
307 | int i, int_type; | 307 | int i, int_type; |
308 | 308 | ||
@@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb, | |||
492 | 492 | ||
493 | /* Invalid arith operation on Valid registers */ | 493 | /* Invalid arith operation on Valid registers */ |
494 | /* Returns < 0 if the exception is unmasked */ | 494 | /* Returns < 0 if the exception is unmasked */ |
495 | asmlinkage int arith_invalid(int deststnr) | 495 | asmlinkage __visible int arith_invalid(int deststnr) |
496 | { | 496 | { |
497 | 497 | ||
498 | EXCEPTION(EX_Invalid); | 498 | EXCEPTION(EX_Invalid); |
@@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr) | |||
507 | } | 507 | } |
508 | 508 | ||
509 | /* Divide a finite number by zero */ | 509 | /* Divide a finite number by zero */ |
510 | asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign) | 510 | asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign) |
511 | { | 511 | { |
512 | FPU_REG *dest = &st(deststnr); | 512 | FPU_REG *dest = &st(deststnr); |
513 | int tag = TAG_Valid; | 513 | int tag = TAG_Valid; |
@@ -539,7 +539,7 @@ int set_precision_flag(int flags) | |||
539 | } | 539 | } |
540 | 540 | ||
541 | /* This may be called often, so keep it lean */ | 541 | /* This may be called often, so keep it lean */ |
542 | asmlinkage void set_precision_flag_up(void) | 542 | asmlinkage __visible void set_precision_flag_up(void) |
543 | { | 543 | { |
544 | if (control_word & CW_Precision) | 544 | if (control_word & CW_Precision) |
545 | partial_status |= (SW_Precision | SW_C1); /* The masked response */ | 545 | partial_status |= (SW_Precision | SW_C1); /* The masked response */ |
@@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void) | |||
548 | } | 548 | } |
549 | 549 | ||
550 | /* This may be called often, so keep it lean */ | 550 | /* This may be called often, so keep it lean */ |
551 | asmlinkage void set_precision_flag_down(void) | 551 | asmlinkage __visible void set_precision_flag_down(void) |
552 | { | 552 | { |
553 | if (control_word & CW_Precision) { /* The masked response */ | 553 | if (control_word & CW_Precision) { /* The masked response */ |
554 | partial_status &= ~SW_C1; | 554 | partial_status &= ~SW_C1; |
@@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void) | |||
557 | EXCEPTION(EX_Precision); | 557 | EXCEPTION(EX_Precision); |
558 | } | 558 | } |
559 | 559 | ||
560 | asmlinkage int denormal_operand(void) | 560 | asmlinkage __visible int denormal_operand(void) |
561 | { | 561 | { |
562 | if (control_word & CW_Denormal) { /* The masked response */ | 562 | if (control_word & CW_Denormal) { /* The masked response */ |
563 | partial_status |= SW_Denorm_Op; | 563 | partial_status |= SW_Denorm_Op; |
@@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void) | |||
568 | } | 568 | } |
569 | } | 569 | } |
570 | 570 | ||
571 | asmlinkage int arith_overflow(FPU_REG *dest) | 571 | asmlinkage __visible int arith_overflow(FPU_REG *dest) |
572 | { | 572 | { |
573 | int tag = TAG_Valid; | 573 | int tag = TAG_Valid; |
574 | 574 | ||
@@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest) | |||
596 | 596 | ||
597 | } | 597 | } |
598 | 598 | ||
599 | asmlinkage int arith_underflow(FPU_REG *dest) | 599 | asmlinkage __visible int arith_underflow(FPU_REG *dest) |
600 | { | 600 | { |
601 | int tag = TAG_Valid; | 601 | int tag = TAG_Valid; |
602 | 602 | ||
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index dc017735bb91..6d5663a599a7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, | |||
171 | memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ | 171 | memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ |
172 | 172 | ||
173 | header->pages = sz / PAGE_SIZE; | 173 | header->pages = sz / PAGE_SIZE; |
174 | hole = sz - (proglen + sizeof(*header)); | 174 | hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); |
175 | 175 | ||
176 | /* insert a random number of int3 instructions before BPF code */ | 176 | /* insert a random number of int3 instructions before BPF code */ |
177 | *image_ptr = &header->image[prandom_u32() % hole]; | 177 | *image_ptr = &header->image[prandom_u32() % hole]; |
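The min() clamp bounds the randomized int3 padding so the chosen program start stays within the first page of the allocation. The arithmetic in isolation (a sketch; the kernel's sz is already page-rounded and hdrlen stands in for sizeof(*header)):

	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* The random offset is drawn from [0, hole); capping hole keeps
	 * &image[offset] inside the first page. */
	static size_t jit_hole(size_t sz, size_t proglen, size_t hdrlen)
	{
		size_t hole = sz - (proglen + hdrlen);
		size_t cap = PAGE_SIZE - hdrlen;

		return hole < cap ? hole : cap;
	}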
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 81b506d5befd..524142117296 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c | |||
@@ -14,48 +14,92 @@ | |||
14 | 14 | ||
15 | static const struct font_desc *font; | 15 | static const struct font_desc *font; |
16 | static u32 efi_x, efi_y; | 16 | static u32 efi_x, efi_y; |
17 | static void *efi_fb; | ||
18 | static bool early_efi_keep; | ||
17 | 19 | ||
18 | static __init void early_efi_clear_scanline(unsigned int y) | 20 | /* |
21 | * The EFI earlyprintk support needs early_ioremap() to map the framebuffer. | ||
22 | * But early_ioremap() is not usable for earlyprintk=efi,keep; ioremap() must | ||
23 | * be used instead. ioremap() becomes available after paging_init(), which is | ||
24 | * earlier than the initcall callbacks. Thus this early initcall function, | ||
25 | * early_efi_map_fb(), maps the whole EFI framebuffer. | ||
26 | */ | ||
27 | static __init int early_efi_map_fb(void) | ||
19 | { | 28 | { |
20 | unsigned long base, *dst; | 29 | unsigned long base, size; |
21 | u16 len; | 30 | |
31 | if (!early_efi_keep) | ||
32 | return 0; | ||
22 | 33 | ||
23 | base = boot_params.screen_info.lfb_base; | 34 | base = boot_params.screen_info.lfb_base; |
24 | len = boot_params.screen_info.lfb_linelength; | 35 | size = boot_params.screen_info.lfb_size; |
36 | efi_fb = ioremap(base, size); | ||
37 | |||
38 | return efi_fb ? 0 : -ENOMEM; | ||
39 | } | ||
40 | early_initcall(early_efi_map_fb); | ||
41 | |||
42 | /* | ||
43 | * early_efi_map() maps the EFI framebuffer region [start, start + len - 1]. | ||
44 | * In the earlyprintk=efi,keep case the whole framebuffer is already mapped, | ||
45 | * so just return the offset efi_fb + start. | ||
46 | */ | ||
47 | static __init_refok void *early_efi_map(unsigned long start, unsigned long len) | ||
48 | { | ||
49 | unsigned long base; | ||
50 | |||
51 | base = boot_params.screen_info.lfb_base; | ||
52 | |||
53 | if (efi_fb) | ||
54 | return (efi_fb + start); | ||
55 | else | ||
56 | return early_ioremap(base + start, len); | ||
57 | } | ||
25 | 58 | ||
26 | dst = early_ioremap(base + y*len, len); | 59 | static __init_refok void early_efi_unmap(void *addr, unsigned long len) |
60 | { | ||
61 | if (!efi_fb) | ||
62 | early_iounmap(addr, len); | ||
63 | } | ||
64 | |||
65 | static void early_efi_clear_scanline(unsigned int y) | ||
66 | { | ||
67 | unsigned long *dst; | ||
68 | u16 len; | ||
69 | |||
70 | len = boot_params.screen_info.lfb_linelength; | ||
71 | dst = early_efi_map(y*len, len); | ||
27 | if (!dst) | 72 | if (!dst) |
28 | return; | 73 | return; |
29 | 74 | ||
30 | memset(dst, 0, len); | 75 | memset(dst, 0, len); |
31 | early_iounmap(dst, len); | 76 | early_efi_unmap(dst, len); |
32 | } | 77 | } |
33 | 78 | ||
34 | static __init void early_efi_scroll_up(void) | 79 | static void early_efi_scroll_up(void) |
35 | { | 80 | { |
36 | unsigned long base, *dst, *src; | 81 | unsigned long *dst, *src; |
37 | u16 len; | 82 | u16 len; |
38 | u32 i, height; | 83 | u32 i, height; |
39 | 84 | ||
40 | base = boot_params.screen_info.lfb_base; | ||
41 | len = boot_params.screen_info.lfb_linelength; | 85 | len = boot_params.screen_info.lfb_linelength; |
42 | height = boot_params.screen_info.lfb_height; | 86 | height = boot_params.screen_info.lfb_height; |
43 | 87 | ||
44 | for (i = 0; i < height - font->height; i++) { | 88 | for (i = 0; i < height - font->height; i++) { |
45 | dst = early_ioremap(base + i*len, len); | 89 | dst = early_efi_map(i*len, len); |
46 | if (!dst) | 90 | if (!dst) |
47 | return; | 91 | return; |
48 | 92 | ||
49 | src = early_ioremap(base + (i + font->height) * len, len); | 93 | src = early_efi_map((i + font->height) * len, len); |
50 | if (!src) { | 94 | if (!src) { |
51 | early_iounmap(dst, len); | 95 | early_efi_unmap(dst, len); |
52 | return; | 96 | return; |
53 | } | 97 | } |
54 | 98 | ||
55 | memmove(dst, src, len); | 99 | memmove(dst, src, len); |
56 | 100 | ||
57 | early_iounmap(src, len); | 101 | early_efi_unmap(src, len); |
58 | early_iounmap(dst, len); | 102 | early_efi_unmap(dst, len); |
59 | } | 103 | } |
60 | } | 104 | } |
61 | 105 | ||
@@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h) | |||
79 | } | 123 | } |
80 | } | 124 | } |
81 | 125 | ||
82 | static __init void | 126 | static void |
83 | early_efi_write(struct console *con, const char *str, unsigned int num) | 127 | early_efi_write(struct console *con, const char *str, unsigned int num) |
84 | { | 128 | { |
85 | struct screen_info *si; | 129 | struct screen_info *si; |
86 | unsigned long base; | ||
87 | unsigned int len; | 130 | unsigned int len; |
88 | const char *s; | 131 | const char *s; |
89 | void *dst; | 132 | void *dst; |
90 | 133 | ||
91 | base = boot_params.screen_info.lfb_base; | ||
92 | si = &boot_params.screen_info; | 134 | si = &boot_params.screen_info; |
93 | len = si->lfb_linelength; | 135 | len = si->lfb_linelength; |
94 | 136 | ||
@@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) | |||
109 | for (h = 0; h < font->height; h++) { | 151 | for (h = 0; h < font->height; h++) { |
110 | unsigned int n, x; | 152 | unsigned int n, x; |
111 | 153 | ||
112 | dst = early_ioremap(base + (efi_y + h) * len, len); | 154 | dst = early_efi_map((efi_y + h) * len, len); |
113 | if (!dst) | 155 | if (!dst) |
114 | return; | 156 | return; |
115 | 157 | ||
@@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) | |||
123 | s++; | 165 | s++; |
124 | } | 166 | } |
125 | 167 | ||
126 | early_iounmap(dst, len); | 168 | early_efi_unmap(dst, len); |
127 | } | 169 | } |
128 | 170 | ||
129 | num -= count; | 171 | num -= count; |
@@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options) | |||
179 | for (i = 0; i < (yres - efi_y) / font->height; i++) | 221 | for (i = 0; i < (yres - efi_y) / font->height; i++) |
180 | early_efi_scroll_up(); | 222 | early_efi_scroll_up(); |
181 | 223 | ||
224 | /* early_console_register() will unset CON_BOOT in the ,keep case */ | ||
225 | if (!(con->flags & CON_BOOT)) | ||
226 | early_efi_keep = true; | ||
182 | return 0; | 227 | return 0; |
183 | } | 228 | } |
184 | 229 | ||
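The mapping wrappers introduced here follow one pattern: a single persistent mapping when ,keep is in effect, transient per-scanline mappings otherwise, with the unmap side tearing down only the transient case. The pattern in isolation (transient_map()/transient_unmap() are hypothetical stand-ins for early_ioremap()/early_iounmap()):

	extern void *transient_map(unsigned long start, unsigned long len);
	extern void transient_unmap(void *addr, unsigned long len);

	static char *persistent_fb;	/* non-NULL once the region is mapped */

	static void *map_region(unsigned long start, unsigned long len)
	{
		if (persistent_fb)
			return persistent_fb + start;	/* offset, no remap */
		return transient_map(start, len);
	}

	static void unmap_region(void *addr, unsigned long len)
	{
		if (!persistent_fb)
			transient_unmap(addr, len);	/* keep persistent map */
	}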
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c index ff0174dda810..a9acde72d4ed 100644 --- a/arch/x86/platform/olpc/olpc-xo1-pm.c +++ b/arch/x86/platform/olpc/olpc-xo1-pm.c | |||
@@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | asmlinkage int xo1_do_sleep(u8 sleep_state) | 78 | asmlinkage __visible int xo1_do_sleep(u8 sleep_state) |
79 | { | 79 | { |
80 | void *pgd_addr = __va(read_cr3()); | 80 | void *pgd_addr = __va(read_cr3()); |
81 | 81 | ||
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 304fca20d96e..35e2bb6c0f37 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c | |||
@@ -23,7 +23,7 @@ | |||
23 | extern __visible const void __nosave_begin, __nosave_end; | 23 | extern __visible const void __nosave_begin, __nosave_end; |
24 | 24 | ||
25 | /* Defined in hibernate_asm_64.S */ | 25 | /* Defined in hibernate_asm_64.S */ |
26 | extern asmlinkage int restore_image(void); | 26 | extern asmlinkage __visible int restore_image(void); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Address to jump to in the last phase of restore in order to get to the image | 29 | * Address to jump to in the last phase of restore in order to get to the image |
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile index f325af26107c..3323c2745248 100644 --- a/arch/x86/syscalls/Makefile +++ b/arch/x86/syscalls/Makefile | |||
@@ -54,5 +54,7 @@ syshdr-$(CONFIG_X86_64) += syscalls_64.h | |||
54 | 54 | ||
55 | targets += $(uapisyshdr-y) $(syshdr-y) | 55 | targets += $(uapisyshdr-y) $(syshdr-y) |
56 | 56 | ||
57 | PHONY += all | ||
57 | all: $(addprefix $(uapi)/,$(uapisyshdr-y)) | 58 | all: $(addprefix $(uapi)/,$(uapisyshdr-y)) |
58 | all: $(addprefix $(out)/,$(syshdr-y)) | 59 | all: $(addprefix $(out)/,$(syshdr-y)) |
60 | @: | ||
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index 96bc506ac6de..d6b867921612 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl | |||
@@ -359,3 +359,4 @@ | |||
359 | 350 i386 finit_module sys_finit_module | 359 | 350 i386 finit_module sys_finit_module |
360 | 351 i386 sched_setattr sys_sched_setattr | 360 | 351 i386 sched_setattr sys_sched_setattr |
361 | 352 i386 sched_getattr sys_sched_getattr | 361 | 352 i386 sched_getattr sys_sched_getattr |
362 | 353 i386 renameat2 sys_renameat2 | ||
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index e8120346903b..604a37efd4d5 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile | |||
@@ -40,4 +40,6 @@ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/ina | |||
40 | HOST_EXTRACFLAGS += -I$(srctree)/tools/include | 40 | HOST_EXTRACFLAGS += -I$(srctree)/tools/include |
41 | hostprogs-y += relocs | 41 | hostprogs-y += relocs |
42 | relocs-objs := relocs_32.o relocs_64.o relocs_common.o | 42 | relocs-objs := relocs_32.o relocs_64.o relocs_common.o |
43 | PHONY += relocs | ||
43 | relocs: $(obj)/relocs | 44 | relocs: $(obj)/relocs |
45 | @: | ||
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S index 2e263f367b13..9df017ab2285 100644 --- a/arch/x86/vdso/vdso-layout.lds.S +++ b/arch/x86/vdso/vdso-layout.lds.S | |||
@@ -9,12 +9,9 @@ SECTIONS | |||
9 | #ifdef BUILD_VDSO32 | 9 | #ifdef BUILD_VDSO32 |
10 | #include <asm/vdso32.h> | 10 | #include <asm/vdso32.h> |
11 | 11 | ||
12 | .hpet_sect : { | 12 | hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE); |
13 | hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE); | ||
14 | } :text :hpet_sect | ||
15 | 13 | ||
16 | .vvar_sect : { | 14 | vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE); |
17 | vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE); | ||
18 | 15 | ||
19 | /* Place all vvars at the offsets in asm/vvar.h. */ | 16 | /* Place all vvars at the offsets in asm/vvar.h. */ |
20 | #define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset; | 17 | #define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset; |
@@ -22,7 +19,6 @@ SECTIONS | |||
22 | #include <asm/vvar.h> | 19 | #include <asm/vvar.h> |
23 | #undef __VVAR_KERNEL_LDS | 20 | #undef __VVAR_KERNEL_LDS |
24 | #undef EMIT_VVAR | 21 | #undef EMIT_VVAR |
25 | } :text :vvar_sect | ||
26 | #endif | 22 | #endif |
27 | . = SIZEOF_HEADERS; | 23 | . = SIZEOF_HEADERS; |
28 | 24 | ||
@@ -61,7 +57,12 @@ SECTIONS | |||
61 | */ | 57 | */ |
62 | . = ALIGN(0x100); | 58 | . = ALIGN(0x100); |
63 | 59 | ||
64 | .text : { *(.text*) } :text =0x90909090 | 60 | .text : { *(.text*) } :text =0x90909090, |
61 | |||
62 | /* | ||
63 | * The comma above works around a bug in gold: | ||
64 | * https://sourceware.org/bugzilla/show_bug.cgi?id=16804 | ||
65 | */ | ||
65 | 66 | ||
66 | /DISCARD/ : { | 67 | /DISCARD/ : { |
67 | *(.discard) | 68 | *(.discard) |
@@ -84,8 +85,4 @@ PHDRS | |||
84 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | 85 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ |
85 | note PT_NOTE FLAGS(4); /* PF_R */ | 86 | note PT_NOTE FLAGS(4); /* PF_R */ |
86 | eh_frame_hdr PT_GNU_EH_FRAME; | 87 | eh_frame_hdr PT_GNU_EH_FRAME; |
87 | #ifdef BUILD_VDSO32 | ||
88 | vvar_sect PT_NULL FLAGS(4); /* PF_R */ | ||
89 | hpet_sect PT_NULL FLAGS(4); /* PF_R */ | ||
90 | #endif | ||
91 | } | 88 | } |
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 00348980a3a6..e1f220e3ca68 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #ifdef CONFIG_X86_64 | 39 | #ifdef CONFIG_X86_64 |
40 | #define vdso_enabled sysctl_vsyscall32 | 40 | #define vdso_enabled sysctl_vsyscall32 |
41 | #define arch_setup_additional_pages syscall32_setup_pages | 41 | #define arch_setup_additional_pages syscall32_setup_pages |
42 | extern int sysctl_ldt16; | ||
42 | #endif | 43 | #endif |
43 | 44 | ||
44 | /* | 45 | /* |
@@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = { | |||
249 | .mode = 0644, | 250 | .mode = 0644, |
250 | .proc_handler = proc_dointvec | 251 | .proc_handler = proc_dointvec |
251 | }, | 252 | }, |
253 | { | ||
254 | .procname = "ldt16", | ||
255 | .data = &sysctl_ldt16, | ||
256 | .maxlen = sizeof(int), | ||
257 | .mode = 0644, | ||
258 | .proc_handler = proc_dointvec | ||
259 | }, | ||
252 | {} | 260 | {} |
253 | }; | 261 | }; |
254 | 262 | ||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 201d09a7c46b..c34bfc4bbe7f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void) | |||
1515 | } | 1515 | } |
1516 | 1516 | ||
1517 | /* First C function to be called on Xen boot */ | 1517 | /* First C function to be called on Xen boot */ |
1518 | asmlinkage void __init xen_start_kernel(void) | 1518 | asmlinkage __visible void __init xen_start_kernel(void) |
1519 | { | 1519 | { |
1520 | struct physdev_set_iopl set_iopl; | 1520 | struct physdev_set_iopl set_iopl; |
1521 | int rc; | 1521 | int rc; |
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 08f763de26fe..a1207cb6472a 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void) | |||
23 | (void)HYPERVISOR_xen_version(0, NULL); | 23 | (void)HYPERVISOR_xen_version(0, NULL); |
24 | } | 24 | } |
25 | 25 | ||
26 | asmlinkage unsigned long xen_save_fl(void) | 26 | asmlinkage __visible unsigned long xen_save_fl(void) |
27 | { | 27 | { |
28 | struct vcpu_info *vcpu; | 28 | struct vcpu_info *vcpu; |
29 | unsigned long flags; | 29 | unsigned long flags; |
@@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags) | |||
63 | } | 63 | } |
64 | PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); | 64 | PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); |
65 | 65 | ||
66 | asmlinkage void xen_irq_disable(void) | 66 | asmlinkage __visible void xen_irq_disable(void) |
67 | { | 67 | { |
68 | /* There's a one instruction preempt window here. We need to | 68 | /* There's a one instruction preempt window here. We need to |
69 | make sure we don't switch CPUs between getting the vcpu | 69 | make sure we don't switch CPUs between getting the vcpu |
@@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void) | |||
74 | } | 74 | } |
75 | PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); | 75 | PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); |
76 | 76 | ||
77 | asmlinkage void xen_irq_enable(void) | 77 | asmlinkage __visible void xen_irq_enable(void) |
78 | { | 78 | { |
79 | struct vcpu_info *vcpu; | 79 | struct vcpu_info *vcpu; |
80 | 80 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index a18eadd8bb40..7005974c3ff3 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -441,10 +441,11 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) | |||
441 | irq_ctx_init(cpu); | 441 | irq_ctx_init(cpu); |
442 | #else | 442 | #else |
443 | clear_tsk_thread_flag(idle, TIF_FORK); | 443 | clear_tsk_thread_flag(idle, TIF_FORK); |
444 | #endif | ||
444 | per_cpu(kernel_stack, cpu) = | 445 | per_cpu(kernel_stack, cpu) = |
445 | (unsigned long)task_stack_page(idle) - | 446 | (unsigned long)task_stack_page(idle) - |
446 | KERNEL_STACK_OFFSET + THREAD_SIZE; | 447 | KERNEL_STACK_OFFSET + THREAD_SIZE; |
447 | #endif | 448 | |
448 | xen_setup_runstate_info(cpu); | 449 | xen_setup_runstate_info(cpu); |
449 | xen_setup_timer(cpu); | 450 | xen_setup_timer(cpu); |
450 | xen_init_lock_cpu(cpu); | 451 | xen_init_lock_cpu(cpu); |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 4d3acc34a998..0ba5f3b967f0 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -274,7 +274,7 @@ void __init xen_init_spinlocks(void) | |||
274 | printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); | 274 | printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); |
275 | return; | 275 | return; |
276 | } | 276 | } |
277 | 277 | printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); | |
278 | pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); | 278 | pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); |
279 | pv_lock_ops.unlock_kick = xen_unlock_kick; | 279 | pv_lock_ops.unlock_kick = xen_unlock_kick; |
280 | } | 280 | } |
@@ -290,6 +290,9 @@ static __init int xen_init_spinlocks_jump(void) | |||
290 | if (!xen_pvspin) | 290 | if (!xen_pvspin) |
291 | return 0; | 291 | return 0; |
292 | 292 | ||
293 | if (!xen_domain()) | ||
294 | return 0; | ||
295 | |||
293 | static_key_slow_inc(¶virt_ticketlocks_enabled); | 296 | static_key_slow_inc(¶virt_ticketlocks_enabled); |
294 | return 0; | 297 | return 0; |
295 | } | 298 | } |
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index 33ca6e42a4ca..fd92a64d748e 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S | |||
@@ -75,6 +75,17 @@ ENDPROC(xen_sysexit) | |||
75 | * stack state in whatever form it's in, we keep things simple by only | 75 | * stack state in whatever form it's in, we keep things simple by only |
76 | * using a single register which is pushed/popped on the stack. | 76 | * using a single register which is pushed/popped on the stack. |
77 | */ | 77 | */ |
78 | |||
79 | .macro POP_FS | ||
80 | 1: | ||
81 | popw %fs | ||
82 | .pushsection .fixup, "ax" | ||
83 | 2: movw $0, (%esp) | ||
84 | jmp 1b | ||
85 | .popsection | ||
86 | _ASM_EXTABLE(1b,2b) | ||
87 | .endm | ||
88 | |||
78 | ENTRY(xen_iret) | 89 | ENTRY(xen_iret) |
79 | /* test eflags for special cases */ | 90 | /* test eflags for special cases */ |
80 | testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) | 91 | testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) |
@@ -83,15 +94,13 @@ ENTRY(xen_iret) | |||
83 | push %eax | 94 | push %eax |
84 | ESP_OFFSET=4 # bytes pushed onto stack | 95 | ESP_OFFSET=4 # bytes pushed onto stack |
85 | 96 | ||
86 | /* | 97 | /* Store vcpu_info pointer for easy access */ |
87 | * Store vcpu_info pointer for easy access. Do it this way to | ||
88 | * avoid having to reload %fs | ||
89 | */ | ||
90 | #ifdef CONFIG_SMP | 98 | #ifdef CONFIG_SMP |
91 | GET_THREAD_INFO(%eax) | 99 | pushw %fs |
92 | movl %ss:TI_cpu(%eax), %eax | 100 | movl $(__KERNEL_PERCPU), %eax |
93 | movl %ss:__per_cpu_offset(,%eax,4), %eax | 101 | movl %eax, %fs |
94 | mov %ss:xen_vcpu(%eax), %eax | 102 | movl %fs:xen_vcpu, %eax |
103 | POP_FS | ||
95 | #else | 104 | #else |
96 | movl %ss:xen_vcpu, %eax | 105 | movl %ss:xen_vcpu, %eax |
97 | #endif | 106 | #endif |