Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/apic.h              |  27
-rw-r--r--  arch/x86/include/asm/cpufeature.h        | 118
-rw-r--r--  arch/x86/include/asm/desc.h              | 117
-rw-r--r--  arch/x86/include/asm/efi.h               |  28
-rw-r--r--  arch/x86/include/asm/entry_arch.h        |   8
-rw-r--r--  arch/x86/include/asm/fixmap.h            |   2
-rw-r--r--  arch/x86/include/asm/fpu-internal.h      |   4
-rw-r--r--  arch/x86/include/asm/hw_irq.h            |  17
-rw-r--r--  arch/x86/include/asm/kvm_host.h          |  17
-rw-r--r--  arch/x86/include/asm/mce.h               |   2
-rw-r--r--  arch/x86/include/asm/microcode_amd.h     |  78
-rw-r--r--  arch/x86/include/asm/microcode_intel.h   |   2
-rw-r--r--  arch/x86/include/asm/mshyperv.h          |   3
-rw-r--r--  arch/x86/include/asm/mutex_32.h          |  11
-rw-r--r--  arch/x86/include/asm/mutex_64.h          |  11
-rw-r--r--  arch/x86/include/asm/perf_event.h        |   3
-rw-r--r--  arch/x86/include/asm/pgtable.h           |   3
-rw-r--r--  arch/x86/include/asm/processor.h         |   5
-rw-r--r--  arch/x86/include/asm/sighandling.h       |   4
-rw-r--r--  arch/x86/include/asm/special_insns.h     |   2
-rw-r--r--  arch/x86/include/asm/tlbflush.h          |   2
-rw-r--r--  arch/x86/include/asm/trace/irq_vectors.h | 104
-rw-r--r--  arch/x86/include/asm/uaccess_64.h        |   2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h         |   3
24 files changed, 515 insertions(+), 58 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 338803422239..f8119b582c3c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -12,6 +12,7 @@
 #include <asm/fixmap.h>
 #include <asm/mpspec.h>
 #include <asm/msr.h>
+#include <asm/idle.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
 
@@ -687,5 +688,31 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
 #endif /* CONFIG_X86_LOCAL_APIC */
+extern void irq_enter(void);
+extern void irq_exit(void);
+
+static inline void entering_irq(void)
+{
+	irq_enter();
+	exit_idle();
+}
+
+static inline void entering_ack_irq(void)
+{
+	ack_APIC_irq();
+	entering_irq();
+}
+
+static inline void exiting_irq(void)
+{
+	irq_exit();
+}
+
+static inline void exiting_ack_irq(void)
+{
+	irq_exit();
+	/* Ack only at the end to avoid potential reentry */
+	ack_APIC_irq();
+}
 
 #endif /* _ASM_X86_APIC_H */
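
For illustration only, a minimal sketch of how the helpers added above are meant to bracket a vector handler; the handler name is hypothetical and not part of the patch:

	/*
	 * Hypothetical handler: entering_ack_irq() acks the local APIC and
	 * calls irq_enter()/exit_idle(); exiting_irq() calls irq_exit().
	 */
	void smp_example_vector_interrupt(struct pt_regs *regs)
	{
		entering_ack_irq();
		/* ... service the vector ... */
		exiting_irq();
	}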
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e99ac27f95b2..47538a61c91b 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -92,7 +92,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
-					  /* 21 available, was AMD_C1E */
+#define X86_FEATURE_ALWAYS	(3*32+21) /* "" Always-present feature */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
@@ -356,15 +356,36 @@ extern const char * const x86_power_flags[32];
 #endif /* CONFIG_X86_64 */
 
 #if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+extern bool __static_cpu_has_safe(u16 bit);
+
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
- *
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
 #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+	/*
+	 * Catch too early usage of this before alternatives
+	 * have run.
+	 */
+	asm goto("1: jmp %l[t_warn]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"
+		 " .long 0\n"			/* no replacement */
+		 " .word %P0\n"			/* 1: do replace */
+		 " .byte 2b - 1b\n"		/* source len */
+		 " .byte 0\n"			/* replacement len */
+		 ".previous\n"
+		 /* skipping size check since replacement size = 0 */
+		 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+#endif
+
 	asm goto("1: jmp %l[t_no]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
@@ -379,7 +400,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	return true;
  t_no:
 	return false;
-#else
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+ t_warn:
+	warn_pre_alternatives();
+	return false;
+#endif
+#else /* GCC_VERSION >= 40500 */
 	u8 flag;
 	/* Open-coded due to __stringify() in ALTERNATIVE() */
 	asm volatile("1: movb $0,%0\n"
@@ -411,11 +438,94 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	__static_cpu_has(bit) :			\
 	boot_cpu_has(bit)			\
 )
+
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+{
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+/*
+ * We need to spell the jumps to the compiler because, depending on the offset,
+ * the replacement jump can be bigger than the original jump, and this we cannot
+ * have. Thus, we force the jump to the widest, 4-byte, signed relative
+ * offset even though the last would often fit in less bytes.
+ */
+	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 3f - .\n"		/* repl offset */
+		 " .word %P1\n"			/* always replace */
+		 " .byte 2b - 1b\n"		/* src len */
+		 " .byte 4f - 3f\n"		/* repl len */
+		 ".previous\n"
+		 ".section .altinstr_replacement,\"ax\"\n"
+		 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
+		 "4:\n"
+		 ".previous\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 0\n"			/* no replacement */
+		 " .word %P0\n"			/* feature bit */
+		 " .byte 2b - 1b\n"		/* src len */
+		 " .byte 0\n"			/* repl len */
+		 ".previous\n"
+		 : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
+		 : : t_dynamic, t_no);
+	return true;
+ t_no:
+	return false;
+ t_dynamic:
+	return __static_cpu_has_safe(bit);
+#else /* GCC_VERSION >= 40500 */
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $2,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     " .long 1b - .\n"		/* src offset */
+		     " .long 3f - .\n"		/* repl offset */
+		     " .word %P2\n"		/* always replace */
+		     " .byte 2b - 1b\n"		/* source len */
+		     " .byte 4f - 3f\n"		/* replacement len */
+		     ".previous\n"
+		     ".section .discard,\"aw\",@progbits\n"
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $0,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     ".section .altinstructions,\"a\"\n"
+		     " .long 1b - .\n"		/* src offset */
+		     " .long 5f - .\n"		/* repl offset */
+		     " .word %P1\n"		/* feature bit */
+		     " .byte 4b - 3b\n"		/* src len */
+		     " .byte 6f - 5f\n"		/* repl len */
+		     ".previous\n"
+		     ".section .discard,\"aw\",@progbits\n"
+		     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "5: movb $1,%0\n"
+		     "6:\n"
+		     ".previous\n"
+		     : "=qm" (flag)
+		     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
+	return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+#endif
+}
+
+#define static_cpu_has_safe(bit)			\
+(							\
+	__builtin_constant_p(boot_cpu_has(bit)) ?	\
+	boot_cpu_has(bit) :				\
+	_static_cpu_has_safe(bit)			\
+)
 #else
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
 #define static_cpu_has(bit)		boot_cpu_has(bit)
+#define static_cpu_has_safe(bit)	boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)	cpu_has(c, (bit))
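
For illustration only, a usage sketch (not part of the patch): static_cpu_has_safe() is used like boot_cpu_has(), but is also safe before alternatives have been applied, falling back to the dynamic __static_cpu_has_safe() in that window. It mirrors the real call site added to fpu-internal.h later in this diff:

	/* Sketch: take the lazy-FPU path only when eager FPU is off. */
	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
		clts();		/* clear CR0.TS for the lazy-FPU case */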
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 8bf1c06070d5..b90e5dfeee46 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -36,8 +36,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 
 extern struct desc_ptr idt_descr;
 extern gate_desc idt_table[];
-extern struct desc_ptr nmi_idt_descr;
-extern gate_desc nmi_idt_table[];
+extern struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[];
 
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
@@ -316,7 +316,20 @@ static inline void set_nmi_gate(int gate, void *addr)
 	gate_desc s;
 
 	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(nmi_idt_table, gate, &s);
+	write_idt_entry(debug_idt_table, gate, &s);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[];
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+	write_idt_entry(trace_idt_table, entry, gate);
+}
+#else
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
 }
 #endif
 
@@ -331,6 +344,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
 	 * setup time
 	 */
 	write_idt_entry(idt_table, gate, &s);
+	write_trace_idt_entry(gate, &s);
 }
 
 /*
@@ -360,12 +374,39 @@ static inline void alloc_system_vector(int vector)
 	}
 }
 
-static inline void alloc_intr_gate(unsigned int n, void *addr)
+#ifdef CONFIG_TRACING
+static inline void trace_set_intr_gate(unsigned int gate, void *addr)
+{
+	gate_desc s;
+
+	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
+	write_idt_entry(trace_idt_table, gate, &s);
+}
+
+static inline void __trace_alloc_intr_gate(unsigned int n, void *addr)
+{
+	trace_set_intr_gate(n, addr);
+}
+#else
+static inline void trace_set_intr_gate(unsigned int gate, void *addr)
+{
+}
+
+#define __trace_alloc_intr_gate(n, addr)
+#endif
+
+static inline void __alloc_intr_gate(unsigned int n, void *addr)
 {
-	alloc_system_vector(n);
 	set_intr_gate(n, addr);
 }
 
+#define alloc_intr_gate(n, addr)				\
+	do {							\
+		alloc_system_vector(n);				\
+		__alloc_intr_gate(n, addr);			\
+		__trace_alloc_intr_gate(n, trace_##addr);	\
+	} while (0)
+
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
@@ -405,4 +446,70 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
 
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(u32, debug_idt_ctr);
+static inline bool is_debug_idt_enabled(void)
+{
+	if (this_cpu_read(debug_idt_ctr))
+		return true;
+
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+	load_idt((const struct desc_ptr *)&debug_idt_descr);
+}
+#else
+static inline bool is_debug_idt_enabled(void)
+{
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern atomic_t trace_idt_ctr;
+static inline bool is_trace_idt_enabled(void)
+{
+	if (atomic_read(&trace_idt_ctr))
+		return true;
+
+	return false;
+}
+
+static inline void load_trace_idt(void)
+{
+	load_idt((const struct desc_ptr *)&trace_idt_descr);
+}
+#else
+static inline bool is_trace_idt_enabled(void)
+{
+	return false;
+}
+
+static inline void load_trace_idt(void)
+{
+}
+#endif
+
+/*
+ * The load_current_idt() must be called with interrupts disabled
+ * to avoid races. That way the IDT will always be set back to the expected
+ * descriptor. It's also called when a CPU is being initialized, and
+ * that doesn't need to disable interrupts, as nothing should be
+ * bothering the CPU then.
+ */
+static inline void load_current_idt(void)
+{
+	if (is_debug_idt_enabled())
+		load_debug_idt();
+	else if (is_trace_idt_enabled())
+		load_trace_idt();
+	else
+		load_idt((const struct desc_ptr *)&idt_descr);
+}
 #endif /* _ASM_X86_DESC_H */
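
For illustration only, a sketch of the new alloc_intr_gate() flow (the vector and handler names are hypothetical, not part of the patch): one call site now installs the regular gate and, under CONFIG_TRACING, a trace_-prefixed twin in the trace IDT:

	alloc_intr_gate(EXAMPLE_VECTOR, example_interrupt);
	/*
	 * Expands to:
	 *	alloc_system_vector(EXAMPLE_VECTOR);
	 *	__alloc_intr_gate(EXAMPLE_VECTOR, example_interrupt);
	 *	__trace_alloc_intr_gate(EXAMPLE_VECTOR, trace_example_interrupt);
	 */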
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 60c89f30c727..0062a0125041 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -52,40 +52,40 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 		     u64 arg4, u64 arg5, u64 arg6);
 
 #define efi_call_phys0(f)			\
-	efi_call0((void *)(f))
+	efi_call0((f))
 #define efi_call_phys1(f, a1)			\
-	efi_call1((void *)(f), (u64)(a1))
+	efi_call1((f), (u64)(a1))
 #define efi_call_phys2(f, a1, a2)		\
-	efi_call2((void *)(f), (u64)(a1), (u64)(a2))
+	efi_call2((f), (u64)(a1), (u64)(a2))
 #define efi_call_phys3(f, a1, a2, a3)		\
-	efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3))
+	efi_call3((f), (u64)(a1), (u64)(a2), (u64)(a3))
 #define efi_call_phys4(f, a1, a2, a3, a4)	\
-	efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3),	\
+	efi_call4((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
 		  (u64)(a4))
 #define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
-	efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3),	\
+	efi_call5((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
 		  (u64)(a4), (u64)(a5))
 #define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3),	\
+	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
 		  (u64)(a4), (u64)(a5), (u64)(a6))
 
 #define efi_call_virt0(f)				\
-	efi_call0((void *)(efi.systab->runtime->f))
+	efi_call0((efi.systab->runtime->f))
 #define efi_call_virt1(f, a1)				\
-	efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
+	efi_call1((efi.systab->runtime->f), (u64)(a1))
 #define efi_call_virt2(f, a1, a2)			\
-	efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
+	efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2))
 #define efi_call_virt3(f, a1, a2, a3)			\
-	efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3))
 #define efi_call_virt4(f, a1, a2, a3, a4)		\
-	efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4))
 #define efi_call_virt5(f, a1, a2, a3, a4, a5)		\
-	efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5))
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, 91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 9bd4ecac72be..dc5fa661465f 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -13,14 +13,16 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
+BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
+		 smp_irq_move_cleanup_interrupt)
+BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
 #endif
 
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
 
 #ifdef CONFIG_HAVE_KVM
-BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
+BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
+		 smp_kvm_posted_intr_ipi)
 #endif
 
 /*
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0dc7d9e21c34..e846225265ed 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -81,11 +81,11 @@ enum fixed_addresses {
 	    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
 	VVAR_PAGE,
 	VSYSCALL_HPET,
-#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
 	PVCLOCK_FIXMAP_BEGIN,
 	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
 #endif
+#endif
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e25cc33ec54d..4d0bda7b11e3 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -62,10 +62,8 @@ extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
 #define xstateregs_active	fpregs_active
 
 #ifdef CONFIG_MATH_EMULATION
-# define HAVE_HWFP		(boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-# define HAVE_HWFP		1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
@@ -345,7 +343,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	if (!use_eager_fpu())
+	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
 		clts();
 	__thread_set_has_fpu(tsk);
 }
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1da97efad08a..e4ac559c4a24 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -77,6 +77,23 @@ extern void threshold_interrupt(void);
 extern void call_function_interrupt(void);
 extern void call_function_single_interrupt(void);
 
+#ifdef CONFIG_TRACING
+/* Interrupt handlers registered during init_IRQ */
+extern void trace_apic_timer_interrupt(void);
+extern void trace_x86_platform_ipi(void);
+extern void trace_error_interrupt(void);
+extern void trace_irq_work_interrupt(void);
+extern void trace_spurious_interrupt(void);
+extern void trace_thermal_interrupt(void);
+extern void trace_reschedule_interrupt(void);
+extern void trace_threshold_interrupt(void);
+extern void trace_call_function_interrupt(void);
+extern void trace_call_function_single_interrupt(void);
+#define trace_irq_move_cleanup_interrupt  irq_move_cleanup_interrupt
+#define trace_reboot_interrupt  reboot_interrupt
+#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
+#endif /* CONFIG_TRACING */
+
 /* IOAPIC */
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3741c653767c..f87f7fcefa0a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -59,7 +59,7 @@
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
-			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
+			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -222,14 +222,22 @@ struct kvm_mmu_page {
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
+
+	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
+	unsigned long mmu_valid_gen;
+
 	DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 #ifdef CONFIG_X86_32
+	/*
+	 * Used out of the mmu-lock to avoid reading spte values while an
+	 * update is in progress; see the comments in __get_spte_lockless().
+	 */
 	int clear_spte_count;
 #endif
 
+	/* Number of writes since the last time traversal visited this page. */
 	int write_flooding_count;
-	bool mmio_cached;
 };
 
 struct kvm_pio_request {
@@ -529,11 +537,14 @@ struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
+	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
 	 */
 	struct list_head active_mmu_pages;
+	struct list_head zapped_obsolete_pages;
+
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	int iommu_flags;
@@ -769,7 +780,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_zap_mmio_sptes(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index fa5f71e021d5..6b52980c29c1 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -61,7 +61,7 @@
 #define MCJ_CTX_IRQ		0x2    /* inject context: IRQ */
 #define MCJ_NMI_BROADCAST	0x4    /* do NMI broadcasting */
 #define MCJ_EXCEPTION		0x8    /* raise as exception */
-#define MCJ_IRQ_BRAODCAST	0x10   /* do IRQ broadcasting */
+#define MCJ_IRQ_BROADCAST	0x10   /* do IRQ broadcasting */
 
 #define MCE_OVERFLOW 0		/* bit 0 in flags means overflow */
 
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
new file mode 100644
index 000000000000..c6b043f40271
--- /dev/null
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_X86_MICROCODE_AMD_H
+#define _ASM_X86_MICROCODE_AMD_H
+
+#include <asm/microcode.h>
+
+#define UCODE_MAGIC			0x00414d44
+#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
+#define UCODE_UCODE_TYPE		0x00000001
+
+#define SECTION_HDR_SIZE		8
+#define CONTAINER_HDR_SZ		12
+
+struct equiv_cpu_entry {
+	u32	installed_cpu;
+	u32	fixed_errata_mask;
+	u32	fixed_errata_compare;
+	u16	equiv_cpu;
+	u16	res;
+} __attribute__((packed));
+
+struct microcode_header_amd {
+	u32	data_code;
+	u32	patch_id;
+	u16	mc_patch_data_id;
+	u8	mc_patch_data_len;
+	u8	init_flag;
+	u32	mc_patch_data_checksum;
+	u32	nb_dev_id;
+	u32	sb_dev_id;
+	u16	processor_rev_id;
+	u8	nb_rev_id;
+	u8	sb_rev_id;
+	u8	bios_api_rev;
+	u8	reserved1[3];
+	u32	match_reg[8];
+} __attribute__((packed));
+
+struct microcode_amd {
+	struct microcode_header_amd	hdr;
+	unsigned int			mpb[0];
+};
+
+static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
+				unsigned int sig)
+{
+	int i = 0;
+
+	if (!equiv_cpu_table)
+		return 0;
+
+	while (equiv_cpu_table[i].installed_cpu != 0) {
+		if (sig == equiv_cpu_table[i].installed_cpu)
+			return equiv_cpu_table[i].equiv_cpu;
+
+		i++;
+	}
+	return 0;
+}
+
+extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
+extern int apply_microcode_amd(int cpu);
+extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+
+#ifdef CONFIG_MICROCODE_AMD_EARLY
+#ifdef CONFIG_X86_32
+#define MPB_MAX_SIZE PAGE_SIZE
+extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
+#endif
+extern void __init load_ucode_amd_bsp(void);
+extern void __cpuinit load_ucode_amd_ap(void);
+extern int __init save_microcode_in_initrd_amd(void);
+#else
+static inline void __init load_ucode_amd_bsp(void) {}
+static inline void __cpuinit load_ucode_amd_ap(void) {}
+static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+#endif
+
+#endif /* _ASM_X86_MICROCODE_AMD_H */
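
For illustration only, a sketch (not part of the patch; equiv_cpu_table is a hypothetical local pointer) of how a caller would use find_equiv_id() when scanning an AMD microcode container:

	/* Map the CPUID(1).EAX signature to an equivalence ID. */
	u32 sig   = cpuid_eax(0x00000001);
	u16 eq_id = find_equiv_id(equiv_cpu_table, sig);
	if (!eq_id)
		return;	/* container carries no microcode for this CPU */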
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 5356f927d411..87a085333cbf 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -67,10 +67,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev);
 extern void __init load_ucode_intel_bsp(void);
 extern void __cpuinit load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
+extern int __init save_microcode_in_initrd_intel(void);
 #else
 static inline __init void load_ucode_intel_bsp(void) {}
 static inline __cpuinit void load_ucode_intel_ap(void) {}
 static inline void show_ucode_info_early(void) {}
+static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index c2934be2446a..cd9c41938b8a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -12,6 +12,9 @@ struct ms_hyperv_info {
 extern struct ms_hyperv_info ms_hyperv;
 
 void hyperv_callback_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_hyperv_callback_vector hyperv_callback_vector
+#endif
 void hyperv_vector_handler(struct pt_regs *regs);
 void hv_register_vmbus_handler(int irq, irq_handler_t handler);
 
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 03f90c8a5a7c..0208c3c2cbc6 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 68a87b0f8e29..2c543fff241b 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
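
For illustration only (not part of the patch): with the fail_fn argument gone, callers branch on the -1 return and invoke their own slowpath; the slowpath name below is hypothetical:

	if (__mutex_fastpath_lock_retval(&lock->count))
		return example_lock_slowpath(lock);	/* hypothetical slowpath */
	return 0;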
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 57cb63402213..8249df45d2f2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,6 +29,9 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
+#define HSW_IN_TX					(1ULL << 32)
+#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
+
 #define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
 #define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
 #define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1e672234c4ff..5b0818bc8963 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -506,9 +506,6 @@ static inline unsigned long pages_to_mb(unsigned long npg)
 	return npg >> (20 - PAGE_SHIFT);
 }
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #if PAGETABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 22224b3b43bb..29937c4f6ff8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,9 +89,9 @@ struct cpuinfo_x86 {
 	char			wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char			hard_math;
 	char			rfu;
 	char			pad0;
+	char			pad1;
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int			x86_tlbsize;
@@ -164,6 +164,7 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
+extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
@@ -981,5 +982,5 @@ bool xen_set_default_idle(void);
 #endif
 
 void stop_this_cpu(void *dummy);
-
+void df_debug(struct pt_regs *regs, long error_code);
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index beff97f7df37..7a958164088c 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -7,10 +7,10 @@
 
 #include <asm/processor-flags.h>
 
-#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
+#define FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
 			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
 			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
-			 X86_EFLAGS_CF)
+			 X86_EFLAGS_CF | X86_EFLAGS_RF)
 
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 41fc93a2e225..2f4d924fe6c9 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -16,7 +16,7 @@ static inline void native_clts(void)
  * all loads stores around it, which can hurt performance. Solution is to
  * use a variable and mimic reads and writes to it to enforce serialization
  */
-static unsigned long __force_order;
+extern unsigned long __force_order;
 
 static inline unsigned long native_read_cr0(void)
 {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 50a7fc0f824a..cf512003e663 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	 __flush_tlb_single(addr);
+	__flush_tlb_single(addr);
 }
 
 #define TLB_FLUSH_ALL	-1UL
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
new file mode 100644
index 000000000000..2874df24e7a4
--- /dev/null
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -0,0 +1,104 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_vectors
+
+#if !defined(_TRACE_IRQ_VECTORS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_VECTORS_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_irq_vector,
+
+	TP_PROTO(int vector),
+
+	TP_ARGS(vector),
+
+	TP_STRUCT__entry(
+		__field(	int,	vector	)
+	),
+
+	TP_fast_assign(
+		__entry->vector = vector;
+	),
+
+	TP_printk("vector=%d", __entry->vector) );
+
+#define DEFINE_IRQ_VECTOR_EVENT(name)		\
+DEFINE_EVENT_FN(x86_irq_vector, name##_entry,	\
+	TP_PROTO(int vector),			\
+	TP_ARGS(vector),			\
+	trace_irq_vector_regfunc,		\
+	trace_irq_vector_unregfunc);		\
+DEFINE_EVENT_FN(x86_irq_vector, name##_exit,	\
+	TP_PROTO(int vector),			\
+	TP_ARGS(vector),			\
+	trace_irq_vector_regfunc,		\
+	trace_irq_vector_unregfunc);
+
+
+/*
+ * local_timer - called when entering/exiting a local timer interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(local_timer);
+
+/*
+ * reschedule - called when entering/exiting a reschedule vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(reschedule);
+
+/*
+ * spurious_apic - called when entering/exiting a spurious apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(spurious_apic);
+
+/*
+ * error_apic - called when entering/exiting an error apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(error_apic);
+
+/*
+ * x86_platform_ipi - called when entering/exiting a x86 platform ipi interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
+
+/*
+ * irq_work - called when entering/exiting a irq work interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(irq_work);
+
+/*
+ * call_function - called when entering/exiting a call function interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function);
+
+/*
+ * call_function_single - called when entering/exiting a call function
+ * single interrupt vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function_single);
+
+/*
+ * threshold_apic - called when entering/exiting a threshold apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
+
+/*
+ * thermal_apic - called when entering/exiting a thermal apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE irq_vectors
+#endif /* _TRACE_IRQ_VECTORS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
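
For illustration only, a sketch (not part of the patch; the handler name is hypothetical) of how the _entry/_exit tracepoints defined above pair with the apic.h helpers from earlier in this diff:

	void smp_trace_example_timer_interrupt(struct pt_regs *regs)
	{
		entering_ack_irq();
		trace_local_timer_entry(LOCAL_TIMER_VECTOR);
		/* ... actual timer work ... */
		trace_local_timer_exit(LOCAL_TIMER_VECTOR);
		exiting_irq();
	}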
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c457dc..4f7923dd0007 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -235,7 +235,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
 static inline int
 __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
-	might_sleep();
+	might_fault();
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a06983cdc125..0b46ef261c77 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -731,6 +731,9 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 }
 
 extern void uv_bau_message_intr1(void);
+#ifdef CONFIG_TRACING
+#define trace_uv_bau_message_intr1 uv_bau_message_intr1
+#endif
 extern void uv_bau_timeout_intr1(void);
 
 struct atomic_short {