Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/alternative-asm.h |  18
-rw-r--r--  arch/x86/include/asm/apic.h            |   6
-rw-r--r--  arch/x86/include/asm/asm.h             |  25
-rw-r--r--  arch/x86/include/asm/atomic.h          |  30
-rw-r--r--  arch/x86/include/asm/atomic64_64.h     |   8
-rw-r--r--  arch/x86/include/asm/cacheflush.h      |   6
-rw-r--r--  arch/x86/include/asm/calling.h         | 247
-rw-r--r--  arch/x86/include/asm/dwarf2.h          | 170
-rw-r--r--  arch/x86/include/asm/entry_arch.h      |   5
-rw-r--r--  arch/x86/include/asm/frame.h           |   7
-rw-r--r--  arch/x86/include/asm/hardirq.h         |   4
-rw-r--r--  arch/x86/include/asm/hpet.h            |  16
-rw-r--r--  arch/x86/include/asm/hw_irq.h          | 140
-rw-r--r--  arch/x86/include/asm/io.h              |   9
-rw-r--r--  arch/x86/include/asm/io_apic.h         | 114
-rw-r--r--  arch/x86/include/asm/irq.h             |   4
-rw-r--r--  arch/x86/include/asm/irq_remapping.h   |  80
-rw-r--r--  arch/x86/include/asm/irq_vectors.h     |  51
-rw-r--r--  arch/x86/include/asm/irqdomain.h       |  63
-rw-r--r--  arch/x86/include/asm/mce.h             |  28
-rw-r--r--  arch/x86/include/asm/msi.h             |   7
-rw-r--r--  arch/x86/include/asm/msr-index.h       | 665
-rw-r--r--  arch/x86/include/asm/msr.h             |  12
-rw-r--r--  arch/x86/include/asm/mtrr.h            |  15
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |   7
-rw-r--r--  arch/x86/include/asm/pat.h             |   9
-rw-r--r--  arch/x86/include/asm/pci.h             |   5
-rw-r--r--  arch/x86/include/asm/pgtable.h         |   8
-rw-r--r--  arch/x86/include/asm/pgtable_types.h   |   3
-rw-r--r--  arch/x86/include/asm/proto.h           |  10
-rw-r--r--  arch/x86/include/asm/special_insns.h   |  38
-rw-r--r--  arch/x86/include/asm/thread_info.h     |   8
-rw-r--r--  arch/x86/include/asm/topology.h        |   2
-rw-r--r--  arch/x86/include/asm/trace/irq_vectors.h |  6
-rw-r--r--  arch/x86/include/asm/traps.h           |   3
-rw-r--r--  arch/x86/include/asm/uaccess_32.h      |   4
-rw-r--r--  arch/x86/include/asm/x86_init.h        |  21
37 files changed, 1109 insertions(+), 745 deletions(-)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index bdf02eeee765..e7636bac7372 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -18,6 +18,12 @@
 .endm
 #endif
 
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
 .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
 	.long \orig - .
 	.long \alt - .
@@ -27,6 +33,12 @@
 	.byte \pad_len
 .endm
 
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
 .macro ALTERNATIVE oldinstr, newinstr, feature
 140:
 	\oldinstr
@@ -55,6 +67,12 @@
  */
 #define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
 
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
+ */
 .macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
 140:
 	\oldinstr
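For context, a hedged sketch of how these entries are consumed from C code; the alternative() helper comes from <asm/alternative.h>, not from this file, and the barrier choice is illustrative only:

#include <asm/alternative.h>	/* C-side counterpart of the asm macros above */
#include <asm/cpufeature.h>

static inline void mb_sketch(void)
{
	/* Patched at boot by apply_alternatives(): CPUs with SSE2 run
	 * "mfence"; older CPUs keep the locked-add fallback. */
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}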
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 976b86a325e5..c8393634ca0c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -644,6 +644,12 @@ static inline void entering_ack_irq(void)
 	entering_irq();
 }
 
+static inline void ipi_entering_ack_irq(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+}
+
 static inline void exiting_irq(void)
 {
 	irq_exit();
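A hedged sketch of the intended pairing (the handler name and counter are illustrative, not taken from this patch):

__visible void smp_example_ipi(struct pt_regs *regs)
{
	ipi_entering_ack_irq();		/* ack_APIC_irq() + irq_enter() */
	inc_irq_stat(kvm_posted_intr_ipis);	/* e.g. bump a per-CPU stat */
	exiting_irq();			/* irq_exit() */
}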
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7730c1c5c83a..189679aba703 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -63,6 +63,31 @@
 	_ASM_ALIGN ;						\
 	_ASM_PTR (entry);					\
 	.popsection
+
+.macro ALIGN_DESTINATION
+	/* check for bad alignment of destination */
+	movl %edi,%ecx
+	andl $7,%ecx
+	jz 102f				/* already aligned */
+	subl $8,%ecx
+	negl %ecx
+	subl %ecx,%edx
+100:	movb (%rsi),%al
+101:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 100b
+102:
+	.section .fixup,"ax"
+103:	addl %ecx,%edx			/* ecx is zerorest also */
+	jmp copy_user_handle_tail
+	.previous
+
+	_ASM_EXTABLE(100b,103b)
+	_ASM_EXTABLE(101b,103b)
+	.endm
+
 #else
 # define _ASM_EXTABLE(from,to)			\
 	" .pushsection \"__ex_table\",\"a\"\n"	\
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 5e5cd123fdfb..e9168955c42f 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,7 @@
  *
  * Atomically reads the value of @v.
  */
-static inline int atomic_read(const atomic_t *v)
+static __always_inline int atomic_read(const atomic_t *v)
 {
 	return ACCESS_ONCE((v)->counter);
 }
@@ -34,7 +34,7 @@ static inline int atomic_read(const atomic_t *v)
  *
  * Atomically sets the value of @v to @i.
  */
-static inline void atomic_set(atomic_t *v, int i)
+static __always_inline void atomic_set(atomic_t *v, int i)
 {
 	v->counter = i;
 }
@@ -46,7 +46,7 @@ static inline void atomic_set(atomic_t *v, int i)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic_add(int i, atomic_t *v)
+static __always_inline void atomic_add(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0"
 		     : "+m" (v->counter)
@@ -60,7 +60,7 @@ static inline void atomic_add(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "subl %1,%0"
 		     : "+m" (v->counter)
@@ -76,7 +76,7 @@ static inline void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
 }
@@ -87,7 +87,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-static inline void atomic_inc(atomic_t *v)
+static __always_inline void atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
 		     : "+m" (v->counter));
@@ -99,7 +99,7 @@ static inline void atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static inline void atomic_dec(atomic_t *v)
+static __always_inline void atomic_dec(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "decl %0"
 		     : "+m" (v->counter));
@@ -113,7 +113,7 @@ static inline void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline int atomic_dec_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 }
@@ -126,7 +126,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
 * other cases.
  */
-static inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline int atomic_inc_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
@@ -140,7 +140,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
 * result is greater than or equal to zero.
  */
-static inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline int atomic_add_negative(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
 }
@@ -152,7 +152,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline int atomic_add_return(int i, atomic_t *v)
+static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
@@ -164,7 +164,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return atomic_add_return(-i, v);
 }
@@ -172,7 +172,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
@@ -191,7 +191,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -213,7 +213,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically adds 1 to @v
  * Returns the new value of @u
  */
-static inline short int atomic_inc_short(short int *v)
+static __always_inline short int atomic_inc_short(short int *v)
 {
 	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
 	return *v;
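A hedged usage sketch, not part of this diff: a simple refcount built on the API above; the switch to __always_inline keeps these helpers inlined even in translation units compiled for size:

#include <linux/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_inc(&refs);
}

static bool put_ref(void)
{
	return atomic_dec_and_test(&refs);	/* true on the last put */
}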
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index f8d273e18516..b965f9e03f2a 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -40,7 +40,7 @@ static inline void atomic64_set(atomic64_t *v, long i)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(long i, atomic64_t *v)
+static __always_inline void atomic64_add(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
@@ -81,7 +81,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-static inline void atomic64_inc(atomic64_t *v)
+static __always_inline void atomic64_inc(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
@@ -94,7 +94,7 @@ static inline void atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static inline void atomic64_dec(atomic64_t *v)
+static __always_inline void atomic64_dec(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "decq %0"
 		     : "=m" (v->counter)
@@ -148,7 +148,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static __always_inline long atomic64_add_return(long i, atomic64_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 47c8e32f621a..b6f7457d12e4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -8,7 +8,7 @@
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
- * Cachability   : UnCached, WriteCombining, WriteBack
+ * Cachability   : UnCached, WriteCombining, WriteThrough, WriteBack
  * Executability : eXeutable, NoteXecutable
  * Read/Write    : ReadOnly, ReadWrite
  * Presence      : NotPresent
@@ -35,9 +35,11 @@
 
 int _set_memory_uc(unsigned long addr, int numpages);
 int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wt(unsigned long addr, int numpages);
 int _set_memory_wb(unsigned long addr, int numpages);
 int set_memory_uc(unsigned long addr, int numpages);
 int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wt(unsigned long addr, int numpages);
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
@@ -48,10 +50,12 @@ int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wt(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
 int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wt(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
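A hedged sketch of the new write-through attribute helper in use (buffer and page count are illustrative):

static int make_buffer_wt(void *buf, int nr_pages)
{
	int ret = set_memory_wt((unsigned long)buf, nr_pages);

	if (ret)
		pr_warn("set_memory_wt failed: %d\n", ret);
	return ret;
}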
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
deleted file mode 100644
index 1c8b50edb2db..000000000000
--- a/arch/x86/include/asm/calling.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
-
- x86 function call convention, 64-bit:
- -------------------------------------
-  arguments           | callee-saved       | extra caller-saved | return
- [callee-clobbered]   |                    | [callee-clobbered] |
- ---------------------------------------------------------------------------
- rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
-
- ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
-   functions when it sees tail-call optimization possibilities) rflags is
-   clobbered. Leftover arguments are passed over the stack frame.)
-
- [*]  In the frame-pointers case rbp is fixed to the stack frame.
-
- [**] for struct return values wider than 64 bits the return convention is a
-      bit more complex: up to 128 bits width we return small structures
-      straight in rax, rdx. For structures larger than that (3 words or
-      larger) the caller puts a pointer to an on-stack return struct
-      [allocated in the caller's stack frame] into the first argument - i.e.
-      into rdi. All other arguments shift up by one in this case.
-      Fortunately this case is rare in the kernel.
-
-For 32-bit we have the following conventions - kernel is built with
--mregparm=3 and -freg-struct-return:
-
- x86 function calling convention, 32-bit:
- ----------------------------------------
-  arguments         | callee-saved        | extra caller-saved | return
- [callee-clobbered] |                     | [callee-clobbered] |
- -------------------------------------------------------------------------
- eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
-
- ( here too esp is obviously invariant across normal function calls. eflags
-   is clobbered. Leftover arguments are passed over the stack frame. )
-
- [*]  In the frame-pointers case ebp is fixed to the stack frame.
-
- [**] We build with -freg-struct-return, which on 32-bit means similar
-      semantics as on 64-bit: edx can be used for a second return value
-      (i.e. covering integer and structure sizes up to 64 bits) - after that
-      it gets more complex and more expensive: 3-word or larger struct returns
-      get done in the caller's frame and the pointer to the return struct goes
-      into regparm0, i.e. eax - the other arguments shift up and the
-      function's register parameters degenerate to regparm=2 in essence.
-
-*/
-
-#include <asm/dwarf2.h>
-
-#ifdef CONFIG_X86_64
-
-/*
- * 64-bit system call stack frame layout defines and helpers,
- * for assembly code:
- */
-
-/* The layout forms the "struct pt_regs" on the stack: */
-/*
- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
- * unless syscall needs a complete, fully filled "struct pt_regs".
- */
-#define R15		0*8
-#define R14		1*8
-#define R13		2*8
-#define R12		3*8
-#define RBP		4*8
-#define RBX		5*8
-/* These regs are callee-clobbered. Always saved on kernel entry. */
-#define R11		6*8
-#define R10		7*8
-#define R9		8*8
-#define R8		9*8
-#define RAX		10*8
-#define RCX		11*8
-#define RDX		12*8
-#define RSI		13*8
-#define RDI		14*8
-/*
- * On syscall entry, this is syscall#. On CPU exception, this is error code.
- * On hw interrupt, it's IRQ number:
- */
-#define ORIG_RAX	15*8
-/* Return frame for iretq */
-#define RIP		16*8
-#define CS		17*8
-#define EFLAGS		18*8
-#define RSP		19*8
-#define SS		20*8
-
-#define SIZEOF_PTREGS	21*8
-
-	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
-	subq	$15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
-	.endm
-
-	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
-	.if \r11
-	movq_cfi r11, 6*8+\offset
-	.endif
-	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9,  8*8+\offset
-	movq_cfi r8,  9*8+\offset
-	.endif
-	.if \rax
-	movq_cfi rax, 10*8+\offset
-	.endif
-	.if \rcx
-	movq_cfi rcx, 11*8+\offset
-	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
-	.endm
-	.macro SAVE_C_REGS offset=0
-	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
-	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_R891011
-	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RCX_R891011
-	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
-	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
-	.endm
-
-	.macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
-	.endm
-	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
-	.endm
-
-	.macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
-	.endm
-
-	.macro ZERO_EXTRA_REGS
-	xorl	%r15d, %r15d
-	xorl	%r14d, %r14d
-	xorl	%r13d, %r13d
-	xorl	%r12d, %r12d
-	xorl	%ebp, %ebp
-	xorl	%ebx, %ebx
-	.endm
-
-	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
-	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
-	.endif
-	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
-	.endif
-	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
-	.endif
-	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
-	.endif
-	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
-	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
-	.endm
-	.macro RESTORE_C_REGS
-	RESTORE_C_REGS_HELPER 1,1,1,1,1
-	.endm
-	.macro RESTORE_C_REGS_EXCEPT_RAX
-	RESTORE_C_REGS_HELPER 0,1,1,1,1
-	.endm
-	.macro RESTORE_C_REGS_EXCEPT_RCX
-	RESTORE_C_REGS_HELPER 1,0,1,1,1
-	.endm
-	.macro RESTORE_C_REGS_EXCEPT_R11
-	RESTORE_C_REGS_HELPER 1,1,0,1,1
-	.endm
-	.macro RESTORE_C_REGS_EXCEPT_RCX_R11
-	RESTORE_C_REGS_HELPER 1,0,0,1,1
-	.endm
-	.macro RESTORE_RSI_RDI
-	RESTORE_C_REGS_HELPER 0,0,0,0,0
-	.endm
-	.macro RESTORE_RSI_RDI_RDX
-	RESTORE_C_REGS_HELPER 0,0,0,0,1
-	.endm
-
-	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
-	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
-	.endm
-
-	.macro icebp
-	.byte 0xf1
-	.endm
-
-#else /* CONFIG_X86_64 */
-
-/*
- * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
- * are different from the entry_32.S versions in not changing the segment
- * registers. So only suitable for in kernel use, not when transitioning
- * from or to user space. The resulting stack frame is not a standard
- * pt_regs frame. The main use case is calling C code from assembler
- * when all the registers need to be preserved.
- */
-
-	.macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
-	.endm
-
-	.macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
-	.endm
-
-#endif /* CONFIG_X86_64 */
-
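The offsets the deleted header defined describe the on-stack struct pt_regs layout; as a hedged C mirror of the bottom of that layout (field names illustrative, not the kernel's struct pt_regs):

struct pt_regs_layout_sketch {
	unsigned long r15, r14, r13, r12, rbp, rbx;	/* callee-preserved */
	unsigned long r11, r10, r9, r8;			/* callee-clobbered */
	unsigned long ax, cx, dx, si, di;
	unsigned long orig_ax;	/* syscall#, error code, or IRQ number */
	unsigned long ip, cs, flags, sp, ss;		/* iretq frame */
};
/* offsetof(struct pt_regs_layout_sketch, ip) == 16*8, matching RIP above. */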
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644
index de1cdaf4d743..000000000000
--- a/arch/x86/include/asm/dwarf2.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older version.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-#define CFI_ESCAPE		.cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-	/*
-	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-	 * The latter we currently just discard since we don't do DWARF
-	 * unwinding at runtime. So only the offline DWARF information is
-	 * useful to anyone. Note we should not use this directive if this
-	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-	 * changed so it doesn't discard .eh_frame.
-	 */
-	.cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-exisiting code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC		cfi_ignore
-#define CFI_ENDPROC		cfi_ignore
-#define CFI_DEF_CFA		cfi_ignore
-#define CFI_DEF_CFA_REGISTER	cfi_ignore
-#define CFI_DEF_CFA_OFFSET	cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET	cfi_ignore
-#define CFI_OFFSET		cfi_ignore
-#define CFI_REL_OFFSET		cfi_ignore
-#define CFI_REGISTER		cfi_ignore
-#define CFI_RESTORE		cfi_ignore
-#define CFI_REMEMBER_STATE	cfi_ignore
-#define CFI_RESTORE_STATE	cfi_ignore
-#define CFI_UNDEFINED		cfi_ignore
-#define CFI_ESCAPE		cfi_ignore
-#define CFI_SIGNAL_FRAME	cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-	.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#else /*!CONFIG_X86_64*/
-	.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index dc5fa661465f..df002992d8fd 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -23,6 +23,8 @@ BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
 #ifdef CONFIG_HAVE_KVM
 BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
 		 smp_kvm_posted_intr_ipi)
+BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
+		 smp_kvm_posted_intr_wakeup_ipi)
 #endif
 
 /*
@@ -50,4 +52,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
 #endif
 
+#ifdef CONFIG_X86_MCE_AMD
+BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR)
+#endif
 #endif
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f47eb65..793179cf8e21 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	__ASM_SIZE(push,_cfi)	%__ASM_REG(bp)
-	CFI_REL_OFFSET		__ASM_REG(bp), 0
+	__ASM_SIZE(push,)	%__ASM_REG(bp)
 	__ASM_SIZE(mov)		%__ASM_REG(sp), %__ASM_REG(bp)
 	.endm
 	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi)	%__ASM_REG(bp)
-	CFI_RESTORE		__ASM_REG(bp)
+	__ASM_SIZE(pop,)	%__ASM_REG(bp)
 	.endm
 #else
 	.macro FRAME
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f5fb6b6567e..7178043b0e1d 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -14,6 +14,7 @@ typedef struct {
 #endif
 #ifdef CONFIG_HAVE_KVM
 	unsigned int kvm_posted_intr_ipis;
+	unsigned int kvm_posted_intr_wakeup_ipis;
 #endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
@@ -33,6 +34,9 @@ typedef struct {
 #ifdef CONFIG_X86_MCE_THRESHOLD
 	unsigned int irq_threshold_count;
 #endif
+#ifdef CONFIG_X86_MCE_AMD
+	unsigned int irq_deferred_error_count;
+#endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
 	unsigned int irq_hv_callback_count;
 #endif
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 36f7125945e3..5fa9fb0f8809 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -74,20 +74,16 @@ extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
 struct irq_data;
+struct hpet_dev;
+struct irq_domain;
+
 extern void hpet_msi_unmask(struct irq_data *data);
 extern void hpet_msi_mask(struct irq_data *data);
-struct hpet_dev;
 extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
 extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
-
-#ifdef CONFIG_PCI_MSI
-extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
-#else
-static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
-	return -EINVAL;
-}
-#endif
+extern struct irq_domain *hpet_create_irq_domain(int hpet_id);
+extern int hpet_assign_irq(struct irq_domain *domain,
+			   struct hpet_dev *dev, int dev_num);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 
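A hedged sketch of the new allocation flow these declarations enable (hpet_id, hdev and num are illustrative; error handling elided):

static int hpet_setup_msi_sketch(int hpet_id, struct hpet_dev *hdev, int num)
{
	struct irq_domain *domain = hpet_create_irq_domain(hpet_id);

	if (!domain)
		return -ENODEV;
	return hpet_assign_irq(domain, hdev, num);	/* >0: allocated irq */
}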
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index e9571ddabc4f..6615032e19c8 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,6 +29,7 @@
 extern asmlinkage void apic_timer_interrupt(void);
 extern asmlinkage void x86_platform_ipi(void);
 extern asmlinkage void kvm_posted_intr_ipi(void);
+extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
 extern asmlinkage void error_interrupt(void);
 extern asmlinkage void irq_work_interrupt(void);
 
@@ -36,43 +37,10 @@ extern asmlinkage void spurious_interrupt(void);
 extern asmlinkage void thermal_interrupt(void);
 extern asmlinkage void reschedule_interrupt(void);
 
-extern asmlinkage void invalidate_interrupt(void);
-extern asmlinkage void invalidate_interrupt0(void);
-extern asmlinkage void invalidate_interrupt1(void);
-extern asmlinkage void invalidate_interrupt2(void);
-extern asmlinkage void invalidate_interrupt3(void);
-extern asmlinkage void invalidate_interrupt4(void);
-extern asmlinkage void invalidate_interrupt5(void);
-extern asmlinkage void invalidate_interrupt6(void);
-extern asmlinkage void invalidate_interrupt7(void);
-extern asmlinkage void invalidate_interrupt8(void);
-extern asmlinkage void invalidate_interrupt9(void);
-extern asmlinkage void invalidate_interrupt10(void);
-extern asmlinkage void invalidate_interrupt11(void);
-extern asmlinkage void invalidate_interrupt12(void);
-extern asmlinkage void invalidate_interrupt13(void);
-extern asmlinkage void invalidate_interrupt14(void);
-extern asmlinkage void invalidate_interrupt15(void);
-extern asmlinkage void invalidate_interrupt16(void);
-extern asmlinkage void invalidate_interrupt17(void);
-extern asmlinkage void invalidate_interrupt18(void);
-extern asmlinkage void invalidate_interrupt19(void);
-extern asmlinkage void invalidate_interrupt20(void);
-extern asmlinkage void invalidate_interrupt21(void);
-extern asmlinkage void invalidate_interrupt22(void);
-extern asmlinkage void invalidate_interrupt23(void);
-extern asmlinkage void invalidate_interrupt24(void);
-extern asmlinkage void invalidate_interrupt25(void);
-extern asmlinkage void invalidate_interrupt26(void);
-extern asmlinkage void invalidate_interrupt27(void);
-extern asmlinkage void invalidate_interrupt28(void);
-extern asmlinkage void invalidate_interrupt29(void);
-extern asmlinkage void invalidate_interrupt30(void);
-extern asmlinkage void invalidate_interrupt31(void);
-
 extern asmlinkage void irq_move_cleanup_interrupt(void);
 extern asmlinkage void reboot_interrupt(void);
 extern asmlinkage void threshold_interrupt(void);
+extern asmlinkage void deferred_error_interrupt(void);
 
 extern asmlinkage void call_function_interrupt(void);
 extern asmlinkage void call_function_single_interrupt(void);
@@ -87,60 +55,93 @@ extern void trace_spurious_interrupt(void);
 extern void trace_thermal_interrupt(void);
 extern void trace_reschedule_interrupt(void);
 extern void trace_threshold_interrupt(void);
+extern void trace_deferred_error_interrupt(void);
 extern void trace_call_function_interrupt(void);
 extern void trace_call_function_single_interrupt(void);
 #define trace_irq_move_cleanup_interrupt  irq_move_cleanup_interrupt
 #define trace_reboot_interrupt  reboot_interrupt
 #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
+#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
 #endif /* CONFIG_TRACING */
 
-#ifdef	CONFIG_IRQ_REMAP
-/* Intel specific interrupt remapping information */
-struct irq_2_iommu {
-	struct intel_iommu *iommu;
-	u16 irte_index;
-	u16 sub_handle;
-	u8  irte_mask;
-};
-
-/* AMD specific interrupt remapping information */
-struct irq_2_irte {
-	u16 devid; /* Device ID for IRTE table */
-	u16 index; /* Index into IRTE table*/
-};
-#endif	/* CONFIG_IRQ_REMAP */
-
 #ifdef	CONFIG_X86_LOCAL_APIC
 struct irq_data;
+struct pci_dev;
+struct msi_desc;
+
+enum irq_alloc_type {
+	X86_IRQ_ALLOC_TYPE_IOAPIC = 1,
+	X86_IRQ_ALLOC_TYPE_HPET,
+	X86_IRQ_ALLOC_TYPE_MSI,
+	X86_IRQ_ALLOC_TYPE_MSIX,
+	X86_IRQ_ALLOC_TYPE_DMAR,
+	X86_IRQ_ALLOC_TYPE_UV,
+};
 
-struct irq_cfg {
-	cpumask_var_t		domain;
-	cpumask_var_t		old_domain;
-	u8			vector;
-	u8			move_in_progress : 1;
-#ifdef CONFIG_IRQ_REMAP
-	u8			remapped : 1;
+struct irq_alloc_info {
+	enum irq_alloc_type	type;
+	u32			flags;
+	const struct cpumask	*mask;	/* CPU mask for vector allocation */
 	union {
-		struct irq_2_iommu irq_2_iommu;
-		struct irq_2_irte  irq_2_irte;
-	};
+		int		unused;
+#ifdef	CONFIG_HPET_TIMER
+		struct {
+			int		hpet_id;
+			int		hpet_index;
+			void		*hpet_data;
+		};
 #endif
-	union {
-#ifdef CONFIG_X86_IO_APIC
+#ifdef	CONFIG_PCI_MSI
 		struct {
-			struct list_head	irq_2_pin;
+			struct pci_dev	*msi_dev;
+			irq_hw_number_t	msi_hwirq;
+		};
+#endif
+#ifdef	CONFIG_X86_IO_APIC
+		struct {
+			int		ioapic_id;
+			int		ioapic_pin;
+			int		ioapic_node;
+			u32		ioapic_trigger : 1;
+			u32		ioapic_polarity : 1;
+			u32		ioapic_valid : 1;
+			struct IO_APIC_route_entry *ioapic_entry;
+		};
+#endif
+#ifdef	CONFIG_DMAR_TABLE
+		struct {
+			int		dmar_id;
+			void		*dmar_data;
+		};
+#endif
+#ifdef	CONFIG_HT_IRQ
+		struct {
+			int		ht_pos;
+			int		ht_idx;
+			struct pci_dev	*ht_dev;
+			void		*ht_update;
+		};
+#endif
+#ifdef	CONFIG_X86_UV
+		struct {
+			int		uv_limit;
+			int		uv_blade;
+			unsigned long	uv_offset;
+			char		*uv_name;
 		};
 #endif
 	};
 };
 
+struct irq_cfg {
+	unsigned int		dest_apicid;
+	u8			vector;
+};
+
 extern struct irq_cfg *irq_cfg(unsigned int irq);
 extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
-extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
-extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
 extern void setup_vector_irq(int cpu);
 #ifdef	CONFIG_SMP
 extern void send_cleanup_vector(struct irq_cfg *);
@@ -150,10 +151,7 @@ static inline void send_cleanup_vector(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif
 
-extern int apic_retrigger_irq(struct irq_data *data);
 extern void apic_ack_edge(struct irq_data *data);
-extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			     unsigned int *dest_id);
 #else	/* CONFIG_X86_LOCAL_APIC */
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
@@ -163,8 +161,7 @@ static inline void unlock_vector_lock(void) {}
 extern atomic_t irq_err_count;
 extern atomic_t irq_mis_count;
 
-/* EISA */
-extern void eisa_set_level_irq(unsigned int irq);
+extern void elcr_set_level_irq(unsigned int irq);
 
 /* SMP */
 extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
@@ -178,7 +175,6 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
 extern __visible void smp_reschedule_interrupt(struct pt_regs *);
 extern __visible void smp_call_function_interrupt(struct pt_regs *);
 extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
-extern __visible void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 
 extern char irq_entries_start[];
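A hedged sketch of how a caller might fill the new irq_alloc_info for an IO-APIC pin (all values illustrative; zeroing via memset stands in for whatever init helper the rest of the series provides):

struct irq_alloc_info info;

memset(&info, 0, sizeof(info));
info.type            = X86_IRQ_ALLOC_TYPE_IOAPIC;
info.ioapic_id       = 0;
info.ioapic_pin      = 4;
info.ioapic_node     = NUMA_NO_NODE;
info.ioapic_trigger  = 1;	/* level */
info.ioapic_polarity = 1;	/* active low */
info.ioapic_valid    = 1;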
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34a5b93704d3..83ec9b1d77cc 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -35,11 +35,13 @@
  */
 
 #define ARCH_HAS_IOREMAP_WC
+#define ARCH_HAS_IOREMAP_WT
 
 #include <linux/string.h>
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/early_ioremap.h>
+#include <asm/pgtable_types.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -177,6 +179,7 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * look at pci_iomap().
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
 				unsigned long prot_val);
@@ -197,8 +200,6 @@ extern void set_iounmap_nonlazy(void);
 
 #include <asm-generic/iomap.h>
 
-#include <linux/vmalloc.h>
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
@@ -320,6 +321,7 @@ extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 				enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
 
@@ -338,6 +340,9 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 #define IO_SPACE_LIMIT 0xffff
 
 #ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_index(int handle);
+#define arch_phys_wc_index arch_phys_wc_index
+
 extern int __must_check arch_phys_wc_add(unsigned long base,
 					 unsigned long size);
 extern void arch_phys_wc_del(int handle);
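A hedged sketch of the new write-through ioremap variant in use (physical address is illustrative):

static int poke_device_wt(resource_size_t phys)
{
	void __iomem *regs = ioremap_wt(phys, PAGE_SIZE);

	if (!regs)
		return -ENOMEM;
	writel(1, regs);	/* WT: reads may be cached, writes go through */
	iounmap(regs);
	return 0;
}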
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 2f91685fe1cd..6cbf2cfb3f8a 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -95,9 +95,22 @@ struct IR_IO_APIC_route_entry {
 		index		: 15;
 } __attribute__ ((packed));
 
-#define IOAPIC_AUTO			-1
-#define IOAPIC_EDGE			0
-#define IOAPIC_LEVEL			1
+struct irq_alloc_info;
+struct ioapic_domain_cfg;
+
+#define IOAPIC_AUTO			-1
+#define IOAPIC_EDGE			0
+#define IOAPIC_LEVEL			1
+
+#define IOAPIC_MASKED			1
+#define IOAPIC_UNMASKED			0
+
+#define IOAPIC_POL_HIGH			0
+#define IOAPIC_POL_LOW			1
+
+#define IOAPIC_DEST_MODE_PHYSICAL	0
+#define IOAPIC_DEST_MODE_LOGICAL	1
+
 #define	IOAPIC_MAP_ALLOC		0x1
 #define	IOAPIC_MAP_CHECK		0x2
 
@@ -110,9 +123,6 @@ extern int nr_ioapics;
 
 extern int mpc_ioapic_id(int ioapic);
 extern unsigned int mpc_ioapic_addr(int ioapic);
-extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
-
-#define MP_MAX_IOAPIC_PIN 127
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
@@ -120,9 +130,6 @@ extern int mp_irq_entries;
 /* MP IRQ source entries */
 extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
-/* Older SiS APIC requires we rewrite the index register */
-extern int sis_apic_bug;
-
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
@@ -132,6 +139,8 @@ extern int noioapicquirk;
 /* -1 if "noapic" boot option passed */
 extern int noioapicreroute;
 
+extern u32 gsi_top;
+
 extern unsigned long io_apic_irqs;
 
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
@@ -147,13 +156,6 @@ struct irq_cfg;
 extern void ioapic_insert_resources(void);
 extern int arch_early_ioapic_init(void);
 
-extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
-				     unsigned int, int,
-				     struct io_apic_irq_attr *);
-extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
-
-extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
-
 extern int save_ioapic_entries(void);
 extern void mask_ioapic_entries(void);
 extern int restore_ioapic_entries(void);
@@ -161,82 +163,32 @@ extern int restore_ioapic_entries(void);
 extern void setup_ioapic_ids_from_mpc(void);
 extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
-struct io_apic_irq_attr {
-	int ioapic;
-	int ioapic_pin;
-	int trigger;
-	int polarity;
-};
-
-enum ioapic_domain_type {
-	IOAPIC_DOMAIN_INVALID,
-	IOAPIC_DOMAIN_LEGACY,
-	IOAPIC_DOMAIN_STRICT,
-	IOAPIC_DOMAIN_DYNAMIC,
-};
-
-struct device_node;
-struct irq_domain;
-struct irq_domain_ops;
-
-struct ioapic_domain_cfg {
-	enum ioapic_domain_type	type;
-	const struct irq_domain_ops *ops;
-	struct device_node	*dev;
-};
-
-struct mp_ioapic_gsi{
-	u32 gsi_base;
-	u32 gsi_end;
-};
-extern u32 gsi_top;
-
 extern int mp_find_ioapic(u32 gsi);
 extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
-extern u32 mp_pin_to_gsi(int ioapic, int pin);
-extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
+extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags,
+			     struct irq_alloc_info *info);
 extern void mp_unmap_irq(int irq);
 extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
 			      struct ioapic_domain_cfg *cfg);
 extern int mp_unregister_ioapic(u32 gsi_base);
 extern int mp_ioapic_registered(u32 gsi_base);
-extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
-			    irq_hw_number_t hwirq);
-extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
-extern int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node);
-extern void __init pre_init_apic_IRQ0(void);
+
+extern void ioapic_set_alloc_attr(struct irq_alloc_info *info,
+				  int node, int trigger, int polarity);
 
 extern void mp_save_irq(struct mpc_intsrc *m);
 
 extern void disable_ioapic_support(void);
 
-extern void __init native_io_apic_init_mappings(void);
+extern void __init io_apic_init_mappings(void);
 extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
-extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
-extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
 extern void native_disable_io_apic(void);
-extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
-extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
-extern int native_ioapic_set_affinity(struct irq_data *,
-				      const struct cpumask *,
-				      bool);
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 {
 	return x86_io_apic_ops.read(apic, reg);
 }
 
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-{
-	x86_io_apic_ops.write(apic, reg, value);
-}
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-{
-	x86_io_apic_ops.modify(apic, reg, value);
-}
-
-extern void io_apic_eoi(unsigned int apic, unsigned int vector);
-
 extern void setup_IO_APIC(void);
 extern void enable_IO_APIC(void);
 extern void disable_IO_APIC(void);
@@ -253,8 +205,12 @@ static inline int arch_early_ioapic_init(void) { return 0; }
 static inline void print_IO_APICs(void) {}
 #define gsi_top (NR_IRQS_LEGACY)
 static inline int mp_find_ioapic(u32 gsi) { return 0; }
-static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
-static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
+static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags,
+				    struct irq_alloc_info *info)
+{
+	return gsi;
+}
+
 static inline void mp_unmap_irq(int irq) { }
 
 static inline int save_ioapic_entries(void)
@@ -268,17 +224,11 @@ static inline int restore_ioapic_entries(void)
 	return -ENOMEM;
 }
 
-static inline void mp_save_irq(struct mpc_intsrc *m) { };
+static inline void mp_save_irq(struct mpc_intsrc *m) { }
 static inline void disable_ioapic_support(void) { }
-#define native_io_apic_init_mappings	NULL
+static inline void io_apic_init_mappings(void) { }
 #define native_io_apic_read		NULL
-#define native_io_apic_write		NULL
-#define native_io_apic_modify		NULL
 #define native_disable_io_apic		NULL
-#define native_io_apic_print_entries	NULL
-#define native_ioapic_set_affinity	NULL
-#define native_setup_ioapic_entry	NULL
-#define native_eoi_ioapic_pin		NULL
 
 static inline void setup_IO_APIC(void) { }
 static inline void enable_IO_APIC(void) { }
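A hedged sketch of the reworked GSI-to-IRQ path using the new helpers (the GSI value and attributes are illustrative):

static int map_acpi_gsi_sketch(u32 gsi)
{
	struct irq_alloc_info info;

	ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 1); /* level, active low */
	return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK,
				 &info);
}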
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index a80cbb88ea91..8008d06581c7 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -30,6 +30,10 @@ extern void fixup_irqs(void);
 extern void irq_force_complete_move(int);
 #endif
 
+#ifdef CONFIG_HAVE_KVM
+extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+#endif
+
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
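A hedged sketch of how the new hook might be used (handler name illustrative): a consumer such as KVM registers a wakeup callback at init and clears it on teardown.

static void pi_wakeup_handler_sketch(void)
{
	/* wake vCPUs parked on this CPU's posted-interrupt wakeup list */
}

static void hook_wakeup_sketch(void)
{
	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler_sketch);
}

static void unhook_wakeup_sketch(void)
{
	kvm_set_posted_intr_wakeup_handler(NULL);
}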
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 6224d316c405..046c7fb1ca43 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -22,84 +22,72 @@
22#ifndef __X86_IRQ_REMAPPING_H 22#ifndef __X86_IRQ_REMAPPING_H
23#define __X86_IRQ_REMAPPING_H 23#define __X86_IRQ_REMAPPING_H
24 24
25#include <asm/irqdomain.h>
26#include <asm/hw_irq.h>
25#include <asm/io_apic.h> 27#include <asm/io_apic.h>
26 28
27struct IO_APIC_route_entry;
28struct io_apic_irq_attr;
29struct irq_chip;
30struct msi_msg; 29struct msi_msg;
31struct pci_dev; 30struct irq_alloc_info;
32struct irq_cfg; 31
32enum irq_remap_cap {
33 IRQ_POSTING_CAP = 0,
34};
33 35
34#ifdef CONFIG_IRQ_REMAP 36#ifdef CONFIG_IRQ_REMAP
35 37
38extern bool irq_remapping_cap(enum irq_remap_cap cap);
36extern void set_irq_remapping_broken(void); 39extern void set_irq_remapping_broken(void);
37extern int irq_remapping_prepare(void); 40extern int irq_remapping_prepare(void);
38extern int irq_remapping_enable(void); 41extern int irq_remapping_enable(void);
39extern void irq_remapping_disable(void); 42extern void irq_remapping_disable(void);
40extern int irq_remapping_reenable(int); 43extern int irq_remapping_reenable(int);
41extern int irq_remap_enable_fault_handling(void); 44extern int irq_remap_enable_fault_handling(void);
42extern int setup_ioapic_remapped_entry(int irq,
43 struct IO_APIC_route_entry *entry,
44 unsigned int destination,
45 int vector,
46 struct io_apic_irq_attr *attr);
47extern void free_remapped_irq(int irq);
48extern void compose_remapped_msi_msg(struct pci_dev *pdev,
49 unsigned int irq, unsigned int dest,
50 struct msi_msg *msg, u8 hpet_id);
51extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
52extern void panic_if_irq_remap(const char *msg); 45extern void panic_if_irq_remap(const char *msg);
53extern bool setup_remapped_irq(int irq,
54 struct irq_cfg *cfg,
55 struct irq_chip *chip);
56 46
57void irq_remap_modify_chip_defaults(struct irq_chip *chip); 47extern struct irq_domain *
48irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info);
49extern struct irq_domain *
50irq_remapping_get_irq_domain(struct irq_alloc_info *info);
51
 52/* Create PCI MSI/MSI-X irqdomain, use @parent as the parent irqdomain. */
53extern struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent);
54
55/* Get parent irqdomain for interrupt remapping irqdomain */
56static inline struct irq_domain *arch_get_ir_parent_domain(void)
57{
58 return x86_vector_domain;
59}
60
61struct vcpu_data {
62 u64 pi_desc_addr; /* Physical address of PI Descriptor */
63 u32 vector; /* Guest vector of the interrupt */
64};
58 65
59#else /* CONFIG_IRQ_REMAP */ 66#else /* CONFIG_IRQ_REMAP */
60 67
 68static inline bool irq_remapping_cap(enum irq_remap_cap cap) { return false; }
61static inline void set_irq_remapping_broken(void) { } 69static inline void set_irq_remapping_broken(void) { }
62static inline int irq_remapping_prepare(void) { return -ENODEV; } 70static inline int irq_remapping_prepare(void) { return -ENODEV; }
63static inline int irq_remapping_enable(void) { return -ENODEV; } 71static inline int irq_remapping_enable(void) { return -ENODEV; }
64static inline void irq_remapping_disable(void) { } 72static inline void irq_remapping_disable(void) { }
65static inline int irq_remapping_reenable(int eim) { return -ENODEV; } 73static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
66static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; } 74static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
67static inline int setup_ioapic_remapped_entry(int irq,
68 struct IO_APIC_route_entry *entry,
69 unsigned int destination,
70 int vector,
71 struct io_apic_irq_attr *attr)
72{
73 return -ENODEV;
74}
75static inline void free_remapped_irq(int irq) { }
76static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
77 unsigned int irq, unsigned int dest,
78 struct msi_msg *msg, u8 hpet_id)
79{
80}
81static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
82{
83 return -ENODEV;
84}
85 75
86static inline void panic_if_irq_remap(const char *msg) 76static inline void panic_if_irq_remap(const char *msg)
87{ 77{
88} 78}
89 79
90static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) 80static inline struct irq_domain *
81irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
91{ 82{
83 return NULL;
92} 84}
93 85
94static inline bool setup_remapped_irq(int irq, 86static inline struct irq_domain *
95 struct irq_cfg *cfg, 87irq_remapping_get_irq_domain(struct irq_alloc_info *info)
96 struct irq_chip *chip)
97{ 88{
98 return false; 89 return NULL;
99} 90}
100#endif /* CONFIG_IRQ_REMAP */
101
102#define dmar_alloc_hwirq() irq_alloc_hwirq(-1)
103#define dmar_free_hwirq irq_free_hwirq
104 91
92#endif /* CONFIG_IRQ_REMAP */
105#endif /* __X86_IRQ_REMAPPING_H */ 93#endif /* __X86_IRQ_REMAPPING_H */
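
With the conversion to hierarchical irqdomains, callers discover the remapping driver through domain lookups instead of the removed per-operation hooks. A sketch of how the new queries compose, assuming the caller already holds a populated struct irq_alloc_info; the fallback logic is illustrative, not lifted from this patch:

    #include <linux/printk.h>
    #include <asm/irq_remapping.h>

    static struct irq_domain *pick_domain(struct irq_alloc_info *info)
    {
            struct irq_domain *domain;

            /* NULL when no remapping unit covers the device in @info */
            domain = irq_remapping_get_ir_irq_domain(info);
            if (!domain)
                    domain = x86_vector_domain;  /* plain vector allocation */

            if (irq_remapping_cap(IRQ_POSTING_CAP))
                    pr_info("interrupt posting available\n");

            return domain;
    }
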
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 666c89ec4bd7..4c2d2eb2060a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -47,31 +47,12 @@
47#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 47#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
48 48
49#define IA32_SYSCALL_VECTOR 0x80 49#define IA32_SYSCALL_VECTOR 0x80
50#ifdef CONFIG_X86_32
51# define SYSCALL_VECTOR 0x80
52#endif
53 50
54/* 51/*
55 * Vectors 0x30-0x3f are used for ISA interrupts. 52 * Vectors 0x30-0x3f are used for ISA interrupts.
56 * round up to the next 16-vector boundary 53 * round up to the next 16-vector boundary
57 */ 54 */
58#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15) 55#define ISA_IRQ_VECTOR(irq) (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + irq)
59
60#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
61#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
62#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
63#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
64#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
65#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
66#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
67#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
68#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
69#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
70#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
71#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
72#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
73#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
74#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
75 56
76/* 57/*
77 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 58 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -102,21 +83,23 @@
102 */ 83 */
103#define X86_PLATFORM_IPI_VECTOR 0xf7 84#define X86_PLATFORM_IPI_VECTOR 0xf7
104 85
105/* Vector for KVM to deliver posted interrupt IPI */ 86#define POSTED_INTR_WAKEUP_VECTOR 0xf1
106#ifdef CONFIG_HAVE_KVM
107#define POSTED_INTR_VECTOR 0xf2
108#endif
109
110/* 87/*
111 * IRQ work vector: 88 * IRQ work vector:
112 */ 89 */
113#define IRQ_WORK_VECTOR 0xf6 90#define IRQ_WORK_VECTOR 0xf6
114 91
115#define UV_BAU_MESSAGE 0xf5 92#define UV_BAU_MESSAGE 0xf5
93#define DEFERRED_ERROR_VECTOR 0xf4
116 94
117/* Vector on which hypervisor callbacks will be delivered */ 95/* Vector on which hypervisor callbacks will be delivered */
118#define HYPERVISOR_CALLBACK_VECTOR 0xf3 96#define HYPERVISOR_CALLBACK_VECTOR 0xf3
119 97
98/* Vector for KVM to deliver posted interrupt IPI */
99#ifdef CONFIG_HAVE_KVM
100#define POSTED_INTR_VECTOR 0xf2
101#endif
102
120/* 103/*
121 * Local APIC timer IRQ vector is on a different priority level, 104 * Local APIC timer IRQ vector is on a different priority level,
122 * to work around the 'lost local interrupt if more than 2 IRQ 105 * to work around the 'lost local interrupt if more than 2 IRQ
@@ -155,18 +138,22 @@ static inline int invalid_vm86_irq(int irq)
155 * static arrays. 138 * static arrays.
156 */ 139 */
157 140
158#define NR_IRQS_LEGACY 16 141#define NR_IRQS_LEGACY 16
159 142
160#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) 143#define CPU_VECTOR_LIMIT (64 * NR_CPUS)
144#define IO_APIC_VECTOR_LIMIT (32 * MAX_IO_APICS)
161 145
162#ifdef CONFIG_X86_IO_APIC 146#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_PCI_MSI)
163# define CPU_VECTOR_LIMIT (64 * NR_CPUS) 147#define NR_IRQS \
164# define NR_IRQS \
165 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ 148 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
166 (NR_VECTORS + CPU_VECTOR_LIMIT) : \ 149 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
167 (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) 150 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
168#else /* !CONFIG_X86_IO_APIC: */ 151#elif defined(CONFIG_X86_IO_APIC)
169# define NR_IRQS NR_IRQS_LEGACY 152#define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
153#elif defined(CONFIG_PCI_MSI)
154#define NR_IRQS (NR_VECTORS + CPU_VECTOR_LIMIT)
155#else
156#define NR_IRQS NR_IRQS_LEGACY
170#endif 157#endif
171 158
172#endif /* _ASM_X86_IRQ_VECTORS_H */ 159#endif /* _ASM_X86_IRQ_VECTORS_H */
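
Collapsing the sixteen IRQn_VECTOR constants into ISA_IRQ_VECTOR(irq) keeps the values unchanged: with FIRST_EXTERNAL_VECTOR at 0x20 in this tree, the legacy ISA block still starts at vector 0x30. The reworked NR_IRQS block likewise now sizes the IRQ space for CONFIG_PCI_MSI even without an IO-APIC. A compile-time sanity sketch of the vector arithmetic (the wrapper function is only scaffolding for BUILD_BUG_ON):

    #include <linux/bug.h>
    #include <asm/irq_vectors.h>

    static void check_isa_vectors(void)
    {
            /* ((0x20 + 16) & ~15) == 0x30, the old IRQ0_VECTOR */
            BUILD_BUG_ON(ISA_IRQ_VECTOR(0) != 0x30);
            /* ... and the old IRQ15_VECTOR */
            BUILD_BUG_ON(ISA_IRQ_VECTOR(15) != 0x3f);
    }
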
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
new file mode 100644
index 000000000000..d26075b52885
--- /dev/null
+++ b/arch/x86/include/asm/irqdomain.h
@@ -0,0 +1,63 @@
1#ifndef _ASM_IRQDOMAIN_H
2#define _ASM_IRQDOMAIN_H
3
4#include <linux/irqdomain.h>
5#include <asm/hw_irq.h>
6
7#ifdef CONFIG_X86_LOCAL_APIC
8enum {
9 /* Allocate contiguous CPU vectors */
10 X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1,
11};
12
13extern struct irq_domain *x86_vector_domain;
14
15extern void init_irq_alloc_info(struct irq_alloc_info *info,
16 const struct cpumask *mask);
17extern void copy_irq_alloc_info(struct irq_alloc_info *dst,
18 struct irq_alloc_info *src);
19#endif /* CONFIG_X86_LOCAL_APIC */
20
21#ifdef CONFIG_X86_IO_APIC
22struct device_node;
23struct irq_data;
24
25enum ioapic_domain_type {
26 IOAPIC_DOMAIN_INVALID,
27 IOAPIC_DOMAIN_LEGACY,
28 IOAPIC_DOMAIN_STRICT,
29 IOAPIC_DOMAIN_DYNAMIC,
30};
31
32struct ioapic_domain_cfg {
33 enum ioapic_domain_type type;
34 const struct irq_domain_ops *ops;
35 struct device_node *dev;
36};
37
38extern const struct irq_domain_ops mp_ioapic_irqdomain_ops;
39
40extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
41 unsigned int nr_irqs, void *arg);
42extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
43 unsigned int nr_irqs);
44extern void mp_irqdomain_activate(struct irq_domain *domain,
45 struct irq_data *irq_data);
46extern void mp_irqdomain_deactivate(struct irq_domain *domain,
47 struct irq_data *irq_data);
48extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
49#endif /* CONFIG_X86_IO_APIC */
50
51#ifdef CONFIG_PCI_MSI
52extern void arch_init_msi_domain(struct irq_domain *domain);
53#else
54static inline void arch_init_msi_domain(struct irq_domain *domain) { }
55#endif
56
57#ifdef CONFIG_HT_IRQ
58extern void arch_init_htirq_domain(struct irq_domain *domain);
59#else
60static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
61#endif
62
63#endif
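
The new header gathers the x86 hierarchical-irqdomain glue in one place. A sketch of the two typical uses, with illustrative values: an IO-APIC registration config built around the exported mp_ioapic_irqdomain_ops, and an allocation through a domain via init_irq_alloc_info(); neither snippet is taken from this patch:

    #include <linux/irqdomain.h>
    #include <asm/irqdomain.h>

    /* Config an IO-APIC driver might hand to the registration code. */
    static struct ioapic_domain_cfg cfg = {
            .type   = IOAPIC_DOMAIN_DYNAMIC,
            .ops    = &mp_ioapic_irqdomain_ops,
    };

    static int alloc_one(struct irq_domain *domain)
    {
            struct irq_alloc_info info;

            init_irq_alloc_info(&info, NULL);    /* NULL: no affinity hint */
            /* allocate one interrupt through the hierarchy */
            return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
    }
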
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 1f5a86d518db..982dfc3679ad 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -17,11 +17,16 @@
17#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) 17#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
18#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 18#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
19#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */ 19#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
20#define MCG_LMCE_P (1ULL<<27) /* Local machine check supported */
20 21
21/* MCG_STATUS register defines */ 22/* MCG_STATUS register defines */
22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 23#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 24#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 25#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
26#define MCG_STATUS_LMCES (1ULL<<3) /* LMCE signaled */
27
28/* MCG_EXT_CTL register defines */
29#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */
25 30
26/* MCi_STATUS register defines */ 31/* MCi_STATUS register defines */
27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 32#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
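
The new bits describe Local Machine Check Exceptions (LMCE): MCG_LMCE_P in MCG_CAP advertises the feature, and MCG_EXT_CTL_LMCE_EN in the new MSR_IA32_MCG_EXT_CTL switches it on. A hedged sketch of the enable sequence these definitions support; in practice firmware must also have set FEATURE_CONTROL_LMCE first, and that check is elided here:

    #include <asm/mce.h>
    #include <asm/msr.h>

    static void lmce_try_enable(void)
    {
            u64 cap, ctl;

            rdmsrl(MSR_IA32_MCG_CAP, cap);
            if (!(cap & MCG_LMCE_P))
                    return;                     /* not advertised */

            rdmsrl(MSR_IA32_MCG_EXT_CTL, ctl);
            wrmsrl(MSR_IA32_MCG_EXT_CTL, ctl | MCG_EXT_CTL_LMCE_EN);
    }
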
@@ -104,6 +109,7 @@ struct mce_log {
104struct mca_config { 109struct mca_config {
105 bool dont_log_ce; 110 bool dont_log_ce;
106 bool cmci_disabled; 111 bool cmci_disabled;
112 bool lmce_disabled;
107 bool ignore_ce; 113 bool ignore_ce;
108 bool disabled; 114 bool disabled;
109 bool ser; 115 bool ser;
@@ -117,8 +123,19 @@ struct mca_config {
117}; 123};
118 124
119struct mce_vendor_flags { 125struct mce_vendor_flags {
120 __u64 overflow_recov : 1, /* cpuid_ebx(80000007) */ 126 /*
121 __reserved_0 : 63; 127 * overflow recovery cpuid bit indicates that overflow
128 * conditions are not fatal
129 */
130 __u64 overflow_recov : 1,
131
132 /*
133 * SUCCOR stands for S/W UnCorrectable error COntainment
134 * and Recovery. It indicates support for data poisoning
135 * in HW and deferred error interrupts.
136 */
137 succor : 1,
138 __reserved_0 : 62;
122}; 139};
123extern struct mce_vendor_flags mce_flags; 140extern struct mce_vendor_flags mce_flags;
124 141
@@ -168,12 +185,16 @@ void cmci_clear(void);
168void cmci_reenable(void); 185void cmci_reenable(void);
169void cmci_rediscover(void); 186void cmci_rediscover(void);
170void cmci_recheck(void); 187void cmci_recheck(void);
188void lmce_clear(void);
189void lmce_enable(void);
171#else 190#else
172static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } 191static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
173static inline void cmci_clear(void) {} 192static inline void cmci_clear(void) {}
174static inline void cmci_reenable(void) {} 193static inline void cmci_reenable(void) {}
175static inline void cmci_rediscover(void) {} 194static inline void cmci_rediscover(void) {}
176static inline void cmci_recheck(void) {} 195static inline void cmci_recheck(void) {}
196static inline void lmce_clear(void) {}
197static inline void lmce_enable(void) {}
177#endif 198#endif
178 199
179#ifdef CONFIG_X86_MCE_AMD 200#ifdef CONFIG_X86_MCE_AMD
@@ -223,6 +244,9 @@ void do_machine_check(struct pt_regs *, long);
223extern void (*mce_threshold_vector)(void); 244extern void (*mce_threshold_vector)(void);
224extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 245extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
225 246
247/* Deferred error interrupt handler */
248extern void (*deferred_error_int_vector)(void);
249
226/* 250/*
227 * Thermal handler 251 * Thermal handler
228 */ 252 */
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
new file mode 100644
index 000000000000..93724cc62177
--- /dev/null
+++ b/arch/x86/include/asm/msi.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_X86_MSI_H
2#define _ASM_X86_MSI_H
3#include <asm/hw_irq.h>
4
5typedef struct irq_alloc_info msi_alloc_info_t;
6
7#endif /* _ASM_X86_MSI_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
new file mode 100644
index 000000000000..9ebc3d009373
--- /dev/null
+++ b/arch/x86/include/asm/msr-index.h
@@ -0,0 +1,665 @@
1#ifndef _ASM_X86_MSR_INDEX_H
2#define _ASM_X86_MSR_INDEX_H
3
4/* CPU model specific register (MSR) numbers */
5
6/* x86-64 specific MSRs */
7#define MSR_EFER 0xc0000080 /* extended feature register */
8#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
9#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
10#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
11#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
12#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
13#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
14#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
15#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
16
17/* EFER bits: */
18#define _EFER_SCE 0 /* SYSCALL/SYSRET */
19#define _EFER_LME 8 /* Long mode enable */
20#define _EFER_LMA 10 /* Long mode active (read-only) */
21#define _EFER_NX 11 /* No execute enable */
22#define _EFER_SVME 12 /* Enable virtualization */
23#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
24#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
25
26#define EFER_SCE (1<<_EFER_SCE)
27#define EFER_LME (1<<_EFER_LME)
28#define EFER_LMA (1<<_EFER_LMA)
29#define EFER_NX (1<<_EFER_NX)
30#define EFER_SVME (1<<_EFER_SVME)
31#define EFER_LMSLE (1<<_EFER_LMSLE)
32#define EFER_FFXSR (1<<_EFER_FFXSR)
33
34/* Intel MSRs. Some also available on other CPUs */
35#define MSR_IA32_PERFCTR0 0x000000c1
36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd
38#define MSR_NHM_PLATFORM_INFO 0x000000ce
39
40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
41#define NHM_C3_AUTO_DEMOTE (1UL << 25)
42#define NHM_C1_AUTO_DEMOTE (1UL << 26)
43#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46
47#define MSR_PLATFORM_INFO 0x000000ce
48#define MSR_MTRRcap 0x000000fe
49#define MSR_IA32_BBL_CR_CTL 0x00000119
50#define MSR_IA32_BBL_CR_CTL3 0x0000011e
51
52#define MSR_IA32_SYSENTER_CS 0x00000174
53#define MSR_IA32_SYSENTER_ESP 0x00000175
54#define MSR_IA32_SYSENTER_EIP 0x00000176
55
56#define MSR_IA32_MCG_CAP 0x00000179
57#define MSR_IA32_MCG_STATUS 0x0000017a
58#define MSR_IA32_MCG_CTL 0x0000017b
59#define MSR_IA32_MCG_EXT_CTL 0x000004d0
60
61#define MSR_OFFCORE_RSP_0 0x000001a6
62#define MSR_OFFCORE_RSP_1 0x000001a7
63#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
64#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
65#define MSR_TURBO_RATIO_LIMIT 0x000001ad
66#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
67#define MSR_TURBO_RATIO_LIMIT2 0x000001af
68
69#define MSR_LBR_SELECT 0x000001c8
70#define MSR_LBR_TOS 0x000001c9
71#define MSR_LBR_NHM_FROM 0x00000680
72#define MSR_LBR_NHM_TO 0x000006c0
73#define MSR_LBR_CORE_FROM 0x00000040
74#define MSR_LBR_CORE_TO 0x00000060
75
76#define MSR_IA32_PEBS_ENABLE 0x000003f1
77#define MSR_IA32_DS_AREA 0x00000600
78#define MSR_IA32_PERF_CAPABILITIES 0x00000345
79#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
80
81#define MSR_IA32_RTIT_CTL 0x00000570
82#define RTIT_CTL_TRACEEN BIT(0)
83#define RTIT_CTL_OS BIT(2)
84#define RTIT_CTL_USR BIT(3)
85#define RTIT_CTL_CR3EN BIT(7)
86#define RTIT_CTL_TOPA BIT(8)
87#define RTIT_CTL_TSC_EN BIT(10)
88#define RTIT_CTL_DISRETC BIT(11)
89#define RTIT_CTL_BRANCH_EN BIT(13)
90#define MSR_IA32_RTIT_STATUS 0x00000571
91#define RTIT_STATUS_CONTEXTEN BIT(1)
92#define RTIT_STATUS_TRIGGEREN BIT(2)
93#define RTIT_STATUS_ERROR BIT(4)
94#define RTIT_STATUS_STOPPED BIT(5)
95#define MSR_IA32_RTIT_CR3_MATCH 0x00000572
96#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560
97#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561
98
99#define MSR_MTRRfix64K_00000 0x00000250
100#define MSR_MTRRfix16K_80000 0x00000258
101#define MSR_MTRRfix16K_A0000 0x00000259
102#define MSR_MTRRfix4K_C0000 0x00000268
103#define MSR_MTRRfix4K_C8000 0x00000269
104#define MSR_MTRRfix4K_D0000 0x0000026a
105#define MSR_MTRRfix4K_D8000 0x0000026b
106#define MSR_MTRRfix4K_E0000 0x0000026c
107#define MSR_MTRRfix4K_E8000 0x0000026d
108#define MSR_MTRRfix4K_F0000 0x0000026e
109#define MSR_MTRRfix4K_F8000 0x0000026f
110#define MSR_MTRRdefType 0x000002ff
111
112#define MSR_IA32_CR_PAT 0x00000277
113
114#define MSR_IA32_DEBUGCTLMSR 0x000001d9
115#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
116#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
117#define MSR_IA32_LASTINTFROMIP 0x000001dd
118#define MSR_IA32_LASTINTTOIP 0x000001de
119
120/* DEBUGCTLMSR bits (others vary by model): */
121#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
122#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
123#define DEBUGCTLMSR_TR (1UL << 6)
124#define DEBUGCTLMSR_BTS (1UL << 7)
125#define DEBUGCTLMSR_BTINT (1UL << 8)
126#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
127#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
128#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
129
130#define MSR_IA32_POWER_CTL 0x000001fc
131
132#define MSR_IA32_MC0_CTL 0x00000400
133#define MSR_IA32_MC0_STATUS 0x00000401
134#define MSR_IA32_MC0_ADDR 0x00000402
135#define MSR_IA32_MC0_MISC 0x00000403
136
137/* C-state Residency Counters */
138#define MSR_PKG_C3_RESIDENCY 0x000003f8
139#define MSR_PKG_C6_RESIDENCY 0x000003f9
140#define MSR_PKG_C7_RESIDENCY 0x000003fa
141#define MSR_CORE_C3_RESIDENCY 0x000003fc
142#define MSR_CORE_C6_RESIDENCY 0x000003fd
143#define MSR_CORE_C7_RESIDENCY 0x000003fe
144#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff
145#define MSR_PKG_C2_RESIDENCY 0x0000060d
146#define MSR_PKG_C8_RESIDENCY 0x00000630
147#define MSR_PKG_C9_RESIDENCY 0x00000631
148#define MSR_PKG_C10_RESIDENCY 0x00000632
149
150/* Run Time Average Power Limiting (RAPL) Interface */
151
152#define MSR_RAPL_POWER_UNIT 0x00000606
153
154#define MSR_PKG_POWER_LIMIT 0x00000610
155#define MSR_PKG_ENERGY_STATUS 0x00000611
156#define MSR_PKG_PERF_STATUS 0x00000613
157#define MSR_PKG_POWER_INFO 0x00000614
158
159#define MSR_DRAM_POWER_LIMIT 0x00000618
160#define MSR_DRAM_ENERGY_STATUS 0x00000619
161#define MSR_DRAM_PERF_STATUS 0x0000061b
162#define MSR_DRAM_POWER_INFO 0x0000061c
163
164#define MSR_PP0_POWER_LIMIT 0x00000638
165#define MSR_PP0_ENERGY_STATUS 0x00000639
166#define MSR_PP0_POLICY 0x0000063a
167#define MSR_PP0_PERF_STATUS 0x0000063b
168
169#define MSR_PP1_POWER_LIMIT 0x00000640
170#define MSR_PP1_ENERGY_STATUS 0x00000641
171#define MSR_PP1_POLICY 0x00000642
172
173#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
174#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
175#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
176#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
177
178#define MSR_CORE_C1_RES 0x00000660
179
180#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
181#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
182
183#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
184#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
185#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
186
187/* Hardware P state interface */
188#define MSR_PPERF 0x0000064e
189#define MSR_PERF_LIMIT_REASONS 0x0000064f
190#define MSR_PM_ENABLE 0x00000770
191#define MSR_HWP_CAPABILITIES 0x00000771
192#define MSR_HWP_REQUEST_PKG 0x00000772
193#define MSR_HWP_INTERRUPT 0x00000773
194#define MSR_HWP_REQUEST 0x00000774
195#define MSR_HWP_STATUS 0x00000777
196
197/* CPUID.6.EAX */
198#define HWP_BASE_BIT (1<<7)
199#define HWP_NOTIFICATIONS_BIT (1<<8)
200#define HWP_ACTIVITY_WINDOW_BIT (1<<9)
201#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
202#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
203
204/* IA32_HWP_CAPABILITIES */
205#define HWP_HIGHEST_PERF(x) (x & 0xff)
206#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >> 8)
207#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >> 16)
208#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >> 24)
209
210/* IA32_HWP_REQUEST */
211#define HWP_MIN_PERF(x) (x & 0xff)
212#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
213#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
214#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24)
215#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32)
216#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42)
217
218/* IA32_HWP_STATUS */
219#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
220#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4)
221
222/* IA32_HWP_INTERRUPT */
223#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1)
224#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2)
225
226#define MSR_AMD64_MC0_MASK 0xc0010044
227
228#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
229#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
230#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
231#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
232
233#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
234
235/* These are consecutive and not in the normal 4er MCE bank block */
236#define MSR_IA32_MC0_CTL2 0x00000280
237#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
238
239#define MSR_P6_PERFCTR0 0x000000c1
240#define MSR_P6_PERFCTR1 0x000000c2
241#define MSR_P6_EVNTSEL0 0x00000186
242#define MSR_P6_EVNTSEL1 0x00000187
243
244#define MSR_KNC_PERFCTR0 0x00000020
245#define MSR_KNC_PERFCTR1 0x00000021
246#define MSR_KNC_EVNTSEL0 0x00000028
247#define MSR_KNC_EVNTSEL1 0x00000029
248
249/* Alternative perfctr range with full access. */
250#define MSR_IA32_PMC0 0x000004c1
251
252/* AMD64 MSRs. Not complete. See the architecture manual for a more
253 complete list. */
254
255#define MSR_AMD64_PATCH_LEVEL 0x0000008b
256#define MSR_AMD64_TSC_RATIO 0xc0000104
257#define MSR_AMD64_NB_CFG 0xc001001f
258#define MSR_AMD64_PATCH_LOADER 0xc0010020
259#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
260#define MSR_AMD64_OSVW_STATUS 0xc0010141
261#define MSR_AMD64_LS_CFG 0xc0011020
262#define MSR_AMD64_DC_CFG 0xc0011022
263#define MSR_AMD64_BU_CFG2 0xc001102a
264#define MSR_AMD64_IBSFETCHCTL 0xc0011030
265#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
266#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
267#define MSR_AMD64_IBSFETCH_REG_COUNT 3
268#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
269#define MSR_AMD64_IBSOPCTL 0xc0011033
270#define MSR_AMD64_IBSOPRIP 0xc0011034
271#define MSR_AMD64_IBSOPDATA 0xc0011035
272#define MSR_AMD64_IBSOPDATA2 0xc0011036
273#define MSR_AMD64_IBSOPDATA3 0xc0011037
274#define MSR_AMD64_IBSDCLINAD 0xc0011038
275#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
276#define MSR_AMD64_IBSOP_REG_COUNT 7
277#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
278#define MSR_AMD64_IBSCTL 0xc001103a
279#define MSR_AMD64_IBSBRTARGET 0xc001103b
280#define MSR_AMD64_IBSOPDATA4 0xc001103d
281#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
282
283/* Fam 16h MSRs */
284#define MSR_F16H_L2I_PERF_CTL 0xc0010230
285#define MSR_F16H_L2I_PERF_CTR 0xc0010231
286#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
287#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
288#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
289#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
290
291/* Fam 15h MSRs */
292#define MSR_F15H_PERF_CTL 0xc0010200
293#define MSR_F15H_PERF_CTR 0xc0010201
294#define MSR_F15H_NB_PERF_CTL 0xc0010240
295#define MSR_F15H_NB_PERF_CTR 0xc0010241
296
297/* Fam 10h MSRs */
298#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
299#define FAM10H_MMIO_CONF_ENABLE (1<<0)
300#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
301#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
302#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
303#define FAM10H_MMIO_CONF_BASE_SHIFT 20
304#define MSR_FAM10H_NODE_ID 0xc001100c
305
306/* K8 MSRs */
307#define MSR_K8_TOP_MEM1 0xc001001a
308#define MSR_K8_TOP_MEM2 0xc001001d
309#define MSR_K8_SYSCFG 0xc0010010
310#define MSR_K8_INT_PENDING_MSG 0xc0010055
311/* C1E active bits in int pending message */
312#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
313#define MSR_K8_TSEG_ADDR 0xc0010112
314#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
315#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
316#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
317
318/* K7 MSRs */
319#define MSR_K7_EVNTSEL0 0xc0010000
320#define MSR_K7_PERFCTR0 0xc0010004
321#define MSR_K7_EVNTSEL1 0xc0010001
322#define MSR_K7_PERFCTR1 0xc0010005
323#define MSR_K7_EVNTSEL2 0xc0010002
324#define MSR_K7_PERFCTR2 0xc0010006
325#define MSR_K7_EVNTSEL3 0xc0010003
326#define MSR_K7_PERFCTR3 0xc0010007
327#define MSR_K7_CLK_CTL 0xc001001b
328#define MSR_K7_HWCR 0xc0010015
329#define MSR_K7_FID_VID_CTL 0xc0010041
330#define MSR_K7_FID_VID_STATUS 0xc0010042
331
332/* K6 MSRs */
333#define MSR_K6_WHCR 0xc0000082
334#define MSR_K6_UWCCR 0xc0000085
335#define MSR_K6_EPMR 0xc0000086
336#define MSR_K6_PSOR 0xc0000087
337#define MSR_K6_PFIR 0xc0000088
338
339/* Centaur-Hauls/IDT defined MSRs. */
340#define MSR_IDT_FCR1 0x00000107
341#define MSR_IDT_FCR2 0x00000108
342#define MSR_IDT_FCR3 0x00000109
343#define MSR_IDT_FCR4 0x0000010a
344
345#define MSR_IDT_MCR0 0x00000110
346#define MSR_IDT_MCR1 0x00000111
347#define MSR_IDT_MCR2 0x00000112
348#define MSR_IDT_MCR3 0x00000113
349#define MSR_IDT_MCR4 0x00000114
350#define MSR_IDT_MCR5 0x00000115
351#define MSR_IDT_MCR6 0x00000116
352#define MSR_IDT_MCR7 0x00000117
353#define MSR_IDT_MCR_CTRL 0x00000120
354
355/* VIA Cyrix defined MSRs*/
356#define MSR_VIA_FCR 0x00001107
357#define MSR_VIA_LONGHAUL 0x0000110a
358#define MSR_VIA_RNG 0x0000110b
359#define MSR_VIA_BCR2 0x00001147
360
361/* Transmeta defined MSRs */
362#define MSR_TMTA_LONGRUN_CTRL 0x80868010
363#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
364#define MSR_TMTA_LRTI_READOUT 0x80868018
365#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
366
367/* Intel defined MSRs. */
368#define MSR_IA32_P5_MC_ADDR 0x00000000
369#define MSR_IA32_P5_MC_TYPE 0x00000001
370#define MSR_IA32_TSC 0x00000010
371#define MSR_IA32_PLATFORM_ID 0x00000017
372#define MSR_IA32_EBL_CR_POWERON 0x0000002a
373#define MSR_EBC_FREQUENCY_ID 0x0000002c
374#define MSR_SMI_COUNT 0x00000034
375#define MSR_IA32_FEATURE_CONTROL 0x0000003a
376#define MSR_IA32_TSC_ADJUST 0x0000003b
377#define MSR_IA32_BNDCFGS 0x00000d90
378
379#define MSR_IA32_XSS 0x00000da0
380
381#define FEATURE_CONTROL_LOCKED (1<<0)
382#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
383#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
384#define FEATURE_CONTROL_LMCE (1<<20)
385
386#define MSR_IA32_APICBASE 0x0000001b
387#define MSR_IA32_APICBASE_BSP (1<<8)
388#define MSR_IA32_APICBASE_ENABLE (1<<11)
389#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
390
391#define MSR_IA32_TSCDEADLINE 0x000006e0
392
393#define MSR_IA32_UCODE_WRITE 0x00000079
394#define MSR_IA32_UCODE_REV 0x0000008b
395
396#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
397#define MSR_IA32_SMBASE 0x0000009e
398
399#define MSR_IA32_PERF_STATUS 0x00000198
400#define MSR_IA32_PERF_CTL 0x00000199
401#define INTEL_PERF_CTL_MASK 0xffff
402#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
403#define MSR_AMD_PERF_STATUS 0xc0010063
404#define MSR_AMD_PERF_CTL 0xc0010062
405
406#define MSR_IA32_MPERF 0x000000e7
407#define MSR_IA32_APERF 0x000000e8
408
409#define MSR_IA32_THERM_CONTROL 0x0000019a
410#define MSR_IA32_THERM_INTERRUPT 0x0000019b
411
412#define THERM_INT_HIGH_ENABLE (1 << 0)
413#define THERM_INT_LOW_ENABLE (1 << 1)
414#define THERM_INT_PLN_ENABLE (1 << 24)
415
416#define MSR_IA32_THERM_STATUS 0x0000019c
417
418#define THERM_STATUS_PROCHOT (1 << 0)
419#define THERM_STATUS_POWER_LIMIT (1 << 10)
420
421#define MSR_THERM2_CTL 0x0000019d
422
423#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
424
425#define MSR_IA32_MISC_ENABLE 0x000001a0
426
427#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
428
429#define MSR_MISC_PWR_MGMT 0x000001aa
430
431#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
432#define ENERGY_PERF_BIAS_PERFORMANCE 0
433#define ENERGY_PERF_BIAS_NORMAL 6
434#define ENERGY_PERF_BIAS_POWERSAVE 15
435
436#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
437
438#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
439#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
440
441#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
442
443#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
444#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
445#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
446
447/* Thermal Thresholds Support */
448#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
449#define THERM_SHIFT_THRESHOLD0 8
450#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0)
451#define THERM_INT_THRESHOLD1_ENABLE (1 << 23)
452#define THERM_SHIFT_THRESHOLD1 16
453#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1)
454#define THERM_STATUS_THRESHOLD0 (1 << 6)
455#define THERM_LOG_THRESHOLD0 (1 << 7)
456#define THERM_STATUS_THRESHOLD1 (1 << 8)
457#define THERM_LOG_THRESHOLD1 (1 << 9)
458
459/* MISC_ENABLE bits: architectural */
460#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0
461#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
462#define MSR_IA32_MISC_ENABLE_TCC_BIT 1
463#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
464#define MSR_IA32_MISC_ENABLE_EMON_BIT 7
465#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
466#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11
467#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
468#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12
469#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
470#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16
471#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
472#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
473#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
474#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
475#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
476#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
477#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
478#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
479#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
480
481/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
482#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2
483#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
484#define MSR_IA32_MISC_ENABLE_TM1_BIT 3
485#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
486#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4
487#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
488#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6
489#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
490#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8
491#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
492#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9
493#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
494#define MSR_IA32_MISC_ENABLE_FERR_BIT 10
495#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
496#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10
497#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
498#define MSR_IA32_MISC_ENABLE_TM2_BIT 13
499#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
500#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19
501#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
502#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20
503#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
504#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24
505#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
506#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37
507#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
508#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38
509#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
510#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39
511#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
512
513#define MSR_IA32_TSC_DEADLINE 0x000006E0
514
515/* P4/Xeon+ specific */
516#define MSR_IA32_MCG_EAX 0x00000180
517#define MSR_IA32_MCG_EBX 0x00000181
518#define MSR_IA32_MCG_ECX 0x00000182
519#define MSR_IA32_MCG_EDX 0x00000183
520#define MSR_IA32_MCG_ESI 0x00000184
521#define MSR_IA32_MCG_EDI 0x00000185
522#define MSR_IA32_MCG_EBP 0x00000186
523#define MSR_IA32_MCG_ESP 0x00000187
524#define MSR_IA32_MCG_EFLAGS 0x00000188
525#define MSR_IA32_MCG_EIP 0x00000189
526#define MSR_IA32_MCG_RESERVED 0x0000018a
527
528/* Pentium IV performance counter MSRs */
529#define MSR_P4_BPU_PERFCTR0 0x00000300
530#define MSR_P4_BPU_PERFCTR1 0x00000301
531#define MSR_P4_BPU_PERFCTR2 0x00000302
532#define MSR_P4_BPU_PERFCTR3 0x00000303
533#define MSR_P4_MS_PERFCTR0 0x00000304
534#define MSR_P4_MS_PERFCTR1 0x00000305
535#define MSR_P4_MS_PERFCTR2 0x00000306
536#define MSR_P4_MS_PERFCTR3 0x00000307
537#define MSR_P4_FLAME_PERFCTR0 0x00000308
538#define MSR_P4_FLAME_PERFCTR1 0x00000309
539#define MSR_P4_FLAME_PERFCTR2 0x0000030a
540#define MSR_P4_FLAME_PERFCTR3 0x0000030b
541#define MSR_P4_IQ_PERFCTR0 0x0000030c
542#define MSR_P4_IQ_PERFCTR1 0x0000030d
543#define MSR_P4_IQ_PERFCTR2 0x0000030e
544#define MSR_P4_IQ_PERFCTR3 0x0000030f
545#define MSR_P4_IQ_PERFCTR4 0x00000310
546#define MSR_P4_IQ_PERFCTR5 0x00000311
547#define MSR_P4_BPU_CCCR0 0x00000360
548#define MSR_P4_BPU_CCCR1 0x00000361
549#define MSR_P4_BPU_CCCR2 0x00000362
550#define MSR_P4_BPU_CCCR3 0x00000363
551#define MSR_P4_MS_CCCR0 0x00000364
552#define MSR_P4_MS_CCCR1 0x00000365
553#define MSR_P4_MS_CCCR2 0x00000366
554#define MSR_P4_MS_CCCR3 0x00000367
555#define MSR_P4_FLAME_CCCR0 0x00000368
556#define MSR_P4_FLAME_CCCR1 0x00000369
557#define MSR_P4_FLAME_CCCR2 0x0000036a
558#define MSR_P4_FLAME_CCCR3 0x0000036b
559#define MSR_P4_IQ_CCCR0 0x0000036c
560#define MSR_P4_IQ_CCCR1 0x0000036d
561#define MSR_P4_IQ_CCCR2 0x0000036e
562#define MSR_P4_IQ_CCCR3 0x0000036f
563#define MSR_P4_IQ_CCCR4 0x00000370
564#define MSR_P4_IQ_CCCR5 0x00000371
565#define MSR_P4_ALF_ESCR0 0x000003ca
566#define MSR_P4_ALF_ESCR1 0x000003cb
567#define MSR_P4_BPU_ESCR0 0x000003b2
568#define MSR_P4_BPU_ESCR1 0x000003b3
569#define MSR_P4_BSU_ESCR0 0x000003a0
570#define MSR_P4_BSU_ESCR1 0x000003a1
571#define MSR_P4_CRU_ESCR0 0x000003b8
572#define MSR_P4_CRU_ESCR1 0x000003b9
573#define MSR_P4_CRU_ESCR2 0x000003cc
574#define MSR_P4_CRU_ESCR3 0x000003cd
575#define MSR_P4_CRU_ESCR4 0x000003e0
576#define MSR_P4_CRU_ESCR5 0x000003e1
577#define MSR_P4_DAC_ESCR0 0x000003a8
578#define MSR_P4_DAC_ESCR1 0x000003a9
579#define MSR_P4_FIRM_ESCR0 0x000003a4
580#define MSR_P4_FIRM_ESCR1 0x000003a5
581#define MSR_P4_FLAME_ESCR0 0x000003a6
582#define MSR_P4_FLAME_ESCR1 0x000003a7
583#define MSR_P4_FSB_ESCR0 0x000003a2
584#define MSR_P4_FSB_ESCR1 0x000003a3
585#define MSR_P4_IQ_ESCR0 0x000003ba
586#define MSR_P4_IQ_ESCR1 0x000003bb
587#define MSR_P4_IS_ESCR0 0x000003b4
588#define MSR_P4_IS_ESCR1 0x000003b5
589#define MSR_P4_ITLB_ESCR0 0x000003b6
590#define MSR_P4_ITLB_ESCR1 0x000003b7
591#define MSR_P4_IX_ESCR0 0x000003c8
592#define MSR_P4_IX_ESCR1 0x000003c9
593#define MSR_P4_MOB_ESCR0 0x000003aa
594#define MSR_P4_MOB_ESCR1 0x000003ab
595#define MSR_P4_MS_ESCR0 0x000003c0
596#define MSR_P4_MS_ESCR1 0x000003c1
597#define MSR_P4_PMH_ESCR0 0x000003ac
598#define MSR_P4_PMH_ESCR1 0x000003ad
599#define MSR_P4_RAT_ESCR0 0x000003bc
600#define MSR_P4_RAT_ESCR1 0x000003bd
601#define MSR_P4_SAAT_ESCR0 0x000003ae
602#define MSR_P4_SAAT_ESCR1 0x000003af
603#define MSR_P4_SSU_ESCR0 0x000003be
604#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
605
606#define MSR_P4_TBPU_ESCR0 0x000003c2
607#define MSR_P4_TBPU_ESCR1 0x000003c3
608#define MSR_P4_TC_ESCR0 0x000003c4
609#define MSR_P4_TC_ESCR1 0x000003c5
610#define MSR_P4_U2L_ESCR0 0x000003b0
611#define MSR_P4_U2L_ESCR1 0x000003b1
612
613#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
614
615/* Intel Core-based CPU performance counters */
616#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
617#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
618#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
619#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
620#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
621#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
622#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
623
624/* Geode defined MSRs */
625#define MSR_GEODE_BUSCONT_CONF0 0x00001900
626
627/* Intel VT MSRs */
628#define MSR_IA32_VMX_BASIC 0x00000480
629#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
630#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
631#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
632#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
633#define MSR_IA32_VMX_MISC 0x00000485
634#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
635#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
636#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
637#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
638#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
639#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
640#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
641#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
642#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
643#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
644#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
645#define MSR_IA32_VMX_VMFUNC 0x00000491
646
647/* VMX_BASIC bits and bitmasks */
648#define VMX_BASIC_VMCS_SIZE_SHIFT 32
649#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
650#define VMX_BASIC_64 0x0001000000000000LLU
651#define VMX_BASIC_MEM_TYPE_SHIFT 50
652#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
653#define VMX_BASIC_MEM_TYPE_WB 6LLU
654#define VMX_BASIC_INOUT 0x0040000000000000LLU
655
656/* MSR_IA32_VMX_MISC bits */
657#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
658#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
659/* AMD-V MSRs */
660
661#define MSR_VM_CR 0xc0010114
662#define MSR_VM_IGNNE 0xc0010115
663#define MSR_VM_HSAVE_PA 0xc0010117
664
665#endif /* _ASM_X86_MSR_INDEX_H */
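
Most of these constants are meant to be fed straight to the rdmsr/wrmsr helpers, and the per-bank MCE registers are reached by offsetting from bank 0. A small illustration (the bank number is arbitrary):

    #include <asm/msr.h>

    static u64 read_bank_status(int bank)
    {
            u64 status;

            /* MSR_IA32_MCx_STATUS(3) == 0x401 + 4*3 == 0x40d */
            rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
            return status;
    }
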
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index de36f22eb0b9..e6a707eb5081 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -1,13 +1,14 @@
1#ifndef _ASM_X86_MSR_H 1#ifndef _ASM_X86_MSR_H
2#define _ASM_X86_MSR_H 2#define _ASM_X86_MSR_H
3 3
4#include <uapi/asm/msr.h> 4#include "msr-index.h"
5 5
6#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
7 7
8#include <asm/asm.h> 8#include <asm/asm.h>
9#include <asm/errno.h> 9#include <asm/errno.h>
10#include <asm/cpumask.h> 10#include <asm/cpumask.h>
11#include <uapi/asm/msr.h>
11 12
12struct msr { 13struct msr {
13 union { 14 union {
@@ -205,8 +206,13 @@ do { \
205 206
206#endif /* !CONFIG_PARAVIRT */ 207#endif /* !CONFIG_PARAVIRT */
207 208
208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ 209/*
209 (u32)((val) >> 32)) 210 * 64-bit version of wrmsr_safe():
211 */
212static inline int wrmsrl_safe(u32 msr, u64 val)
213{
214 return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
215}
210 216
211#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) 217#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
212 218
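
Turning wrmsrl_safe() into a real inline gives msr and val proper types and evaluates val once, where the old macro expanded it twice. Usage is unchanged; a sketch:

    static void clear_tsc_adjust(void)
    {
            /* returns 0 on success, non-zero if the WRMSR faulted */
            if (wrmsrl_safe(MSR_IA32_TSC_ADJUST, 0))
                    pr_warn("MSR_IA32_TSC_ADJUST write faulted\n");
    }
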
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index f768f6298419..b94f6f64e23d 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -31,7 +31,7 @@
31 * arch_phys_wc_add and arch_phys_wc_del. 31 * arch_phys_wc_add and arch_phys_wc_del.
32 */ 32 */
33# ifdef CONFIG_MTRR 33# ifdef CONFIG_MTRR
34extern u8 mtrr_type_lookup(u64 addr, u64 end); 34extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
35extern void mtrr_save_fixed_ranges(void *); 35extern void mtrr_save_fixed_ranges(void *);
36extern void mtrr_save_state(void); 36extern void mtrr_save_state(void);
37extern int mtrr_add(unsigned long base, unsigned long size, 37extern int mtrr_add(unsigned long base, unsigned long size,
@@ -48,14 +48,13 @@ extern void mtrr_aps_init(void);
48extern void mtrr_bp_restore(void); 48extern void mtrr_bp_restore(void);
49extern int mtrr_trim_uncached_memory(unsigned long end_pfn); 49extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
50extern int amd_special_default_mtrr(void); 50extern int amd_special_default_mtrr(void);
51extern int phys_wc_to_mtrr_index(int handle);
52# else 51# else
53static inline u8 mtrr_type_lookup(u64 addr, u64 end) 52static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
54{ 53{
55 /* 54 /*
56 * Return no-MTRRs: 55 * Return no-MTRRs:
57 */ 56 */
58 return 0xff; 57 return MTRR_TYPE_INVALID;
59} 58}
60#define mtrr_save_fixed_ranges(arg) do {} while (0) 59#define mtrr_save_fixed_ranges(arg) do {} while (0)
61#define mtrr_save_state() do {} while (0) 60#define mtrr_save_state() do {} while (0)
@@ -84,10 +83,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
84static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) 83static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
85{ 84{
86} 85}
87static inline int phys_wc_to_mtrr_index(int handle)
88{
89 return -1;
90}
91 86
92#define mtrr_ap_init() do {} while (0) 87#define mtrr_ap_init() do {} while (0)
93#define mtrr_bp_init() do {} while (0) 88#define mtrr_bp_init() do {} while (0)
@@ -127,4 +122,8 @@ struct mtrr_gentry32 {
127 _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) 122 _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
128#endif /* CONFIG_COMPAT */ 123#endif /* CONFIG_COMPAT */
129 124
125/* Bit fields for enabled in struct mtrr_state_type */
126#define MTRR_STATE_MTRR_FIXED_ENABLED 0x01
127#define MTRR_STATE_MTRR_ENABLED 0x02
128
130#endif /* _ASM_X86_MTRR_H */ 129#endif /* _ASM_X86_MTRR_H */
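
mtrr_type_lookup() now also reports, via *uniform, whether the whole range is covered by a single MTRR type, and the !CONFIG_MTRR stub returns the named MTRR_TYPE_INVALID instead of a bare 0xff. A calling sketch:

    #include <asm/mtrr.h>

    static u8 range_type(u64 start, u64 end)
    {
            u8 uniform;
            u8 type = mtrr_type_lookup(start, end, &uniform);

            if (!uniform)
                    pr_debug("range %llx-%llx spans several MTRR types\n",
                             start, end);
            return type;    /* MTRR_TYPE_INVALID when MTRRs are off */
    }
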
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8766c7c395c2..a6b8f9fadb06 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -160,13 +160,14 @@ struct pv_cpu_ops {
160 u64 (*read_pmc)(int counter); 160 u64 (*read_pmc)(int counter);
161 unsigned long long (*read_tscp)(unsigned int *aux); 161 unsigned long long (*read_tscp)(unsigned int *aux);
162 162
163#ifdef CONFIG_X86_32
163 /* 164 /*
164 * Atomically enable interrupts and return to userspace. This 165 * Atomically enable interrupts and return to userspace. This
165 * is only ever used to return to 32-bit processes; in a 166 * is only used in 32-bit kernels. 64-bit kernels use
166 * 64-bit kernel, it's used for 32-on-64 compat processes, but 167 * usergs_sysret32 instead.
167 * never native 64-bit processes. (Jump, not call.)
168 */ 168 */
169 void (*irq_enable_sysexit)(void); 169 void (*irq_enable_sysexit)(void);
170#endif
170 171
171 /* 172 /*
172 * Switch to usermode gs and return to 64-bit usermode using 173 * Switch to usermode gs and return to 64-bit usermode using
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 91bc4ba95f91..ca6c228d5e62 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -4,14 +4,9 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable_types.h> 5#include <asm/pgtable_types.h>
6 6
7#ifdef CONFIG_X86_PAT 7bool pat_enabled(void);
8extern int pat_enabled;
9#else
10static const int pat_enabled;
11#endif
12
13extern void pat_init(void); 8extern void pat_init(void);
14void pat_init_cache_modes(void); 9void pat_init_cache_modes(u64);
15 10
16extern int reserve_memtype(u64 start, u64 end, 11extern int reserve_memtype(u64 start, u64 end,
17 enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); 12 enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
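
pat_enabled becomes a predicate function instead of an exported int, so !CONFIG_X86_PAT builds no longer need the dummy static const variable. Call sites only grow parentheses; a sketch of a typical guard:

    #include <asm/pat.h>

    static bool can_map_write_combine(void)
    {
            /* WC mappings require PAT; callers fall back to UC- without it */
            return pat_enabled();
    }
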
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 4e370a5d8117..d8c80ff32e8c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -96,15 +96,10 @@ extern void pci_iommu_alloc(void);
96#ifdef CONFIG_PCI_MSI 96#ifdef CONFIG_PCI_MSI
97/* implemented in arch/x86/kernel/apic/io_apic. */ 97/* implemented in arch/x86/kernel/apic/io_apic. */
98struct msi_desc; 98struct msi_desc;
99void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
100 unsigned int dest, struct msi_msg *msg, u8 hpet_id);
101int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 99int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
102void native_teardown_msi_irq(unsigned int irq); 100void native_teardown_msi_irq(unsigned int irq);
103void native_restore_msi_irqs(struct pci_dev *dev); 101void native_restore_msi_irqs(struct pci_dev *dev);
104int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
105 unsigned int irq_base, unsigned int irq_offset);
106#else 102#else
107#define native_compose_msi_msg NULL
108#define native_setup_msi_irqs NULL 103#define native_setup_msi_irqs NULL
109#define native_teardown_msi_irq NULL 104#define native_teardown_msi_irq NULL
110#endif 105#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index fe57e7a98839..2562e303405b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -398,11 +398,17 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
398 * requested memtype: 398 * requested memtype:
399 * - request is uncached, return cannot be write-back 399 * - request is uncached, return cannot be write-back
400 * - request is write-combine, return cannot be write-back 400 * - request is write-combine, return cannot be write-back
401 * - request is write-through, return cannot be write-back
402 * - request is write-through, return cannot be write-combine
401 */ 403 */
402 if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && 404 if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
403 new_pcm == _PAGE_CACHE_MODE_WB) || 405 new_pcm == _PAGE_CACHE_MODE_WB) ||
404 (pcm == _PAGE_CACHE_MODE_WC && 406 (pcm == _PAGE_CACHE_MODE_WC &&
405 new_pcm == _PAGE_CACHE_MODE_WB)) { 407 new_pcm == _PAGE_CACHE_MODE_WB) ||
408 (pcm == _PAGE_CACHE_MODE_WT &&
409 new_pcm == _PAGE_CACHE_MODE_WB) ||
410 (pcm == _PAGE_CACHE_MODE_WT &&
411 new_pcm == _PAGE_CACHE_MODE_WC)) {
406 return 0; 412 return 0;
407 } 413 }
408 414
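
The two added clauses extend the compatibility check to write-through requests: handing back a WB or WC mapping would be weaker than the WT the caller asked for, so both combinations are now rejected. A sketch of the resulting behaviour (the WARN_ON scaffolding is illustrative):

    static void wt_memtype_examples(u64 paddr, unsigned long size)
    {
            /* WT requested, WB offered: now disallowed (returns 0) */
            WARN_ON(is_new_memtype_allowed(paddr, size,
                                           _PAGE_CACHE_MODE_WT,
                                           _PAGE_CACHE_MODE_WB));

            /* WT requested, UC- offered: still allowed (returns 1) */
            WARN_ON(!is_new_memtype_allowed(paddr, size,
                                            _PAGE_CACHE_MODE_WT,
                                            _PAGE_CACHE_MODE_UC_MINUS));
    }
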
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 78f0c8cbe316..13f310bfc09a 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -367,6 +367,9 @@ extern int nx_enabled;
367#define pgprot_writecombine pgprot_writecombine 367#define pgprot_writecombine pgprot_writecombine
368extern pgprot_t pgprot_writecombine(pgprot_t prot); 368extern pgprot_t pgprot_writecombine(pgprot_t prot);
369 369
370#define pgprot_writethrough pgprot_writethrough
371extern pgprot_t pgprot_writethrough(pgprot_t prot);
372
370/* Indicate that x86 has its own track and untrack pfn vma functions */ 373/* Indicate that x86 has its own track and untrack pfn vma functions */
371#define __HAVE_PFNMAP_TRACKING 374#define __HAVE_PFNMAP_TRACKING
372 375
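
pgprot_writethrough() gives drivers a WT counterpart to the existing pgprot_writecombine(). A hedged mapping sketch; the vma plumbing around it is illustrative:

    #include <linux/mm.h>

    static int map_framebuffer_wt(struct vm_area_struct *vma, unsigned long pfn)
    {
            vma->vm_page_prot = pgprot_writethrough(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start, pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }
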
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index a90f8972dad5..a4a77286cb1d 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -5,12 +5,14 @@
5 5
6/* misc architecture specific prototypes */ 6/* misc architecture specific prototypes */
7 7
8void system_call(void);
9void syscall_init(void); 8void syscall_init(void);
10 9
11void ia32_syscall(void); 10void entry_SYSCALL_64(void);
12void ia32_cstar_target(void); 11void entry_SYSCALL_compat(void);
13void ia32_sysenter_target(void); 12void entry_INT80_32(void);
13void entry_INT80_compat(void);
14void entry_SYSENTER_32(void);
15void entry_SYSENTER_compat(void);
14 16
15void x86_configure_nx(void); 17void x86_configure_nx(void);
16void x86_report_nx(void); 18void x86_report_nx(void);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index aeb4666e0c0a..2270e41b32fd 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -215,6 +215,44 @@ static inline void clwb(volatile void *__p)
215 : [pax] "a" (p)); 215 : [pax] "a" (p));
216} 216}
217 217
218/**
219 * pcommit_sfence() - persistent commit and fence
220 *
221 * The PCOMMIT instruction ensures that data that has been flushed from the
222 * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
223 * memory and is durable on the DIMM. The primary use case for this is
224 * persistent memory.
225 *
226 * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
227 * with appropriate fencing.
228 *
229 * Example:
230 * void flush_and_commit_buffer(void *vaddr, unsigned int size)
231 * {
232 * unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
233 * void *vend = vaddr + size;
234 * void *p;
235 *
236 * for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
237 * p < vend; p += boot_cpu_data.x86_clflush_size)
238 * clwb(p);
239 *
240 * // SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
241 * // MFENCE via mb() also works
242 * wmb();
243 *
244 * // PCOMMIT and the required SFENCE for ordering
245 * pcommit_sfence();
246 * }
247 *
248 * After this function completes the data pointed to by 'vaddr' has been
249 * accepted to memory and will be durable if the 'vaddr' points to persistent
250 * memory.
251 *
252 * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
253 * things we include both the PCOMMIT and the required SFENCE in the
254 * alternatives generated by pcommit_sfence().
255 */
218static inline void pcommit_sfence(void) 256static inline void pcommit_sfence(void)
219{ 257{
220 alternative(ASM_NOP7, 258 alternative(ASM_NOP7,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index b4bdec3e9523..225ee545e1a0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -177,8 +177,6 @@ struct thread_info {
177 */ 177 */
178#ifndef __ASSEMBLY__ 178#ifndef __ASSEMBLY__
179 179
180DECLARE_PER_CPU(unsigned long, kernel_stack);
181
182static inline struct thread_info *current_thread_info(void) 180static inline struct thread_info *current_thread_info(void)
183{ 181{
184 return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE); 182 return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
@@ -197,9 +195,13 @@ static inline unsigned long current_stack_pointer(void)
197 195
198#else /* !__ASSEMBLY__ */ 196#else /* !__ASSEMBLY__ */
199 197
198#ifdef CONFIG_X86_64
199# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
200#endif
201
200/* Load thread_info address into "reg" */ 202/* Load thread_info address into "reg" */
201#define GET_THREAD_INFO(reg) \ 203#define GET_THREAD_INFO(reg) \
202 _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \ 204 _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
203 _ASM_SUB $(THREAD_SIZE),reg ; 205 _ASM_SUB $(THREAD_SIZE),reg ;
204 206
205/* 207/*
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 5a77593fdace..0fb46482dfde 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -26,7 +26,7 @@
26#define _ASM_X86_TOPOLOGY_H 26#define _ASM_X86_TOPOLOGY_H
27 27
28#ifdef CONFIG_X86_32 28#ifdef CONFIG_X86_32
29# ifdef CONFIG_X86_HT 29# ifdef CONFIG_SMP
30# define ENABLE_TOPO_DEFINES 30# define ENABLE_TOPO_DEFINES
31# endif 31# endif
32#else 32#else
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 4cab890007a7..38a09a13a9bc 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -101,6 +101,12 @@ DEFINE_IRQ_VECTOR_EVENT(call_function_single);
101DEFINE_IRQ_VECTOR_EVENT(threshold_apic); 101DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
102 102
103/* 103/*
104 * deferred_error_apic - called when entering/exiting a deferred apic interrupt
105 * vector handler
106 */
107DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
108
109/*
104 * thermal_apic - called when entering/exiting a thermal apic interrupt 110 * thermal_apic - called when entering/exiting a thermal apic interrupt
105 * vector handler 111 * vector handler
106 */ 112 */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4e49d7dff78e..c5380bea2a36 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -108,7 +108,8 @@ extern int panic_on_unrecovered_nmi;
108void math_emulate(struct math_emu_info *); 108void math_emulate(struct math_emu_info *);
109#ifndef CONFIG_X86_32 109#ifndef CONFIG_X86_32
110asmlinkage void smp_thermal_interrupt(void); 110asmlinkage void smp_thermal_interrupt(void);
111asmlinkage void mce_threshold_interrupt(void); 111asmlinkage void smp_threshold_interrupt(void);
112asmlinkage void smp_deferred_error_interrupt(void);
112#endif 113#endif
113 114
114extern enum ctx_state ist_enter(struct pt_regs *regs); 115extern enum ctx_state ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 7c8ad3451988..f5dcb5204dcd 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -59,6 +59,10 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
59 __put_user_size(*(u32 *)from, (u32 __user *)to, 59 __put_user_size(*(u32 *)from, (u32 __user *)to,
60 4, ret, 4); 60 4, ret, 4);
61 return ret; 61 return ret;
62 case 8:
63 __put_user_size(*(u64 *)from, (u64 __user *)to,
64 8, ret, 8);
65 return ret;
62 } 66 }
63 } 67 }
64 return __copy_to_user_ll(to, from, n); 68 return __copy_to_user_ll(to, from, n);
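
The added case 8 lets constant-size 8-byte copies go through __put_user_size() inline rather than falling back to __copy_to_user_ll(). A sketch of a call that now takes the short path on 32-bit kernels (the size must be a compile-time constant for the switch to apply):

    static int put_u64_fast(u64 __user *dst, const u64 *src)
    {
            /* sizeof(*src) == 8 is constant, so this stays inline */
            return __copy_to_user_inatomic(dst, src, sizeof(*src));
    }
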
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index f58a9c7a3c86..48d34d28f5a6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -171,38 +171,17 @@ struct x86_platform_ops {
171}; 171};
172 172
173struct pci_dev; 173struct pci_dev;
174struct msi_msg;
175 174
176struct x86_msi_ops { 175struct x86_msi_ops {
177 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 176 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
178 void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
179 unsigned int dest, struct msi_msg *msg,
180 u8 hpet_id);
181 void (*teardown_msi_irq)(unsigned int irq); 177 void (*teardown_msi_irq)(unsigned int irq);
182 void (*teardown_msi_irqs)(struct pci_dev *dev); 178 void (*teardown_msi_irqs)(struct pci_dev *dev);
183 void (*restore_msi_irqs)(struct pci_dev *dev); 179 void (*restore_msi_irqs)(struct pci_dev *dev);
184 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
185}; 180};
186 181
187struct IO_APIC_route_entry;
188struct io_apic_irq_attr;
189struct irq_data;
190struct cpumask;
191
192struct x86_io_apic_ops { 182struct x86_io_apic_ops {
193 void (*init) (void);
194 unsigned int (*read) (unsigned int apic, unsigned int reg); 183 unsigned int (*read) (unsigned int apic, unsigned int reg);
195 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
196 void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
197 void (*disable)(void); 184 void (*disable)(void);
198 void (*print_entries)(unsigned int apic, unsigned int nr_entries);
199 int (*set_affinity)(struct irq_data *data,
200 const struct cpumask *mask,
201 bool force);
202 int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
203 unsigned int destination, int vector,
204 struct io_apic_irq_attr *attr);
205 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
206}; 185};
207 186
208extern struct x86_init_ops x86_init; 187extern struct x86_init_ops x86_init;