author		Ingo Molnar <mingo@kernel.org>	2012-03-26 11:18:44 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-03-26 11:19:03 -0400
commit		7fd52392c56361a40f0c630a82b36b95ca31eac6 (patch)
tree		14091de24c6b28ea4cae9826f98aeedb7be091f5 /arch/x86/include/asm
parent		b01c3a0010aabadf745f3e7fdb9cab682e0a28a2 (diff)
parent		e22057c8599373e5caef0bc42bdb95d2a361ab0d (diff)
Merge branch 'linus' into perf/urgent

Merge reason: we need to fix a non-trivial merge conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/alternative.h	6
-rw-r--r--	arch/x86/include/asm/apic.h	6
-rw-r--r--	arch/x86/include/asm/atomic64_32.h	146
-rw-r--r--	arch/x86/include/asm/cpu_device_id.h	13
-rw-r--r--	arch/x86/include/asm/cpufeature.h	4
-rw-r--r--	arch/x86/include/asm/debugreg.h	67
-rw-r--r--	arch/x86/include/asm/efi.h	2
-rw-r--r--	arch/x86/include/asm/fpu-internal.h	520
-rw-r--r--	arch/x86/include/asm/highmem.h	2
-rw-r--r--	arch/x86/include/asm/i387.h	590
-rw-r--r--	arch/x86/include/asm/irq_controller.h	12
-rw-r--r--	arch/x86/include/asm/kgdb.h	10
-rw-r--r--	arch/x86/include/asm/mce.h	2
-rw-r--r--	arch/x86/include/asm/mrst.h	4
-rw-r--r--	arch/x86/include/asm/paravirt.h	1
-rw-r--r--	arch/x86/include/asm/processor.h	64
-rw-r--r--	arch/x86/include/asm/prom.h	10
-rw-r--r--	arch/x86/include/asm/spinlock.h	4
-rw-r--r--	arch/x86/include/asm/spinlock_types.h	1
-rw-r--r--	arch/x86/include/asm/xen/interface.h	1
20 files changed, 722 insertions(+), 743 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 37ad100a2210..49331bedc158 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -145,6 +145,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
  */
 #define ASM_OUTPUT2(a...) a
 
+/*
+ * use this macro if you need clobbers but no inputs in
+ * alternative_{input,io,call}()
+ */
+#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+
 struct paravirt_patch_site;
 #ifdef CONFIG_PARAVIRT
 void apply_paravirt(struct paravirt_patch_site *start,
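
Usage note (not part of the patch; a minimal sketch): when an alternative needs a clobber list but no real inputs, ASM_NO_INPUT_CLOBBER() supplies the dummy "i" (0) input so the generated asm's clobber section is well formed. The instruction choice below is illustrative only:

	/* sfence on SSE-capable CPUs, a nop otherwise; "memory" is clobbered */
	alternative_input("nop", "sfence", X86_FEATURE_XMM,
			  ASM_NO_INPUT_CLOBBER("memory"));
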
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3ab9bdd87e79..a9371c91718c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -288,6 +288,7 @@ struct apic {
 
 	int (*probe)(void);
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+	int (*apic_id_valid)(int apicid);
 	int (*apic_id_registered)(void);
 
 	u32 irq_delivery_mode;
@@ -532,6 +533,11 @@ static inline unsigned int read_apic_id(void)
 	return apic->get_apic_id(reg);
 }
 
+static inline int default_apic_id_valid(int apicid)
+{
+	return x2apic_mode || (apicid < 255);
+}
+
 extern void default_setup_apic_routing(void);
 
 extern struct apic apic_noop;
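
For context, a sketch of how an apic driver wires the new callback (the example_* names are hypothetical and most fields are elided; in-tree drivers do the real wiring):

static struct apic apic_example = {
	.name			= "example",
	.probe			= example_probe,
	.acpi_madt_oem_check	= example_acpi_madt_oem_check,
	.apic_id_valid		= default_apic_id_valid,
	.apic_id_registered	= example_apic_id_registered,
	/* ... */
};
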
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index fa13f0ec2874..198119910da5 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,13 +14,52 @@ typedef struct {
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
+#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+#ifndef ATOMIC64_EXPORT
+#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
+#else
+#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
+	ATOMIC64_EXPORT(atomic64_##sym)
+#endif
+
 #ifdef CONFIG_X86_CMPXCHG64
-#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#define __alternative_atomic64(f, g, out, in...) \
+	asm volatile("call %P[func]" \
+		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
 #else
-#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#define __alternative_atomic64(f, g, out, in...) \
+	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
+			   ATOMIC64_DECL_ONE(sym##_386)
+
+ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(dec_386);
 #endif
 
-#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+#define alternative_atomic64(f, out, in...) \
+	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ATOMIC64_DECL(read);
+ATOMIC64_DECL(set);
+ATOMIC64_DECL(xchg);
+ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_if_positive);
+ATOMIC64_DECL(inc_not_zero);
+ATOMIC64_DECL(add_unless);
+
+#undef ATOMIC64_DECL
+#undef ATOMIC64_DECL_ONE
+#undef __ATOMIC64_DECL
+#undef ATOMIC64_EXPORT
 
 /**
  * atomic64_cmpxchg - cmpxchg atomic64 variable
@@ -50,11 +89,9 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
 	long long o;
 	unsigned high = (unsigned)(n >> 32);
 	unsigned low = (unsigned)n;
-	asm volatile(ATOMIC64_ALTERNATIVE(xchg)
-		     : "=A" (o), "+b" (low), "+c" (high)
-		     : "S" (v)
-		     : "memory"
-		     );
+	alternative_atomic64(xchg, "=&A" (o),
+			     "S" (v), "b" (low), "c" (high)
+			     : "memory");
 	return o;
 }
 
@@ -69,11 +106,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned high = (unsigned)(i >> 32);
 	unsigned low = (unsigned)i;
-	asm volatile(ATOMIC64_ALTERNATIVE(set)
-		     : "+b" (low), "+c" (high)
-		     : "S" (v)
-		     : "eax", "edx", "memory"
-		     );
+	alternative_atomic64(set, /* no output */,
+			     "S" (v), "b" (low), "c" (high)
+			     : "eax", "edx", "memory");
 }
 
 /**
@@ -85,10 +120,7 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long r;
-	asm volatile(ATOMIC64_ALTERNATIVE(read)
-		     : "=A" (r), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 	return r;
 }
 
@@ -101,10 +133,9 @@ static inline long long atomic64_read(const atomic64_t *v)
  */
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE(add_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(add_return,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -113,32 +144,25 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
  */
 static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(sub_return,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
 static inline long long atomic64_inc_return(atomic64_t *v)
 {
 	long long a;
-	asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
-		     : "=A" (a)
-		     : "S" (v)
-		     : "memory", "ecx"
-		     );
+	alternative_atomic64(inc_return, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
 	return a;
 }
 
 static inline long long atomic64_dec_return(atomic64_t *v)
 {
 	long long a;
-	asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
-		     : "=A" (a)
-		     : "S" (v)
-		     : "memory", "ecx"
-		     );
+	alternative_atomic64(dec_return, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
 	return a;
 }
 
@@ -151,10 +175,9 @@ static inline long long atomic64_dec_return(atomic64_t *v)
  */
 static inline long long atomic64_add(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	__alternative_atomic64(add, add_return,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
+			       ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -167,10 +190,9 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
  */
 static inline long long atomic64_sub(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	__alternative_atomic64(sub, sub_return,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -196,10 +218,8 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
  */
 static inline void atomic64_inc(atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
-		     : : "S" (v)
-		     : "memory", "eax", "ecx", "edx"
-		     );
+	__alternative_atomic64(inc, inc_return, /* no output */,
+			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
 
 /**
@@ -210,10 +230,8 @@ static inline void atomic64_inc(atomic64_t *v)
  */
 static inline void atomic64_dec(atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
-		     : : "S" (v)
-		     : "memory", "eax", "ecx", "edx"
-		     );
+	__alternative_atomic64(dec, dec_return, /* no output */,
+			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
 
 /**
@@ -263,15 +281,15 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns non-zero if the add was done, zero otherwise.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned low = (unsigned)u;
 	unsigned high = (unsigned)(u >> 32);
-	asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
-		     : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
-		     : : "memory");
+	alternative_atomic64(add_unless,
+			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
+			     "S" (v) : "memory");
 	return (int)a;
 }
 
@@ -279,26 +297,20 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 static inline int atomic64_inc_not_zero(atomic64_t *v)
 {
 	int r;
-	asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
-		     : "=a" (r)
-		     : "S" (v)
-		     : "ecx", "edx", "memory"
-		     );
+	alternative_atomic64(inc_not_zero, "=&a" (r),
+			     "S" (v) : "ecx", "edx", "memory");
 	return r;
 }
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long r;
-	asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
-		     : "=A" (r)
-		     : "S" (v)
-		     : "ecx", "memory"
-		     );
+	alternative_atomic64(dec_if_positive, "=&A" (r),
+			     "S" (v) : "ecx", "memory");
 	return r;
 }
 
-#undef ATOMIC64_ALTERNATIVE
-#undef ATOMIC64_ALTERNATIVE_
+#undef alternative_atomic64
+#undef __alternative_atomic64
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
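
The atomic64 API itself is unchanged by this rewrite; only the expansion machinery differs. A short, illustrative 32-bit caller (the names here are hypothetical):

static atomic64_t event_count = ATOMIC64_INIT(0);

static void record_event(void)
{
	atomic64_inc(&event_count);	/* expands via __alternative_atomic64() */
	if (atomic64_read(&event_count) > 1000)
		atomic64_set(&event_count, 0);
}
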
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
new file mode 100644
index 000000000000..ff501e511d91
--- /dev/null
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -0,0 +1,13 @@
+#ifndef _CPU_DEVICE_ID
+#define _CPU_DEVICE_ID 1
+
+/*
+ * Declare drivers belonging to specific x86 CPUs
+ * Similar in spirit to pci_device_id and related PCI functions
+ */
+
+#include <linux/mod_devicetable.h>
+
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
+
+#endif
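
A sketch of the intended usage pattern, mirroring pci_device_id tables (the table contents and example_* names are illustrative; struct x86_cpu_id and the *_ANY wildcards live in linux/mod_devicetable.h):

static const struct x86_cpu_id example_cpu_ids[] = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_ANY },
	{}	/* terminating empty entry */
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	if (!x86_match_cpu(example_cpu_ids))
		return -ENODEV;
	return 0;
}
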
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 8d67d428b0f9..340ee49961a6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -177,6 +177,7 @@
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
 #define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
@@ -199,10 +200,13 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
 #define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
 #define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Protection */
 #define X86_FEATURE_BMI2	(9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
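
The new bits are tested with the standard cpufeature API; an illustrative check:

	if (boot_cpu_has(X86_FEATURE_RTM))
		pr_info("RTM transactional memory supported\n");
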
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b903d5ea3941..2d91580bf228 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -78,8 +78,75 @@
  */
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
+
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
+#ifndef CONFIG_PARAVIRT
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)				\
+	(var) = native_get_debugreg(register)
+#define set_debugreg(value, register)				\
+	native_set_debugreg(register, value)
+#endif
+
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0;	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
+	case 1:
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
+	case 2:
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
+	case 3:
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
+	case 6:
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
+	case 7:
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("mov %0, %%db0" ::"r" (value));
+		break;
+	case 1:
+		asm("mov %0, %%db1" ::"r" (value));
+		break;
+	case 2:
+		asm("mov %0, %%db2" ::"r" (value));
+		break;
+	case 3:
+		asm("mov %0, %%db3" ::"r" (value));
+		break;
+	case 6:
+		asm("mov %0, %%db6" ::"r" (value));
+		break;
+	case 7:
+		asm("mov %0, %%db7" ::"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
 static inline void hw_breakpoint_disable(void)
 {
 	/* Zero the control register for HW Breakpoint */
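
An illustrative read-modify-write through the relocated accessors (bit 0 of DR7 is the local-enable bit for hardware breakpoint 0; the snippet is a sketch, not from the patch):

	unsigned long dr7;

	get_debugreg(dr7, 7);		/* native_get_debugreg() unless paravirt */
	set_debugreg(dr7 | 0x1, 7);	/* hypothetical: locally enable breakpoint 0 */
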
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 844f735fd63a..c9dcc181d4d1 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -95,7 +95,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 
 extern int add_efi_memmap;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
-extern void efi_memblock_x86_reserve_range(void);
+extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
new file mode 100644
index 000000000000..4fa88154e4de
--- /dev/null
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ *	Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _FPU_INTERNAL_H
+#define _FPU_INTERNAL_H
+
+#include <linux/kernel_stat.h>
+#include <linux/regset.h>
+#include <linux/slab.h>
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#include <asm/xsave.h>
+
+extern unsigned int sig_xstate_size;
+extern void fpu_init(void);
+
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
+extern user_regset_active_fn fpregs_active, xfpregs_active;
+extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
+				xstateregs_get;
+extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
+				 xstateregs_set;
+
+
+/*
+ * xstateregs_active == fpregs_active. Please refer to the comment
+ * at the definition of fpregs_active.
+ */
+#define xstateregs_active	fpregs_active
+
+extern struct _fpx_sw_bytes fx_sw_reserved;
+#ifdef CONFIG_IA32_EMULATION
+extern unsigned int sig_xstate_ia32_size;
+extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
+struct _fpstate_ia32;
+struct _xstate_ia32;
+extern int save_i387_xstate_ia32(void __user *buf);
+extern int restore_i387_xstate_ia32(void __user *buf);
+#endif
+
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
+#define X87_FSW_ES		(1 << 7)	/* Exception Summary */
+
+static __always_inline __pure bool use_xsaveopt(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVEOPT);
+}
+
+static __always_inline __pure bool use_xsave(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
+static __always_inline __pure bool use_fxsr(void)
+{
+	return static_cpu_has(X86_FEATURE_FXSR);
+}
+
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+	if (!use_xsaveopt())
+		return;
+	__sanitize_i387_state(tsk);
+}
+
+#ifdef CONFIG_X86_64
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+	int err;
+
+	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1: fxrstorq %[fx]\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     " jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : [fx] "m" (*fx), "0" (0));
+#else
+	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     " jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : [fx] "R" (fx), "m" (*fx), "0" (0));
+#endif
+	return err;
+}
+
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+{
+	int err;
+
+	/*
+	 * Clear the bytes not touched by the fxsave and reserved
+	 * for the SW usage.
+	 */
+	err = __clear_user(&fx->sw_reserved,
+			   sizeof(struct _fpx_sw_bytes));
+	if (unlikely(err))
+		return -EFAULT;
+
+	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1: fxsaveq %[fx]\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     " jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err), [fx] "=m" (*fx)
+		     : "0" (0));
+#else
+	asm volatile("1: rex64/fxsave (%[fx])\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3: movl $-1,%[err]\n"
+		     " jmp 2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err), "=m" (*fx)
+		     : [fx] "R" (fx), "0" (0));
+#endif
+	if (unlikely(err) &&
+	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
+		err = -EFAULT;
+	/* No need to clear here because the caller clears USED_MATH */
+	return err;
+}
+
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	/* Using "rex64; fxsave %0" is broken because, if the memory operand
+	   uses any extended registers for addressing, a second REX prefix
+	   will be generated (to the assembler, rex64 followed by semicolon
+	   is a separate instruction), and hence the 64-bitness is lost. */
+
+#ifdef CONFIG_AS_FXSAVEQ
+	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
+	   starting with gas 2.16. */
+	__asm__ __volatile__("fxsaveq %0"
+			     : "=m" (fpu->state->fxsave));
+#else
+	/* Using, as a workaround, the properly prefixed form below isn't
+	   accepted by any binutils version so far released, complaining that
+	   the same type of prefix is used twice if an extended register is
+	   needed for addressing (fix submitted to mainline 2005-11-21).
+	   asm volatile("rex64/fxsave %0"
+			: "=m" (fpu->state->fxsave));
+	   This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	asm volatile("rex64/fxsave (%[fx])"
+		     : "=m" (fpu->state->fxsave)
+		     : [fx] "R" (&fpu->state->fxsave));
+#endif
+}
+
+#else  /* CONFIG_X86_32 */
+
+/* perform fxrstor iff the processor has extended states, otherwise frstor */
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+	/*
+	 * The "nop" is needed to make the instructions the same
+	 * length.
+	 */
+	alternative_input(
+		"nop ; frstor %1",
+		"fxrstor %1",
+		X86_FEATURE_FXSR,
+		"m" (*fx));
+
+	return 0;
+}
+
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	asm volatile("fxsave %[fx]"
+		     : [fx] "=m" (fpu->state->fxsave));
+}
+
+#endif	/* CONFIG_X86_64 */
+
+/*
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
+ */
+static inline int fpu_save_init(struct fpu *fpu)
+{
+	if (use_xsave()) {
+		fpu_xsave(fpu);
+
+		/*
+		 * xsave header may indicate the init state of the FP.
+		 */
+		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+			return 1;
+	} else if (use_fxsr()) {
+		fpu_fxsave(fpu);
+	} else {
+		asm volatile("fnsave %[fx]; fwait"
+			     : [fx] "=m" (fpu->state->fsave));
+		return 0;
+	}
+
+	/*
+	 * If exceptions are pending, we need to clear them so
+	 * that we don't randomly get exceptions later.
+	 *
+	 * FIXME! Is this perhaps only true for the old-style
+	 * irq13 case? Maybe we could leave the x87 state
+	 * intact otherwise?
+	 */
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
+		asm volatile("fnclex");
+		return 0;
+	}
+	return 1;
+}
+
+static inline int __save_init_fpu(struct task_struct *tsk)
+{
+	return fpu_save_init(&tsk->thread.fpu);
+}
+
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
+{
+	return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+	if (use_xsave())
+		return fpu_xrstor_checking(fpu);
+	else
+		return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending. Clear the x87 state here by setting it to fixed
+	   values. "m" is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"	/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (tsk->thread.fpu.has_fpu));
+
+	return fpu_restore_checking(&tsk->thread.fpu);
+}
+
+/*
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
+ */
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+	return tsk->thread.fpu.has_fpu;
+}
+
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.fpu.has_fpu = 0;
+	percpu_write(fpu_owner_task, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.fpu.has_fpu = 1;
+	percpu_write(fpu_owner_task, tsk);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+	__thread_clear_has_fpu(tsk);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+	clts();
+	__thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+	return new == percpu_read_stable(fpu_owner_task) &&
+		cpu == new->thread.fpu.last_cpu;
+}
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (!__save_init_fpu(old))
+			cpu = ~0;
+		old->thread.fpu.last_cpu = cpu;
+		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			new->fpu_counter++;
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else
+			stts();
+	} else {
+		old->fpu_counter = 0;
+		old->thread.fpu.last_cpu = ~0;
+		if (fpu.preload) {
+			new->fpu_counter++;
+			if (fpu_lazy_restore(new, cpu))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			__thread_fpu_begin(new);
+		}
+	}
+	return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+	if (fpu.preload) {
+		if (unlikely(restore_fpu_checking(new)))
+			__thread_fpu_end(new);
+	}
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
+static inline void __clear_fpu(struct task_struct *tsk)
+{
+	if (__thread_has_fpu(tsk)) {
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		__thread_fpu_end(tsk);
+	}
+}
+
+/*
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	__thread_fpu_end(current);
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu())
+		__thread_fpu_begin(current);
+	preempt_enable();
+}
+
+/*
+ * These disable preemption on their own and are safe
+ */
+static inline void save_init_fpu(struct task_struct *tsk)
+{
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
+	preempt_disable();
+	__save_init_fpu(tsk);
+	__thread_fpu_end(tsk);
+	preempt_enable();
+}
+
+static inline void clear_fpu(struct task_struct *tsk)
+{
+	preempt_disable();
+	__clear_fpu(tsk);
+	preempt_enable();
+}
+
+/*
+ * i387 state interaction
+ */
+static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
+{
+	if (cpu_has_fxsr) {
+		return tsk->thread.fpu.state->fxsave.cwd;
+	} else {
+		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+	}
+}
+
+static inline unsigned short get_fpu_swd(struct task_struct *tsk)
+{
+	if (cpu_has_fxsr) {
+		return tsk->thread.fpu.state->fxsave.swd;
+	} else {
+		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+	}
+}
+
+static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
+{
+	if (cpu_has_xmm) {
+		return tsk->thread.fpu.state->fxsave.mxcsr;
+	} else {
+		return MXCSR_DEFAULT;
+	}
+}
+
+static bool fpu_allocated(struct fpu *fpu)
+{
+	return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+	if (fpu_allocated(fpu))
+		return 0;
+	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!fpu->state)
+		return -ENOMEM;
+	WARN_ON((unsigned long)fpu->state & 15);
+	return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+	if (fpu->state) {
+		kmem_cache_free(task_xstate_cachep, fpu->state);
+		fpu->state = NULL;
+	}
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+	memcpy(dst->state, src->state, xstate_size);
+}
+
+extern void fpu_finit(struct fpu *fpu);
+
+#endif
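
The two-stage switch is meant to bracket the rest of the context switch; a sketch of the call sites in __switch_to() (prev_p, next_p and cpu are the scheduler's values, abbreviated here):

	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);	/* runs as the old task */
	/* ... switch stacks, TLS, I/O bitmap, etc. ... */
	switch_fpu_finish(next_p, fpu);			/* restore for the new task */
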
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 3bd04022fd0c..302a323b3f67 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 
 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 247904945d3f..7ce0798b1b26 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -13,476 +13,19 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/regset.h>
 #include <linux/hardirq.h>
-#include <linux/slab.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/uaccess.h>
-#include <asm/xsave.h>
+#include <asm/system.h>
+
+struct pt_regs;
+struct user_i387_struct;
 
-extern unsigned int sig_xstate_size;
-extern void fpu_init(void);
-extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
+extern void math_state_restore(void);
 
-DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
-
-extern user_regset_active_fn fpregs_active, xfpregs_active;
-extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
-				xstateregs_get;
-extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
-				 xstateregs_set;
-
-/*
- * xstateregs_active == fpregs_active. Please refer to the comment
- * at the definition of fpregs_active.
- */
-#define xstateregs_active	fpregs_active
-
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-#define X87_FSW_ES		(1 << 7)	/* Exception Summary */
-
-static __always_inline __pure bool use_xsaveopt(void)
-{
-	return static_cpu_has(X86_FEATURE_XSAVEOPT);
-}
-
-static __always_inline __pure bool use_xsave(void)
-{
-	return static_cpu_has(X86_FEATURE_XSAVE);
-}
-
-static __always_inline __pure bool use_fxsr(void)
-{
-	return static_cpu_has(X86_FEATURE_FXSR);
-}
-
-extern void __sanitize_i387_state(struct task_struct *);
-
-static inline void sanitize_i387_state(struct task_struct *tsk)
-{
-	if (!use_xsaveopt())
-		return;
-	__sanitize_i387_state(tsk);
-}
-
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	int err;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1: fxrstorq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3: movl $-1,%[err]\n"
-		     " jmp 2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "m" (*fx), "0" (0));
-#else
-	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3: movl $-1,%[err]\n"
-		     " jmp 2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-	return err;
-}
-
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
-{
-	int err;
-
-	/*
-	 * Clear the bytes not touched by the fxsave and reserved
-	 * for the SW usage.
-	 */
-	err = __clear_user(&fx->sw_reserved,
-			   sizeof(struct _fpx_sw_bytes));
-	if (unlikely(err))
-		return -EFAULT;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1: fxsaveq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3: movl $-1,%[err]\n"
-		     " jmp 2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), [fx] "=m" (*fx)
-		     : "0" (0));
-#else
-	asm volatile("1: rex64/fxsave (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3: movl $-1,%[err]\n"
-		     " jmp 2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), "=m" (*fx)
-		     : [fx] "R" (fx), "0" (0));
-#endif
-	if (unlikely(err) &&
-	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
-	return err;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-	/* Using "rex64; fxsave %0" is broken because, if the memory operand
-	   uses any extended registers for addressing, a second REX prefix
-	   will be generated (to the assembler, rex64 followed by semicolon
-	   is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
-	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
-	   starting with gas 2.16. */
-	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* Using, as a workaround, the properly prefixed form below isn't
-	   accepted by any binutils version so far released, complaining that
-	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21).
-	   asm volatile("rex64/fxsave %0"
-			: "=m" (fpu->state->fxsave));
-	   This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	asm volatile("rex64/fxsave (%[fx])"
-		     : "=m" (fpu->state->fxsave)
-		     : [fx] "R" (&fpu->state->fxsave));
-#endif
-}
-
-#else  /* CONFIG_X86_32 */
-
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	/*
-	 * The "nop" is needed to make the instructions the same
-	 * length.
-	 */
-	alternative_input(
-		"nop ; frstor %1",
-		"fxrstor %1",
-		X86_FEATURE_FXSR,
-		"m" (*fx));
-
-	return 0;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-	asm volatile("fxsave %[fx]"
-		     : [fx] "=m" (fpu->state->fxsave));
-}
-
-#endif	/* CONFIG_X86_64 */
-
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact.
- */
-static inline int fpu_save_init(struct fpu *fpu)
-{
-	if (use_xsave()) {
-		fpu_xsave(fpu);
-
-		/*
-		 * xsave header may indicate the init state of the FP.
-		 */
-		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-			return 1;
-	} else if (use_fxsr()) {
-		fpu_fxsave(fpu);
-	} else {
-		asm volatile("fnsave %[fx]; fwait"
-			     : [fx] "=m" (fpu->state->fsave));
-		return 0;
-	}
-
-	/*
-	 * If exceptions are pending, we need to clear them so
-	 * that we don't randomly get exceptions later.
-	 *
-	 * FIXME! Is this perhaps only true for the old-style
-	 * irq13 case? Maybe we could leave the x87 state
-	 * intact otherwise?
-	 */
-	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
-		asm volatile("fnclex");
-		return 0;
-	}
-	return 1;
-}
-
-static inline int __save_init_fpu(struct task_struct *tsk)
-{
-	return fpu_save_init(&tsk->thread.fpu);
-}
-
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
-static inline int fpu_restore_checking(struct fpu *fpu)
-{
-	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
-	else
-		return fpu_fxrstor_checking(fpu);
-}
-
-static inline int restore_fpu_checking(struct task_struct *tsk)
-{
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. "m" is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"	/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (tsk->thread.fpu.has_fpu));
-
-	return fpu_restore_checking(&tsk->thread.fpu);
-}
-
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
-	return tsk->thread.fpu.has_fpu;
-}
-
-/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct task_struct *tsk)
-{
-	tsk->thread.fpu.has_fpu = 0;
-	percpu_write(fpu_owner_task, NULL);
-}
-
-/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct task_struct *tsk)
-{
-	tsk->thread.fpu.has_fpu = 1;
-	percpu_write(fpu_owner_task, tsk);
-}
-
-/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void __thread_fpu_end(struct task_struct *tsk)
-{
-	__thread_clear_has_fpu(tsk);
-	stts();
-}
-
-static inline void __thread_fpu_begin(struct task_struct *tsk)
-{
-	clts();
-	__thread_set_has_fpu(tsk);
-}
-
-/*
- * FPU state switching for scheduling.
- *
- * This is a two-stage process:
- *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
- *
- *  - switch_fpu_finish() restores the new state as
- *    necessary.
- */
-typedef struct { int preload; } fpu_switch_t;
-
-/*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
- *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
- */
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
-	return new == percpu_read_stable(fpu_owner_task) &&
-		cpu == new->thread.fpu.last_cpu;
-}
-
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
-{
-	fpu_switch_t fpu;
-
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
-	if (__thread_has_fpu(old)) {
-		if (!__save_init_fpu(old))
-			cpu = ~0;
-		old->thread.fpu.last_cpu = cpu;
-		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
-
-		/* Don't change CR0.TS if we just switch! */
-		if (fpu.preload) {
-			new->fpu_counter++;
-			__thread_set_has_fpu(new);
-			prefetch(new->thread.fpu.state);
-		} else
-			stts();
-	} else {
-		old->fpu_counter = 0;
-		old->thread.fpu.last_cpu = ~0;
-		if (fpu.preload) {
-			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
-				fpu.preload = 0;
-			else
-				prefetch(new->thread.fpu.state);
-			__thread_fpu_begin(new);
-		}
-	}
-	return fpu;
-}
-
-/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
- */
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
-{
-	if (fpu.preload) {
-		if (unlikely(restore_fpu_checking(new)))
-			__thread_fpu_end(new);
-	}
-}
-
-/*
- * Signal frame handlers...
- */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
-
-static inline void __clear_fpu(struct task_struct *tsk)
-{
-	if (__thread_has_fpu(tsk)) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(tsk);
-	}
-}
-
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * We can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- */
-static inline bool interrupted_kernel_fpu_idle(void)
-{
-	return !__thread_has_fpu(current) &&
-		(read_cr0() & X86_CR0_TS);
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static inline bool interrupted_user_mode(void)
-{
-	struct pt_regs *regs = get_irq_regs();
-	return regs && user_mode_vm(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
-static inline bool irq_fpu_usable(void)
-{
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
-}
-
-static inline void kernel_fpu_begin(void)
-{
-	struct task_struct *me = current;
-
-	WARN_ON_ONCE(!irq_fpu_usable());
-	preempt_disable();
-	if (__thread_has_fpu(me)) {
-		__save_init_fpu(me);
-		__thread_clear_has_fpu(me);
-		/* We do 'stts()' in kernel_fpu_end() */
-	} else {
-		percpu_write(fpu_owner_task, NULL);
-		clts();
-	}
-}
-
-static inline void kernel_fpu_end(void)
-{
-	stts();
-	preempt_enable();
-}
-
+extern bool irq_fpu_usable(void);
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
 
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
@@ -524,126 +67,13 @@ static inline void irq_ts_restore(int TS_state)
  * we can just assume we have FPU access - typically
  * to save the FP state - we'll just take a #NM
  * fault and get the FPU access back.
- *
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe, though.
- *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
  */
 static inline int user_has_fpu(void)
 {
-	return __thread_has_fpu(current);
-}
-
-static inline void user_fpu_end(void)
-{
-	preempt_disable();
-	__thread_fpu_end(current);
-	preempt_enable();
-}
-
-static inline void user_fpu_begin(void)
-{
-	preempt_disable();
-	if (!user_has_fpu())
-		__thread_fpu_begin(current);
-	preempt_enable();
-}
-
-/*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-	WARN_ON_ONCE(!__thread_has_fpu(tsk));
-	preempt_disable();
-	__save_init_fpu(tsk);
-	__thread_fpu_end(tsk);
-	preempt_enable();
-}
-
-static inline void unlazy_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	if (__thread_has_fpu(tsk)) {
-		__save_init_fpu(tsk);
-		__thread_fpu_end(tsk);
-	} else
-		tsk->fpu_counter = 0;
-	preempt_enable();
-}
-
-static inline void clear_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	__clear_fpu(tsk);
-	preempt_enable();
-}
-
-/*
- * i387 state interaction
- */
-static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
-{
-	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.cwd;
-	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
-	}
-}
-
-static inline unsigned short get_fpu_swd(struct task_struct *tsk)
-{
-	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.swd;
-	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
-	}
-}
-
-static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
-	if (cpu_has_xmm) {
-		return tsk->thread.fpu.state->fxsave.mxcsr;
-	} else {
-		return MXCSR_DEFAULT;
-	}
-}
-
-static bool fpu_allocated(struct fpu *fpu)
-{
-	return fpu->state != NULL;
-}
-
-static inline int fpu_alloc(struct fpu *fpu)
-{
-	if (fpu_allocated(fpu))
-		return 0;
-	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-	if (!fpu->state)
-		return -ENOMEM;
-	WARN_ON((unsigned long)fpu->state & 15);
-	return 0;
-}
-
-static inline void fpu_free(struct fpu *fpu)
-{
-	if (fpu->state) {
-		kmem_cache_free(task_xstate_cachep, fpu->state);
-		fpu->state = NULL;
-	}
-}
-
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
-{
-	memcpy(dst->state, src->state, xstate_size);
+	return current->thread.fpu.has_fpu;
 }
 
-extern void fpu_finit(struct fpu *fpu);
+extern void unlazy_fpu(struct task_struct *tsk);
 
 #endif /* __ASSEMBLY__ */
 
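
What remains here is the small public API; an illustrative caller of the now out-of-line kernel-FPU entry points:

	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SSE/AVX-accelerated work ... */
		kernel_fpu_end();
	} else {
		/* fall back to a scalar implementation */
	}
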
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
deleted file mode 100644
index 423bbbddf36d..000000000000
--- a/arch/x86/include/asm/irq_controller.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __IRQ_CONTROLLER__
-#define __IRQ_CONTROLLER__
-
-struct irq_domain {
-	int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
-		     u32 *out_hwirq, u32 *out_type);
-	void *priv;
-	struct device_node *controller;
-	struct list_head l;
-};
-
-#endif
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 77e95f54570a..332f98c9111f 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -64,11 +64,15 @@ enum regnames {
 	GDB_PS,			/* 17 */
 	GDB_CS,			/* 18 */
 	GDB_SS,			/* 19 */
+	GDB_DS,			/* 20 */
+	GDB_ES,			/* 21 */
+	GDB_FS,			/* 22 */
+	GDB_GS,			/* 23 */
 };
 #define GDB_ORIG_AX		57
-#define DBG_MAX_REG_NUM		20
-/* 17 64 bit regs and 3 32 bit regs */
-#define NUMREGBYTES		((17 * 8) + (3 * 4))
+#define DBG_MAX_REG_NUM		24
+/* 17 64 bit regs and 5 32 bit regs */
+#define NUMREGBYTES		((17 * 8) + (5 * 4))
 #endif /* ! CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
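
A quick consistency check of the new constants (illustrative, not from the patch): 17 64-bit registers plus the 5 32-bit segment registers give 17 * 8 + 5 * 4 = 156 bytes.

	BUILD_BUG_ON(NUMREGBYTES != (17 * 8) + (5 * 4));	/* 136 + 20 == 156 */
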
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6aefb14cbbc5..441520e4174f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -151,7 +151,7 @@ static inline void enable_p5_mce(void) {}
 
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
-extern struct device *mce_device[CONFIG_NR_CPUS];
+DECLARE_PER_CPU(struct device *, mce_device);
 
 /*
  * Maximum banks number.
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 0a0a95460434..fc18bf3ce7c8 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -26,8 +26,8 @@ extern struct sfi_rtc_table_entry sfi_mrtc_array[];
  * identified via MSRs.
  */
 enum mrst_cpu_type {
-	MRST_CPU_CHIP_LINCROFT = 1,
-	MRST_CPU_CHIP_PENWELL,
+	/* 1 was Moorestown */
+	MRST_CPU_CHIP_PENWELL = 2,
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c0180fd372d2..aa0f91308367 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -10,6 +10,7 @@
 #include <asm/paravirt_types.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 58545c97d071..5533b30cac07 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -162,6 +162,7 @@ extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
+void print_cpu_msr(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
@@ -474,61 +475,6 @@ struct thread_struct {
 	unsigned io_bitmap_max;
 };
 
-static inline unsigned long native_get_debugreg(int regno)
-{
-	unsigned long val = 0;	/* Damn you, gcc! */
-
-	switch (regno) {
-	case 0:
-		asm("mov %%db0, %0" :"=r" (val));
-		break;
-	case 1:
-		asm("mov %%db1, %0" :"=r" (val));
-		break;
-	case 2:
-		asm("mov %%db2, %0" :"=r" (val));
-		break;
-	case 3:
-		asm("mov %%db3, %0" :"=r" (val));
-		break;
-	case 6:
-		asm("mov %%db6, %0" :"=r" (val));
-		break;
-	case 7:
-		asm("mov %%db7, %0" :"=r" (val));
-		break;
-	default:
-		BUG();
-	}
-	return val;
-}
-
-static inline void native_set_debugreg(int regno, unsigned long value)
-{
-	switch (regno) {
-	case 0:
-		asm("mov %0, %%db0" ::"r" (value));
-		break;
-	case 1:
-		asm("mov %0, %%db1" ::"r" (value));
-		break;
-	case 2:
-		asm("mov %0, %%db2" ::"r" (value));
-		break;
-	case 3:
-		asm("mov %0, %%db3" ::"r" (value));
-		break;
-	case 6:
-		asm("mov %0, %%db6" ::"r" (value));
-		break;
-	case 7:
-		asm("mov %0, %%db7" ::"r" (value));
-		break;
-	default:
-		BUG();
-	}
-}
-
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
@@ -574,14 +520,6 @@ static inline void native_swapgs(void)
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-	(var) = native_get_debugreg(register)
-#define set_debugreg(value, register)				\
-	native_set_debugreg(register, value)
-
 static inline void load_sp0(struct tss_struct *tss,
 			    struct thread_struct *thread)
 {
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 644dd885f05a..60bef663609a 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -21,7 +21,6 @@
 #include <asm/irq.h>
 #include <linux/atomic.h>
 #include <asm/setup.h>
-#include <asm/irq_controller.h>
 
 #ifdef CONFIG_OF
 extern int of_ioapic;
@@ -43,15 +42,6 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 #define pci_address_to_pio pci_address_to_pio
 unsigned long pci_address_to_pio(phys_addr_t addr);
 
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- *
- * FIXME: We really should implement proper virq handling like power,
- * but that's going to be major surgery.
- */
-static inline void irq_dispose_mapping(unsigned int virq) { }
-
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index a82c2bf504b6..76bfa2cf301d 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -88,14 +88,14 @@ static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return !!(tmp.tail ^ tmp.head);
+	return tmp.tail != tmp.head;
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
+	return (__ticket_t)(tmp.tail - tmp.head) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
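
Why dropping TICKET_MASK is safe, as a worked example (assuming __ticket_t is u8): with tail == 1 and head == 255, the unsigned cast absorbs the wrap-around, so (u8)(1 - 255) == (u8)(-254) == 2, and 2 > 1 still reads as contended. The old ((tmp.tail - tmp.head) & TICKET_MASK) computed exactly the same value.
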
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 8ebd5df7451e..ad0ad07fc006 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -16,7 +16,6 @@ typedef u32 __ticketpair_t;
 #endif
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
-#define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))
 
 typedef struct arch_spinlock {
 	union {
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index a1f2db5f1170..cbf0c9d50b92 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -56,6 +56,7 @@ DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START