author:    Ingo Molnar <mingo@elte.hu>  2010-03-09 11:11:53 -0500
committer: Ingo Molnar <mingo@elte.hu>  2010-03-09 11:11:53 -0500
commit:    548b84166917d6f5e2296123b85ad24aecd3801d (patch)
tree:      0ab0300e23a02df0fe3c0579627e4998bb122c00 /arch/x86/include
parent:    cfb581bcd4f8c158c6f2b48bf5e232bb9e6855c0 (diff)
parent:    57d54889cd00db2752994b389ba714138652e60c (diff)
Merge commit 'v2.6.34-rc1' into perf/urgent
Conflicts:
tools/perf/util/probe-event.c
Merge reason: Pick up -rc1 and resolve the conflict as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include')
58 files changed, 1581 insertions, 1548 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 9f828f87ca35..493092efaa3b 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,6 +11,7 @@ header-y += sigcontext32.h
11 | header-y += ucontext.h | 11 | header-y += ucontext.h |
12 | header-y += processor-flags.h | 12 | header-y += processor-flags.h |
13 | header-y += hw_breakpoint.h | 13 | header-y += hw_breakpoint.h |
14 | header-y += hyperv.h | ||
14 | 15 | ||
15 | unifdef-y += e820.h | 16 | unifdef-y += e820.h |
16 | unifdef-y += ist.h | 17 | unifdef-y += ist.h |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index ac80b7d70014..b09ec55650b3 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -130,11 +130,16 @@ static inline int alternatives_text_reserved(void *start, void *end)
130 | asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ | 130 | asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
131 | : output : "i" (0), ## input) | 131 | : output : "i" (0), ## input) |
132 | 132 | ||
133 | /* Like alternative_io, but for replacing a direct call with another one. */ | ||
134 | #define alternative_call(oldfunc, newfunc, feature, output, input...) \ | ||
135 | asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ | ||
136 | : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) | ||
137 | |||
133 | /* | 138 | /* |
134 | * use this macro(s) if you need more than one output parameter | 139 | * use this macro(s) if you need more than one output parameter |
135 | * in alternative_io | 140 | * in alternative_io |
136 | */ | 141 | */ |
137 | #define ASM_OUTPUT2(a, b) a, b | 142 | #define ASM_OUTPUT2(a...) a |
138 | 143 | ||
139 | struct paravirt_patch_site; | 144 | struct paravirt_patch_site; |
140 | #ifdef CONFIG_PARAVIRT | 145 | #ifdef CONFIG_PARAVIRT |
@@ -160,10 +165,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
160 | * invalid instruction possible) or if the instructions are changed from a | 165 | * invalid instruction possible) or if the instructions are changed from a |
161 | * consistent state to another consistent state atomically. | 166 | * consistent state to another consistent state atomically. |
162 | * More care must be taken when modifying code in the SMP case because of | 167 | * More care must be taken when modifying code in the SMP case because of |
163 | * Intel's errata. | 168 | * Intel's errata. text_poke_smp() takes care that errata, but still |
169 | * doesn't support NMI/MCE handler code modifying. | ||
164 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an | 170 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an |
165 | * inconsistent instruction while you patch. | 171 | * inconsistent instruction while you patch. |
166 | */ | 172 | */ |
167 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 173 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
174 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | ||
168 | 175 | ||
169 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 176 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
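The alternative_call() macro added above patches a direct call instruction at boot: apply_alternatives() rewrites the call target when the named CPU feature is present, and the variadic ASM_OUTPUT2() change exists so such calls can list several register outputs. A minimal sketch of how it can be wired up on x86-64 — the memcpy helpers and the chosen feature bit are hypothetical, not part of this commit; the constraint style follows the kernel's later copy_user_generic() usage:

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* Hypothetical callees -- illustrative only, not part of this diff. */
extern void *memcpy_generic(void *to, const void *from, unsigned len);
extern void *memcpy_sse2(void *to, const void *from, unsigned len);

static inline void *copy_block(void *to, const void *from, unsigned len)
{
	void *ret;

	/*
	 * The arguments are pinned to the registers the callees expect
	 * (rdi/rsi/rdx); the call instruction itself is rewritten once,
	 * at boot, if the CPU advertises X86_FEATURE_XMM2.
	 */
	alternative_call(memcpy_generic, memcpy_sse2, X86_FEATURE_XMM2,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}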
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
new file mode 100644
index 000000000000..c74a2eebe570
--- /dev/null
+++ b/arch/x86/include/asm/apb_timer.h
@@ -0,0 +1,70 @@
1 | /* | ||
2 | * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * Author: Jacob Pan (jacob.jun.pan@intel.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | * | ||
12 | * Note: | ||
13 | */ | ||
14 | |||
15 | #ifndef ASM_X86_APBT_H | ||
16 | #define ASM_X86_APBT_H | ||
17 | #include <linux/sfi.h> | ||
18 | |||
19 | #ifdef CONFIG_APB_TIMER | ||
20 | |||
21 | /* Langwell DW APB timer registers */ | ||
22 | #define APBTMR_N_LOAD_COUNT 0x00 | ||
23 | #define APBTMR_N_CURRENT_VALUE 0x04 | ||
24 | #define APBTMR_N_CONTROL 0x08 | ||
25 | #define APBTMR_N_EOI 0x0c | ||
26 | #define APBTMR_N_INT_STATUS 0x10 | ||
27 | |||
28 | #define APBTMRS_INT_STATUS 0xa0 | ||
29 | #define APBTMRS_EOI 0xa4 | ||
30 | #define APBTMRS_RAW_INT_STATUS 0xa8 | ||
31 | #define APBTMRS_COMP_VERSION 0xac | ||
32 | #define APBTMRS_REG_SIZE 0x14 | ||
33 | |||
34 | /* register bits */ | ||
35 | #define APBTMR_CONTROL_ENABLE (1<<0) | ||
36 | #define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */ | ||
37 | #define APBTMR_CONTROL_INT (1<<2) | ||
38 | |||
39 | /* default memory mapped register base */ | ||
40 | #define LNW_SCU_ADDR 0xFF100000 | ||
41 | #define LNW_EXT_TIMER_OFFSET 0x1B800 | ||
42 | #define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET) | ||
43 | #define LNW_EXT_TIMER_PGOFFSET 0x800 | ||
44 | |||
45 | /* APBT clock speed range from PCLK to fabric base, 25-100MHz */ | ||
46 | #define APBT_MAX_FREQ 50 | ||
47 | #define APBT_MIN_FREQ 1 | ||
48 | #define APBT_MMAP_SIZE 1024 | ||
49 | |||
50 | #define APBT_DEV_USED 1 | ||
51 | |||
52 | extern void apbt_time_init(void); | ||
53 | extern struct clock_event_device *global_clock_event; | ||
54 | extern unsigned long apbt_quick_calibrate(void); | ||
55 | extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); | ||
56 | extern void apbt_setup_secondary_clock(void); | ||
57 | extern unsigned int boot_cpu_id; | ||
58 | extern int disable_apbt_percpu; | ||
59 | |||
60 | extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); | ||
61 | extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); | ||
62 | extern int sfi_mtimer_num; | ||
63 | |||
64 | #else /* CONFIG_APB_TIMER */ | ||
65 | |||
66 | static inline unsigned long apbt_quick_calibrate(void) {return 0; } | ||
67 | static inline void apbt_time_init(void) {return 0; } | ||
68 | |||
69 | #endif | ||
70 | #endif /* ASM_X86_APBT_H */ | ||
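The new header only declares the Langwell APB timer interface; a hedged sketch of how platform setup code might consume it — the function name and the meaning of the calibration value are assumptions, not taken from this commit:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/apb_timer.h>

/* Hypothetical Moorestown-style time setup -- illustrative only. */
static void __init example_platform_time_init(void)
{
	unsigned long fast_calibrate;

	/* Walk the SFI MTMR entries and register the APB clockevents. */
	apbt_time_init();

	/* Quick TSC-vs-APB-timer calibration for early boot. */
	fast_calibrate = apbt_quick_calibrate();
	if (fast_calibrate)
		pr_info("APB timer quick calibration: %lu\n", fast_calibrate);
}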
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 4e1b8873c474..8f8217b9bdac 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,5 +1,300 @@
1 | #ifndef _ASM_X86_ATOMIC_H | ||
2 | #define _ASM_X86_ATOMIC_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | #include <asm/alternative.h> | ||
8 | #include <asm/cmpxchg.h> | ||
9 | |||
10 | /* | ||
11 | * Atomic operations that C can't guarantee us. Useful for | ||
12 | * resource counting etc.. | ||
13 | */ | ||
14 | |||
15 | #define ATOMIC_INIT(i) { (i) } | ||
16 | |||
17 | /** | ||
18 | * atomic_read - read atomic variable | ||
19 | * @v: pointer of type atomic_t | ||
20 | * | ||
21 | * Atomically reads the value of @v. | ||
22 | */ | ||
23 | static inline int atomic_read(const atomic_t *v) | ||
24 | { | ||
25 | return v->counter; | ||
26 | } | ||
27 | |||
28 | /** | ||
29 | * atomic_set - set atomic variable | ||
30 | * @v: pointer of type atomic_t | ||
31 | * @i: required value | ||
32 | * | ||
33 | * Atomically sets the value of @v to @i. | ||
34 | */ | ||
35 | static inline void atomic_set(atomic_t *v, int i) | ||
36 | { | ||
37 | v->counter = i; | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * atomic_add - add integer to atomic variable | ||
42 | * @i: integer value to add | ||
43 | * @v: pointer of type atomic_t | ||
44 | * | ||
45 | * Atomically adds @i to @v. | ||
46 | */ | ||
47 | static inline void atomic_add(int i, atomic_t *v) | ||
48 | { | ||
49 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
50 | : "+m" (v->counter) | ||
51 | : "ir" (i)); | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * atomic_sub - subtract integer from atomic variable | ||
56 | * @i: integer value to subtract | ||
57 | * @v: pointer of type atomic_t | ||
58 | * | ||
59 | * Atomically subtracts @i from @v. | ||
60 | */ | ||
61 | static inline void atomic_sub(int i, atomic_t *v) | ||
62 | { | ||
63 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
64 | : "+m" (v->counter) | ||
65 | : "ir" (i)); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * atomic_sub_and_test - subtract value from variable and test result | ||
70 | * @i: integer value to subtract | ||
71 | * @v: pointer of type atomic_t | ||
72 | * | ||
73 | * Atomically subtracts @i from @v and returns | ||
74 | * true if the result is zero, or false for all | ||
75 | * other cases. | ||
76 | */ | ||
77 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
78 | { | ||
79 | unsigned char c; | ||
80 | |||
81 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
82 | : "+m" (v->counter), "=qm" (c) | ||
83 | : "ir" (i) : "memory"); | ||
84 | return c; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * atomic_inc - increment atomic variable | ||
89 | * @v: pointer of type atomic_t | ||
90 | * | ||
91 | * Atomically increments @v by 1. | ||
92 | */ | ||
93 | static inline void atomic_inc(atomic_t *v) | ||
94 | { | ||
95 | asm volatile(LOCK_PREFIX "incl %0" | ||
96 | : "+m" (v->counter)); | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * atomic_dec - decrement atomic variable | ||
101 | * @v: pointer of type atomic_t | ||
102 | * | ||
103 | * Atomically decrements @v by 1. | ||
104 | */ | ||
105 | static inline void atomic_dec(atomic_t *v) | ||
106 | { | ||
107 | asm volatile(LOCK_PREFIX "decl %0" | ||
108 | : "+m" (v->counter)); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * atomic_dec_and_test - decrement and test | ||
113 | * @v: pointer of type atomic_t | ||
114 | * | ||
115 | * Atomically decrements @v by 1 and | ||
116 | * returns true if the result is 0, or false for all other | ||
117 | * cases. | ||
118 | */ | ||
119 | static inline int atomic_dec_and_test(atomic_t *v) | ||
120 | { | ||
121 | unsigned char c; | ||
122 | |||
123 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
124 | : "+m" (v->counter), "=qm" (c) | ||
125 | : : "memory"); | ||
126 | return c != 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * atomic_inc_and_test - increment and test | ||
131 | * @v: pointer of type atomic_t | ||
132 | * | ||
133 | * Atomically increments @v by 1 | ||
134 | * and returns true if the result is zero, or false for all | ||
135 | * other cases. | ||
136 | */ | ||
137 | static inline int atomic_inc_and_test(atomic_t *v) | ||
138 | { | ||
139 | unsigned char c; | ||
140 | |||
141 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
142 | : "+m" (v->counter), "=qm" (c) | ||
143 | : : "memory"); | ||
144 | return c != 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * atomic_add_negative - add and test if negative | ||
149 | * @i: integer value to add | ||
150 | * @v: pointer of type atomic_t | ||
151 | * | ||
152 | * Atomically adds @i to @v and returns true | ||
153 | * if the result is negative, or false when | ||
154 | * result is greater than or equal to zero. | ||
155 | */ | ||
156 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
157 | { | ||
158 | unsigned char c; | ||
159 | |||
160 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
161 | : "+m" (v->counter), "=qm" (c) | ||
162 | : "ir" (i) : "memory"); | ||
163 | return c; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * atomic_add_return - add integer and return | ||
168 | * @i: integer value to add | ||
169 | * @v: pointer of type atomic_t | ||
170 | * | ||
171 | * Atomically adds @i to @v and returns @i + @v | ||
172 | */ | ||
173 | static inline int atomic_add_return(int i, atomic_t *v) | ||
174 | { | ||
175 | int __i; | ||
176 | #ifdef CONFIG_M386 | ||
177 | unsigned long flags; | ||
178 | if (unlikely(boot_cpu_data.x86 <= 3)) | ||
179 | goto no_xadd; | ||
180 | #endif | ||
181 | /* Modern 486+ processor */ | ||
182 | __i = i; | ||
183 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
184 | : "+r" (i), "+m" (v->counter) | ||
185 | : : "memory"); | ||
186 | return i + __i; | ||
187 | |||
188 | #ifdef CONFIG_M386 | ||
189 | no_xadd: /* Legacy 386 processor */ | ||
190 | raw_local_irq_save(flags); | ||
191 | __i = atomic_read(v); | ||
192 | atomic_set(v, i + __i); | ||
193 | raw_local_irq_restore(flags); | ||
194 | return i + __i; | ||
195 | #endif | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * atomic_sub_return - subtract integer and return | ||
200 | * @v: pointer of type atomic_t | ||
201 | * @i: integer value to subtract | ||
202 | * | ||
203 | * Atomically subtracts @i from @v and returns @v - @i | ||
204 | */ | ||
205 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
206 | { | ||
207 | return atomic_add_return(-i, v); | ||
208 | } | ||
209 | |||
210 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
211 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
212 | |||
213 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
214 | { | ||
215 | return cmpxchg(&v->counter, old, new); | ||
216 | } | ||
217 | |||
218 | static inline int atomic_xchg(atomic_t *v, int new) | ||
219 | { | ||
220 | return xchg(&v->counter, new); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * atomic_add_unless - add unless the number is already a given value | ||
225 | * @v: pointer of type atomic_t | ||
226 | * @a: the amount to add to v... | ||
227 | * @u: ...unless v is equal to u. | ||
228 | * | ||
229 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
230 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
231 | */ | ||
232 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
233 | { | ||
234 | int c, old; | ||
235 | c = atomic_read(v); | ||
236 | for (;;) { | ||
237 | if (unlikely(c == (u))) | ||
238 | break; | ||
239 | old = atomic_cmpxchg((v), c, c + (a)); | ||
240 | if (likely(old == c)) | ||
241 | break; | ||
242 | c = old; | ||
243 | } | ||
244 | return c != (u); | ||
245 | } | ||
246 | |||
247 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
248 | |||
249 | /** | ||
250 | * atomic_inc_short - increment of a short integer | ||
251 | * @v: pointer to type int | ||
252 | * | ||
253 | * Atomically adds 1 to @v | ||
254 | * Returns the new value of @u | ||
255 | */ | ||
256 | static inline short int atomic_inc_short(short int *v) | ||
257 | { | ||
258 | asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); | ||
259 | return *v; | ||
260 | } | ||
261 | |||
262 | #ifdef CONFIG_X86_64 | ||
263 | /** | ||
264 | * atomic_or_long - OR of two long integers | ||
265 | * @v1: pointer to type unsigned long | ||
266 | * @v2: pointer to type unsigned long | ||
267 | * | ||
268 | * Atomically ORs @v1 and @v2 | ||
269 | * Returns the result of the OR | ||
270 | */ | ||
271 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
272 | { | ||
273 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
274 | } | ||
275 | #endif | ||
276 | |||
277 | /* These are x86-specific, used by some header files */ | ||
278 | #define atomic_clear_mask(mask, addr) \ | ||
279 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
280 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
281 | |||
282 | #define atomic_set_mask(mask, addr) \ | ||
283 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
284 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ | ||
285 | : "memory") | ||
286 | |||
287 | /* Atomic operations are already serializing on x86 */ | ||
288 | #define smp_mb__before_atomic_dec() barrier() | ||
289 | #define smp_mb__after_atomic_dec() barrier() | ||
290 | #define smp_mb__before_atomic_inc() barrier() | ||
291 | #define smp_mb__after_atomic_inc() barrier() | ||
292 | |||
1 | #ifdef CONFIG_X86_32 | 293 | #ifdef CONFIG_X86_32 |
2 | # include "atomic_32.h" | 294 | # include "atomic64_32.h" |
3 | #else | 295 | #else |
4 | # include "atomic_64.h" | 296 | # include "atomic64_64.h" |
5 | #endif | 297 | #endif |
298 | |||
299 | #include <asm-generic/atomic-long.h> | ||
300 | #endif /* _ASM_X86_ATOMIC_H */ | ||
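The add-unless loop documented in the unified header above is what makes the common "take a reference only if the object is still live" pattern work. A short sketch — the obj structure and helpers are illustrative, not from this file:

#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcount;
	/* ... payload ... */
};

static struct obj *obj_get(struct obj *o)
{
	/*
	 * atomic_inc_not_zero() == atomic_add_unless(v, 1, 0): it fails
	 * once the count has already reached zero, so a dying object can
	 * never be resurrected by a racing lookup.
	 */
	if (!o || !atomic_inc_not_zero(&o->refcount))
		return NULL;
	return o;
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);
}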
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
new file mode 100644
index 000000000000..03027bf28de5
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -0,0 +1,160 @@
1 | #ifndef _ASM_X86_ATOMIC64_32_H | ||
2 | #define _ASM_X86_ATOMIC64_32_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | //#include <asm/cmpxchg.h> | ||
8 | |||
9 | /* An 64bit atomic type */ | ||
10 | |||
11 | typedef struct { | ||
12 | u64 __aligned(8) counter; | ||
13 | } atomic64_t; | ||
14 | |||
15 | #define ATOMIC64_INIT(val) { (val) } | ||
16 | |||
17 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | ||
18 | |||
19 | /** | ||
20 | * atomic64_xchg - xchg atomic64 variable | ||
21 | * @ptr: pointer to type atomic64_t | ||
22 | * @new_val: value to assign | ||
23 | * | ||
24 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
25 | * the old value. | ||
26 | */ | ||
27 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | ||
28 | |||
29 | /** | ||
30 | * atomic64_set - set atomic64 variable | ||
31 | * @ptr: pointer to type atomic64_t | ||
32 | * @new_val: value to assign | ||
33 | * | ||
34 | * Atomically sets the value of @ptr to @new_val. | ||
35 | */ | ||
36 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | ||
37 | |||
38 | /** | ||
39 | * atomic64_read - read atomic64 variable | ||
40 | * @ptr: pointer to type atomic64_t | ||
41 | * | ||
42 | * Atomically reads the value of @ptr and returns it. | ||
43 | */ | ||
44 | static inline u64 atomic64_read(atomic64_t *ptr) | ||
45 | { | ||
46 | u64 res; | ||
47 | |||
48 | /* | ||
49 | * Note, we inline this atomic64_t primitive because | ||
50 | * it only clobbers EAX/EDX and leaves the others | ||
51 | * untouched. We also (somewhat subtly) rely on the | ||
52 | * fact that cmpxchg8b returns the current 64-bit value | ||
53 | * of the memory location we are touching: | ||
54 | */ | ||
55 | asm volatile( | ||
56 | "mov %%ebx, %%eax\n\t" | ||
57 | "mov %%ecx, %%edx\n\t" | ||
58 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
59 | : "=&A" (res) | ||
60 | : "m" (*ptr) | ||
61 | ); | ||
62 | |||
63 | return res; | ||
64 | } | ||
65 | |||
66 | extern u64 atomic64_read(atomic64_t *ptr); | ||
67 | |||
68 | /** | ||
69 | * atomic64_add_return - add and return | ||
70 | * @delta: integer value to add | ||
71 | * @ptr: pointer to type atomic64_t | ||
72 | * | ||
73 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
74 | */ | ||
75 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | ||
76 | |||
77 | /* | ||
78 | * Other variants with different arithmetic operators: | ||
79 | */ | ||
80 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | ||
81 | extern u64 atomic64_inc_return(atomic64_t *ptr); | ||
82 | extern u64 atomic64_dec_return(atomic64_t *ptr); | ||
83 | |||
84 | /** | ||
85 | * atomic64_add - add integer to atomic64 variable | ||
86 | * @delta: integer value to add | ||
87 | * @ptr: pointer to type atomic64_t | ||
88 | * | ||
89 | * Atomically adds @delta to @ptr. | ||
90 | */ | ||
91 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | ||
92 | |||
93 | /** | ||
94 | * atomic64_sub - subtract the atomic64 variable | ||
95 | * @delta: integer value to subtract | ||
96 | * @ptr: pointer to type atomic64_t | ||
97 | * | ||
98 | * Atomically subtracts @delta from @ptr. | ||
99 | */ | ||
100 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | ||
101 | |||
102 | /** | ||
103 | * atomic64_sub_and_test - subtract value from variable and test result | ||
104 | * @delta: integer value to subtract | ||
105 | * @ptr: pointer to type atomic64_t | ||
106 | * | ||
107 | * Atomically subtracts @delta from @ptr and returns | ||
108 | * true if the result is zero, or false for all | ||
109 | * other cases. | ||
110 | */ | ||
111 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | ||
112 | |||
113 | /** | ||
114 | * atomic64_inc - increment atomic64 variable | ||
115 | * @ptr: pointer to type atomic64_t | ||
116 | * | ||
117 | * Atomically increments @ptr by 1. | ||
118 | */ | ||
119 | extern void atomic64_inc(atomic64_t *ptr); | ||
120 | |||
121 | /** | ||
122 | * atomic64_dec - decrement atomic64 variable | ||
123 | * @ptr: pointer to type atomic64_t | ||
124 | * | ||
125 | * Atomically decrements @ptr by 1. | ||
126 | */ | ||
127 | extern void atomic64_dec(atomic64_t *ptr); | ||
128 | |||
129 | /** | ||
130 | * atomic64_dec_and_test - decrement and test | ||
131 | * @ptr: pointer to type atomic64_t | ||
132 | * | ||
133 | * Atomically decrements @ptr by 1 and | ||
134 | * returns true if the result is 0, or false for all other | ||
135 | * cases. | ||
136 | */ | ||
137 | extern int atomic64_dec_and_test(atomic64_t *ptr); | ||
138 | |||
139 | /** | ||
140 | * atomic64_inc_and_test - increment and test | ||
141 | * @ptr: pointer to type atomic64_t | ||
142 | * | ||
143 | * Atomically increments @ptr by 1 | ||
144 | * and returns true if the result is zero, or false for all | ||
145 | * other cases. | ||
146 | */ | ||
147 | extern int atomic64_inc_and_test(atomic64_t *ptr); | ||
148 | |||
149 | /** | ||
150 | * atomic64_add_negative - add and test if negative | ||
151 | * @delta: integer value to add | ||
152 | * @ptr: pointer to type atomic64_t | ||
153 | * | ||
154 | * Atomically adds @delta to @ptr and returns true | ||
155 | * if the result is negative, or false when | ||
156 | * result is greater than or equal to zero. | ||
157 | */ | ||
158 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | ||
159 | |||
160 | #endif /* _ASM_X86_ATOMIC64_32_H */ | ||
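On 32-bit x86 the atomic64_t type above gives tear-free 64-bit counters, since every access goes through a locked cmpxchg8b. A minimal usage sketch with an assumed statistics counter, not part of this header:

static atomic64_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
	atomic64_add(len, &rx_bytes);
}

static u64 rx_bytes_snapshot(void)
{
	/* A plain 64-bit load could tear on 32-bit; this one cannot. */
	return atomic64_read(&rx_bytes);
}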
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
new file mode 100644
index 000000000000..51c5b4056929
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -0,0 +1,224 @@
1 | #ifndef _ASM_X86_ATOMIC64_64_H | ||
2 | #define _ASM_X86_ATOMIC64_64_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/alternative.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | /* The 64-bit atomic type */ | ||
9 | |||
10 | #define ATOMIC64_INIT(i) { (i) } | ||
11 | |||
12 | /** | ||
13 | * atomic64_read - read atomic64 variable | ||
14 | * @v: pointer of type atomic64_t | ||
15 | * | ||
16 | * Atomically reads the value of @v. | ||
17 | * Doesn't imply a read memory barrier. | ||
18 | */ | ||
19 | static inline long atomic64_read(const atomic64_t *v) | ||
20 | { | ||
21 | return v->counter; | ||
22 | } | ||
23 | |||
24 | /** | ||
25 | * atomic64_set - set atomic64 variable | ||
26 | * @v: pointer to type atomic64_t | ||
27 | * @i: required value | ||
28 | * | ||
29 | * Atomically sets the value of @v to @i. | ||
30 | */ | ||
31 | static inline void atomic64_set(atomic64_t *v, long i) | ||
32 | { | ||
33 | v->counter = i; | ||
34 | } | ||
35 | |||
36 | /** | ||
37 | * atomic64_add - add integer to atomic64 variable | ||
38 | * @i: integer value to add | ||
39 | * @v: pointer to type atomic64_t | ||
40 | * | ||
41 | * Atomically adds @i to @v. | ||
42 | */ | ||
43 | static inline void atomic64_add(long i, atomic64_t *v) | ||
44 | { | ||
45 | asm volatile(LOCK_PREFIX "addq %1,%0" | ||
46 | : "=m" (v->counter) | ||
47 | : "er" (i), "m" (v->counter)); | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * atomic64_sub - subtract the atomic64 variable | ||
52 | * @i: integer value to subtract | ||
53 | * @v: pointer to type atomic64_t | ||
54 | * | ||
55 | * Atomically subtracts @i from @v. | ||
56 | */ | ||
57 | static inline void atomic64_sub(long i, atomic64_t *v) | ||
58 | { | ||
59 | asm volatile(LOCK_PREFIX "subq %1,%0" | ||
60 | : "=m" (v->counter) | ||
61 | : "er" (i), "m" (v->counter)); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * atomic64_sub_and_test - subtract value from variable and test result | ||
66 | * @i: integer value to subtract | ||
67 | * @v: pointer to type atomic64_t | ||
68 | * | ||
69 | * Atomically subtracts @i from @v and returns | ||
70 | * true if the result is zero, or false for all | ||
71 | * other cases. | ||
72 | */ | ||
73 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) | ||
74 | { | ||
75 | unsigned char c; | ||
76 | |||
77 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" | ||
78 | : "=m" (v->counter), "=qm" (c) | ||
79 | : "er" (i), "m" (v->counter) : "memory"); | ||
80 | return c; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * atomic64_inc - increment atomic64 variable | ||
85 | * @v: pointer to type atomic64_t | ||
86 | * | ||
87 | * Atomically increments @v by 1. | ||
88 | */ | ||
89 | static inline void atomic64_inc(atomic64_t *v) | ||
90 | { | ||
91 | asm volatile(LOCK_PREFIX "incq %0" | ||
92 | : "=m" (v->counter) | ||
93 | : "m" (v->counter)); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * atomic64_dec - decrement atomic64 variable | ||
98 | * @v: pointer to type atomic64_t | ||
99 | * | ||
100 | * Atomically decrements @v by 1. | ||
101 | */ | ||
102 | static inline void atomic64_dec(atomic64_t *v) | ||
103 | { | ||
104 | asm volatile(LOCK_PREFIX "decq %0" | ||
105 | : "=m" (v->counter) | ||
106 | : "m" (v->counter)); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * atomic64_dec_and_test - decrement and test | ||
111 | * @v: pointer to type atomic64_t | ||
112 | * | ||
113 | * Atomically decrements @v by 1 and | ||
114 | * returns true if the result is 0, or false for all other | ||
115 | * cases. | ||
116 | */ | ||
117 | static inline int atomic64_dec_and_test(atomic64_t *v) | ||
118 | { | ||
119 | unsigned char c; | ||
120 | |||
121 | asm volatile(LOCK_PREFIX "decq %0; sete %1" | ||
122 | : "=m" (v->counter), "=qm" (c) | ||
123 | : "m" (v->counter) : "memory"); | ||
124 | return c != 0; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * atomic64_inc_and_test - increment and test | ||
129 | * @v: pointer to type atomic64_t | ||
130 | * | ||
131 | * Atomically increments @v by 1 | ||
132 | * and returns true if the result is zero, or false for all | ||
133 | * other cases. | ||
134 | */ | ||
135 | static inline int atomic64_inc_and_test(atomic64_t *v) | ||
136 | { | ||
137 | unsigned char c; | ||
138 | |||
139 | asm volatile(LOCK_PREFIX "incq %0; sete %1" | ||
140 | : "=m" (v->counter), "=qm" (c) | ||
141 | : "m" (v->counter) : "memory"); | ||
142 | return c != 0; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * atomic64_add_negative - add and test if negative | ||
147 | * @i: integer value to add | ||
148 | * @v: pointer to type atomic64_t | ||
149 | * | ||
150 | * Atomically adds @i to @v and returns true | ||
151 | * if the result is negative, or false when | ||
152 | * result is greater than or equal to zero. | ||
153 | */ | ||
154 | static inline int atomic64_add_negative(long i, atomic64_t *v) | ||
155 | { | ||
156 | unsigned char c; | ||
157 | |||
158 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" | ||
159 | : "=m" (v->counter), "=qm" (c) | ||
160 | : "er" (i), "m" (v->counter) : "memory"); | ||
161 | return c; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * atomic64_add_return - add and return | ||
166 | * @i: integer value to add | ||
167 | * @v: pointer to type atomic64_t | ||
168 | * | ||
169 | * Atomically adds @i to @v and returns @i + @v | ||
170 | */ | ||
171 | static inline long atomic64_add_return(long i, atomic64_t *v) | ||
172 | { | ||
173 | long __i = i; | ||
174 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" | ||
175 | : "+r" (i), "+m" (v->counter) | ||
176 | : : "memory"); | ||
177 | return i + __i; | ||
178 | } | ||
179 | |||
180 | static inline long atomic64_sub_return(long i, atomic64_t *v) | ||
181 | { | ||
182 | return atomic64_add_return(-i, v); | ||
183 | } | ||
184 | |||
185 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | ||
186 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | ||
187 | |||
188 | static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | ||
189 | { | ||
190 | return cmpxchg(&v->counter, old, new); | ||
191 | } | ||
192 | |||
193 | static inline long atomic64_xchg(atomic64_t *v, long new) | ||
194 | { | ||
195 | return xchg(&v->counter, new); | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * atomic64_add_unless - add unless the number is a given value | ||
200 | * @v: pointer of type atomic64_t | ||
201 | * @a: the amount to add to v... | ||
202 | * @u: ...unless v is equal to u. | ||
203 | * | ||
204 | * Atomically adds @a to @v, so long as it was not @u. | ||
205 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
206 | */ | ||
207 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
208 | { | ||
209 | long c, old; | ||
210 | c = atomic64_read(v); | ||
211 | for (;;) { | ||
212 | if (unlikely(c == (u))) | ||
213 | break; | ||
214 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
215 | if (likely(old == c)) | ||
216 | break; | ||
217 | c = old; | ||
218 | } | ||
219 | return c != (u); | ||
220 | } | ||
221 | |||
222 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
223 | |||
224 | #endif /* _ASM_X86_ATOMIC64_64_H */ | ||
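atomic64_cmpxchg() is the building block behind the add-unless loop above, and the same retry pattern covers other lock-free read-modify-write updates. A sketch of a "track the maximum" helper built on it — the function is hypothetical, not part of this header:

static inline void atomic64_track_max(atomic64_t *max, long val)
{
	long old = atomic64_read(max);

	while (old < val) {
		long prev = atomic64_cmpxchg(max, old, val);

		if (prev == old)
			break;		/* installed the new maximum */
		old = prev;		/* lost the race; retry with it */
	}
}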
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
deleted file mode 100644
index dc5a667ff791..000000000000
--- a/arch/x86/include/asm/atomic_32.h
+++ /dev/null
@@ -1,415 +0,0 @@
1 | #ifndef _ASM_X86_ATOMIC_32_H | ||
2 | #define _ASM_X86_ATOMIC_32_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | |||
9 | /* | ||
10 | * Atomic operations that C can't guarantee us. Useful for | ||
11 | * resource counting etc.. | ||
12 | */ | ||
13 | |||
14 | #define ATOMIC_INIT(i) { (i) } | ||
15 | |||
16 | /** | ||
17 | * atomic_read - read atomic variable | ||
18 | * @v: pointer of type atomic_t | ||
19 | * | ||
20 | * Atomically reads the value of @v. | ||
21 | */ | ||
22 | static inline int atomic_read(const atomic_t *v) | ||
23 | { | ||
24 | return v->counter; | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * atomic_set - set atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * @i: required value | ||
31 | * | ||
32 | * Atomically sets the value of @v to @i. | ||
33 | */ | ||
34 | static inline void atomic_set(atomic_t *v, int i) | ||
35 | { | ||
36 | v->counter = i; | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * atomic_add - add integer to atomic variable | ||
41 | * @i: integer value to add | ||
42 | * @v: pointer of type atomic_t | ||
43 | * | ||
44 | * Atomically adds @i to @v. | ||
45 | */ | ||
46 | static inline void atomic_add(int i, atomic_t *v) | ||
47 | { | ||
48 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
49 | : "+m" (v->counter) | ||
50 | : "ir" (i)); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * atomic_sub - subtract integer from atomic variable | ||
55 | * @i: integer value to subtract | ||
56 | * @v: pointer of type atomic_t | ||
57 | * | ||
58 | * Atomically subtracts @i from @v. | ||
59 | */ | ||
60 | static inline void atomic_sub(int i, atomic_t *v) | ||
61 | { | ||
62 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
63 | : "+m" (v->counter) | ||
64 | : "ir" (i)); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * atomic_sub_and_test - subtract value from variable and test result | ||
69 | * @i: integer value to subtract | ||
70 | * @v: pointer of type atomic_t | ||
71 | * | ||
72 | * Atomically subtracts @i from @v and returns | ||
73 | * true if the result is zero, or false for all | ||
74 | * other cases. | ||
75 | */ | ||
76 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
77 | { | ||
78 | unsigned char c; | ||
79 | |||
80 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
81 | : "+m" (v->counter), "=qm" (c) | ||
82 | : "ir" (i) : "memory"); | ||
83 | return c; | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * atomic_inc - increment atomic variable | ||
88 | * @v: pointer of type atomic_t | ||
89 | * | ||
90 | * Atomically increments @v by 1. | ||
91 | */ | ||
92 | static inline void atomic_inc(atomic_t *v) | ||
93 | { | ||
94 | asm volatile(LOCK_PREFIX "incl %0" | ||
95 | : "+m" (v->counter)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_dec - decrement atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1. | ||
103 | */ | ||
104 | static inline void atomic_dec(atomic_t *v) | ||
105 | { | ||
106 | asm volatile(LOCK_PREFIX "decl %0" | ||
107 | : "+m" (v->counter)); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * atomic_dec_and_test - decrement and test | ||
112 | * @v: pointer of type atomic_t | ||
113 | * | ||
114 | * Atomically decrements @v by 1 and | ||
115 | * returns true if the result is 0, or false for all other | ||
116 | * cases. | ||
117 | */ | ||
118 | static inline int atomic_dec_and_test(atomic_t *v) | ||
119 | { | ||
120 | unsigned char c; | ||
121 | |||
122 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
123 | : "+m" (v->counter), "=qm" (c) | ||
124 | : : "memory"); | ||
125 | return c != 0; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * atomic_inc_and_test - increment and test | ||
130 | * @v: pointer of type atomic_t | ||
131 | * | ||
132 | * Atomically increments @v by 1 | ||
133 | * and returns true if the result is zero, or false for all | ||
134 | * other cases. | ||
135 | */ | ||
136 | static inline int atomic_inc_and_test(atomic_t *v) | ||
137 | { | ||
138 | unsigned char c; | ||
139 | |||
140 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
141 | : "+m" (v->counter), "=qm" (c) | ||
142 | : : "memory"); | ||
143 | return c != 0; | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * atomic_add_negative - add and test if negative | ||
148 | * @v: pointer of type atomic_t | ||
149 | * @i: integer value to add | ||
150 | * | ||
151 | * Atomically adds @i to @v and returns true | ||
152 | * if the result is negative, or false when | ||
153 | * result is greater than or equal to zero. | ||
154 | */ | ||
155 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
156 | { | ||
157 | unsigned char c; | ||
158 | |||
159 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
160 | : "+m" (v->counter), "=qm" (c) | ||
161 | : "ir" (i) : "memory"); | ||
162 | return c; | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * atomic_add_return - add integer and return | ||
167 | * @v: pointer of type atomic_t | ||
168 | * @i: integer value to add | ||
169 | * | ||
170 | * Atomically adds @i to @v and returns @i + @v | ||
171 | */ | ||
172 | static inline int atomic_add_return(int i, atomic_t *v) | ||
173 | { | ||
174 | int __i; | ||
175 | #ifdef CONFIG_M386 | ||
176 | unsigned long flags; | ||
177 | if (unlikely(boot_cpu_data.x86 <= 3)) | ||
178 | goto no_xadd; | ||
179 | #endif | ||
180 | /* Modern 486+ processor */ | ||
181 | __i = i; | ||
182 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
183 | : "+r" (i), "+m" (v->counter) | ||
184 | : : "memory"); | ||
185 | return i + __i; | ||
186 | |||
187 | #ifdef CONFIG_M386 | ||
188 | no_xadd: /* Legacy 386 processor */ | ||
189 | local_irq_save(flags); | ||
190 | __i = atomic_read(v); | ||
191 | atomic_set(v, i + __i); | ||
192 | local_irq_restore(flags); | ||
193 | return i + __i; | ||
194 | #endif | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * atomic_sub_return - subtract integer and return | ||
199 | * @v: pointer of type atomic_t | ||
200 | * @i: integer value to subtract | ||
201 | * | ||
202 | * Atomically subtracts @i from @v and returns @v - @i | ||
203 | */ | ||
204 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
205 | { | ||
206 | return atomic_add_return(-i, v); | ||
207 | } | ||
208 | |||
209 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
210 | { | ||
211 | return cmpxchg(&v->counter, old, new); | ||
212 | } | ||
213 | |||
214 | static inline int atomic_xchg(atomic_t *v, int new) | ||
215 | { | ||
216 | return xchg(&v->counter, new); | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * atomic_add_unless - add unless the number is already a given value | ||
221 | * @v: pointer of type atomic_t | ||
222 | * @a: the amount to add to v... | ||
223 | * @u: ...unless v is equal to u. | ||
224 | * | ||
225 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
226 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
227 | */ | ||
228 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
229 | { | ||
230 | int c, old; | ||
231 | c = atomic_read(v); | ||
232 | for (;;) { | ||
233 | if (unlikely(c == (u))) | ||
234 | break; | ||
235 | old = atomic_cmpxchg((v), c, c + (a)); | ||
236 | if (likely(old == c)) | ||
237 | break; | ||
238 | c = old; | ||
239 | } | ||
240 | return c != (u); | ||
241 | } | ||
242 | |||
243 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
244 | |||
245 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
246 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
247 | |||
248 | /* These are x86-specific, used by some header files */ | ||
249 | #define atomic_clear_mask(mask, addr) \ | ||
250 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
251 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
252 | |||
253 | #define atomic_set_mask(mask, addr) \ | ||
254 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
255 | : : "r" (mask), "m" (*(addr)) : "memory") | ||
256 | |||
257 | /* Atomic operations are already serializing on x86 */ | ||
258 | #define smp_mb__before_atomic_dec() barrier() | ||
259 | #define smp_mb__after_atomic_dec() barrier() | ||
260 | #define smp_mb__before_atomic_inc() barrier() | ||
261 | #define smp_mb__after_atomic_inc() barrier() | ||
262 | |||
263 | /* An 64bit atomic type */ | ||
264 | |||
265 | typedef struct { | ||
266 | u64 __aligned(8) counter; | ||
267 | } atomic64_t; | ||
268 | |||
269 | #define ATOMIC64_INIT(val) { (val) } | ||
270 | |||
271 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | ||
272 | |||
273 | /** | ||
274 | * atomic64_xchg - xchg atomic64 variable | ||
275 | * @ptr: pointer to type atomic64_t | ||
276 | * @new_val: value to assign | ||
277 | * | ||
278 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
279 | * the old value. | ||
280 | */ | ||
281 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | ||
282 | |||
283 | /** | ||
284 | * atomic64_set - set atomic64 variable | ||
285 | * @ptr: pointer to type atomic64_t | ||
286 | * @new_val: value to assign | ||
287 | * | ||
288 | * Atomically sets the value of @ptr to @new_val. | ||
289 | */ | ||
290 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | ||
291 | |||
292 | /** | ||
293 | * atomic64_read - read atomic64 variable | ||
294 | * @ptr: pointer to type atomic64_t | ||
295 | * | ||
296 | * Atomically reads the value of @ptr and returns it. | ||
297 | */ | ||
298 | static inline u64 atomic64_read(atomic64_t *ptr) | ||
299 | { | ||
300 | u64 res; | ||
301 | |||
302 | /* | ||
303 | * Note, we inline this atomic64_t primitive because | ||
304 | * it only clobbers EAX/EDX and leaves the others | ||
305 | * untouched. We also (somewhat subtly) rely on the | ||
306 | * fact that cmpxchg8b returns the current 64-bit value | ||
307 | * of the memory location we are touching: | ||
308 | */ | ||
309 | asm volatile( | ||
310 | "mov %%ebx, %%eax\n\t" | ||
311 | "mov %%ecx, %%edx\n\t" | ||
312 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
313 | : "=&A" (res) | ||
314 | : "m" (*ptr) | ||
315 | ); | ||
316 | |||
317 | return res; | ||
318 | } | ||
319 | |||
320 | extern u64 atomic64_read(atomic64_t *ptr); | ||
321 | |||
322 | /** | ||
323 | * atomic64_add_return - add and return | ||
324 | * @delta: integer value to add | ||
325 | * @ptr: pointer to type atomic64_t | ||
326 | * | ||
327 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
328 | */ | ||
329 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | ||
330 | |||
331 | /* | ||
332 | * Other variants with different arithmetic operators: | ||
333 | */ | ||
334 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | ||
335 | extern u64 atomic64_inc_return(atomic64_t *ptr); | ||
336 | extern u64 atomic64_dec_return(atomic64_t *ptr); | ||
337 | |||
338 | /** | ||
339 | * atomic64_add - add integer to atomic64 variable | ||
340 | * @delta: integer value to add | ||
341 | * @ptr: pointer to type atomic64_t | ||
342 | * | ||
343 | * Atomically adds @delta to @ptr. | ||
344 | */ | ||
345 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | ||
346 | |||
347 | /** | ||
348 | * atomic64_sub - subtract the atomic64 variable | ||
349 | * @delta: integer value to subtract | ||
350 | * @ptr: pointer to type atomic64_t | ||
351 | * | ||
352 | * Atomically subtracts @delta from @ptr. | ||
353 | */ | ||
354 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | ||
355 | |||
356 | /** | ||
357 | * atomic64_sub_and_test - subtract value from variable and test result | ||
358 | * @delta: integer value to subtract | ||
359 | * @ptr: pointer to type atomic64_t | ||
360 | * | ||
361 | * Atomically subtracts @delta from @ptr and returns | ||
362 | * true if the result is zero, or false for all | ||
363 | * other cases. | ||
364 | */ | ||
365 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | ||
366 | |||
367 | /** | ||
368 | * atomic64_inc - increment atomic64 variable | ||
369 | * @ptr: pointer to type atomic64_t | ||
370 | * | ||
371 | * Atomically increments @ptr by 1. | ||
372 | */ | ||
373 | extern void atomic64_inc(atomic64_t *ptr); | ||
374 | |||
375 | /** | ||
376 | * atomic64_dec - decrement atomic64 variable | ||
377 | * @ptr: pointer to type atomic64_t | ||
378 | * | ||
379 | * Atomically decrements @ptr by 1. | ||
380 | */ | ||
381 | extern void atomic64_dec(atomic64_t *ptr); | ||
382 | |||
383 | /** | ||
384 | * atomic64_dec_and_test - decrement and test | ||
385 | * @ptr: pointer to type atomic64_t | ||
386 | * | ||
387 | * Atomically decrements @ptr by 1 and | ||
388 | * returns true if the result is 0, or false for all other | ||
389 | * cases. | ||
390 | */ | ||
391 | extern int atomic64_dec_and_test(atomic64_t *ptr); | ||
392 | |||
393 | /** | ||
394 | * atomic64_inc_and_test - increment and test | ||
395 | * @ptr: pointer to type atomic64_t | ||
396 | * | ||
397 | * Atomically increments @ptr by 1 | ||
398 | * and returns true if the result is zero, or false for all | ||
399 | * other cases. | ||
400 | */ | ||
401 | extern int atomic64_inc_and_test(atomic64_t *ptr); | ||
402 | |||
403 | /** | ||
404 | * atomic64_add_negative - add and test if negative | ||
405 | * @delta: integer value to add | ||
406 | * @ptr: pointer to type atomic64_t | ||
407 | * | ||
408 | * Atomically adds @delta to @ptr and returns true | ||
409 | * if the result is negative, or false when | ||
410 | * result is greater than or equal to zero. | ||
411 | */ | ||
412 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | ||
413 | |||
414 | #include <asm-generic/atomic-long.h> | ||
415 | #endif /* _ASM_X86_ATOMIC_32_H */ | ||
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
deleted file mode 100644
index d605dc268e79..000000000000
--- a/arch/x86/include/asm/atomic_64.h
+++ /dev/null
@@ -1,485 +0,0 @@
1 | #ifndef _ASM_X86_ATOMIC_64_H | ||
2 | #define _ASM_X86_ATOMIC_64_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/alternative.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | /* | ||
9 | * Atomic operations that C can't guarantee us. Useful for | ||
10 | * resource counting etc.. | ||
11 | */ | ||
12 | |||
13 | #define ATOMIC_INIT(i) { (i) } | ||
14 | |||
15 | /** | ||
16 | * atomic_read - read atomic variable | ||
17 | * @v: pointer of type atomic_t | ||
18 | * | ||
19 | * Atomically reads the value of @v. | ||
20 | */ | ||
21 | static inline int atomic_read(const atomic_t *v) | ||
22 | { | ||
23 | return v->counter; | ||
24 | } | ||
25 | |||
26 | /** | ||
27 | * atomic_set - set atomic variable | ||
28 | * @v: pointer of type atomic_t | ||
29 | * @i: required value | ||
30 | * | ||
31 | * Atomically sets the value of @v to @i. | ||
32 | */ | ||
33 | static inline void atomic_set(atomic_t *v, int i) | ||
34 | { | ||
35 | v->counter = i; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_add - add integer to atomic variable | ||
40 | * @i: integer value to add | ||
41 | * @v: pointer of type atomic_t | ||
42 | * | ||
43 | * Atomically adds @i to @v. | ||
44 | */ | ||
45 | static inline void atomic_add(int i, atomic_t *v) | ||
46 | { | ||
47 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
48 | : "=m" (v->counter) | ||
49 | : "ir" (i), "m" (v->counter)); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * atomic_sub - subtract the atomic variable | ||
54 | * @i: integer value to subtract | ||
55 | * @v: pointer of type atomic_t | ||
56 | * | ||
57 | * Atomically subtracts @i from @v. | ||
58 | */ | ||
59 | static inline void atomic_sub(int i, atomic_t *v) | ||
60 | { | ||
61 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
62 | : "=m" (v->counter) | ||
63 | : "ir" (i), "m" (v->counter)); | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * atomic_sub_and_test - subtract value from variable and test result | ||
68 | * @i: integer value to subtract | ||
69 | * @v: pointer of type atomic_t | ||
70 | * | ||
71 | * Atomically subtracts @i from @v and returns | ||
72 | * true if the result is zero, or false for all | ||
73 | * other cases. | ||
74 | */ | ||
75 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
76 | { | ||
77 | unsigned char c; | ||
78 | |||
79 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
80 | : "=m" (v->counter), "=qm" (c) | ||
81 | : "ir" (i), "m" (v->counter) : "memory"); | ||
82 | return c; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * atomic_inc - increment atomic variable | ||
87 | * @v: pointer of type atomic_t | ||
88 | * | ||
89 | * Atomically increments @v by 1. | ||
90 | */ | ||
91 | static inline void atomic_inc(atomic_t *v) | ||
92 | { | ||
93 | asm volatile(LOCK_PREFIX "incl %0" | ||
94 | : "=m" (v->counter) | ||
95 | : "m" (v->counter)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_dec - decrement atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1. | ||
103 | */ | ||
104 | static inline void atomic_dec(atomic_t *v) | ||
105 | { | ||
106 | asm volatile(LOCK_PREFIX "decl %0" | ||
107 | : "=m" (v->counter) | ||
108 | : "m" (v->counter)); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * atomic_dec_and_test - decrement and test | ||
113 | * @v: pointer of type atomic_t | ||
114 | * | ||
115 | * Atomically decrements @v by 1 and | ||
116 | * returns true if the result is 0, or false for all other | ||
117 | * cases. | ||
118 | */ | ||
119 | static inline int atomic_dec_and_test(atomic_t *v) | ||
120 | { | ||
121 | unsigned char c; | ||
122 | |||
123 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
124 | : "=m" (v->counter), "=qm" (c) | ||
125 | : "m" (v->counter) : "memory"); | ||
126 | return c != 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * atomic_inc_and_test - increment and test | ||
131 | * @v: pointer of type atomic_t | ||
132 | * | ||
133 | * Atomically increments @v by 1 | ||
134 | * and returns true if the result is zero, or false for all | ||
135 | * other cases. | ||
136 | */ | ||
137 | static inline int atomic_inc_and_test(atomic_t *v) | ||
138 | { | ||
139 | unsigned char c; | ||
140 | |||
141 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
142 | : "=m" (v->counter), "=qm" (c) | ||
143 | : "m" (v->counter) : "memory"); | ||
144 | return c != 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * atomic_add_negative - add and test if negative | ||
149 | * @i: integer value to add | ||
150 | * @v: pointer of type atomic_t | ||
151 | * | ||
152 | * Atomically adds @i to @v and returns true | ||
153 | * if the result is negative, or false when | ||
154 | * result is greater than or equal to zero. | ||
155 | */ | ||
156 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
157 | { | ||
158 | unsigned char c; | ||
159 | |||
160 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
161 | : "=m" (v->counter), "=qm" (c) | ||
162 | : "ir" (i), "m" (v->counter) : "memory"); | ||
163 | return c; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * atomic_add_return - add and return | ||
168 | * @i: integer value to add | ||
169 | * @v: pointer of type atomic_t | ||
170 | * | ||
171 | * Atomically adds @i to @v and returns @i + @v | ||
172 | */ | ||
173 | static inline int atomic_add_return(int i, atomic_t *v) | ||
174 | { | ||
175 | int __i = i; | ||
176 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
177 | : "+r" (i), "+m" (v->counter) | ||
178 | : : "memory"); | ||
179 | return i + __i; | ||
180 | } | ||
181 | |||
182 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
183 | { | ||
184 | return atomic_add_return(-i, v); | ||
185 | } | ||
186 | |||
187 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
188 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
189 | |||
190 | /* The 64-bit atomic type */ | ||
191 | |||
192 | #define ATOMIC64_INIT(i) { (i) } | ||
193 | |||
194 | /** | ||
195 | * atomic64_read - read atomic64 variable | ||
196 | * @v: pointer of type atomic64_t | ||
197 | * | ||
198 | * Atomically reads the value of @v. | ||
199 | * Doesn't imply a read memory barrier. | ||
200 | */ | ||
201 | static inline long atomic64_read(const atomic64_t *v) | ||
202 | { | ||
203 | return v->counter; | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * atomic64_set - set atomic64 variable | ||
208 | * @v: pointer to type atomic64_t | ||
209 | * @i: required value | ||
210 | * | ||
211 | * Atomically sets the value of @v to @i. | ||
212 | */ | ||
213 | static inline void atomic64_set(atomic64_t *v, long i) | ||
214 | { | ||
215 | v->counter = i; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * atomic64_add - add integer to atomic64 variable | ||
220 | * @i: integer value to add | ||
221 | * @v: pointer to type atomic64_t | ||
222 | * | ||
223 | * Atomically adds @i to @v. | ||
224 | */ | ||
225 | static inline void atomic64_add(long i, atomic64_t *v) | ||
226 | { | ||
227 | asm volatile(LOCK_PREFIX "addq %1,%0" | ||
228 | : "=m" (v->counter) | ||
229 | : "er" (i), "m" (v->counter)); | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * atomic64_sub - subtract the atomic64 variable | ||
234 | * @i: integer value to subtract | ||
235 | * @v: pointer to type atomic64_t | ||
236 | * | ||
237 | * Atomically subtracts @i from @v. | ||
238 | */ | ||
239 | static inline void atomic64_sub(long i, atomic64_t *v) | ||
240 | { | ||
241 | asm volatile(LOCK_PREFIX "subq %1,%0" | ||
242 | : "=m" (v->counter) | ||
243 | : "er" (i), "m" (v->counter)); | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * atomic64_sub_and_test - subtract value from variable and test result | ||
248 | * @i: integer value to subtract | ||
249 | * @v: pointer to type atomic64_t | ||
250 | * | ||
251 | * Atomically subtracts @i from @v and returns | ||
252 | * true if the result is zero, or false for all | ||
253 | * other cases. | ||
254 | */ | ||
255 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) | ||
256 | { | ||
257 | unsigned char c; | ||
258 | |||
259 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" | ||
260 | : "=m" (v->counter), "=qm" (c) | ||
261 | : "er" (i), "m" (v->counter) : "memory"); | ||
262 | return c; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * atomic64_inc - increment atomic64 variable | ||
267 | * @v: pointer to type atomic64_t | ||
268 | * | ||
269 | * Atomically increments @v by 1. | ||
270 | */ | ||
271 | static inline void atomic64_inc(atomic64_t *v) | ||
272 | { | ||
273 | asm volatile(LOCK_PREFIX "incq %0" | ||
274 | : "=m" (v->counter) | ||
275 | : "m" (v->counter)); | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * atomic64_dec - decrement atomic64 variable | ||
280 | * @v: pointer to type atomic64_t | ||
281 | * | ||
282 | * Atomically decrements @v by 1. | ||
283 | */ | ||
284 | static inline void atomic64_dec(atomic64_t *v) | ||
285 | { | ||
286 | asm volatile(LOCK_PREFIX "decq %0" | ||
287 | : "=m" (v->counter) | ||
288 | : "m" (v->counter)); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * atomic64_dec_and_test - decrement and test | ||
293 | * @v: pointer to type atomic64_t | ||
294 | * | ||
295 | * Atomically decrements @v by 1 and | ||
296 | * returns true if the result is 0, or false for all other | ||
297 | * cases. | ||
298 | */ | ||
299 | static inline int atomic64_dec_and_test(atomic64_t *v) | ||
300 | { | ||
301 | unsigned char c; | ||
302 | |||
303 | asm volatile(LOCK_PREFIX "decq %0; sete %1" | ||
304 | : "=m" (v->counter), "=qm" (c) | ||
305 | : "m" (v->counter) : "memory"); | ||
306 | return c != 0; | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * atomic64_inc_and_test - increment and test | ||
311 | * @v: pointer to type atomic64_t | ||
312 | * | ||
313 | * Atomically increments @v by 1 | ||
314 | * and returns true if the result is zero, or false for all | ||
315 | * other cases. | ||
316 | */ | ||
317 | static inline int atomic64_inc_and_test(atomic64_t *v) | ||
318 | { | ||
319 | unsigned char c; | ||
320 | |||
321 | asm volatile(LOCK_PREFIX "incq %0; sete %1" | ||
322 | : "=m" (v->counter), "=qm" (c) | ||
323 | : "m" (v->counter) : "memory"); | ||
324 | return c != 0; | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * atomic64_add_negative - add and test if negative | ||
329 | * @i: integer value to add | ||
330 | * @v: pointer to type atomic64_t | ||
331 | * | ||
332 | * Atomically adds @i to @v and returns true | ||
333 | * if the result is negative, or false when | ||
334 | * result is greater than or equal to zero. | ||
335 | */ | ||
336 | static inline int atomic64_add_negative(long i, atomic64_t *v) | ||
337 | { | ||
338 | unsigned char c; | ||
339 | |||
340 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" | ||
341 | : "=m" (v->counter), "=qm" (c) | ||
342 | : "er" (i), "m" (v->counter) : "memory"); | ||
343 | return c; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * atomic64_add_return - add and return | ||
348 | * @i: integer value to add | ||
349 | * @v: pointer to type atomic64_t | ||
350 | * | ||
351 | * Atomically adds @i to @v and returns @i + @v | ||
352 | */ | ||
353 | static inline long atomic64_add_return(long i, atomic64_t *v) | ||
354 | { | ||
355 | long __i = i; | ||
356 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" | ||
357 | : "+r" (i), "+m" (v->counter) | ||
358 | : : "memory"); | ||
359 | return i + __i; | ||
360 | } | ||
361 | |||
362 | static inline long atomic64_sub_return(long i, atomic64_t *v) | ||
363 | { | ||
364 | return atomic64_add_return(-i, v); | ||
365 | } | ||
366 | |||
367 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | ||
368 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | ||
369 | |||
370 | static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | ||
371 | { | ||
372 | return cmpxchg(&v->counter, old, new); | ||
373 | } | ||
374 | |||
375 | static inline long atomic64_xchg(atomic64_t *v, long new) | ||
376 | { | ||
377 | return xchg(&v->counter, new); | ||
378 | } | ||
379 | |||
380 | static inline long atomic_cmpxchg(atomic_t *v, int old, int new) | ||
381 | { | ||
382 | return cmpxchg(&v->counter, old, new); | ||
383 | } | ||
384 | |||
385 | static inline long atomic_xchg(atomic_t *v, int new) | ||
386 | { | ||
387 | return xchg(&v->counter, new); | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * atomic_add_unless - add unless the number is a given value | ||
392 | * @v: pointer of type atomic_t | ||
393 | * @a: the amount to add to v... | ||
394 | * @u: ...unless v is equal to u. | ||
395 | * | ||
396 | * Atomically adds @a to @v, so long as it was not @u. | ||
397 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
398 | */ | ||
399 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
400 | { | ||
401 | int c, old; | ||
402 | c = atomic_read(v); | ||
403 | for (;;) { | ||
404 | if (unlikely(c == (u))) | ||
405 | break; | ||
406 | old = atomic_cmpxchg((v), c, c + (a)); | ||
407 | if (likely(old == c)) | ||
408 | break; | ||
409 | c = old; | ||
410 | } | ||
411 | return c != (u); | ||
412 | } | ||
413 | |||
414 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
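The add-unless helpers here and below are the usual compare-and-swap retry loop: read the counter, bail out if it already holds the excluded value, otherwise attempt a cmpxchg and retry with the freshly observed value on failure. A stand-alone sketch of that pattern (names are illustrative, not kernel code):

/* Sketch of the retry loop used by atomic_add_unless(); the GCC
 * builtin returns the value seen in memory before the swap. */
static int demo_add_unless(int *v, int a, int u)
{
        int c = *v;

        for (;;) {
                int old;

                if (c == u)
                        break;                          /* excluded value, do nothing */
                old = __sync_val_compare_and_swap(v, c, c + a);
                if (old == c)
                        break;                          /* swap succeeded */
                c = old;                                /* lost a race, retry */
        }
        return c != u;                                  /* non-zero if we added */
}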
415 | |||
416 | /** | ||
417 | * atomic64_add_unless - add unless the number is a given value | ||
418 | * @v: pointer of type atomic64_t | ||
419 | * @a: the amount to add to v... | ||
420 | * @u: ...unless v is equal to u. | ||
421 | * | ||
422 | * Atomically adds @a to @v, so long as @v was not @u. | ||
423 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
424 | */ | ||
425 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
426 | { | ||
427 | long c, old; | ||
428 | c = atomic64_read(v); | ||
429 | for (;;) { | ||
430 | if (unlikely(c == (u))) | ||
431 | break; | ||
432 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
433 | if (likely(old == c)) | ||
434 | break; | ||
435 | c = old; | ||
436 | } | ||
437 | return c != (u); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * atomic_inc_short - increment of a short integer | ||
442 | * @v: pointer to type short int | ||
443 | * | ||
444 | * Atomically adds 1 to @v | ||
445 | * Returns the new value of @v | ||
446 | */ | ||
447 | static inline short int atomic_inc_short(short int *v) | ||
448 | { | ||
449 | asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); | ||
450 | return *v; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * atomic_or_long - OR of two long integers | ||
455 | * @v1: pointer to type unsigned long | ||
456 | * @v2: pointer to type unsigned long | ||
457 | * | ||
458 | * Atomically ORs @v2 into @v1 | ||
459 | * The result is stored in @v1; nothing is returned | ||
460 | */ | ||
461 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
462 | { | ||
463 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
464 | } | ||
465 | |||
466 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
467 | |||
468 | /* These are x86-specific, used by some header files */ | ||
469 | #define atomic_clear_mask(mask, addr) \ | ||
470 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
471 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
472 | |||
473 | #define atomic_set_mask(mask, addr) \ | ||
474 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
475 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ | ||
476 | : "memory") | ||
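A quick usage note, purely illustrative and not taken from the patch: unlike most helpers in this file, the two mask macros operate on a plain word through a pointer, e.g. a driver-private flags field (the names below are made up):

/* Hypothetical flags word manipulated with the locked and/or macros. */
#define DEMO_FLAG_BUSY  0x1
#define DEMO_FLAG_DIRTY 0x2

static unsigned int demo_flags;

static void demo_mark_busy(void)
{
        atomic_set_mask(DEMO_FLAG_BUSY, &demo_flags);
}

static void demo_clear_dirty(void)
{
        atomic_clear_mask(DEMO_FLAG_DIRTY, &demo_flags);
}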
477 | |||
478 | /* Atomic operations are already serializing on x86 */ | ||
479 | #define smp_mb__before_atomic_dec() barrier() | ||
480 | #define smp_mb__after_atomic_dec() barrier() | ||
481 | #define smp_mb__before_atomic_inc() barrier() | ||
482 | #define smp_mb__after_atomic_inc() barrier() | ||
483 | |||
484 | #include <asm-generic/atomic-long.h> | ||
485 | #endif /* _ASM_X86_ATOMIC_64_H */ | ||
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 637e1ec963c3..0cd82d068613 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -168,6 +168,10 @@ | |||
168 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ | 168 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
169 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ | 169 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
170 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ | 170 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
171 | #define X86_FEATURE_NPT (8*32+5) /* AMD Nested Page Table support */ | ||
172 | #define X86_FEATURE_LBRV (8*32+6) /* AMD LBR Virtualization support */ | ||
173 | #define X86_FEATURE_SVML (8*32+7) /* "svm_lock" AMD SVM locking MSR */ | ||
174 | #define X86_FEATURE_NRIPS (8*32+8) /* "nrip_save" AMD SVM next_rip save */ | ||
171 | 175 | ||
172 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 176 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
173 | 177 | ||
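For orientation only (not part of the diff): the (8*32+ n) notation used for the new AMD SVM bits packs a cpufeature word index and a bit index into a single number; a tiny sketch of how such a value decomposes:

/* Illustrative only: X86_FEATURE_NPT above is (8*32 + 5),
 * i.e. word 8, bit 5 of the per-CPU capability bitmap. */
#include <stdio.h>

#define DEMO_FEATURE_NPT (8*32 + 5)

int main(void)
{
        printf("word %d, bit %d\n",
               DEMO_FEATURE_NPT / 32, DEMO_FEATURE_NPT % 32);   /* word 8, bit 5 */
        return 0;
}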
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 761249e396fe..0e22296790d3 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h | |||
@@ -111,11 +111,8 @@ extern unsigned long end_user_pfn; | |||
111 | 111 | ||
112 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); | 112 | extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); |
113 | extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); | 113 | extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); |
114 | extern void reserve_early(u64 start, u64 end, char *name); | ||
115 | extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); | ||
116 | extern void free_early(u64 start, u64 end); | ||
117 | extern void early_res_to_bootmem(u64 start, u64 end); | ||
118 | extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); | 114 | extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); |
115 | #include <linux/early_res.h> | ||
119 | 116 | ||
120 | extern unsigned long e820_end_of_ram_pfn(void); | 117 | extern unsigned long e820_end_of_ram_pfn(void); |
121 | extern unsigned long e820_end_of_low_ram_pfn(void); | 118 | extern unsigned long e820_end_of_low_ram_pfn(void); |
diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h index 53018464aea6..2519d0679d99 100644 --- a/arch/x86/include/asm/fb.h +++ b/arch/x86/include/asm/fb.h | |||
@@ -12,10 +12,6 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | |||
12 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; | 12 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; |
13 | } | 13 | } |
14 | 14 | ||
15 | #ifdef CONFIG_X86_32 | ||
16 | extern int fb_is_primary_device(struct fb_info *info); | 15 | extern int fb_is_primary_device(struct fb_info *info); |
17 | #else | ||
18 | static inline int fb_is_primary_device(struct fb_info *info) { return 0; } | ||
19 | #endif | ||
20 | 16 | ||
21 | #endif /* _ASM_X86_FB_H */ | 17 | #endif /* _ASM_X86_FB_H */ |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 14f9890eb495..635f03bb4995 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -118,14 +118,20 @@ enum fixed_addresses { | |||
118 | * 256 temporary boot-time mappings, used by early_ioremap(), | 118 | * 256 temporary boot-time mappings, used by early_ioremap(), |
119 | * before ioremap() is functional. | 119 | * before ioremap() is functional. |
120 | * | 120 | * |
121 | * We round it up to the next 256 pages boundary so that we | 121 | * If necessary we round it up to the next 256 pages boundary so |
122 | * can have a single pgd entry and a single pte table: | 122 | * that we can have a single pgd entry and a single pte table: |
123 | */ | 123 | */ |
124 | #define NR_FIX_BTMAPS 64 | 124 | #define NR_FIX_BTMAPS 64 |
125 | #define FIX_BTMAPS_SLOTS 4 | 125 | #define FIX_BTMAPS_SLOTS 4 |
126 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | 126 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) |
127 | (__end_of_permanent_fixed_addresses & 255), | 127 | FIX_BTMAP_END = |
128 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | 128 | (__end_of_permanent_fixed_addresses ^ |
129 | (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) & | ||
130 | -PTRS_PER_PTE | ||
131 | ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - | ||
132 | (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) | ||
133 | : __end_of_permanent_fixed_addresses, | ||
134 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, | ||
129 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | 135 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT |
130 | FIX_OHCI1394_BASE, | 136 | FIX_OHCI1394_BASE, |
131 | #endif | 137 | #endif |
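A worked example may make the new FIX_BTMAP_END expression easier to follow; the numbers below are assumptions picked only for illustration (PTRS_PER_PTE = 512, TOTAL_FIX_BTMAPS = 64 * 4 = 256), not taken from any particular configuration:

/* Stand-alone sketch of the rounding logic added above: round the start
 * of the boot-time mappings up to a TOTAL_FIX_BTMAPS boundary only when
 * the range would otherwise straddle a pte-table boundary. */
#include <stdio.h>

#define DEMO_PTRS_PER_PTE     512
#define DEMO_TOTAL_FIX_BTMAPS 256

static unsigned long demo_fix_btmap_end(unsigned long end_of_permanent)
{
        if ((end_of_permanent ^ (end_of_permanent + DEMO_TOTAL_FIX_BTMAPS - 1)) &
            -(unsigned long)DEMO_PTRS_PER_PTE)
                return end_of_permanent + DEMO_TOTAL_FIX_BTMAPS -
                       (end_of_permanent & (DEMO_TOTAL_FIX_BTMAPS - 1));
        return end_of_permanent;
}

int main(void)
{
        printf("%lu\n", demo_fix_btmap_end(100));   /* fits in one pte table: stays 100 */
        printf("%lu\n", demo_fix_btmap_end(1000));  /* would straddle 1024: rounds to 1024 */
        return 0;
}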
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 014c2b85ae45..a726650fc80f 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h | |||
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | |||
66 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | 66 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); |
67 | struct page *kmap_atomic_to_page(void *ptr); | 67 | struct page *kmap_atomic_to_page(void *ptr); |
68 | 68 | ||
69 | #ifndef CONFIG_PARAVIRT | ||
70 | #define kmap_atomic_pte(page, type) kmap_atomic(page, type) | ||
71 | #endif | ||
72 | |||
73 | #define flush_cache_kmaps() do { } while (0) | 69 | #define flush_cache_kmaps() do { } while (0) |
74 | 70 | ||
75 | extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, | 71 | extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index eeac829a0f44..a929c9ede33d 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -53,13 +53,6 @@ extern void threshold_interrupt(void); | |||
53 | extern void call_function_interrupt(void); | 53 | extern void call_function_interrupt(void); |
54 | extern void call_function_single_interrupt(void); | 54 | extern void call_function_single_interrupt(void); |
55 | 55 | ||
56 | /* PIC specific functions */ | ||
57 | extern void disable_8259A_irq(unsigned int irq); | ||
58 | extern void enable_8259A_irq(unsigned int irq); | ||
59 | extern int i8259A_irq_pending(unsigned int irq); | ||
60 | extern void make_8259A_irq(unsigned int irq); | ||
61 | extern void init_8259A(int aeoi); | ||
62 | |||
63 | /* IOAPIC */ | 56 | /* IOAPIC */ |
64 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) | 57 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) |
65 | extern unsigned long io_apic_irqs; | 58 | extern unsigned long io_apic_irqs; |
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h new file mode 100644 index 000000000000..e153a2b3889a --- /dev/null +++ b/arch/x86/include/asm/hyperv.h | |||
@@ -0,0 +1,186 @@ | |||
1 | #ifndef _ASM_X86_KVM_HYPERV_H | ||
2 | #define _ASM_X86_KVM_HYPERV_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent | ||
8 | * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). | ||
9 | */ | ||
10 | #define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 | ||
11 | #define HYPERV_CPUID_INTERFACE 0x40000001 | ||
12 | #define HYPERV_CPUID_VERSION 0x40000002 | ||
13 | #define HYPERV_CPUID_FEATURES 0x40000003 | ||
14 | #define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 | ||
15 | #define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 | ||
16 | |||
17 | /* | ||
18 | * Feature identification. EAX indicates which features are available | ||
19 | * to the partition based upon the current partition privileges. | ||
20 | */ | ||
21 | |||
22 | /* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ | ||
23 | #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) | ||
24 | /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ | ||
25 | #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) | ||
26 | /* | ||
27 | * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM | ||
28 | * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available | ||
29 | */ | ||
30 | #define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) | ||
31 | /* | ||
32 | * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through | ||
33 | * HV_X64_MSR_STIMER3_COUNT) available | ||
34 | */ | ||
35 | #define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3) | ||
36 | /* | ||
37 | * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) | ||
38 | * are available | ||
39 | */ | ||
40 | #define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) | ||
41 | /* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ | ||
42 | #define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) | ||
43 | /* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ | ||
44 | #define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) | ||
45 | /* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ | ||
46 | #define HV_X64_MSR_RESET_AVAILABLE (1 << 7) | ||
47 | /* | ||
48 | * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, | ||
49 | * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, | ||
50 | * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available | ||
51 | */ | ||
52 | #define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) | ||
53 | |||
54 | /* | ||
55 | * Feature identification: EBX indicates which flags were specified at | ||
56 | * partition creation. The format is the same as the partition creation | ||
57 | * flag structure defined in section Partition Creation Flags. | ||
58 | */ | ||
59 | #define HV_X64_CREATE_PARTITIONS (1 << 0) | ||
60 | #define HV_X64_ACCESS_PARTITION_ID (1 << 1) | ||
61 | #define HV_X64_ACCESS_MEMORY_POOL (1 << 2) | ||
62 | #define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) | ||
63 | #define HV_X64_POST_MESSAGES (1 << 4) | ||
64 | #define HV_X64_SIGNAL_EVENTS (1 << 5) | ||
65 | #define HV_X64_CREATE_PORT (1 << 6) | ||
66 | #define HV_X64_CONNECT_PORT (1 << 7) | ||
67 | #define HV_X64_ACCESS_STATS (1 << 8) | ||
68 | #define HV_X64_DEBUGGING (1 << 11) | ||
69 | #define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) | ||
70 | #define HV_X64_CONFIGURE_PROFILER (1 << 13) | ||
71 | |||
72 | /* | ||
73 | * Feature identification. EDX indicates which miscellaneous features | ||
74 | * are available to the partition. | ||
75 | */ | ||
76 | /* The MWAIT instruction is available (per section MONITOR / MWAIT) */ | ||
77 | #define HV_X64_MWAIT_AVAILABLE (1 << 0) | ||
78 | /* Guest debugging support is available */ | ||
79 | #define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) | ||
80 | /* Performance Monitor support is available*/ | ||
81 | #define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) | ||
82 | /* Support for physical CPU dynamic partitioning events is available*/ | ||
83 | #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) | ||
84 | /* | ||
85 | * Support for passing hypercall input parameter block via XMM | ||
86 | * registers is available | ||
87 | */ | ||
88 | #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) | ||
89 | /* Support for a virtual guest idle state is available */ | ||
90 | #define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) | ||
91 | |||
92 | /* | ||
93 | * Implementation recommendations. Indicates which behaviors the hypervisor | ||
94 | * recommends the OS implement for optimal performance. | ||
95 | */ | ||
96 | /* | ||
97 | * Recommend using hypercall for address space switches rather | ||
98 | * than MOV to CR3 instruction | ||
99 | */ | ||
100 | #define HV_X64_MWAIT_RECOMMENDED (1 << 0) | ||
101 | /* Recommend using hypercall for local TLB flushes rather | ||
102 | * than INVLPG or MOV to CR3 instructions */ | ||
103 | #define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) | ||
104 | /* | ||
105 | * Recommend using hypercall for remote TLB flushes rather | ||
106 | * than inter-processor interrupts | ||
107 | */ | ||
108 | #define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) | ||
109 | /* | ||
110 | * Recommend using MSRs for accessing APIC registers | ||
111 | * EOI, ICR and TPR rather than their memory-mapped counterparts | ||
112 | */ | ||
113 | #define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) | ||
114 | /* Recommend using the hypervisor-provided MSR to initiate a system RESET */ | ||
115 | #define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) | ||
116 | /* | ||
117 | * Recommend using relaxed timing for this partition. If used, | ||
118 | * the VM should disable any watchdog timeouts that rely on the | ||
119 | * timely delivery of external interrupts | ||
120 | */ | ||
121 | #define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) | ||
122 | |||
123 | /* MSR used to identify the guest OS. */ | ||
124 | #define HV_X64_MSR_GUEST_OS_ID 0x40000000 | ||
125 | |||
126 | /* MSR used to setup pages used to communicate with the hypervisor. */ | ||
127 | #define HV_X64_MSR_HYPERCALL 0x40000001 | ||
128 | |||
129 | /* MSR used to provide vcpu index */ | ||
130 | #define HV_X64_MSR_VP_INDEX 0x40000002 | ||
131 | |||
132 | /* Define the virtual APIC registers */ | ||
133 | #define HV_X64_MSR_EOI 0x40000070 | ||
134 | #define HV_X64_MSR_ICR 0x40000071 | ||
135 | #define HV_X64_MSR_TPR 0x40000072 | ||
136 | #define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073 | ||
137 | |||
138 | /* Define synthetic interrupt controller model specific registers. */ | ||
139 | #define HV_X64_MSR_SCONTROL 0x40000080 | ||
140 | #define HV_X64_MSR_SVERSION 0x40000081 | ||
141 | #define HV_X64_MSR_SIEFP 0x40000082 | ||
142 | #define HV_X64_MSR_SIMP 0x40000083 | ||
143 | #define HV_X64_MSR_EOM 0x40000084 | ||
144 | #define HV_X64_MSR_SINT0 0x40000090 | ||
145 | #define HV_X64_MSR_SINT1 0x40000091 | ||
146 | #define HV_X64_MSR_SINT2 0x40000092 | ||
147 | #define HV_X64_MSR_SINT3 0x40000093 | ||
148 | #define HV_X64_MSR_SINT4 0x40000094 | ||
149 | #define HV_X64_MSR_SINT5 0x40000095 | ||
150 | #define HV_X64_MSR_SINT6 0x40000096 | ||
151 | #define HV_X64_MSR_SINT7 0x40000097 | ||
152 | #define HV_X64_MSR_SINT8 0x40000098 | ||
153 | #define HV_X64_MSR_SINT9 0x40000099 | ||
154 | #define HV_X64_MSR_SINT10 0x4000009A | ||
155 | #define HV_X64_MSR_SINT11 0x4000009B | ||
156 | #define HV_X64_MSR_SINT12 0x4000009C | ||
157 | #define HV_X64_MSR_SINT13 0x4000009D | ||
158 | #define HV_X64_MSR_SINT14 0x4000009E | ||
159 | #define HV_X64_MSR_SINT15 0x4000009F | ||
160 | |||
161 | |||
162 | #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 | ||
163 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 | ||
164 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ | ||
165 | (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) | ||
166 | |||
167 | /* Declare the various hypercall operations. */ | ||
168 | #define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008 | ||
169 | |||
170 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 | ||
171 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 | ||
172 | #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \ | ||
173 | (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) | ||
174 | |||
175 | #define HV_PROCESSOR_POWER_STATE_C0 0 | ||
176 | #define HV_PROCESSOR_POWER_STATE_C1 1 | ||
177 | #define HV_PROCESSOR_POWER_STATE_C2 2 | ||
178 | #define HV_PROCESSOR_POWER_STATE_C3 3 | ||
179 | |||
180 | /* hypercall status code */ | ||
181 | #define HV_STATUS_SUCCESS 0 | ||
182 | #define HV_STATUS_INVALID_HYPERCALL_CODE 2 | ||
183 | #define HV_STATUS_INVALID_HYPERCALL_INPUT 3 | ||
184 | #define HV_STATUS_INVALID_ALIGNMENT 4 | ||
185 | |||
186 | #endif | ||
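As a usage illustration only (not something this header provides), a guest would typically discover these leaves via CPUID and test the EAX availability bits before touching the synthetic MSRs; a rough sketch, where the demo_cpuid() wrapper is a local assumption:

/* Rough sketch: query HYPERV_CPUID_FEATURES and check one EAX bit. */
static inline void demo_cpuid(unsigned int leaf, unsigned int *eax,
                              unsigned int *ebx, unsigned int *ecx,
                              unsigned int *edx)
{
        asm volatile("cpuid"
                     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                     : "a" (leaf), "c" (0));
}

static int demo_hv_has_hypercall_msrs(void)
{
        unsigned int eax, ebx, ecx, edx;

        demo_cpuid(HYPERV_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
        return !!(eax & HV_X64_MSR_HYPERCALL_AVAILABLE);
}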
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index ebfb8a9e11f7..da2930924501 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -33,8 +33,16 @@ extern void init_thread_xstate(void); | |||
33 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | 33 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); |
34 | 34 | ||
35 | extern user_regset_active_fn fpregs_active, xfpregs_active; | 35 | extern user_regset_active_fn fpregs_active, xfpregs_active; |
36 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | 36 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, |
37 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | 37 | xstateregs_get; |
38 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set, | ||
39 | xstateregs_set; | ||
40 | |||
41 | /* | ||
42 | * xstateregs_active == fpregs_active. Please refer to the comment | ||
43 | * at the definition of fpregs_active. | ||
44 | */ | ||
45 | #define xstateregs_active fpregs_active | ||
38 | 46 | ||
39 | extern struct _fpx_sw_bytes fx_sw_reserved; | 47 | extern struct _fpx_sw_bytes fx_sw_reserved; |
40 | #ifdef CONFIG_IA32_EMULATION | 48 | #ifdef CONFIG_IA32_EMULATION |
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 58d7091eeb1f..1655147646aa 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h | |||
@@ -24,12 +24,7 @@ extern unsigned int cached_irq_mask; | |||
24 | #define SLAVE_ICW4_DEFAULT 0x01 | 24 | #define SLAVE_ICW4_DEFAULT 0x01 |
25 | #define PIC_ICW4_AEOI 2 | 25 | #define PIC_ICW4_AEOI 2 |
26 | 26 | ||
27 | extern spinlock_t i8259A_lock; | 27 | extern raw_spinlock_t i8259A_lock; |
28 | |||
29 | extern void init_8259A(int auto_eoi); | ||
30 | extern void enable_8259A_irq(unsigned int irq); | ||
31 | extern void disable_8259A_irq(unsigned int irq); | ||
32 | extern unsigned int startup_8259A_irq(unsigned int irq); | ||
33 | 28 | ||
34 | /* the PIC may need a careful delay on some platforms, hence specific calls */ | 29 | /* the PIC may need a careful delay on some platforms, hence specific calls */ |
35 | static inline unsigned char inb_pic(unsigned int port) | 30 | static inline unsigned char inb_pic(unsigned int port) |
@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
57 | 52 | ||
58 | extern struct irq_chip i8259A_chip; | 53 | extern struct irq_chip i8259A_chip; |
59 | 54 | ||
60 | extern void mask_8259A(void); | 55 | struct legacy_pic { |
61 | extern void unmask_8259A(void); | 56 | int nr_legacy_irqs; |
57 | struct irq_chip *chip; | ||
58 | void (*mask_all)(void); | ||
59 | void (*restore_mask)(void); | ||
60 | void (*init)(int auto_eoi); | ||
61 | int (*irq_pending)(unsigned int irq); | ||
62 | void (*make_irq)(unsigned int irq); | ||
63 | }; | ||
64 | |||
65 | extern struct legacy_pic *legacy_pic; | ||
66 | extern struct legacy_pic null_legacy_pic; | ||
62 | 67 | ||
63 | #endif /* _ASM_X86_I8259_H */ | 68 | #endif /* _ASM_X86_I8259_H */ |
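The struct legacy_pic hunk above replaces the individual 8259A externs with an ops table that platforms can swap out. A hedged sketch of what a do-nothing instance in the spirit of null_legacy_pic could look like; the stub names and the NULL chip are assumptions, not code from this patch:

/* Illustrative only: a no-op legacy_pic for platforms without a PIC. */
static void demo_pic_noop(void) { }
static void demo_pic_noop_init(int auto_eoi) { }
static void demo_pic_noop_irq(unsigned int irq) { }
static int  demo_pic_irq_pending_noop(unsigned int irq) { return 0; }

static struct legacy_pic demo_null_legacy_pic = {
        .nr_legacy_irqs = 0,
        .chip           = NULL,   /* the real code would point at a dummy irq_chip */
        .mask_all       = demo_pic_noop,
        .restore_mask   = demo_pic_noop,
        .init           = demo_pic_noop_init,
        .irq_pending    = demo_pic_irq_pending_noop,
        .make_irq       = demo_pic_noop_irq,
};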
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 73739322b6d0..a1dcfa3ab17d 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -1,8 +1,42 @@ | |||
1 | #ifndef _ASM_X86_IO_H | 1 | #ifndef _ASM_X86_IO_H |
2 | #define _ASM_X86_IO_H | 2 | #define _ASM_X86_IO_H |
3 | 3 | ||
4 | /* | ||
5 | * This file contains the definitions for the x86 IO instructions | ||
6 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
7 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
8 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
9 | * | ||
10 | * This file is not meant to be obfuscating: it's just complicated | ||
11 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
12 | * as well as possible and (b) trying to avoid writing the same thing | ||
13 | * over and over again with slight variations and possibly making a | ||
14 | * mistake somewhere. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * Thanks to James van Artsdalen for a better timing-fix than | ||
19 | * the two short jumps: using outb's to a nonexistent port seems | ||
20 | * to guarantee better timings even on fast machines. | ||
21 | * | ||
22 | * On the other hand, I'd like to be sure of a non-existent port: | ||
23 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
24 | * | ||
25 | * Linus | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Bit simplified and optimized by Jan Hubicka | ||
30 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
31 | * | ||
32 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
33 | * isa_read[wl] and isa_write[wl] fixed | ||
34 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
35 | */ | ||
36 | |||
4 | #define ARCH_HAS_IOREMAP_WC | 37 | #define ARCH_HAS_IOREMAP_WC |
5 | 38 | ||
39 | #include <linux/string.h> | ||
6 | #include <linux/compiler.h> | 40 | #include <linux/compiler.h> |
7 | #include <asm-generic/int-ll64.h> | 41 | #include <asm-generic/int-ll64.h> |
8 | #include <asm/page.h> | 42 | #include <asm/page.h> |
@@ -173,11 +207,126 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | |||
173 | extern void iounmap(volatile void __iomem *addr); | 207 | extern void iounmap(volatile void __iomem *addr); |
174 | 208 | ||
175 | 209 | ||
176 | #ifdef CONFIG_X86_32 | 210 | #ifdef __KERNEL__ |
177 | # include "io_32.h" | 211 | |
212 | #include <asm-generic/iomap.h> | ||
213 | |||
214 | #include <linux/vmalloc.h> | ||
215 | |||
216 | /* | ||
217 | * Convert a virtual cached pointer to an uncached pointer | ||
218 | */ | ||
219 | #define xlate_dev_kmem_ptr(p) p | ||
220 | |||
221 | static inline void | ||
222 | memset_io(volatile void __iomem *addr, unsigned char val, size_t count) | ||
223 | { | ||
224 | memset((void __force *)addr, val, count); | ||
225 | } | ||
226 | |||
227 | static inline void | ||
228 | memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) | ||
229 | { | ||
230 | memcpy(dst, (const void __force *)src, count); | ||
231 | } | ||
232 | |||
233 | static inline void | ||
234 | memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) | ||
235 | { | ||
236 | memcpy((void __force *)dst, src, count); | ||
237 | } | ||
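Purely as a usage illustration (the device layout, offsets and names below are hypothetical): these wrappers are how a driver moves data in and out of an ioremap()ed MMIO window without tripping the __iomem annotation:

/* Hypothetical driver snippet, illustrative only. */
static void demo_read_device_buffer(void __iomem *bar, void *dst, size_t len)
{
        memset_io(bar, 0, 16);                  /* clear a small control area */
        memcpy_fromio(dst, bar + 0x100, len);   /* copy payload out of MMIO   */
}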
238 | |||
239 | /* | ||
240 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
241 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
242 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
243 | * are physical addresses. The following constant pointer can be | ||
244 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
245 | * analogy with PCI is quite large): | ||
246 | */ | ||
247 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
248 | |||
249 | /* | ||
250 | * Cache management | ||
251 | * | ||
252 | * This is needed for two cases | ||
253 | * 1. Out of order aware processors | ||
254 | * 2. Accidentally out of order processors (PPro errata #51) | ||
255 | */ | ||
256 | |||
257 | static inline void flush_write_buffers(void) | ||
258 | { | ||
259 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
260 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | ||
261 | #endif | ||
262 | } | ||
263 | |||
264 | #endif /* __KERNEL__ */ | ||
265 | |||
266 | extern void native_io_delay(void); | ||
267 | |||
268 | extern int io_delay_type; | ||
269 | extern void io_delay_init(void); | ||
270 | |||
271 | #if defined(CONFIG_PARAVIRT) | ||
272 | #include <asm/paravirt.h> | ||
178 | #else | 273 | #else |
179 | # include "io_64.h" | 274 | |
275 | static inline void slow_down_io(void) | ||
276 | { | ||
277 | native_io_delay(); | ||
278 | #ifdef REALLY_SLOW_IO | ||
279 | native_io_delay(); | ||
280 | native_io_delay(); | ||
281 | native_io_delay(); | ||
180 | #endif | 282 | #endif |
283 | } | ||
284 | |||
285 | #endif | ||
286 | |||
287 | #define BUILDIO(bwl, bw, type) \ | ||
288 | static inline void out##bwl(unsigned type value, int port) \ | ||
289 | { \ | ||
290 | asm volatile("out" #bwl " %" #bw "0, %w1" \ | ||
291 | : : "a"(value), "Nd"(port)); \ | ||
292 | } \ | ||
293 | \ | ||
294 | static inline unsigned type in##bwl(int port) \ | ||
295 | { \ | ||
296 | unsigned type value; \ | ||
297 | asm volatile("in" #bwl " %w1, %" #bw "0" \ | ||
298 | : "=a"(value) : "Nd"(port)); \ | ||
299 | return value; \ | ||
300 | } \ | ||
301 | \ | ||
302 | static inline void out##bwl##_p(unsigned type value, int port) \ | ||
303 | { \ | ||
304 | out##bwl(value, port); \ | ||
305 | slow_down_io(); \ | ||
306 | } \ | ||
307 | \ | ||
308 | static inline unsigned type in##bwl##_p(int port) \ | ||
309 | { \ | ||
310 | unsigned type value = in##bwl(port); \ | ||
311 | slow_down_io(); \ | ||
312 | return value; \ | ||
313 | } \ | ||
314 | \ | ||
315 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
316 | { \ | ||
317 | asm volatile("rep; outs" #bwl \ | ||
318 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
319 | } \ | ||
320 | \ | ||
321 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
322 | { \ | ||
323 | asm volatile("rep; ins" #bwl \ | ||
324 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
325 | } | ||
326 | |||
327 | BUILDIO(b, b, char) | ||
328 | BUILDIO(w, w, short) | ||
329 | BUILDIO(l, , int) | ||
181 | 330 | ||
182 | extern void *xlate_dev_mem_ptr(unsigned long phys); | 331 | extern void *xlate_dev_mem_ptr(unsigned long phys); |
183 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | 332 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); |
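To make the BUILDIO() macro concrete, here is roughly what BUILDIO(b, b, char) expands to (hand-expanded as a sketch; only the byte variants are shown and whitespace will differ from real preprocessor output):

/* Hand-expanded sketch of BUILDIO(b, b, char) from the hunk above. */
static inline void outb(unsigned char value, int port)
{
        asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb(int port)
{
        unsigned char value;
        asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
        return value;
}

static inline void outb_p(unsigned char value, int port)
{
        outb(value, port);
        slow_down_io();
}
/* ...plus inb_p(), outsb() and insb() generated the same way. */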
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h deleted file mode 100644 index a299900f5920..000000000000 --- a/arch/x86/include/asm/io_32.h +++ /dev/null | |||
@@ -1,196 +0,0 @@ | |||
1 | #ifndef _ASM_X86_IO_32_H | ||
2 | #define _ASM_X86_IO_32_H | ||
3 | |||
4 | #include <linux/string.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * This file contains the definitions for the x86 IO instructions | ||
9 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
10 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
11 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
12 | * | ||
13 | * This file is not meant to be obfuscating: it's just complicated | ||
14 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
15 | * as well as possible and (b) trying to avoid writing the same thing | ||
16 | * over and over again with slight variations and possibly making a | ||
17 | * mistake somewhere. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * Thanks to James van Artsdalen for a better timing-fix than | ||
22 | * the two short jumps: using outb's to a nonexistent port seems | ||
23 | * to guarantee better timings even on fast machines. | ||
24 | * | ||
25 | * On the other hand, I'd like to be sure of a non-existent port: | ||
26 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
27 | * | ||
28 | * Linus | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * Bit simplified and optimized by Jan Hubicka | ||
33 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
34 | * | ||
35 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
36 | * isa_read[wl] and isa_write[wl] fixed | ||
37 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
38 | */ | ||
39 | |||
40 | #define XQUAD_PORTIO_BASE 0xfe400000 | ||
41 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | ||
42 | |||
43 | #ifdef __KERNEL__ | ||
44 | |||
45 | #include <asm-generic/iomap.h> | ||
46 | |||
47 | #include <linux/vmalloc.h> | ||
48 | |||
49 | /* | ||
50 | * Convert a virtual cached pointer to an uncached pointer | ||
51 | */ | ||
52 | #define xlate_dev_kmem_ptr(p) p | ||
53 | |||
54 | static inline void | ||
55 | memset_io(volatile void __iomem *addr, unsigned char val, int count) | ||
56 | { | ||
57 | memset((void __force *)addr, val, count); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | memcpy_fromio(void *dst, const volatile void __iomem *src, int count) | ||
62 | { | ||
63 | __memcpy(dst, (const void __force *)src, count); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | memcpy_toio(volatile void __iomem *dst, const void *src, int count) | ||
68 | { | ||
69 | __memcpy((void __force *)dst, src, count); | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
74 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
75 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
76 | * are physical addresses. The following constant pointer can be | ||
77 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
78 | * analogy with PCI is quite large): | ||
79 | */ | ||
80 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
81 | |||
82 | /* | ||
83 | * Cache management | ||
84 | * | ||
85 | * This needed for two cases | ||
86 | * 1. Out of order aware processors | ||
87 | * 2. Accidentally out of order processors (PPro errata #51) | ||
88 | */ | ||
89 | |||
90 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
91 | |||
92 | static inline void flush_write_buffers(void) | ||
93 | { | ||
94 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | ||
95 | } | ||
96 | |||
97 | #else | ||
98 | |||
99 | #define flush_write_buffers() do { } while (0) | ||
100 | |||
101 | #endif | ||
102 | |||
103 | #endif /* __KERNEL__ */ | ||
104 | |||
105 | extern void native_io_delay(void); | ||
106 | |||
107 | extern int io_delay_type; | ||
108 | extern void io_delay_init(void); | ||
109 | |||
110 | #if defined(CONFIG_PARAVIRT) | ||
111 | #include <asm/paravirt.h> | ||
112 | #else | ||
113 | |||
114 | static inline void slow_down_io(void) | ||
115 | { | ||
116 | native_io_delay(); | ||
117 | #ifdef REALLY_SLOW_IO | ||
118 | native_io_delay(); | ||
119 | native_io_delay(); | ||
120 | native_io_delay(); | ||
121 | #endif | ||
122 | } | ||
123 | |||
124 | #endif | ||
125 | |||
126 | #define __BUILDIO(bwl, bw, type) \ | ||
127 | static inline void out##bwl(unsigned type value, int port) \ | ||
128 | { \ | ||
129 | out##bwl##_local(value, port); \ | ||
130 | } \ | ||
131 | \ | ||
132 | static inline unsigned type in##bwl(int port) \ | ||
133 | { \ | ||
134 | return in##bwl##_local(port); \ | ||
135 | } | ||
136 | |||
137 | #define BUILDIO(bwl, bw, type) \ | ||
138 | static inline void out##bwl##_local(unsigned type value, int port) \ | ||
139 | { \ | ||
140 | asm volatile("out" #bwl " %" #bw "0, %w1" \ | ||
141 | : : "a"(value), "Nd"(port)); \ | ||
142 | } \ | ||
143 | \ | ||
144 | static inline unsigned type in##bwl##_local(int port) \ | ||
145 | { \ | ||
146 | unsigned type value; \ | ||
147 | asm volatile("in" #bwl " %w1, %" #bw "0" \ | ||
148 | : "=a"(value) : "Nd"(port)); \ | ||
149 | return value; \ | ||
150 | } \ | ||
151 | \ | ||
152 | static inline void out##bwl##_local_p(unsigned type value, int port) \ | ||
153 | { \ | ||
154 | out##bwl##_local(value, port); \ | ||
155 | slow_down_io(); \ | ||
156 | } \ | ||
157 | \ | ||
158 | static inline unsigned type in##bwl##_local_p(int port) \ | ||
159 | { \ | ||
160 | unsigned type value = in##bwl##_local(port); \ | ||
161 | slow_down_io(); \ | ||
162 | return value; \ | ||
163 | } \ | ||
164 | \ | ||
165 | __BUILDIO(bwl, bw, type) \ | ||
166 | \ | ||
167 | static inline void out##bwl##_p(unsigned type value, int port) \ | ||
168 | { \ | ||
169 | out##bwl(value, port); \ | ||
170 | slow_down_io(); \ | ||
171 | } \ | ||
172 | \ | ||
173 | static inline unsigned type in##bwl##_p(int port) \ | ||
174 | { \ | ||
175 | unsigned type value = in##bwl(port); \ | ||
176 | slow_down_io(); \ | ||
177 | return value; \ | ||
178 | } \ | ||
179 | \ | ||
180 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
181 | { \ | ||
182 | asm volatile("rep; outs" #bwl \ | ||
183 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
184 | } \ | ||
185 | \ | ||
186 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
187 | { \ | ||
188 | asm volatile("rep; ins" #bwl \ | ||
189 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
190 | } | ||
191 | |||
192 | BUILDIO(b, b, char) | ||
193 | BUILDIO(w, w, short) | ||
194 | BUILDIO(l, , int) | ||
195 | |||
196 | #endif /* _ASM_X86_IO_32_H */ | ||
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h deleted file mode 100644 index 244067893af4..000000000000 --- a/arch/x86/include/asm/io_64.h +++ /dev/null | |||
@@ -1,181 +0,0 @@ | |||
1 | #ifndef _ASM_X86_IO_64_H | ||
2 | #define _ASM_X86_IO_64_H | ||
3 | |||
4 | |||
5 | /* | ||
6 | * This file contains the definitions for the x86 IO instructions | ||
7 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
8 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
9 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
10 | * | ||
11 | * This file is not meant to be obfuscating: it's just complicated | ||
12 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
13 | * as well as possible and (b) trying to avoid writing the same thing | ||
14 | * over and over again with slight variations and possibly making a | ||
15 | * mistake somewhere. | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * Thanks to James van Artsdalen for a better timing-fix than | ||
20 | * the two short jumps: using outb's to a nonexistent port seems | ||
21 | * to guarantee better timings even on fast machines. | ||
22 | * | ||
23 | * On the other hand, I'd like to be sure of a non-existent port: | ||
24 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
25 | * | ||
26 | * Linus | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * Bit simplified and optimized by Jan Hubicka | ||
31 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
32 | * | ||
33 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
34 | * isa_read[wl] and isa_write[wl] fixed | ||
35 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
36 | */ | ||
37 | |||
38 | extern void native_io_delay(void); | ||
39 | |||
40 | extern int io_delay_type; | ||
41 | extern void io_delay_init(void); | ||
42 | |||
43 | #if defined(CONFIG_PARAVIRT) | ||
44 | #include <asm/paravirt.h> | ||
45 | #else | ||
46 | |||
47 | static inline void slow_down_io(void) | ||
48 | { | ||
49 | native_io_delay(); | ||
50 | #ifdef REALLY_SLOW_IO | ||
51 | native_io_delay(); | ||
52 | native_io_delay(); | ||
53 | native_io_delay(); | ||
54 | #endif | ||
55 | } | ||
56 | #endif | ||
57 | |||
58 | /* | ||
59 | * Talk about misusing macros.. | ||
60 | */ | ||
61 | #define __OUT1(s, x) \ | ||
62 | static inline void out##s(unsigned x value, unsigned short port) { | ||
63 | |||
64 | #define __OUT2(s, s1, s2) \ | ||
65 | asm volatile ("out" #s " %" s1 "0,%" s2 "1" | ||
66 | |||
67 | #ifndef REALLY_SLOW_IO | ||
68 | #define REALLY_SLOW_IO | ||
69 | #define UNSET_REALLY_SLOW_IO | ||
70 | #endif | ||
71 | |||
72 | #define __OUT(s, s1, x) \ | ||
73 | __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | ||
74 | } \ | ||
75 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | ||
76 | slow_down_io(); \ | ||
77 | } | ||
78 | |||
79 | #define __IN1(s) \ | ||
80 | static inline RETURN_TYPE in##s(unsigned short port) \ | ||
81 | { \ | ||
82 | RETURN_TYPE _v; | ||
83 | |||
84 | #define __IN2(s, s1, s2) \ | ||
85 | asm volatile ("in" #s " %" s2 "1,%" s1 "0" | ||
86 | |||
87 | #define __IN(s, s1, i...) \ | ||
88 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
89 | return _v; \ | ||
90 | } \ | ||
91 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
92 | slow_down_io(); \ | ||
93 | return _v; } | ||
94 | |||
95 | #ifdef UNSET_REALLY_SLOW_IO | ||
96 | #undef REALLY_SLOW_IO | ||
97 | #endif | ||
98 | |||
99 | #define __INS(s) \ | ||
100 | static inline void ins##s(unsigned short port, void *addr, \ | ||
101 | unsigned long count) \ | ||
102 | { \ | ||
103 | asm volatile ("rep ; ins" #s \ | ||
104 | : "=D" (addr), "=c" (count) \ | ||
105 | : "d" (port), "0" (addr), "1" (count)); \ | ||
106 | } | ||
107 | |||
108 | #define __OUTS(s) \ | ||
109 | static inline void outs##s(unsigned short port, const void *addr, \ | ||
110 | unsigned long count) \ | ||
111 | { \ | ||
112 | asm volatile ("rep ; outs" #s \ | ||
113 | : "=S" (addr), "=c" (count) \ | ||
114 | : "d" (port), "0" (addr), "1" (count)); \ | ||
115 | } | ||
116 | |||
117 | #define RETURN_TYPE unsigned char | ||
118 | __IN(b, "") | ||
119 | #undef RETURN_TYPE | ||
120 | #define RETURN_TYPE unsigned short | ||
121 | __IN(w, "") | ||
122 | #undef RETURN_TYPE | ||
123 | #define RETURN_TYPE unsigned int | ||
124 | __IN(l, "") | ||
125 | #undef RETURN_TYPE | ||
126 | |||
127 | __OUT(b, "b", char) | ||
128 | __OUT(w, "w", short) | ||
129 | __OUT(l, , int) | ||
130 | |||
131 | __INS(b) | ||
132 | __INS(w) | ||
133 | __INS(l) | ||
134 | |||
135 | __OUTS(b) | ||
136 | __OUTS(w) | ||
137 | __OUTS(l) | ||
138 | |||
139 | #if defined(__KERNEL__) && defined(__x86_64__) | ||
140 | |||
141 | #include <linux/vmalloc.h> | ||
142 | |||
143 | #include <asm-generic/iomap.h> | ||
144 | |||
145 | void __memcpy_fromio(void *, unsigned long, unsigned); | ||
146 | void __memcpy_toio(unsigned long, const void *, unsigned); | ||
147 | |||
148 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, | ||
149 | unsigned len) | ||
150 | { | ||
151 | __memcpy_fromio(to, (unsigned long)from, len); | ||
152 | } | ||
153 | |||
154 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, | ||
155 | unsigned len) | ||
156 | { | ||
157 | __memcpy_toio((unsigned long)to, from, len); | ||
158 | } | ||
159 | |||
160 | void memset_io(volatile void __iomem *a, int b, size_t c); | ||
161 | |||
162 | /* | ||
163 | * ISA space is 'always mapped' on a typical x86 system, no need to | ||
164 | * explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
165 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
166 | * are physical addresses. The following constant pointer can be | ||
167 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
168 | * analogy with PCI is quite large): | ||
169 | */ | ||
170 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
171 | |||
172 | #define flush_write_buffers() | ||
173 | |||
174 | /* | ||
175 | * Convert a virtual cached pointer to an uncached pointer | ||
176 | */ | ||
177 | #define xlate_dev_kmem_ptr(p) p | ||
178 | |||
179 | #endif /* __KERNEL__ */ | ||
180 | |||
181 | #endif /* _ASM_X86_IO_64_H */ | ||
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 7c7c16cde1f8..35832a03a515 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -143,8 +143,6 @@ extern int noioapicreroute; | |||
143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ | 143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ |
144 | extern int timer_through_8259; | 144 | extern int timer_through_8259; |
145 | 145 | ||
146 | extern void io_apic_disable_legacy(void); | ||
147 | |||
148 | /* | 146 | /* |
149 | * If we use the IO-APIC for IRQ routing, disable automatic | 147 | * If we use the IO-APIC for IRQ routing, disable automatic |
150 | * assignment of PCI IRQ's. | 148 | * assignment of PCI IRQ's. |
@@ -160,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic); | |||
160 | struct io_apic_irq_attr; | 158 | struct io_apic_irq_attr; |
161 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
162 | struct io_apic_irq_attr *irq_attr); | 160 | struct io_apic_irq_attr *irq_attr); |
161 | void setup_IO_APIC_irq_extra(u32 gsi); | ||
163 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 162 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
164 | extern void ioapic_init_mappings(void); | 163 | extern void ioapic_init_mappings(void); |
165 | extern void ioapic_insert_resources(void); | 164 | extern void ioapic_insert_resources(void); |
@@ -188,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_routing[]; | |||
188 | int mp_find_ioapic(int gsi); | 187 | int mp_find_ioapic(int gsi); |
189 | int mp_find_ioapic_pin(int ioapic, int gsi); | 188 | int mp_find_ioapic_pin(int ioapic, int gsi); |
190 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); | 189 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); |
190 | extern void __init pre_init_apic_IRQ0(void); | ||
191 | 191 | ||
192 | #else /* !CONFIG_X86_IO_APIC */ | 192 | #else /* !CONFIG_X86_IO_APIC */ |
193 | 193 | ||
@@ -197,7 +197,11 @@ static const int timer_through_8259 = 0; | |||
197 | static inline void ioapic_init_mappings(void) { } | 197 | static inline void ioapic_init_mappings(void) { } |
198 | static inline void ioapic_insert_resources(void) { } | 198 | static inline void ioapic_insert_resources(void) { } |
199 | static inline void probe_nr_irqs_gsi(void) { } | 199 | static inline void probe_nr_irqs_gsi(void) { } |
200 | static inline int mp_find_ioapic(int gsi) { return 0; } | ||
200 | 201 | ||
202 | struct io_apic_irq_attr; | ||
203 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, | ||
204 | struct io_apic_irq_attr *irq_attr) { return 0; } | ||
201 | #endif | 205 | #endif |
202 | 206 | ||
203 | #endif /* _ASM_X86_IO_APIC_H */ | 207 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 4611f085cd43..8767d99c4f64 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -28,28 +28,33 @@ | |||
28 | #define MCE_VECTOR 0x12 | 28 | #define MCE_VECTOR 0x12 |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * IDT vectors usable for external interrupt sources start | 31 | * IDT vectors usable for external interrupt sources start at 0x20. |
32 | * at 0x20: | 32 | * (0x80 is the syscall vector, 0x30-0x3f are for ISA) |
33 | */ | 33 | */ |
34 | #define FIRST_EXTERNAL_VECTOR 0x20 | 34 | #define FIRST_EXTERNAL_VECTOR 0x20 |
35 | 35 | /* | |
36 | #ifdef CONFIG_X86_32 | 36 | * We start allocating at 0x21 to spread out vectors evenly between |
37 | # define SYSCALL_VECTOR 0x80 | 37 | * priority levels. (0x80 is the syscall vector) |
38 | # define IA32_SYSCALL_VECTOR 0x80 | 38 | */ |
39 | #else | 39 | #define VECTOR_OFFSET_START 1 |
40 | # define IA32_SYSCALL_VECTOR 0x80 | ||
41 | #endif | ||
42 | 40 | ||
43 | /* | 41 | /* |
44 | * Reserve the lowest usable priority level 0x20 - 0x2f for triggering | 42 | * Reserve the lowest usable vector (and hence lowest priority) 0x20 for |
45 | * cleanup after irq migration. | 43 | * triggering cleanup after irq migration. 0x21-0x2f will still be used |
44 | * for device interrupts. | ||
46 | */ | 45 | */ |
47 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | 46 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR |
48 | 47 | ||
48 | #define IA32_SYSCALL_VECTOR 0x80 | ||
49 | #ifdef CONFIG_X86_32 | ||
50 | # define SYSCALL_VECTOR 0x80 | ||
51 | #endif | ||
52 | |||
49 | /* | 53 | /* |
50 | * Vectors 0x30-0x3f are used for ISA interrupts. | 54 | * Vectors 0x30-0x3f are used for ISA interrupts. |
55 | * round up to the next 16-vector boundary | ||
51 | */ | 56 | */ |
52 | #define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) | 57 | #define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15) |
53 | 58 | ||
54 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) | 59 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) |
55 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) | 60 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) |
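A quick arithmetic check of the new IRQ0_VECTOR definition (illustrative only): with FIRST_EXTERNAL_VECTOR = 0x20, ((0x20 + 16) & ~15) evaluates to 0x30, so the ISA block still begins at vector 0x30, and the expression keeps it 16-aligned even if FIRST_EXTERNAL_VECTOR were moved off a 16-vector boundary:

/* Stand-alone check of the rounding expression, not kernel code. */
#include <stdio.h>

#define DEMO_FIRST_EXTERNAL_VECTOR 0x20
#define DEMO_IRQ0_VECTOR ((DEMO_FIRST_EXTERNAL_VECTOR + 16) & ~15)

int main(void)
{
        printf("0x%x\n", DEMO_IRQ0_VECTOR);     /* prints 0x30 */
        return 0;
}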
@@ -120,13 +125,6 @@ | |||
120 | */ | 125 | */ |
121 | #define MCE_SELF_VECTOR 0xeb | 126 | #define MCE_SELF_VECTOR 0xeb |
122 | 127 | ||
123 | /* | ||
124 | * First APIC vector available to drivers: (vectors 0x30-0xee) we | ||
125 | * start at 0x31(0x41) to spread out vectors evenly between priority | ||
126 | * levels. (0x80 is the syscall vector) | ||
127 | */ | ||
128 | #define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) | ||
129 | |||
130 | #define NR_VECTORS 256 | 128 | #define NR_VECTORS 256 |
131 | 129 | ||
132 | #define FPU_IRQ 13 | 130 | #define FPU_IRQ 13 |
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq) | |||
154 | 152 | ||
155 | #define NR_IRQS_LEGACY 16 | 153 | #define NR_IRQS_LEGACY 16 |
156 | 154 | ||
157 | #define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) | ||
158 | #define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) | 155 | #define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) |
159 | 156 | ||
160 | #ifdef CONFIG_X86_IO_APIC | 157 | #ifdef CONFIG_X86_IO_APIC |
161 | # ifdef CONFIG_SPARSE_IRQ | 158 | # ifdef CONFIG_SPARSE_IRQ |
159 | # define CPU_VECTOR_LIMIT (64 * NR_CPUS) | ||
162 | # define NR_IRQS \ | 160 | # define NR_IRQS \ |
163 | (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ | 161 | (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ |
164 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ | 162 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ |
165 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) | 163 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) |
166 | # else | 164 | # else |
167 | # if NR_CPUS < MAX_IO_APICS | 165 | # define CPU_VECTOR_LIMIT (32 * NR_CPUS) |
168 | # define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) | 166 | # define NR_IRQS \ |
169 | # else | 167 | (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \ |
170 | # define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) | 168 | (NR_VECTORS + CPU_VECTOR_LIMIT) : \ |
171 | # endif | 169 | (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) |
172 | # endif | 170 | # endif |
173 | #else /* !CONFIG_X86_IO_APIC: */ | 171 | #else /* !CONFIG_X86_IO_APIC: */ |
174 | # define NR_IRQS NR_IRQS_LEGACY | 172 | # define NR_IRQS NR_IRQS_LEGACY |
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 4fe681de1e76..4ffa345a8ccb 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -32,7 +32,10 @@ struct kprobe; | |||
32 | 32 | ||
33 | typedef u8 kprobe_opcode_t; | 33 | typedef u8 kprobe_opcode_t; |
34 | #define BREAKPOINT_INSTRUCTION 0xcc | 34 | #define BREAKPOINT_INSTRUCTION 0xcc |
35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | 35 | #define RELATIVEJUMP_OPCODE 0xe9 |
36 | #define RELATIVEJUMP_SIZE 5 | ||
37 | #define RELATIVECALL_OPCODE 0xe8 | ||
38 | #define RELATIVE_ADDR_SIZE 4 | ||
36 | #define MAX_INSN_SIZE 16 | 39 | #define MAX_INSN_SIZE 16 |
37 | #define MAX_STACK_SIZE 64 | 40 | #define MAX_STACK_SIZE 64 |
38 | #define MIN_STACK_SIZE(ADDR) \ | 41 | #define MIN_STACK_SIZE(ADDR) \ |
@@ -44,6 +47,17 @@ typedef u8 kprobe_opcode_t; | |||
44 | 47 | ||
45 | #define flush_insn_slot(p) do { } while (0) | 48 | #define flush_insn_slot(p) do { } while (0) |
46 | 49 | ||
50 | /* optinsn template addresses */ | ||
51 | extern kprobe_opcode_t optprobe_template_entry; | ||
52 | extern kprobe_opcode_t optprobe_template_val; | ||
53 | extern kprobe_opcode_t optprobe_template_call; | ||
54 | extern kprobe_opcode_t optprobe_template_end; | ||
55 | #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE) | ||
56 | #define MAX_OPTINSN_SIZE \ | ||
57 | (((unsigned long)&optprobe_template_end - \ | ||
58 | (unsigned long)&optprobe_template_entry) + \ | ||
59 | MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE) | ||
60 | |||
47 | extern const int kretprobe_blacklist_size; | 61 | extern const int kretprobe_blacklist_size; |
48 | 62 | ||
49 | void arch_remove_kprobe(struct kprobe *p); | 63 | void arch_remove_kprobe(struct kprobe *p); |
@@ -64,6 +78,21 @@ struct arch_specific_insn { | |||
64 | int boostable; | 78 | int boostable; |
65 | }; | 79 | }; |
66 | 80 | ||
81 | struct arch_optimized_insn { | ||
82 | /* copy of the original instructions */ | ||
83 | kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE]; | ||
84 | /* detour code buffer */ | ||
85 | kprobe_opcode_t *insn; | ||
86 | /* the size of instructions copied to detour code buffer */ | ||
87 | size_t size; | ||
88 | }; | ||
89 | |||
90 | /* Return true (!0) if optinsn is prepared for optimization. */ | ||
91 | static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn) | ||
92 | { | ||
93 | return optinsn->size; | ||
94 | } | ||
95 | |||
67 | struct prev_kprobe { | 96 | struct prev_kprobe { |
68 | struct kprobe *kp; | 97 | struct kprobe *kp; |
69 | unsigned long status; | 98 | unsigned long status; |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 7c18e1230f54..7a6f54fa13ba 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt; | |||
54 | struct x86_emulate_ops { | 54 | struct x86_emulate_ops { |
55 | /* | 55 | /* |
56 | * read_std: Read bytes of standard (non-emulated/special) memory. | 56 | * read_std: Read bytes of standard (non-emulated/special) memory. |
57 | * Used for instruction fetch, stack operations, and others. | 57 | * Used for descriptor reading. |
58 | * @addr: [IN ] Linear address from which to read. | 58 | * @addr: [IN ] Linear address from which to read. |
59 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | 59 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. |
60 | * @bytes: [IN ] Number of bytes to read from memory. | 60 | * @bytes: [IN ] Number of bytes to read from memory. |
61 | */ | 61 | */ |
62 | int (*read_std)(unsigned long addr, void *val, | 62 | int (*read_std)(unsigned long addr, void *val, |
63 | unsigned int bytes, struct kvm_vcpu *vcpu); | 63 | unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); |
64 | |||
65 | /* | ||
66 | * fetch: Read bytes of standard (non-emulated/special) memory. | ||
67 | * Used for instruction fetch. | ||
68 | * @addr: [IN ] Linear address from which to read. | ||
69 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | ||
70 | * @bytes: [IN ] Number of bytes to read from memory. | ||
71 | */ | ||
72 | int (*fetch)(unsigned long addr, void *val, | ||
73 | unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error); | ||
64 | 74 | ||
65 | /* | 75 | /* |
66 | * read_emulated: Read bytes from emulated/special memory area. | 76 | * read_emulated: Read bytes from emulated/special memory area. |
@@ -74,7 +84,7 @@ struct x86_emulate_ops { | |||
74 | struct kvm_vcpu *vcpu); | 84 | struct kvm_vcpu *vcpu); |
75 | 85 | ||
76 | /* | 86 | /* |
77 | * write_emulated: Read bytes from emulated/special memory area. | 87 | * write_emulated: Write bytes to emulated/special memory area. |
78 | * @addr: [IN ] Linear address to which to write. | 88 | * @addr: [IN ] Linear address to which to write. |
79 | * @val: [IN ] Value to write to memory (low-order bytes used as | 89 | * @val: [IN ] Value to write to memory (low-order bytes used as |
80 | * required). | 90 | * required). |
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt { | |||
168 | 178 | ||
169 | /* Execution mode, passed to the emulator. */ | 179 | /* Execution mode, passed to the emulator. */ |
170 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | 180 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ |
181 | #define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */ | ||
171 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ | 182 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ |
172 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ | 183 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ |
173 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | 184 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4f865e8b8540..06d9e79ca37d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/mtrr.h> | 25 | #include <asm/mtrr.h> |
26 | #include <asm/msr-index.h> | 26 | #include <asm/msr-index.h> |
27 | 27 | ||
28 | #define KVM_MAX_VCPUS 16 | 28 | #define KVM_MAX_VCPUS 64 |
29 | #define KVM_MEMORY_SLOTS 32 | 29 | #define KVM_MEMORY_SLOTS 32 |
30 | /* memory slots that are not exposed to userspace */ | 30 | /* memory slots that are not exposed to userspace */ |
31 | #define KVM_PRIVATE_MEM_SLOTS 4 | 31 | #define KVM_PRIVATE_MEM_SLOTS 4 |
@@ -38,19 +38,6 @@ | |||
38 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ | 38 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ |
39 | 0xFFFFFF0000000000ULL) | 39 | 0xFFFFFF0000000000ULL) |
40 | 40 | ||
41 | #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ | ||
42 | (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) | ||
43 | #define KVM_GUEST_CR0_MASK \ | ||
44 | (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) | ||
45 | #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ | ||
46 | (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP) | ||
47 | #define KVM_VM_CR0_ALWAYS_ON \ | ||
48 | (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) | ||
49 | #define KVM_GUEST_CR4_MASK \ | ||
50 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) | ||
51 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | ||
52 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | ||
53 | |||
54 | #define INVALID_PAGE (~(hpa_t)0) | 41 | #define INVALID_PAGE (~(hpa_t)0) |
55 | #define UNMAPPED_GVA (~(gpa_t)0) | 42 | #define UNMAPPED_GVA (~(gpa_t)0) |
56 | 43 | ||
@@ -256,7 +243,8 @@ struct kvm_mmu { | |||
256 | void (*new_cr3)(struct kvm_vcpu *vcpu); | 243 | void (*new_cr3)(struct kvm_vcpu *vcpu); |
257 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); | 244 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); |
258 | void (*free)(struct kvm_vcpu *vcpu); | 245 | void (*free)(struct kvm_vcpu *vcpu); |
259 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); | 246 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, |
247 | u32 *error); | ||
260 | void (*prefetch_page)(struct kvm_vcpu *vcpu, | 248 | void (*prefetch_page)(struct kvm_vcpu *vcpu, |
261 | struct kvm_mmu_page *page); | 249 | struct kvm_mmu_page *page); |
262 | int (*sync_page)(struct kvm_vcpu *vcpu, | 250 | int (*sync_page)(struct kvm_vcpu *vcpu, |
@@ -282,13 +270,15 @@ struct kvm_vcpu_arch { | |||
282 | u32 regs_dirty; | 270 | u32 regs_dirty; |
283 | 271 | ||
284 | unsigned long cr0; | 272 | unsigned long cr0; |
273 | unsigned long cr0_guest_owned_bits; | ||
285 | unsigned long cr2; | 274 | unsigned long cr2; |
286 | unsigned long cr3; | 275 | unsigned long cr3; |
287 | unsigned long cr4; | 276 | unsigned long cr4; |
277 | unsigned long cr4_guest_owned_bits; | ||
288 | unsigned long cr8; | 278 | unsigned long cr8; |
289 | u32 hflags; | 279 | u32 hflags; |
290 | u64 pdptrs[4]; /* pae */ | 280 | u64 pdptrs[4]; /* pae */ |
291 | u64 shadow_efer; | 281 | u64 efer; |
292 | u64 apic_base; | 282 | u64 apic_base; |
293 | struct kvm_lapic *apic; /* kernel irqchip context */ | 283 | struct kvm_lapic *apic; /* kernel irqchip context */ |
294 | int32_t apic_arb_prio; | 284 | int32_t apic_arb_prio; |
@@ -374,17 +364,27 @@ struct kvm_vcpu_arch { | |||
374 | /* used for guest single stepping over the given code position */ | 364 | /* used for guest single stepping over the given code position */ |
375 | u16 singlestep_cs; | 365 | u16 singlestep_cs; |
376 | unsigned long singlestep_rip; | 366 | unsigned long singlestep_rip; |
367 | /* fields used by HYPER-V emulation */ | ||
368 | u64 hv_vapic; | ||
377 | }; | 369 | }; |
378 | 370 | ||
379 | struct kvm_mem_alias { | 371 | struct kvm_mem_alias { |
380 | gfn_t base_gfn; | 372 | gfn_t base_gfn; |
381 | unsigned long npages; | 373 | unsigned long npages; |
382 | gfn_t target_gfn; | 374 | gfn_t target_gfn; |
375 | #define KVM_ALIAS_INVALID 1UL | ||
376 | unsigned long flags; | ||
383 | }; | 377 | }; |
384 | 378 | ||
385 | struct kvm_arch{ | 379 | #define KVM_ARCH_HAS_UNALIAS_INSTANTIATION |
386 | int naliases; | 380 | |
381 | struct kvm_mem_aliases { | ||
387 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; | 382 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; |
383 | int naliases; | ||
384 | }; | ||
385 | |||
386 | struct kvm_arch { | ||
387 | struct kvm_mem_aliases *aliases; | ||
388 | 388 | ||
389 | unsigned int n_free_mmu_pages; | 389 | unsigned int n_free_mmu_pages; |
390 | unsigned int n_requested_mmu_pages; | 390 | unsigned int n_requested_mmu_pages; |
@@ -416,6 +416,10 @@ struct kvm_arch{ | |||
416 | s64 kvmclock_offset; | 416 | s64 kvmclock_offset; |
417 | 417 | ||
418 | struct kvm_xen_hvm_config xen_hvm_config; | 418 | struct kvm_xen_hvm_config xen_hvm_config; |
419 | |||
420 | /* fields used by HYPER-V emulation */ | ||
421 | u64 hv_guest_os_id; | ||
422 | u64 hv_hypercall; | ||
419 | }; | 423 | }; |
420 | 424 | ||
421 | struct kvm_vm_stat { | 425 | struct kvm_vm_stat { |
@@ -471,6 +475,7 @@ struct kvm_x86_ops { | |||
471 | int (*hardware_setup)(void); /* __init */ | 475 | int (*hardware_setup)(void); /* __init */ |
472 | void (*hardware_unsetup)(void); /* __exit */ | 476 | void (*hardware_unsetup)(void); /* __exit */ |
473 | bool (*cpu_has_accelerated_tpr)(void); | 477 | bool (*cpu_has_accelerated_tpr)(void); |
478 | void (*cpuid_update)(struct kvm_vcpu *vcpu); | ||
474 | 479 | ||
475 | /* Create, but do not attach this VCPU */ | 480 | /* Create, but do not attach this VCPU */ |
476 | struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); | 481 | struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); |
@@ -492,6 +497,7 @@ struct kvm_x86_ops { | |||
492 | void (*set_segment)(struct kvm_vcpu *vcpu, | 497 | void (*set_segment)(struct kvm_vcpu *vcpu, |
493 | struct kvm_segment *var, int seg); | 498 | struct kvm_segment *var, int seg); |
494 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); | 499 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); |
500 | void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu); | ||
495 | void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); | 501 | void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); |
496 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); | 502 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); |
497 | void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); | 503 | void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); |
@@ -501,12 +507,13 @@ struct kvm_x86_ops { | |||
501 | void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 507 | void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
502 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 508 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
503 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | 509 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); |
504 | unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); | 510 | int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); |
505 | void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, | 511 | int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value); |
506 | int *exception); | ||
507 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); | 512 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); |
508 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); | 513 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); |
509 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); | 514 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); |
515 | void (*fpu_activate)(struct kvm_vcpu *vcpu); | ||
516 | void (*fpu_deactivate)(struct kvm_vcpu *vcpu); | ||
510 | 517 | ||
511 | void (*tlb_flush)(struct kvm_vcpu *vcpu); | 518 | void (*tlb_flush)(struct kvm_vcpu *vcpu); |
512 | 519 | ||
@@ -531,7 +538,8 @@ struct kvm_x86_ops { | |||
531 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | 538 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
532 | int (*get_tdp_level)(void); | 539 | int (*get_tdp_level)(void); |
533 | u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); | 540 | u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); |
534 | bool (*gb_page_enable)(void); | 541 | int (*get_lpage_level)(void); |
542 | bool (*rdtscp_supported)(void); | ||
535 | 543 | ||
536 | const struct trace_print_flags *exit_reasons_str; | 544 | const struct trace_print_flags *exit_reasons_str; |
537 | }; | 545 | }; |
@@ -606,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, | |||
606 | unsigned long value); | 614 | unsigned long value); |
607 | 615 | ||
608 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | 616 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
609 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | 617 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); |
610 | int type_bits, int seg); | ||
611 | 618 | ||
612 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); | 619 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); |
613 | 620 | ||
@@ -653,6 +660,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | |||
653 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | 660 | int kvm_mmu_load(struct kvm_vcpu *vcpu); |
654 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | 661 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); |
655 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); | 662 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); |
663 | gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
664 | gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
665 | gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
666 | gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error); | ||
656 | 667 | ||
657 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | 668 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
658 | 669 | ||
@@ -666,6 +677,7 @@ void kvm_disable_tdp(void); | |||
666 | 677 | ||
667 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); | 678 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
668 | int complete_pio(struct kvm_vcpu *vcpu); | 679 | int complete_pio(struct kvm_vcpu *vcpu); |
680 | bool kvm_check_iopl(struct kvm_vcpu *vcpu); | ||
669 | 681 | ||
670 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); | 682 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); |
671 | 683 | ||
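The reworked gva_to_gpa hook and the four kvm_mmu_gva_to_gpa_* wrappers take an access type and hand back the page-fault error code through a pointer. A minimal sketch of the intended call pattern (the surrounding function and its error handling are illustrative, not taken from this patch):

/* Illustrative only: translate a guest linear address for a read. */
static int read_guest_word(struct kvm_vcpu *vcpu, gva_t gva, u64 *data)
{
	u32 error;
	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &error);

	if (gpa == UNMAPPED_GVA) {
		/* 'error' carries a #PF error code ready to be injected. */
		kvm_inject_page_fault(vcpu, gva, error);
		return -EFAULT;
	}
	return kvm_read_guest(vcpu->kvm, gpa, data, sizeof(*data));
}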
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index c584076a47f4..ffae1420e7d7 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_KVM_PARA_H | 2 | #define _ASM_X86_KVM_PARA_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/hyperv.h> | ||
5 | 6 | ||
6 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It | 7 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It |
7 | * should be used to determine that a VM is running under KVM. | 8 | * should be used to determine that a VM is running under KVM. |
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 47b9b6f19057..2e9972468a5d 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h | |||
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l) | |||
195 | #define __local_add(i, l) local_add((i), (l)) | 195 | #define __local_add(i, l) local_add((i), (l)) |
196 | #define __local_sub(i, l) local_sub((i), (l)) | 196 | #define __local_sub(i, l) local_sub((i), (l)) |
197 | 197 | ||
198 | /* Use these for per-cpu local_t variables: on some archs they are | ||
199 | * much more efficient than these naive implementations. Note they take | ||
200 | * a variable, not an address. | ||
201 | * | ||
202 | * X86_64: This could be done better if we moved the per cpu data directly | ||
203 | * after GS. | ||
204 | */ | ||
205 | |||
206 | /* Need to disable preemption for the cpu local counters otherwise we could | ||
207 | still access a variable of a previous CPU in a non atomic way. */ | ||
208 | #define cpu_local_wrap_v(l) \ | ||
209 | ({ \ | ||
210 | local_t res__; \ | ||
211 | preempt_disable(); \ | ||
212 | res__ = (l); \ | ||
213 | preempt_enable(); \ | ||
214 | res__; \ | ||
215 | }) | ||
216 | #define cpu_local_wrap(l) \ | ||
217 | ({ \ | ||
218 | preempt_disable(); \ | ||
219 | (l); \ | ||
220 | preempt_enable(); \ | ||
221 | }) \ | ||
222 | |||
223 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) | ||
224 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) | ||
225 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) | ||
226 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) | ||
227 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) | ||
228 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) | ||
229 | |||
230 | #define __cpu_local_inc(l) cpu_local_inc((l)) | ||
231 | #define __cpu_local_dec(l) cpu_local_dec((l)) | ||
232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
234 | |||
235 | #endif /* _ASM_X86_LOCAL_H */ | 198 | #endif /* _ASM_X86_LOCAL_H */ |
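The removed cpu_local_* wrappers bracketed every access with preempt_disable()/preempt_enable(). On x86 the segment-prefixed this_cpu_* operations are a single instruction and need no such bracketing, so a conversion looks roughly like this for a counter that becomes a plain per-CPU long (sketch, assuming <linux/percpu.h>):

/* Illustrative conversion, not part of this patch. */
DEFINE_PER_CPU(long, my_counter);

static void bump_counter(void)
{
	/* old:  cpu_local_inc(my_counter);   (preempt_disable/enable inside) */
	this_cpu_inc(my_counter);	/* single %gs-prefixed inc on SMP */
	this_cpu_add(my_counter, 16);
}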
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index a29f48c2a322..288b96f815a6 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h | |||
@@ -39,11 +39,5 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
39 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 39 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 40 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
41 | NODE_DATA(nid)->node_spanned_pages) | 41 | NODE_DATA(nid)->node_spanned_pages) |
42 | |||
43 | #ifdef CONFIG_NUMA_EMU | ||
44 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) | ||
45 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
46 | #endif | ||
47 | |||
48 | #endif | 42 | #endif |
49 | #endif /* _ASM_X86_MMZONE_64_H */ | 43 | #endif /* _ASM_X86_MMZONE_64_H */ |
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h new file mode 100644 index 000000000000..451d30e7f62d --- /dev/null +++ b/arch/x86/include/asm/mrst.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * mrst.h: Intel Moorestown platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | #ifndef _ASM_X86_MRST_H | ||
12 | #define _ASM_X86_MRST_H | ||
13 | extern int pci_mrst_init(void); | ||
14 | int __init sfi_parse_mrtc(struct sfi_table_header *table); | ||
15 | |||
16 | #define SFI_MTMR_MAX_NUM 8 | ||
17 | #define SFI_MRTC_MAX 8 | ||
18 | |||
19 | #endif /* _ASM_X86_MRST_H */ | ||
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index c4ae822e415f..823e070e7c26 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h | |||
@@ -36,6 +36,11 @@ extern void __cpuinit numa_set_node(int cpu, int node); | |||
36 | extern void __cpuinit numa_clear_node(int cpu); | 36 | extern void __cpuinit numa_clear_node(int cpu); |
37 | extern void __cpuinit numa_add_cpu(int cpu); | 37 | extern void __cpuinit numa_add_cpu(int cpu); |
38 | extern void __cpuinit numa_remove_cpu(int cpu); | 38 | extern void __cpuinit numa_remove_cpu(int cpu); |
39 | |||
40 | #ifdef CONFIG_NUMA_EMU | ||
41 | #define FAKE_NODE_MIN_SIZE ((u64)64 << 20) | ||
42 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) | ||
43 | #endif /* CONFIG_NUMA_EMU */ | ||
39 | #else | 44 | #else |
40 | static inline void init_cpu_to_node(void) { } | 45 | static inline void init_cpu_to_node(void) { } |
41 | static inline void numa_set_node(int cpu, int node) { } | 46 | static inline void numa_set_node(int cpu, int node) { } |
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h index 9f0a5f5d29ec..37c516545ec8 100644 --- a/arch/x86/include/asm/numaq.h +++ b/arch/x86/include/asm/numaq.h | |||
@@ -30,9 +30,14 @@ | |||
30 | 30 | ||
31 | extern int found_numaq; | 31 | extern int found_numaq; |
32 | extern int get_memcfg_numaq(void); | 32 | extern int get_memcfg_numaq(void); |
33 | extern int pci_numaq_init(void); | ||
33 | 34 | ||
34 | extern void *xquad_portio; | 35 | extern void *xquad_portio; |
35 | 36 | ||
37 | #define XQUAD_PORTIO_BASE 0xfe400000 | ||
38 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | ||
39 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | ||
40 | |||
36 | /* | 41 | /* |
37 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the | 42 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the |
38 | */ | 43 | */ |
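XQUAD_PORT_ADDR() now lives here alongside the xquad_portio declaration; it simply offsets into the mapped quad-local port window, 256 KB (0x40000) per quad:

/*
 * Worked example: I/O port 0x70 on quad 2
 *   XQUAD_PORT_ADDR(0x70, 2)
 *     == xquad_portio + 2 * XQUAD_PORTIO_QUAD + 0x70
 *     == xquad_portio + 0x80000 + 0x70
 * (xquad_portio must already have been mapped by the NUMA-Q setup code.)
 */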
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 3a57385d9fa7..101229b0d8ed 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h | |||
@@ -13,7 +13,6 @@ struct olpc_platform_t { | |||
13 | 13 | ||
14 | #define OLPC_F_PRESENT 0x01 | 14 | #define OLPC_F_PRESENT 0x01 |
15 | #define OLPC_F_DCON 0x02 | 15 | #define OLPC_F_DCON 0x02 |
16 | #define OLPC_F_VSA 0x04 | ||
17 | 16 | ||
18 | #ifdef CONFIG_OLPC | 17 | #ifdef CONFIG_OLPC |
19 | 18 | ||
@@ -51,18 +50,6 @@ static inline int olpc_has_dcon(void) | |||
51 | } | 50 | } |
52 | 51 | ||
53 | /* | 52 | /* |
54 | * The VSA is software from AMD that typical Geode bioses will include. | ||
55 | * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does | ||
56 | * not include the VSA; instead, PCI is emulated by the kernel. | ||
57 | * | ||
58 | * The VSA is described further in arch/x86/pci/olpc.c. | ||
59 | */ | ||
60 | static inline int olpc_has_vsa(void) | ||
61 | { | ||
62 | return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * The "Mass Production" version of OLPC's XO is identified as being model | 53 | * The "Mass Production" version of OLPC's XO is identified as being model |
67 | * C2. During the prototype phase, the following models (in chronological | 54 | * C2. During the prototype phase, the following models (in chronological |
68 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models | 55 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models |
@@ -87,13 +74,10 @@ static inline int olpc_has_dcon(void) | |||
87 | return 0; | 74 | return 0; |
88 | } | 75 | } |
89 | 76 | ||
90 | static inline int olpc_has_vsa(void) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | #endif | 77 | #endif |
96 | 78 | ||
79 | extern int pci_olpc_init(void); | ||
80 | |||
97 | /* EC related functions */ | 81 | /* EC related functions */ |
98 | 82 | ||
99 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | 83 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, |
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 642fe34b36a2..a667f24c7254 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #ifndef __ASSEMBLY__ | 41 | #ifndef __ASSEMBLY__ |
42 | 42 | ||
43 | extern int page_is_ram(unsigned long pagenr); | ||
44 | extern int devmem_is_allowed(unsigned long pagenr); | 43 | extern int devmem_is_allowed(unsigned long pagenr); |
45 | 44 | ||
46 | extern unsigned long max_low_pfn_mapped; | 45 | extern unsigned long max_low_pfn_mapped; |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index dd59a85a918f..5653f43d90e5 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn) | |||
435 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); | 435 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); |
436 | } | 436 | } |
437 | 437 | ||
438 | #ifdef CONFIG_HIGHPTE | ||
439 | static inline void *kmap_atomic_pte(struct page *page, enum km_type type) | ||
440 | { | ||
441 | unsigned long ret; | ||
442 | ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type); | ||
443 | return (void *)ret; | ||
444 | } | ||
445 | #endif | ||
446 | |||
447 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, | 438 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, |
448 | pte_t *ptep) | 439 | pte_t *ptep) |
449 | { | 440 | { |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index b1e70d51e40c..db9ef5532341 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -304,10 +304,6 @@ struct pv_mmu_ops { | |||
304 | #endif /* PAGETABLE_LEVELS == 4 */ | 304 | #endif /* PAGETABLE_LEVELS == 4 */ |
305 | #endif /* PAGETABLE_LEVELS >= 3 */ | 305 | #endif /* PAGETABLE_LEVELS >= 3 */ |
306 | 306 | ||
307 | #ifdef CONFIG_HIGHPTE | ||
308 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | ||
309 | #endif | ||
310 | |||
311 | struct pv_lazy_ops lazy_mode; | 307 | struct pv_lazy_ops lazy_mode; |
312 | 308 | ||
313 | /* dom0 ops */ | 309 | /* dom0 ops */ |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index ada8c201d513..3e002ca5a287 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
45 | 45 | ||
46 | #ifdef CONFIG_PCI | 46 | #ifdef CONFIG_PCI |
47 | extern unsigned int pcibios_assign_all_busses(void); | 47 | extern unsigned int pcibios_assign_all_busses(void); |
48 | extern int pci_legacy_init(void); | ||
49 | # ifdef CONFIG_ACPI | ||
50 | # define x86_default_pci_init pci_acpi_init | ||
51 | # else | ||
52 | # define x86_default_pci_init pci_legacy_init | ||
53 | # endif | ||
48 | #else | 54 | #else |
49 | #define pcibios_assign_all_busses() 0 | 55 | # define pcibios_assign_all_busses() 0 |
56 | # define x86_default_pci_init NULL | ||
50 | #endif | 57 | #endif |
51 | 58 | ||
52 | extern unsigned long pci_mem_start; | 59 | extern unsigned long pci_mem_start; |
@@ -124,6 +131,8 @@ extern void pci_iommu_alloc(void); | |||
124 | #include "pci_64.h" | 131 | #include "pci_64.h" |
125 | #endif | 132 | #endif |
126 | 133 | ||
134 | void dma32_reserve_bootmem(void); | ||
135 | |||
127 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 136 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
128 | #include <asm-generic/pci-dma-compat.h> | 137 | #include <asm-generic/pci-dma-compat.h> |
129 | 138 | ||
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h index ae5e40f67daf..fe15cfb21b9b 100644 --- a/arch/x86/include/asm/pci_64.h +++ b/arch/x86/include/asm/pci_64.h | |||
@@ -22,8 +22,6 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn, | |||
22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, | 22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, |
23 | int reg, int len, u32 value); | 23 | int reg, int len, u32 value); |
24 | 24 | ||
25 | extern void dma32_reserve_bootmem(void); | ||
26 | |||
27 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
28 | 26 | ||
29 | #endif /* _ASM_X86_PCI_64_H */ | 27 | #endif /* _ASM_X86_PCI_64_H */ |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index b4bf9a942ed0..1a0422348d6d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 | 29 | #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 |
30 | #define PCI_HAS_IO_ECS 0x40000 | 30 | #define PCI_HAS_IO_ECS 0x40000 |
31 | #define PCI_NOASSIGN_ROMS 0x80000 | 31 | #define PCI_NOASSIGN_ROMS 0x80000 |
32 | #define PCI_ROOT_NO_CRS 0x100000 | ||
32 | 33 | ||
33 | extern unsigned int pci_probe; | 34 | extern unsigned int pci_probe; |
34 | extern unsigned long pirq_table_addr; | 35 | extern unsigned long pirq_table_addr; |
@@ -82,7 +83,6 @@ struct irq_routing_table { | |||
82 | 83 | ||
83 | extern unsigned int pcibios_irq_mask; | 84 | extern unsigned int pcibios_irq_mask; |
84 | 85 | ||
85 | extern int pcibios_scanned; | ||
86 | extern spinlock_t pci_config_lock; | 86 | extern spinlock_t pci_config_lock; |
87 | 87 | ||
88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
@@ -105,16 +105,15 @@ extern bool port_cf9_safe; | |||
105 | extern int pci_direct_probe(void); | 105 | extern int pci_direct_probe(void); |
106 | extern void pci_direct_init(int type); | 106 | extern void pci_direct_init(int type); |
107 | extern void pci_pcbios_init(void); | 107 | extern void pci_pcbios_init(void); |
108 | extern int pci_olpc_init(void); | ||
109 | extern void __init dmi_check_pciprobe(void); | 108 | extern void __init dmi_check_pciprobe(void); |
110 | extern void __init dmi_check_skip_isa_align(void); | 109 | extern void __init dmi_check_skip_isa_align(void); |
111 | 110 | ||
112 | /* some common used subsys_initcalls */ | 111 | /* some common used subsys_initcalls */ |
113 | extern int __init pci_acpi_init(void); | 112 | extern int __init pci_acpi_init(void); |
114 | extern int __init pcibios_irq_init(void); | 113 | extern void __init pcibios_irq_init(void); |
115 | extern int __init pci_visws_init(void); | ||
116 | extern int __init pci_numaq_init(void); | ||
117 | extern int __init pcibios_init(void); | 114 | extern int __init pcibios_init(void); |
115 | extern int pci_legacy_init(void); | ||
116 | extern void pcibios_fixup_irqs(void); | ||
118 | 117 | ||
119 | /* pci-mmconfig.c */ | 118 | /* pci-mmconfig.c */ |
120 | 119 | ||
@@ -182,3 +181,17 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val) | |||
182 | { | 181 | { |
183 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); | 182 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); |
184 | } | 183 | } |
184 | |||
185 | #ifdef CONFIG_PCI | ||
186 | # ifdef CONFIG_ACPI | ||
187 | # define x86_default_pci_init pci_acpi_init | ||
188 | # else | ||
189 | # define x86_default_pci_init pci_legacy_init | ||
190 | # endif | ||
191 | # define x86_default_pci_init_irq pcibios_irq_init | ||
192 | # define x86_default_pci_fixup_irqs pcibios_fixup_irqs | ||
193 | #else | ||
194 | # define x86_default_pci_init NULL | ||
195 | # define x86_default_pci_init_irq NULL | ||
196 | # define x86_default_pci_fixup_irqs NULL | ||
197 | #endif | ||
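The x86_default_pci_* macros resolve to the right probe/IRQ routines for the build (ACPI vs. legacy, or NULL without PCI) so that platform code can plug them into an ops table instead of sprinkling #ifdefs at call sites. A hedged sketch of that pattern; the structure and field names below are illustrative of the style, not copied from this patch:

/* Illustrative only: platform ops filled with the defaults above. */
struct x86_init_pci {
	int  (*init)(void);
	void (*init_irq)(void);
	void (*fixup_irqs)(void);
};

static struct x86_init_pci pci_ops = {
	.init       = x86_default_pci_init,       /* pci_acpi_init or pci_legacy_init */
	.init_irq   = x86_default_pci_init_irq,   /* pcibios_irq_init */
	.fixup_irqs = x86_default_pci_fixup_irqs, /* pcibios_fixup_irqs */
};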
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0c44196b78ac..66a272dfd8b8 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -25,19 +25,18 @@ | |||
25 | */ | 25 | */ |
26 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
27 | #define PER_CPU(var, reg) \ | 27 | #define PER_CPU(var, reg) \ |
28 | __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \ | 28 | __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \ |
29 | lea per_cpu__##var(reg), reg | 29 | lea var(reg), reg |
30 | #define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var | 30 | #define PER_CPU_VAR(var) %__percpu_seg:var |
31 | #else /* ! SMP */ | 31 | #else /* ! SMP */ |
32 | #define PER_CPU(var, reg) \ | 32 | #define PER_CPU(var, reg) __percpu_mov_op $var, reg |
33 | __percpu_mov_op $per_cpu__##var, reg | 33 | #define PER_CPU_VAR(var) var |
34 | #define PER_CPU_VAR(var) per_cpu__##var | ||
35 | #endif /* SMP */ | 34 | #endif /* SMP */ |
36 | 35 | ||
37 | #ifdef CONFIG_X86_64_SMP | 36 | #ifdef CONFIG_X86_64_SMP |
38 | #define INIT_PER_CPU_VAR(var) init_per_cpu__##var | 37 | #define INIT_PER_CPU_VAR(var) init_per_cpu__##var |
39 | #else | 38 | #else |
40 | #define INIT_PER_CPU_VAR(var) per_cpu__##var | 39 | #define INIT_PER_CPU_VAR(var) var |
41 | #endif | 40 | #endif |
42 | 41 | ||
43 | #else /* ...!ASSEMBLY */ | 42 | #else /* ...!ASSEMBLY */ |
@@ -60,12 +59,12 @@ | |||
60 | * There also must be an entry in vmlinux_64.lds.S | 59 | * There also must be an entry in vmlinux_64.lds.S |
61 | */ | 60 | */ |
62 | #define DECLARE_INIT_PER_CPU(var) \ | 61 | #define DECLARE_INIT_PER_CPU(var) \ |
63 | extern typeof(per_cpu_var(var)) init_per_cpu_var(var) | 62 | extern typeof(var) init_per_cpu_var(var) |
64 | 63 | ||
65 | #ifdef CONFIG_X86_64_SMP | 64 | #ifdef CONFIG_X86_64_SMP |
66 | #define init_per_cpu_var(var) init_per_cpu__##var | 65 | #define init_per_cpu_var(var) init_per_cpu__##var |
67 | #else | 66 | #else |
68 | #define init_per_cpu_var(var) per_cpu_var(var) | 67 | #define init_per_cpu_var(var) var |
69 | #endif | 68 | #endif |
70 | 69 | ||
71 | /* For arch-specific code, we can use direct single-insn ops (they | 70 | /* For arch-specific code, we can use direct single-insn ops (they |
@@ -104,6 +103,64 @@ do { \ | |||
104 | } \ | 103 | } \ |
105 | } while (0) | 104 | } while (0) |
106 | 105 | ||
106 | /* | ||
107 | * Generate a percpu add to memory instruction and optimize code | ||
108 | * if a one is added or subtracted. | ||
109 | */ | ||
110 | #define percpu_add_op(var, val) \ | ||
111 | do { \ | ||
112 | typedef typeof(var) pao_T__; \ | ||
113 | const int pao_ID__ = (__builtin_constant_p(val) && \ | ||
114 | ((val) == 1 || (val) == -1)) ? (val) : 0; \ | ||
115 | if (0) { \ | ||
116 | pao_T__ pao_tmp__; \ | ||
117 | pao_tmp__ = (val); \ | ||
118 | } \ | ||
119 | switch (sizeof(var)) { \ | ||
120 | case 1: \ | ||
121 | if (pao_ID__ == 1) \ | ||
122 | asm("incb "__percpu_arg(0) : "+m" (var)); \ | ||
123 | else if (pao_ID__ == -1) \ | ||
124 | asm("decb "__percpu_arg(0) : "+m" (var)); \ | ||
125 | else \ | ||
126 | asm("addb %1, "__percpu_arg(0) \ | ||
127 | : "+m" (var) \ | ||
128 | : "qi" ((pao_T__)(val))); \ | ||
129 | break; \ | ||
130 | case 2: \ | ||
131 | if (pao_ID__ == 1) \ | ||
132 | asm("incw "__percpu_arg(0) : "+m" (var)); \ | ||
133 | else if (pao_ID__ == -1) \ | ||
134 | asm("decw "__percpu_arg(0) : "+m" (var)); \ | ||
135 | else \ | ||
136 | asm("addw %1, "__percpu_arg(0) \ | ||
137 | : "+m" (var) \ | ||
138 | : "ri" ((pao_T__)(val))); \ | ||
139 | break; \ | ||
140 | case 4: \ | ||
141 | if (pao_ID__ == 1) \ | ||
142 | asm("incl "__percpu_arg(0) : "+m" (var)); \ | ||
143 | else if (pao_ID__ == -1) \ | ||
144 | asm("decl "__percpu_arg(0) : "+m" (var)); \ | ||
145 | else \ | ||
146 | asm("addl %1, "__percpu_arg(0) \ | ||
147 | : "+m" (var) \ | ||
148 | : "ri" ((pao_T__)(val))); \ | ||
149 | break; \ | ||
150 | case 8: \ | ||
151 | if (pao_ID__ == 1) \ | ||
152 | asm("incq "__percpu_arg(0) : "+m" (var)); \ | ||
153 | else if (pao_ID__ == -1) \ | ||
154 | asm("decq "__percpu_arg(0) : "+m" (var)); \ | ||
155 | else \ | ||
156 | asm("addq %1, "__percpu_arg(0) \ | ||
157 | : "+m" (var) \ | ||
158 | : "re" ((pao_T__)(val))); \ | ||
159 | break; \ | ||
160 | default: __bad_percpu_size(); \ | ||
161 | } \ | ||
162 | } while (0) | ||
163 | |||
107 | #define percpu_from_op(op, var, constraint) \ | 164 | #define percpu_from_op(op, var, constraint) \ |
108 | ({ \ | 165 | ({ \ |
109 | typeof(var) pfo_ret__; \ | 166 | typeof(var) pfo_ret__; \ |
@@ -142,16 +199,14 @@ do { \ | |||
142 | * per-thread variables implemented as per-cpu variables and thus | 199 | * per-thread variables implemented as per-cpu variables and thus |
143 | * stable for the duration of the respective task. | 200 | * stable for the duration of the respective task. |
144 | */ | 201 | */ |
145 | #define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \ | 202 | #define percpu_read(var) percpu_from_op("mov", var, "m" (var)) |
146 | "m" (per_cpu__##var)) | 203 | #define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) |
147 | #define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \ | 204 | #define percpu_write(var, val) percpu_to_op("mov", var, val) |
148 | "p" (&per_cpu__##var)) | 205 | #define percpu_add(var, val) percpu_add_op(var, val) |
149 | #define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) | 206 | #define percpu_sub(var, val) percpu_add_op(var, -(val)) |
150 | #define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) | 207 | #define percpu_and(var, val) percpu_to_op("and", var, val) |
151 | #define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) | 208 | #define percpu_or(var, val) percpu_to_op("or", var, val) |
152 | #define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val) | 209 | #define percpu_xor(var, val) percpu_to_op("xor", var, val) |
153 | #define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) | ||
154 | #define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) | ||
155 | 210 | ||
156 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 211 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
157 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 212 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
@@ -160,9 +215,9 @@ do { \ | |||
160 | #define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | 215 | #define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) |
161 | #define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | 216 | #define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) |
162 | #define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | 217 | #define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) |
163 | #define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | 218 | #define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
164 | #define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | 219 | #define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
165 | #define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | 220 | #define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) |
166 | #define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | 221 | #define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) |
167 | #define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | 222 | #define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) |
168 | #define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | 223 | #define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) |
@@ -179,9 +234,9 @@ do { \ | |||
179 | #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | 234 | #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) |
180 | #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | 235 | #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) |
181 | #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | 236 | #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) |
182 | #define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | 237 | #define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
183 | #define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | 238 | #define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
184 | #define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | 239 | #define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) |
185 | #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | 240 | #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) |
186 | #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | 241 | #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) |
187 | #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | 242 | #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) |
@@ -192,9 +247,9 @@ do { \ | |||
192 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 247 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
193 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 248 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
194 | 249 | ||
195 | #define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | 250 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
196 | #define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | 251 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
197 | #define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | 252 | #define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val) |
198 | #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | 253 | #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) |
199 | #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | 254 | #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) |
200 | #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | 255 | #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) |
@@ -212,19 +267,19 @@ do { \ | |||
212 | #ifdef CONFIG_X86_64 | 267 | #ifdef CONFIG_X86_64 |
213 | #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 268 | #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
214 | #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | 269 | #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
215 | #define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | 270 | #define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
216 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 271 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
217 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 272 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
218 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 273 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
219 | 274 | ||
220 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 275 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
221 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | 276 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
222 | #define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | 277 | #define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
223 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 278 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
224 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 279 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
225 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 280 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
226 | 281 | ||
227 | #define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | 282 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
228 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 283 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
229 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 284 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
230 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 285 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
@@ -236,7 +291,7 @@ do { \ | |||
236 | ({ \ | 291 | ({ \ |
237 | int old__; \ | 292 | int old__; \ |
238 | asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ | 293 | asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ |
239 | : "=r" (old__), "+m" (per_cpu__##var) \ | 294 | : "=r" (old__), "+m" (var) \ |
240 | : "dIr" (bit)); \ | 295 | : "dIr" (bit)); \ |
241 | old__; \ | 296 | old__; \ |
242 | }) | 297 | }) |
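percpu_add_op() folds a constant +1/-1 into an inc/dec of the per-CPU slot and otherwise emits a single add with a size-matched operand. Roughly, for a 4-byte counter (a sketch; the generated code shown in the comments assumes an SMP build, where the %gs segment prefix is used, and the exact output depends on the compiler):

DEFINE_PER_CPU(int, hits);

static void touch(void)
{
	this_cpu_add(hits, 1);	/* becomes: incl %gs:hits      */
	this_cpu_add(hits, -1);	/* becomes: decl %gs:hits      */
	this_cpu_add(hits, 7);	/* becomes: addl $7, %gs:hits  */
}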
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 0e8c2a0fd922..271de94c3810 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
@@ -23,6 +23,11 @@ static inline void paravirt_release_pud(unsigned long pfn) {} | |||
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Flags to use when allocating a user page table page. | ||
27 | */ | ||
28 | extern gfp_t __userpte_alloc_gfp; | ||
29 | |||
30 | /* | ||
26 | * Allocate and free page tables. | 31 | * Allocate and free page tables. |
27 | */ | 32 | */ |
28 | extern pgd_t *pgd_alloc(struct mm_struct *); | 33 | extern pgd_t *pgd_alloc(struct mm_struct *); |
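__userpte_alloc_gfp exposes the allocation flags used for user page-table pages so they can be tuned (for example, dropping highmem placement via a boot option). A minimal sketch of the consumer side; the function name is illustrative, not quoted from the patch:

/* Illustrative only: allocate one user PTE page with the tunable flags. */
static struct page *alloc_user_pte_page(void)
{
	return alloc_pages(__userpte_alloc_gfp, 0);	/* order-0 page */
}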
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 01fd9461d323..47339a1ac7b6 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | |||
54 | in_irq() ? KM_IRQ_PTE : \ | 54 | in_irq() ? KM_IRQ_PTE : \ |
55 | KM_PTE0) | 55 | KM_PTE0) |
56 | #define pte_offset_map(dir, address) \ | 56 | #define pte_offset_map(dir, address) \ |
57 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \ | 57 | ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \ |
58 | pte_index((address))) | 58 | pte_index((address))) |
59 | #define pte_offset_map_nested(dir, address) \ | 59 | #define pte_offset_map_nested(dir, address) \ |
60 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ | 60 | ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \ |
61 | pte_index((address))) | 61 | pte_index((address))) |
62 | #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) | 62 | #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) |
63 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) | 63 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) |
@@ -80,7 +80,7 @@ do { \ | |||
80 | * The i386 doesn't have any external MMU info: the kernel page | 80 | * The i386 doesn't have any external MMU info: the kernel page |
81 | * tables contain all the necessary information. | 81 | * tables contain all the necessary information. |
82 | */ | 82 | */ |
83 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 83 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
84 | 84 | ||
85 | #endif /* !__ASSEMBLY__ */ | 85 | #endif /* !__ASSEMBLY__ */ |
86 | 86 | ||
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index c57a30117149..181be528c612 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
129 | #define pte_unmap(pte) /* NOP */ | 129 | #define pte_unmap(pte) /* NOP */ |
130 | #define pte_unmap_nested(pte) /* NOP */ | 130 | #define pte_unmap_nested(pte) /* NOP */ |
131 | 131 | ||
132 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 132 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
133 | 133 | ||
134 | /* Encode and de-code a swap entry */ | 134 | /* Encode and de-code a swap entry */ |
135 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 135 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 4009f6534f52..6f414ed88620 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h | |||
@@ -23,14 +23,4 @@ extern int reboot_force; | |||
23 | 23 | ||
24 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 24 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
25 | 25 | ||
26 | /* | ||
27 | * This looks more complex than it should be. But we need to | ||
28 | * get the type for the ~ right in round_down (it needs to be | ||
29 | * as wide as the result!), and we want to evaluate the macro | ||
30 | * arguments just once each. | ||
31 | */ | ||
32 | #define __round_mask(x,y) ((__typeof__(x))((y)-1)) | ||
33 | #define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1) | ||
34 | #define round_down(x,y) ((x) & ~__round_mask(x,y)) | ||
35 | |||
36 | #endif /* _ASM_X86_PROTO_H */ | 26 | #endif /* _ASM_X86_PROTO_H */ |
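round_up()/round_down() leave this x86-only header for a shared generic header in the same release; their semantics are unchanged. The mask trick keeps the type of x and requires y to be a power of two:

/*
 * Worked examples:
 *   round_up(5, 4)        == 8      ((5-1) | 3) + 1
 *   round_up(8, 4)        == 8      already a multiple
 *   round_down(5, 4)      == 4       5 & ~3
 *   round_down(0x27, 16)  == 0x20
 */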
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index ca7517d33776..606ede126972 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/spinlock.h> | 42 | #include <linux/spinlock.h> |
43 | #include <linux/lockdep.h> | 43 | #include <linux/lockdep.h> |
44 | #include <asm/asm.h> | ||
44 | 45 | ||
45 | struct rwsem_waiter; | 46 | struct rwsem_waiter; |
46 | 47 | ||
@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore * | |||
55 | 56 | ||
56 | /* | 57 | /* |
57 | * the semaphore definition | 58 | * the semaphore definition |
59 | * | ||
60 | * The bias values and the counter type limits the number of | ||
61 | * potential readers/writers to 32767 for 32 bits and 2147483647 | ||
62 | * for 64 bits. | ||
58 | */ | 63 | */ |
59 | 64 | ||
60 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 65 | #ifdef CONFIG_X86_64 |
61 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 66 | # define RWSEM_ACTIVE_MASK 0xffffffffL |
62 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 67 | #else |
63 | #define RWSEM_WAITING_BIAS (-0x00010000) | 68 | # define RWSEM_ACTIVE_MASK 0x0000ffffL |
69 | #endif | ||
70 | |||
71 | #define RWSEM_UNLOCKED_VALUE 0x00000000L | ||
72 | #define RWSEM_ACTIVE_BIAS 0x00000001L | ||
73 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) | ||
64 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 74 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
65 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 75 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
66 | 76 | ||
77 | typedef signed long rwsem_count_t; | ||
78 | |||
67 | struct rw_semaphore { | 79 | struct rw_semaphore { |
68 | signed long count; | 80 | rwsem_count_t count; |
69 | spinlock_t wait_lock; | 81 | spinlock_t wait_lock; |
70 | struct list_head wait_list; | 82 | struct list_head wait_list; |
71 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 83 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -105,7 +117,7 @@ do { \ | |||
105 | static inline void __down_read(struct rw_semaphore *sem) | 117 | static inline void __down_read(struct rw_semaphore *sem) |
106 | { | 118 | { |
107 | asm volatile("# beginning down_read\n\t" | 119 | asm volatile("# beginning down_read\n\t" |
108 | LOCK_PREFIX " incl (%%eax)\n\t" | 120 | LOCK_PREFIX _ASM_INC "(%1)\n\t" |
109 | /* adds 0x00000001, returns the old value */ | 121 | /* adds 0x00000001, returns the old value */ |
110 | " jns 1f\n" | 122 | " jns 1f\n" |
111 | " call call_rwsem_down_read_failed\n" | 123 | " call call_rwsem_down_read_failed\n" |
@@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
121 | */ | 133 | */ |
122 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 134 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
123 | { | 135 | { |
124 | __s32 result, tmp; | 136 | rwsem_count_t result, tmp; |
125 | asm volatile("# beginning __down_read_trylock\n\t" | 137 | asm volatile("# beginning __down_read_trylock\n\t" |
126 | " movl %0,%1\n\t" | 138 | " mov %0,%1\n\t" |
127 | "1:\n\t" | 139 | "1:\n\t" |
128 | " movl %1,%2\n\t" | 140 | " mov %1,%2\n\t" |
129 | " addl %3,%2\n\t" | 141 | " add %3,%2\n\t" |
130 | " jle 2f\n\t" | 142 | " jle 2f\n\t" |
131 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | 143 | LOCK_PREFIX " cmpxchg %2,%0\n\t" |
132 | " jnz 1b\n\t" | 144 | " jnz 1b\n\t" |
133 | "2:\n\t" | 145 | "2:\n\t" |
134 | "# ending __down_read_trylock\n\t" | 146 | "# ending __down_read_trylock\n\t" |
@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
143 | */ | 155 | */ |
144 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | 156 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
145 | { | 157 | { |
146 | int tmp; | 158 | rwsem_count_t tmp; |
147 | 159 | ||
148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 160 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
149 | asm volatile("# beginning down_write\n\t" | 161 | asm volatile("# beginning down_write\n\t" |
150 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 162 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
151 | /* subtract 0x0000ffff, returns the old value */ | 163 | /* subtract 0x0000ffff, returns the old value */ |
152 | " testl %%edx,%%edx\n\t" | 164 | " test %1,%1\n\t" |
153 | /* was the count 0 before? */ | 165 | /* was the count 0 before? */ |
154 | " jz 1f\n" | 166 | " jz 1f\n" |
155 | " call call_rwsem_down_write_failed\n" | 167 | " call call_rwsem_down_write_failed\n" |
@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
170 | */ | 182 | */ |
171 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 183 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
172 | { | 184 | { |
173 | signed long ret = cmpxchg(&sem->count, | 185 | rwsem_count_t ret = cmpxchg(&sem->count, |
174 | RWSEM_UNLOCKED_VALUE, | 186 | RWSEM_UNLOCKED_VALUE, |
175 | RWSEM_ACTIVE_WRITE_BIAS); | 187 | RWSEM_ACTIVE_WRITE_BIAS); |
176 | if (ret == RWSEM_UNLOCKED_VALUE) | 188 | if (ret == RWSEM_UNLOCKED_VALUE) |
177 | return 1; | 189 | return 1; |
178 | return 0; | 190 | return 0; |
@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
183 | */ | 195 | */ |
184 | static inline void __up_read(struct rw_semaphore *sem) | 196 | static inline void __up_read(struct rw_semaphore *sem) |
185 | { | 197 | { |
186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | 198 | rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; |
187 | asm volatile("# beginning __up_read\n\t" | 199 | asm volatile("# beginning __up_read\n\t" |
188 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 200 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
189 | /* subtracts 1, returns the old value */ | 201 | /* subtracts 1, returns the old value */ |
190 | " jns 1f\n\t" | 202 | " jns 1f\n\t" |
191 | " call call_rwsem_wake\n" | 203 | " call call_rwsem_wake\n" |
@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
201 | */ | 213 | */ |
202 | static inline void __up_write(struct rw_semaphore *sem) | 214 | static inline void __up_write(struct rw_semaphore *sem) |
203 | { | 215 | { |
216 | rwsem_count_t tmp; | ||
204 | asm volatile("# beginning __up_write\n\t" | 217 | asm volatile("# beginning __up_write\n\t" |
205 | " movl %2,%%edx\n\t" | 218 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
206 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" | ||
207 | /* tries to transition | 219 | /* tries to transition |
208 | 0xffff0001 -> 0x00000000 */ | 220 | 0xffff0001 -> 0x00000000 */ |
209 | " jz 1f\n" | 221 | " jz 1f\n" |
210 | " call call_rwsem_wake\n" | 222 | " call call_rwsem_wake\n" |
211 | "1:\n\t" | 223 | "1:\n\t" |
212 | "# ending __up_write\n" | 224 | "# ending __up_write\n" |
213 | : "+m" (sem->count) | 225 | : "+m" (sem->count), "=d" (tmp) |
214 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) | 226 | : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) |
215 | : "memory", "cc", "edx"); | 227 | : "memory", "cc"); |
216 | } | 228 | } |
217 | 229 | ||
218 | /* | 230 | /* |
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
221 | static inline void __downgrade_write(struct rw_semaphore *sem) | 233 | static inline void __downgrade_write(struct rw_semaphore *sem) |
222 | { | 234 | { |
223 | asm volatile("# beginning __downgrade_write\n\t" | 235 | asm volatile("# beginning __downgrade_write\n\t" |
224 | LOCK_PREFIX " addl %2,(%%eax)\n\t" | 236 | LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" |
225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | 237 | /* |
238 | * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) | ||
239 | * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) | ||
240 | */ | ||
226 | " jns 1f\n\t" | 241 | " jns 1f\n\t" |
227 | " call call_rwsem_downgrade_wake\n" | 242 | " call call_rwsem_downgrade_wake\n" |
228 | "1:\n\t" | 243 | "1:\n\t" |
229 | "# ending __downgrade_write\n" | 244 | "# ending __downgrade_write\n" |
230 | : "+m" (sem->count) | 245 | : "+m" (sem->count) |
231 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) | 246 | : "a" (sem), "er" (-RWSEM_WAITING_BIAS) |
232 | : "memory", "cc"); | 247 | : "memory", "cc"); |
233 | } | 248 | } |
234 | 249 | ||
235 | /* | 250 | /* |
236 | * implement atomic add functionality | 251 | * implement atomic add functionality |
237 | */ | 252 | */ |
238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 253 | static inline void rwsem_atomic_add(rwsem_count_t delta, |
254 | struct rw_semaphore *sem) | ||
239 | { | 255 | { |
240 | asm volatile(LOCK_PREFIX "addl %1,%0" | 256 | asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" |
241 | : "+m" (sem->count) | 257 | : "+m" (sem->count) |
242 | : "ir" (delta)); | 258 | : "er" (delta)); |
243 | } | 259 | } |
244 | 260 | ||
245 | /* | 261 | /* |
246 | * implement exchange and add functionality | 262 | * implement exchange and add functionality |
247 | */ | 263 | */ |
248 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | 264 | static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, |
265 | struct rw_semaphore *sem) | ||
249 | { | 266 | { |
250 | int tmp = delta; | 267 | rwsem_count_t tmp = delta; |
251 | 268 | ||
252 | asm volatile(LOCK_PREFIX "xadd %0,%1" | 269 | asm volatile(LOCK_PREFIX "xadd %0,%1" |
253 | : "+r" (tmp), "+m" (sem->count) | 270 | : "+r" (tmp), "+m" (sem->count) |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 18e496c98ff0..86b1506f4179 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void); | |||
37 | 37 | ||
38 | #ifdef CONFIG_X86_VISWS | 38 | #ifdef CONFIG_X86_VISWS |
39 | extern void visws_early_detect(void); | 39 | extern void visws_early_detect(void); |
40 | extern int is_visws_box(void); | ||
41 | #else | 40 | #else |
42 | static inline void visws_early_detect(void) { } | 41 | static inline void visws_early_detect(void) { } |
43 | static inline int is_visws_box(void) { return 0; } | ||
44 | #endif | 42 | #endif |
45 | 43 | ||
46 | extern unsigned long saved_video_mode; | 44 | extern unsigned long saved_video_mode; |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 1e796782cd7b..4cfc90824068 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -135,6 +135,8 @@ int native_cpu_disable(void); | |||
135 | void native_cpu_die(unsigned int cpu); | 135 | void native_cpu_die(unsigned int cpu); |
136 | void native_play_dead(void); | 136 | void native_play_dead(void); |
137 | void play_dead_common(void); | 137 | void play_dead_common(void); |
138 | void wbinvd_on_cpu(int cpu); | ||
139 | int wbinvd_on_all_cpus(void); | ||
138 | 140 | ||
139 | void native_send_call_func_ipi(const struct cpumask *mask); | 141 | void native_send_call_func_ipi(const struct cpumask *mask); |
140 | void native_send_call_func_single_ipi(int cpu); | 142 | void native_send_call_func_single_ipi(int cpu); |
@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void) | |||
147 | { | 149 | { |
148 | return cpumask_weight(cpu_callout_mask); | 150 | return cpumask_weight(cpu_callout_mask); |
149 | } | 151 | } |
152 | #else /* !CONFIG_SMP */ | ||
153 | #define wbinvd_on_cpu(cpu) wbinvd() | ||
154 | static inline int wbinvd_on_all_cpus(void) | ||
155 | { | ||
156 | wbinvd(); | ||
157 | return 0; | ||
158 | } | ||
150 | #endif /* CONFIG_SMP */ | 159 | #endif /* CONFIG_SMP */ |
151 | 160 | ||
152 | extern unsigned disabled_cpus __cpuinitdata; | 161 | extern unsigned disabled_cpus __cpuinitdata; |
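wbinvd_on_all_cpus() gives callers a cache writeback-and-invalidate across every online CPU; on !SMP builds it degenerates to a local wbinvd(), as the stub above shows. A hedged usage sketch (the caller and the 0-on-success assumption follow the UP stub, not a documented contract):

/* Illustrative only: flush caches everywhere before handing memory
 * to an agent that is not cache coherent. */
static void flush_caches_everywhere(void)
{
	if (wbinvd_on_all_cpus())
		printk(KERN_WARNING "wbinvd on all CPUs failed\n");
}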
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 1fecb7e61130..38638cd2fa4c 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -313,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb { | |||
313 | 313 | ||
314 | #define SVM_EXIT_ERR -1 | 314 | #define SVM_EXIT_ERR -1 |
315 | 315 | ||
316 | #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ | 316 | #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) |
317 | 317 | ||
318 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" | 318 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" |
319 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" | 319 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" |
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 8d33bc5462d1..c4a348f7bd43 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | 18 | ||
19 | extern const unsigned long sys_call_table[]; | ||
20 | |||
19 | /* | 21 | /* |
20 | * Only the low 32 bits of orig_ax are meaningful, so we return int. | 22 | * Only the low 32 bits of orig_ax are meaningful, so we return int. |
21 | * This importantly ignores the high bits on 64-bit, so comparisons | 23 | * This importantly ignores the high bits on 64-bit, so comparisons |
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index e04740f7a0bb..b8fe48ee2ed9 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -32,7 +32,7 @@ extern void show_regs_common(void); | |||
32 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ | 32 | "movl %P[task_canary](%[next]), %%ebx\n\t" \ |
33 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" | 33 | "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" |
34 | #define __switch_canary_oparam \ | 34 | #define __switch_canary_oparam \ |
35 | , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) | 35 | , [stack_canary] "=m" (stack_canary.canary) |
36 | #define __switch_canary_iparam \ | 36 | #define __switch_canary_iparam \ |
37 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 37 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
38 | #else /* CC_STACKPROTECTOR */ | 38 | #else /* CC_STACKPROTECTOR */ |
@@ -114,7 +114,7 @@ do { \ | |||
114 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ | 114 | "movq %P[task_canary](%%rsi),%%r8\n\t" \ |
115 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" | 115 | "movq %%r8,"__percpu_arg([gs_canary])"\n\t" |
116 | #define __switch_canary_oparam \ | 116 | #define __switch_canary_oparam \ |
117 | , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) | 117 | , [gs_canary] "=m" (irq_stack_union.stack_canary) |
118 | #define __switch_canary_iparam \ | 118 | #define __switch_canary_iparam \ |
119 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) | 119 | , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) |
120 | #else /* CC_STACKPROTECTOR */ | 120 | #else /* CC_STACKPROTECTOR */ |
@@ -133,7 +133,7 @@ do { \ | |||
133 | __switch_canary \ | 133 | __switch_canary \ |
134 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | 134 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ |
135 | "movq %%rax,%%rdi\n\t" \ | 135 | "movq %%rax,%%rdi\n\t" \ |
136 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ | 136 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ |
137 | "jnz ret_from_fork\n\t" \ | 137 | "jnz ret_from_fork\n\t" \ |
138 | RESTORE_CONTEXT \ | 138 | RESTORE_CONTEXT \ |
139 | : "=a" (last) \ | 139 | : "=a" (last) \ |
@@ -143,7 +143,7 @@ do { \ | |||
143 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ | 143 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ |
144 | [_tif_fork] "i" (_TIF_FORK), \ | 144 | [_tif_fork] "i" (_TIF_FORK), \ |
145 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | 145 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ |
146 | [current_task] "m" (per_cpu_var(current_task)) \ | 146 | [current_task] "m" (current_task) \ |
147 | __switch_canary_iparam \ | 147 | __switch_canary_iparam \ |
148 | : "memory", "cc" __EXTRA_CLOBBER) | 148 | : "memory", "cc" __EXTRA_CLOBBER) |
149 | #endif | 149 | #endif |
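The switch_to() asm no longer wraps its per-CPU operands in per_cpu_var(): with the per_cpu__ symbol prefix gone in this release, the plain symbol (current_task, stack_canary.canary, irq_stack_union.stack_canary) is the correct "m" operand, and __percpu_arg() in the instruction template still supplies the %fs:/%gs: segment override. A minimal sketch mirroring the operand form used above, reading this CPU's current_task; purely illustrative:

    #include <asm/percpu.h>
    #include <asm/current.h>        /* DECLARE_PER_CPU(struct task_struct *, current_task) */

    static inline struct task_struct *asm_read_current(void)
    {
            struct task_struct *t;

            /* "m" names the per-CPU symbol directly; __percpu_arg() adds the
             * segment prefix in the template. */
            asm("movq "__percpu_arg([cur])", %[out]"
                : [out] "=r" (t)
                : [cur] "m" (current_task));
            return t;
    }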
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 535e421498f6..316708d5af92 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/prefetch.h> | 9 | #include <linux/prefetch.h> |
10 | #include <linux/lockdep.h> | 10 | #include <linux/lockdep.h> |
11 | #include <asm/alternative.h> | ||
12 | #include <asm/cpufeature.h> | ||
11 | #include <asm/page.h> | 13 | #include <asm/page.h> |
12 | 14 | ||
13 | /* | 15 | /* |
@@ -16,7 +18,24 @@ | |||
16 | 18 | ||
17 | /* Handles exceptions in both to and from, but doesn't do access_ok */ | 19 | /* Handles exceptions in both to and from, but doesn't do access_ok */ |
18 | __must_check unsigned long | 20 | __must_check unsigned long |
19 | copy_user_generic(void *to, const void *from, unsigned len); | 21 | copy_user_generic_string(void *to, const void *from, unsigned len); |
22 | __must_check unsigned long | ||
23 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); | ||
24 | |||
25 | static __always_inline __must_check unsigned long | ||
26 | copy_user_generic(void *to, const void *from, unsigned len) | ||
27 | { | ||
28 | unsigned ret; | ||
29 | |||
30 | alternative_call(copy_user_generic_unrolled, | ||
31 | copy_user_generic_string, | ||
32 | X86_FEATURE_REP_GOOD, | ||
33 | ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), | ||
34 | "=d" (len)), | ||
35 | "1" (to), "2" (from), "3" (len) | ||
36 | : "memory", "rcx", "r8", "r9", "r10", "r11"); | ||
37 | return ret; | ||
38 | } | ||
20 | 39 | ||
21 | __must_check unsigned long | 40 | __must_check unsigned long |
22 | _copy_to_user(void __user *to, const void *from, unsigned len); | 41 | _copy_to_user(void __user *to, const void *from, unsigned len); |
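copy_user_generic() is now an inline whose call target is selected at patch time: alternative_call() initially emits a call to copy_user_generic_unrolled and, on CPUs that advertise X86_FEATURE_REP_GOOD (rep string moves are fast), rewrites it into a call to copy_user_generic_string. Behaviourally it corresponds to the following non-patching reference, shown only as an illustration; the real code avoids the runtime branch entirely:

    #include <asm/cpufeature.h>

    static inline unsigned long
    copy_user_generic_ref(void *to, const void *from, unsigned len)
    {
            /* Same selection the patched call site makes once, at boot. */
            if (boot_cpu_has(X86_FEATURE_REP_GOOD))
                    return copy_user_generic_string(to, from, len);
            return copy_user_generic_unrolled(to, from, len);
    }

The constraints ("=a", "=D", "=S", "=d" plus the rcx/r8-r11 clobbers) describe the calling convention of the two out-of-line variants, so the compiler knows exactly what the patched call consumes and clobbers.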
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h index 999873b22e7f..24532c7da3d6 100644 --- a/arch/x86/include/asm/user.h +++ b/arch/x86/include/asm/user.h | |||
@@ -1,5 +1,63 @@ | |||
1 | #ifndef _ASM_X86_USER_H | ||
2 | #define _ASM_X86_USER_H | ||
3 | |||
1 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
2 | # include "user_32.h" | 5 | # include "user_32.h" |
3 | #else | 6 | #else |
4 | # include "user_64.h" | 7 | # include "user_64.h" |
5 | #endif | 8 | #endif |
9 | |||
10 | #include <asm/types.h> | ||
11 | |||
12 | struct user_ymmh_regs { | ||
13 | /* 16 * 16 bytes for each YMMH-reg */ | ||
14 | __u32 ymmh_space[64]; | ||
15 | }; | ||
16 | |||
17 | struct user_xsave_hdr { | ||
18 | __u64 xstate_bv; | ||
19 | __u64 reserved1[2]; | ||
20 | __u64 reserved2[5]; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * The structure layout of user_xstateregs, used for exporting the | ||
25 | * extended register state through ptrace and core-dump (NT_X86_XSTATE note) | ||
26 | * interfaces will be the same as the memory layout of xsave used by the processor | ||
27 | * (except for the bytes 464..511, which can be used by the software) and hence | ||
28 | * the size of this structure varies depending on the features supported by the | ||
29 | * processor and OS. The size of the structure that users need to use can be | ||
30 | * obtained by doing: | ||
31 | * cpuid_count(0xd, 0, &eax, &ptrace_xstateregs_struct_size, &ecx, &edx); | ||
32 | * i.e., cpuid.(eax=0xd,ecx=0).ebx will be the size that user (debuggers, etc.) | ||
33 | * need to use. | ||
34 | * | ||
35 | * For now, only the first 8 bytes of the software usable bytes[464..471] will | ||
36 | * be used and will be set to OS enabled xstate mask (which is same as the | ||
37 | * 64bit mask returned by the xgetbv's xCR0). Users (analyzing core dump | ||
38 | * remotely, etc.) can use this mask as well as the mask saved in the | ||
39 | * xstate_hdr bytes and interpret what states the processor/OS supports | ||
40 | * and what states are in modified/initialized conditions for the | ||
41 | * particular process/thread. | ||
42 | * | ||
43 | * Also when the user modifies certain state FP/SSE/etc through the | ||
44 | * ptrace interface, they must ensure that the xsave_hdr.xstate_bv | ||
45 | * bytes[512..519] of the memory layout are updated correspondingly. | ||
46 | * i.e., for example when FP state is modified to a non-init state, | ||
47 | * xsave_hdr.xstate_bv's bit 0 must be set to '1', when SSE is modified to | ||
48 | * non-init state, xsave_hdr.xstate_bv's bit 1 must be set to '1', etc. | ||
49 | */ | ||
50 | #define USER_XSTATE_FX_SW_WORDS 6 | ||
51 | #define USER_XSTATE_XCR0_WORD 0 | ||
52 | |||
53 | struct user_xstateregs { | ||
54 | struct { | ||
55 | __u64 fpx_space[58]; | ||
56 | __u64 xstate_fx_sw[USER_XSTATE_FX_SW_WORDS]; | ||
57 | } i387; | ||
58 | struct user_xsave_hdr xsave_hdr; | ||
59 | struct user_ymmh_regs ymmh; | ||
60 | /* further processor state extensions go here */ | ||
61 | }; | ||
62 | |||
63 | #endif /* _ASM_X86_USER_H */ | ||
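Because the xsave layout grows with the features enabled by the CPU and OS, the size of this regset must be queried at run time, as the comment above describes. A small user-space sketch of that query, using GCC's <cpuid.h> helper (the printf label is illustrative only):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID.(EAX=0xD, ECX=0): EBX reports the size in bytes of the xsave
             * area for the currently enabled xstate features, i.e. the buffer
             * size a debugger should use for the NT_X86_XSTATE regset. */
            __cpuid_count(0xd, 0, eax, ebx, ecx, edx);
            printf("NT_X86_XSTATE regset size: %u bytes\n", ebx);
            return 0;
    }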
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index 2751f3075d8b..71605c7d5c5c 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -18,8 +18,8 @@ | |||
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | * | 20 | * |
21 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | 21 | * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. |
22 | * Copyright (c) Russ Anderson | 22 | * Copyright (c) Russ Anderson <rja@sgi.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/rtc.h> | 25 | #include <linux/rtc.h> |
@@ -36,7 +36,8 @@ enum uv_bios_cmd { | |||
36 | UV_BIOS_WATCHLIST_ALLOC, | 36 | UV_BIOS_WATCHLIST_ALLOC, |
37 | UV_BIOS_WATCHLIST_FREE, | 37 | UV_BIOS_WATCHLIST_FREE, |
38 | UV_BIOS_MEMPROTECT, | 38 | UV_BIOS_MEMPROTECT, |
39 | UV_BIOS_GET_PARTITION_ADDR | 39 | UV_BIOS_GET_PARTITION_ADDR, |
40 | UV_BIOS_SET_LEGACY_VGA_TARGET | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | /* | 43 | /* |
@@ -89,13 +90,14 @@ extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64); | |||
89 | extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64); | 90 | extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64); |
90 | extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); | 91 | extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); |
91 | 92 | ||
92 | extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *); | 93 | extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *, long *); |
93 | extern s64 uv_bios_freq_base(u64, u64 *); | 94 | extern s64 uv_bios_freq_base(u64, u64 *); |
94 | extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int, | 95 | extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int, |
95 | unsigned long *); | 96 | unsigned long *); |
96 | extern int uv_bios_mq_watchlist_free(int, int); | 97 | extern int uv_bios_mq_watchlist_free(int, int); |
97 | extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); | 98 | extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); |
98 | extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); | 99 | extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); |
100 | extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus); | ||
99 | 101 | ||
100 | extern void uv_bios_init(void); | 102 | extern void uv_bios_init(void); |
101 | 103 | ||
@@ -104,6 +106,7 @@ extern int uv_type; | |||
104 | extern long sn_partition_id; | 106 | extern long sn_partition_id; |
105 | extern long sn_coherency_id; | 107 | extern long sn_coherency_id; |
106 | extern long sn_region_size; | 108 | extern long sn_region_size; |
109 | extern long system_serial_number; | ||
107 | #define partition_coherence_id() (sn_coherency_id) | 110 | #define partition_coherence_id() (sn_coherency_id) |
108 | 111 | ||
109 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 112 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index c0a01b5d985b..3bb9491b7659 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h | |||
@@ -11,6 +11,7 @@ struct mm_struct; | |||
11 | extern enum uv_system_type get_uv_system_type(void); | 11 | extern enum uv_system_type get_uv_system_type(void); |
12 | extern int is_uv_system(void); | 12 | extern int is_uv_system(void); |
13 | extern void uv_cpu_init(void); | 13 | extern void uv_cpu_init(void); |
14 | extern void uv_nmi_init(void); | ||
14 | extern void uv_system_init(void); | 15 | extern void uv_system_init(void); |
15 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
16 | struct mm_struct *mm, | 17 | struct mm_struct *mm, |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 40be813fefb1..14cc74ba5d23 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -329,7 +329,8 @@ static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset | |||
329 | */ | 329 | */ |
330 | static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset) | 330 | static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset) |
331 | { | 331 | { |
332 | return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val); | 332 | return UV_GLOBAL_GRU_MMR_BASE | offset | |
333 | ((unsigned long)pnode << uv_hub_info->m_val); | ||
333 | } | 334 | } |
334 | 335 | ||
335 | static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val) | 336 | static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val) |
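The cast added to uv_global_gru_mmr_address() matters because pnode is an int: without it the shift is performed in 32-bit arithmetic and the high bits are lost before the value is widened into the 64-bit MMR address. A standalone illustration, using m_val = 26 purely as an assumed example shift width:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pnode = 0x200;     /* example node number */
            int m_val = 26;                 /* assumed shift width */

            unsigned long fixed  = (unsigned long)pnode << m_val;   /* 0x800000000 */
            unsigned long broken = (unsigned long)(pnode << m_val); /* shift done in
                                                                       32 bits: result is 0 */
            printf("fixed=%#lx broken=%#lx\n", fixed, broken);
            return 0;
    }

With the signed int used in the real code the narrow shift is not merely truncating but undefined once it overflows, which is the stronger reason for the explicit widening.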
diff --git a/arch/x86/include/asm/visws/cobalt.h b/arch/x86/include/asm/visws/cobalt.h index 166adf61e770..2edb37637ead 100644 --- a/arch/x86/include/asm/visws/cobalt.h +++ b/arch/x86/include/asm/visws/cobalt.h | |||
@@ -122,4 +122,6 @@ extern char visws_board_type; | |||
122 | 122 | ||
123 | extern char visws_board_rev; | 123 | extern char visws_board_rev; |
124 | 124 | ||
125 | extern int pci_visws_init(void); | ||
126 | |||
125 | #endif /* _ASM_X86_VISWS_COBALT_H */ | 127 | #endif /* _ASM_X86_VISWS_COBALT_H */ |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 2b4945419a84..fb9a080740ec 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -53,6 +53,7 @@ | |||
53 | */ | 53 | */ |
54 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | 54 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 |
55 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | 55 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 |
56 | #define SECONDARY_EXEC_RDTSCP 0x00000008 | ||
56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | 57 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 |
57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | 58 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 |
58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | 59 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 |
@@ -251,6 +252,7 @@ enum vmcs_field { | |||
251 | #define EXIT_REASON_MSR_READ 31 | 252 | #define EXIT_REASON_MSR_READ 31 |
252 | #define EXIT_REASON_MSR_WRITE 32 | 253 | #define EXIT_REASON_MSR_WRITE 32 |
253 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | 254 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
255 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
254 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | 256 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 |
255 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | 257 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 |
256 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | 258 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 |
@@ -362,6 +364,7 @@ enum vmcs_field { | |||
362 | #define VMX_EPTP_UC_BIT (1ull << 8) | 364 | #define VMX_EPTP_UC_BIT (1ull << 8) |
363 | #define VMX_EPTP_WB_BIT (1ull << 14) | 365 | #define VMX_EPTP_WB_BIT (1ull << 14) |
364 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) | 366 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) |
367 | #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) | ||
365 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) | 368 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) |
366 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) | 369 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) |
367 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) | 370 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) |
@@ -374,7 +377,7 @@ enum vmcs_field { | |||
374 | #define VMX_EPT_READABLE_MASK 0x1ull | 377 | #define VMX_EPT_READABLE_MASK 0x1ull |
375 | #define VMX_EPT_WRITABLE_MASK 0x2ull | 378 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
376 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | 379 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
377 | #define VMX_EPT_IGMT_BIT (1ull << 6) | 380 | #define VMX_EPT_IPAT_BIT (1ull << 6) |
378 | 381 | ||
379 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 382 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
380 | 383 | ||
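Alongside the new SECONDARY_EXEC_RDTSCP, EXIT_REASON_MONITOR_INSTRUCTION and VMX_EPT_1GB_PAGE_BIT definitions, VMX_EPT_IGMT_BIT is renamed to VMX_EPT_IPAT_BIT to match the architectural name: bit 6 of an EPT entry is the "ignore PAT" bit, which makes the CPU take the memory type from the EPT entry itself instead of combining it with the guest PAT. A hedged sketch of assembling such a field; VMX_EPT_MT_EPTE_SHIFT and MTRR_TYPE_WRBACK are assumed to be available from the surrounding x86/KVM headers, and real code must also handle MMIO and host MTRR ranges:

    /* Force write-back for an ordinary guest page and ignore the guest PAT. */
    u64 ept_memtype = ((u64)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) |
                      VMX_EPT_IPAT_BIT;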
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index ea0e8ea15e15..519b54327d75 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -99,6 +99,20 @@ struct x86_init_iommu { | |||
99 | }; | 99 | }; |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * struct x86_init_pci - platform specific pci init functions | ||
103 | * @arch_init: platform specific pci arch init call | ||
104 | * @init: platform specific pci subsystem init | ||
105 | * @init_irq: platform specific pci irq init | ||
106 | * @fixup_irqs: platform specific pci irq fixup | ||
107 | */ | ||
108 | struct x86_init_pci { | ||
109 | int (*arch_init)(void); | ||
110 | int (*init)(void); | ||
111 | void (*init_irq)(void); | ||
112 | void (*fixup_irqs)(void); | ||
113 | }; | ||
114 | |||
115 | /** | ||
102 | * struct x86_init_ops - functions for platform specific setup | 116 | * struct x86_init_ops - functions for platform specific setup |
103 | * | 117 | * |
104 | */ | 118 | */ |
@@ -110,6 +124,7 @@ struct x86_init_ops { | |||
110 | struct x86_init_paging paging; | 124 | struct x86_init_paging paging; |
111 | struct x86_init_timers timers; | 125 | struct x86_init_timers timers; |
112 | struct x86_init_iommu iommu; | 126 | struct x86_init_iommu iommu; |
127 | struct x86_init_pci pci; | ||
113 | }; | 128 | }; |
114 | 129 | ||
115 | /** | 130 | /** |
@@ -126,6 +141,7 @@ struct x86_cpuinit_ops { | |||
126 | * @get_wallclock: get time from HW clock like RTC etc. | 141 | * @get_wallclock: get time from HW clock like RTC etc. |
127 | * @set_wallclock: set time back to HW clock | 142 | * @set_wallclock: set time back to HW clock |
128 | * @is_untracked_pat_range exclude from PAT logic | 143 | * @is_untracked_pat_range exclude from PAT logic |
144 | * @nmi_init enable NMI on cpus | ||
129 | */ | 145 | */ |
130 | struct x86_platform_ops { | 146 | struct x86_platform_ops { |
131 | unsigned long (*calibrate_tsc)(void); | 147 | unsigned long (*calibrate_tsc)(void); |
@@ -133,6 +149,7 @@ struct x86_platform_ops { | |||
133 | int (*set_wallclock)(unsigned long nowtime); | 149 | int (*set_wallclock)(unsigned long nowtime); |
134 | void (*iommu_shutdown)(void); | 150 | void (*iommu_shutdown)(void); |
135 | bool (*is_untracked_pat_range)(u64 start, u64 end); | 151 | bool (*is_untracked_pat_range)(u64 start, u64 end); |
152 | void (*nmi_init)(void); | ||
136 | }; | 153 | }; |
137 | 154 | ||
138 | extern struct x86_init_ops x86_init; | 155 | extern struct x86_init_ops x86_init; |
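The new x86_init.pci group gives platforms an override point for the PCI arch init, subsystem init, IRQ init and IRQ fixup stages instead of relying solely on initcall ordering; pci_visws_init(), newly declared in cobalt.h above, is the kind of entry point that presumably gets wired up through these hooks. A hedged sketch of a platform installing its own hook (pci_myplat_arch_init and myplat_setup_arch are hypothetical names):

    #include <asm/x86_init.h>

    static int __init pci_myplat_arch_init(void)
    {
            /* platform-specific PCI root/bridge discovery would go here */
            return 0;
    }

    static void __init myplat_setup_arch(void)
    {
            /* replace the default PCI arch-init hook for this platform */
            x86_init.pci.arch_init = pci_myplat_arch_init;
    }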
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 727acc152344..ddc04ccad03b 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -27,9 +27,11 @@ | |||
27 | extern unsigned int xstate_size; | 27 | extern unsigned int xstate_size; |
28 | extern u64 pcntxt_mask; | 28 | extern u64 pcntxt_mask; |
29 | extern struct xsave_struct *init_xstate_buf; | 29 | extern struct xsave_struct *init_xstate_buf; |
30 | extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; | ||
30 | 31 | ||
31 | extern void xsave_cntxt_init(void); | 32 | extern void xsave_cntxt_init(void); |
32 | extern void xsave_init(void); | 33 | extern void xsave_init(void); |
34 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); | ||
33 | extern int init_fpu(struct task_struct *child); | 35 | extern int init_fpu(struct task_struct *child); |
34 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | 36 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, |
35 | void __user *fpstate, | 37 | void __user *fpstate, |
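The new xstate_fx_sw_bytes[] array is the kernel-side source for the software-reserved words described in user.h above, with word USER_XSTATE_XCR0_WORD carrying the OS-enabled xstate mask. A one-line sketch of how it would plausibly be populated during xsave setup, assuming pcntxt_mask (declared above) holds that mask:

    /* Record the OS-enabled xstate mask in the software-reserved area that is
     * exported through the NT_X86_XSTATE regset. */
    xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = pcntxt_mask;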