author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:35:09 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:35:09 -0500
commit    | a7f16d10b510f9ee3500af7831f2e3094fab3dca (patch)
tree      | bd2bff5e13083e1103205ff926342d6674f8e5c5
parent    | f66ffdedbf0fc059a92219bb08c1dbcac88f074b (diff)
parent    | 17c0e7107bed3d578864e6519f7f4e4c324c8f58 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: Mark atomic irq ops raw for 32bit legacy
x86: Merge show_regs()
x86: Macroise x86 cache descriptors
x86-32: clean up rwsem inline asm statements
x86: Merge asm/atomic_{32,64}.h
x86: Sync asm/atomic_32.h and asm/atomic_64.h
x86: Split atomic64_t functions into separate headers
x86-64: Modify memcpy()/memset() alternatives mechanism
x86-64: Modify copy_user_generic() alternatives mechanism
x86: Lift restriction on the location of FIX_BTMAP_*
x86, core: Optimize hweight32()
-rw-r--r-- | arch/x86/include/asm/alternative.h      |   7
-rw-r--r-- | arch/x86/include/asm/atomic.h           | 299
-rw-r--r-- | arch/x86/include/asm/atomic64_32.h      | 160
-rw-r--r-- | arch/x86/include/asm/atomic64_64.h      | 224
-rw-r--r-- | arch/x86/include/asm/atomic_32.h        | 415
-rw-r--r-- | arch/x86/include/asm/atomic_64.h        | 485
-rw-r--r-- | arch/x86/include/asm/fixmap.h           |  16
-rw-r--r-- | arch/x86/include/asm/rwsem.h            |  30
-rw-r--r-- | arch/x86/include/asm/uaccess_64.h       |  21
-rw-r--r-- | arch/x86/kernel/alternative.c           |   4
-rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c   |  84
-rw-r--r-- | arch/x86/kernel/process.c               |   7
-rw-r--r-- | arch/x86/kernel/process_32.c            |   6
-rw-r--r-- | arch/x86/kernel/process_64.c            |   6
-rw-r--r-- | arch/x86/kernel/x8664_ksyms_64.c        |   3
-rw-r--r-- | arch/x86/lib/copy_user_64.S             |   6
-rw-r--r-- | arch/x86/lib/memcpy_64.S                |  23
-rw-r--r-- | arch/x86/lib/memset_64.S                |  18
-rw-r--r-- | arch/x86/mm/ioremap.c                   |   4
-rw-r--r-- | lib/hweight.c                           |   7
20 files changed, 813 insertions(+), 1012 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index ac80b7d70014..f1e253ceba4b 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -130,11 +130,16 @@ static inline int alternatives_text_reserved(void *start, void *end)
130 | asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ | 130 | asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
131 | : output : "i" (0), ## input) | 131 | : output : "i" (0), ## input) |
132 | 132 | ||
133 | /* Like alternative_io, but for replacing a direct call with another one. */ | ||
134 | #define alternative_call(oldfunc, newfunc, feature, output, input...) \ | ||
135 | asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ | ||
136 | : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) | ||
137 | |||
133 | /* | 138 | /* |
134 | * use this macro(s) if you need more than one output parameter | 139 | * use this macro(s) if you need more than one output parameter |
135 | * in alternative_io | 140 | * in alternative_io |
136 | */ | 141 | */ |
137 | #define ASM_OUTPUT2(a, b) a, b | 142 | #define ASM_OUTPUT2(a...) a |
138 | 143 | ||
139 | struct paravirt_patch_site; | 144 | struct paravirt_patch_site; |
140 | #ifdef CONFIG_PARAVIRT | 145 | #ifdef CONFIG_PARAVIRT |
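The new alternative_call() lets a direct call be patched into a different callee at boot, and the relaxed ASM_OUTPUT2() makes it easier to pass several outputs through such macros. A hedged usage sketch follows; it is not taken from this merge, and the callee names are illustrative (assumed to be asm routines that take their argument in %rdi and clobber only %rax/%rcx):

```c
extern void clear_page_orig(void *page);	/* hypothetical asm helpers */
extern void clear_page_rep(void *page);

static inline void clear_page_best(void *page)
{
	/* Default to clear_page_orig; patch in clear_page_rep when the
	 * CPU advertises X86_FEATURE_REP_GOOD. */
	alternative_call(clear_page_orig, clear_page_rep,
			 X86_FEATURE_REP_GOOD,
			 "=D" (page),		/* output: %rdi gets clobbered */
			 "0" (page)		/* input: page pointer in %rdi */
			 : "cc", "memory", "rax", "rcx");
}
```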
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 4e1b8873c474..8f8217b9bdac 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,5 +1,300 @@
1 | #ifndef _ASM_X86_ATOMIC_H | ||
2 | #define _ASM_X86_ATOMIC_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | #include <asm/alternative.h> | ||
8 | #include <asm/cmpxchg.h> | ||
9 | |||
10 | /* | ||
11 | * Atomic operations that C can't guarantee us. Useful for | ||
12 | * resource counting etc.. | ||
13 | */ | ||
14 | |||
15 | #define ATOMIC_INIT(i) { (i) } | ||
16 | |||
17 | /** | ||
18 | * atomic_read - read atomic variable | ||
19 | * @v: pointer of type atomic_t | ||
20 | * | ||
21 | * Atomically reads the value of @v. | ||
22 | */ | ||
23 | static inline int atomic_read(const atomic_t *v) | ||
24 | { | ||
25 | return v->counter; | ||
26 | } | ||
27 | |||
28 | /** | ||
29 | * atomic_set - set atomic variable | ||
30 | * @v: pointer of type atomic_t | ||
31 | * @i: required value | ||
32 | * | ||
33 | * Atomically sets the value of @v to @i. | ||
34 | */ | ||
35 | static inline void atomic_set(atomic_t *v, int i) | ||
36 | { | ||
37 | v->counter = i; | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * atomic_add - add integer to atomic variable | ||
42 | * @i: integer value to add | ||
43 | * @v: pointer of type atomic_t | ||
44 | * | ||
45 | * Atomically adds @i to @v. | ||
46 | */ | ||
47 | static inline void atomic_add(int i, atomic_t *v) | ||
48 | { | ||
49 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
50 | : "+m" (v->counter) | ||
51 | : "ir" (i)); | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * atomic_sub - subtract integer from atomic variable | ||
56 | * @i: integer value to subtract | ||
57 | * @v: pointer of type atomic_t | ||
58 | * | ||
59 | * Atomically subtracts @i from @v. | ||
60 | */ | ||
61 | static inline void atomic_sub(int i, atomic_t *v) | ||
62 | { | ||
63 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
64 | : "+m" (v->counter) | ||
65 | : "ir" (i)); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * atomic_sub_and_test - subtract value from variable and test result | ||
70 | * @i: integer value to subtract | ||
71 | * @v: pointer of type atomic_t | ||
72 | * | ||
73 | * Atomically subtracts @i from @v and returns | ||
74 | * true if the result is zero, or false for all | ||
75 | * other cases. | ||
76 | */ | ||
77 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
78 | { | ||
79 | unsigned char c; | ||
80 | |||
81 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
82 | : "+m" (v->counter), "=qm" (c) | ||
83 | : "ir" (i) : "memory"); | ||
84 | return c; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * atomic_inc - increment atomic variable | ||
89 | * @v: pointer of type atomic_t | ||
90 | * | ||
91 | * Atomically increments @v by 1. | ||
92 | */ | ||
93 | static inline void atomic_inc(atomic_t *v) | ||
94 | { | ||
95 | asm volatile(LOCK_PREFIX "incl %0" | ||
96 | : "+m" (v->counter)); | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * atomic_dec - decrement atomic variable | ||
101 | * @v: pointer of type atomic_t | ||
102 | * | ||
103 | * Atomically decrements @v by 1. | ||
104 | */ | ||
105 | static inline void atomic_dec(atomic_t *v) | ||
106 | { | ||
107 | asm volatile(LOCK_PREFIX "decl %0" | ||
108 | : "+m" (v->counter)); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * atomic_dec_and_test - decrement and test | ||
113 | * @v: pointer of type atomic_t | ||
114 | * | ||
115 | * Atomically decrements @v by 1 and | ||
116 | * returns true if the result is 0, or false for all other | ||
117 | * cases. | ||
118 | */ | ||
119 | static inline int atomic_dec_and_test(atomic_t *v) | ||
120 | { | ||
121 | unsigned char c; | ||
122 | |||
123 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
124 | : "+m" (v->counter), "=qm" (c) | ||
125 | : : "memory"); | ||
126 | return c != 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * atomic_inc_and_test - increment and test | ||
131 | * @v: pointer of type atomic_t | ||
132 | * | ||
133 | * Atomically increments @v by 1 | ||
134 | * and returns true if the result is zero, or false for all | ||
135 | * other cases. | ||
136 | */ | ||
137 | static inline int atomic_inc_and_test(atomic_t *v) | ||
138 | { | ||
139 | unsigned char c; | ||
140 | |||
141 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
142 | : "+m" (v->counter), "=qm" (c) | ||
143 | : : "memory"); | ||
144 | return c != 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * atomic_add_negative - add and test if negative | ||
149 | * @i: integer value to add | ||
150 | * @v: pointer of type atomic_t | ||
151 | * | ||
152 | * Atomically adds @i to @v and returns true | ||
153 | * if the result is negative, or false when | ||
154 | * result is greater than or equal to zero. | ||
155 | */ | ||
156 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
157 | { | ||
158 | unsigned char c; | ||
159 | |||
160 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
161 | : "+m" (v->counter), "=qm" (c) | ||
162 | : "ir" (i) : "memory"); | ||
163 | return c; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * atomic_add_return - add integer and return | ||
168 | * @i: integer value to add | ||
169 | * @v: pointer of type atomic_t | ||
170 | * | ||
171 | * Atomically adds @i to @v and returns @i + @v | ||
172 | */ | ||
173 | static inline int atomic_add_return(int i, atomic_t *v) | ||
174 | { | ||
175 | int __i; | ||
176 | #ifdef CONFIG_M386 | ||
177 | unsigned long flags; | ||
178 | if (unlikely(boot_cpu_data.x86 <= 3)) | ||
179 | goto no_xadd; | ||
180 | #endif | ||
181 | /* Modern 486+ processor */ | ||
182 | __i = i; | ||
183 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
184 | : "+r" (i), "+m" (v->counter) | ||
185 | : : "memory"); | ||
186 | return i + __i; | ||
187 | |||
188 | #ifdef CONFIG_M386 | ||
189 | no_xadd: /* Legacy 386 processor */ | ||
190 | raw_local_irq_save(flags); | ||
191 | __i = atomic_read(v); | ||
192 | atomic_set(v, i + __i); | ||
193 | raw_local_irq_restore(flags); | ||
194 | return i + __i; | ||
195 | #endif | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * atomic_sub_return - subtract integer and return | ||
200 | * @v: pointer of type atomic_t | ||
201 | * @i: integer value to subtract | ||
202 | * | ||
203 | * Atomically subtracts @i from @v and returns @v - @i | ||
204 | */ | ||
205 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
206 | { | ||
207 | return atomic_add_return(-i, v); | ||
208 | } | ||
209 | |||
210 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
211 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
212 | |||
213 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
214 | { | ||
215 | return cmpxchg(&v->counter, old, new); | ||
216 | } | ||
217 | |||
218 | static inline int atomic_xchg(atomic_t *v, int new) | ||
219 | { | ||
220 | return xchg(&v->counter, new); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * atomic_add_unless - add unless the number is already a given value | ||
225 | * @v: pointer of type atomic_t | ||
226 | * @a: the amount to add to v... | ||
227 | * @u: ...unless v is equal to u. | ||
228 | * | ||
229 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
230 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
231 | */ | ||
232 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
233 | { | ||
234 | int c, old; | ||
235 | c = atomic_read(v); | ||
236 | for (;;) { | ||
237 | if (unlikely(c == (u))) | ||
238 | break; | ||
239 | old = atomic_cmpxchg((v), c, c + (a)); | ||
240 | if (likely(old == c)) | ||
241 | break; | ||
242 | c = old; | ||
243 | } | ||
244 | return c != (u); | ||
245 | } | ||
246 | |||
247 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
248 | |||
249 | /** | ||
250 | * atomic_inc_short - increment of a short integer | ||
251 | * @v: pointer to type int | ||
252 | * | ||
253 | * Atomically adds 1 to @v | ||
254 | * Returns the new value of @u | ||
255 | */ | ||
256 | static inline short int atomic_inc_short(short int *v) | ||
257 | { | ||
258 | asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); | ||
259 | return *v; | ||
260 | } | ||
261 | |||
262 | #ifdef CONFIG_X86_64 | ||
263 | /** | ||
264 | * atomic_or_long - OR of two long integers | ||
265 | * @v1: pointer to type unsigned long | ||
266 | * @v2: pointer to type unsigned long | ||
267 | * | ||
268 | * Atomically ORs @v1 and @v2 | ||
269 | * Returns the result of the OR | ||
270 | */ | ||
271 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
272 | { | ||
273 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
274 | } | ||
275 | #endif | ||
276 | |||
277 | /* These are x86-specific, used by some header files */ | ||
278 | #define atomic_clear_mask(mask, addr) \ | ||
279 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
280 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
281 | |||
282 | #define atomic_set_mask(mask, addr) \ | ||
283 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
284 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ | ||
285 | : "memory") | ||
286 | |||
287 | /* Atomic operations are already serializing on x86 */ | ||
288 | #define smp_mb__before_atomic_dec() barrier() | ||
289 | #define smp_mb__after_atomic_dec() barrier() | ||
290 | #define smp_mb__before_atomic_inc() barrier() | ||
291 | #define smp_mb__after_atomic_inc() barrier() | ||
292 | |||
1 | #ifdef CONFIG_X86_32 | 293 | #ifdef CONFIG_X86_32 |
2 | # include "atomic_32.h" | 294 | # include "atomic64_32.h" |
3 | #else | 295 | #else |
4 | # include "atomic_64.h" | 296 | # include "atomic64_64.h" |
5 | #endif | 297 | #endif |
298 | |||
299 | #include <asm-generic/atomic-long.h> | ||
300 | #endif /* _ASM_X86_ATOMIC_H */ | ||
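The return-value conventions spelled out in the comments above are easiest to see in a refcount pattern. An illustrative sketch, not part of this patch; the struct and helpers are hypothetical and kfree() is assumed to come from <linux/slab.h>:

```c
struct obj {
	atomic_t refcount;
};

static struct obj *obj_get(struct obj *o)
{
	/* atomic_inc_not_zero() refuses to resurrect an object whose
	 * count has already dropped to zero. */
	return atomic_inc_not_zero(&o->refcount) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	/* atomic_dec_and_test() returns true only for the final put. */
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);
}
```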
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
new file mode 100644
index 000000000000..03027bf28de5
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -0,0 +1,160 @@
1 | #ifndef _ASM_X86_ATOMIC64_32_H | ||
2 | #define _ASM_X86_ATOMIC64_32_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | //#include <asm/cmpxchg.h> | ||
8 | |||
9 | /* An 64bit atomic type */ | ||
10 | |||
11 | typedef struct { | ||
12 | u64 __aligned(8) counter; | ||
13 | } atomic64_t; | ||
14 | |||
15 | #define ATOMIC64_INIT(val) { (val) } | ||
16 | |||
17 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | ||
18 | |||
19 | /** | ||
20 | * atomic64_xchg - xchg atomic64 variable | ||
21 | * @ptr: pointer to type atomic64_t | ||
22 | * @new_val: value to assign | ||
23 | * | ||
24 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
25 | * the old value. | ||
26 | */ | ||
27 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | ||
28 | |||
29 | /** | ||
30 | * atomic64_set - set atomic64 variable | ||
31 | * @ptr: pointer to type atomic64_t | ||
32 | * @new_val: value to assign | ||
33 | * | ||
34 | * Atomically sets the value of @ptr to @new_val. | ||
35 | */ | ||
36 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | ||
37 | |||
38 | /** | ||
39 | * atomic64_read - read atomic64 variable | ||
40 | * @ptr: pointer to type atomic64_t | ||
41 | * | ||
42 | * Atomically reads the value of @ptr and returns it. | ||
43 | */ | ||
44 | static inline u64 atomic64_read(atomic64_t *ptr) | ||
45 | { | ||
46 | u64 res; | ||
47 | |||
48 | /* | ||
49 | * Note, we inline this atomic64_t primitive because | ||
50 | * it only clobbers EAX/EDX and leaves the others | ||
51 | * untouched. We also (somewhat subtly) rely on the | ||
52 | * fact that cmpxchg8b returns the current 64-bit value | ||
53 | * of the memory location we are touching: | ||
54 | */ | ||
55 | asm volatile( | ||
56 | "mov %%ebx, %%eax\n\t" | ||
57 | "mov %%ecx, %%edx\n\t" | ||
58 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
59 | : "=&A" (res) | ||
60 | : "m" (*ptr) | ||
61 | ); | ||
62 | |||
63 | return res; | ||
64 | } | ||
65 | |||
66 | extern u64 atomic64_read(atomic64_t *ptr); | ||
67 | |||
68 | /** | ||
69 | * atomic64_add_return - add and return | ||
70 | * @delta: integer value to add | ||
71 | * @ptr: pointer to type atomic64_t | ||
72 | * | ||
73 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
74 | */ | ||
75 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | ||
76 | |||
77 | /* | ||
78 | * Other variants with different arithmetic operators: | ||
79 | */ | ||
80 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | ||
81 | extern u64 atomic64_inc_return(atomic64_t *ptr); | ||
82 | extern u64 atomic64_dec_return(atomic64_t *ptr); | ||
83 | |||
84 | /** | ||
85 | * atomic64_add - add integer to atomic64 variable | ||
86 | * @delta: integer value to add | ||
87 | * @ptr: pointer to type atomic64_t | ||
88 | * | ||
89 | * Atomically adds @delta to @ptr. | ||
90 | */ | ||
91 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | ||
92 | |||
93 | /** | ||
94 | * atomic64_sub - subtract the atomic64 variable | ||
95 | * @delta: integer value to subtract | ||
96 | * @ptr: pointer to type atomic64_t | ||
97 | * | ||
98 | * Atomically subtracts @delta from @ptr. | ||
99 | */ | ||
100 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | ||
101 | |||
102 | /** | ||
103 | * atomic64_sub_and_test - subtract value from variable and test result | ||
104 | * @delta: integer value to subtract | ||
105 | * @ptr: pointer to type atomic64_t | ||
106 | * | ||
107 | * Atomically subtracts @delta from @ptr and returns | ||
108 | * true if the result is zero, or false for all | ||
109 | * other cases. | ||
110 | */ | ||
111 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | ||
112 | |||
113 | /** | ||
114 | * atomic64_inc - increment atomic64 variable | ||
115 | * @ptr: pointer to type atomic64_t | ||
116 | * | ||
117 | * Atomically increments @ptr by 1. | ||
118 | */ | ||
119 | extern void atomic64_inc(atomic64_t *ptr); | ||
120 | |||
121 | /** | ||
122 | * atomic64_dec - decrement atomic64 variable | ||
123 | * @ptr: pointer to type atomic64_t | ||
124 | * | ||
125 | * Atomically decrements @ptr by 1. | ||
126 | */ | ||
127 | extern void atomic64_dec(atomic64_t *ptr); | ||
128 | |||
129 | /** | ||
130 | * atomic64_dec_and_test - decrement and test | ||
131 | * @ptr: pointer to type atomic64_t | ||
132 | * | ||
133 | * Atomically decrements @ptr by 1 and | ||
134 | * returns true if the result is 0, or false for all other | ||
135 | * cases. | ||
136 | */ | ||
137 | extern int atomic64_dec_and_test(atomic64_t *ptr); | ||
138 | |||
139 | /** | ||
140 | * atomic64_inc_and_test - increment and test | ||
141 | * @ptr: pointer to type atomic64_t | ||
142 | * | ||
143 | * Atomically increments @ptr by 1 | ||
144 | * and returns true if the result is zero, or false for all | ||
145 | * other cases. | ||
146 | */ | ||
147 | extern int atomic64_inc_and_test(atomic64_t *ptr); | ||
148 | |||
149 | /** | ||
150 | * atomic64_add_negative - add and test if negative | ||
151 | * @delta: integer value to add | ||
152 | * @ptr: pointer to type atomic64_t | ||
153 | * | ||
154 | * Atomically adds @delta to @ptr and returns true | ||
155 | * if the result is negative, or false when | ||
156 | * result is greater than or equal to zero. | ||
157 | */ | ||
158 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | ||
159 | |||
160 | #endif /* _ASM_X86_ATOMIC64_32_H */ | ||
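Apart from atomic64_read(), every operation here is declared extern and implemented out of line. As a hedged sketch of the technique such a helper can use, built on atomic64_cmpxchg() — an illustration only, not the implementation this header actually links against:

```c
u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
{
	u64 old, new;

	/* cmpxchg retry loop: re-read, recompute, and retry until no
	 * other CPU has modified the counter in between. */
	do {
		old = atomic64_read(ptr);
		new = old + delta;
	} while (atomic64_cmpxchg(ptr, old, new) != old);

	return new;
}
```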
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
new file mode 100644
index 000000000000..51c5b4056929
--- /dev/null
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -0,0 +1,224 @@
1 | #ifndef _ASM_X86_ATOMIC64_64_H | ||
2 | #define _ASM_X86_ATOMIC64_64_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/alternative.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | /* The 64-bit atomic type */ | ||
9 | |||
10 | #define ATOMIC64_INIT(i) { (i) } | ||
11 | |||
12 | /** | ||
13 | * atomic64_read - read atomic64 variable | ||
14 | * @v: pointer of type atomic64_t | ||
15 | * | ||
16 | * Atomically reads the value of @v. | ||
17 | * Doesn't imply a read memory barrier. | ||
18 | */ | ||
19 | static inline long atomic64_read(const atomic64_t *v) | ||
20 | { | ||
21 | return v->counter; | ||
22 | } | ||
23 | |||
24 | /** | ||
25 | * atomic64_set - set atomic64 variable | ||
26 | * @v: pointer to type atomic64_t | ||
27 | * @i: required value | ||
28 | * | ||
29 | * Atomically sets the value of @v to @i. | ||
30 | */ | ||
31 | static inline void atomic64_set(atomic64_t *v, long i) | ||
32 | { | ||
33 | v->counter = i; | ||
34 | } | ||
35 | |||
36 | /** | ||
37 | * atomic64_add - add integer to atomic64 variable | ||
38 | * @i: integer value to add | ||
39 | * @v: pointer to type atomic64_t | ||
40 | * | ||
41 | * Atomically adds @i to @v. | ||
42 | */ | ||
43 | static inline void atomic64_add(long i, atomic64_t *v) | ||
44 | { | ||
45 | asm volatile(LOCK_PREFIX "addq %1,%0" | ||
46 | : "=m" (v->counter) | ||
47 | : "er" (i), "m" (v->counter)); | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * atomic64_sub - subtract the atomic64 variable | ||
52 | * @i: integer value to subtract | ||
53 | * @v: pointer to type atomic64_t | ||
54 | * | ||
55 | * Atomically subtracts @i from @v. | ||
56 | */ | ||
57 | static inline void atomic64_sub(long i, atomic64_t *v) | ||
58 | { | ||
59 | asm volatile(LOCK_PREFIX "subq %1,%0" | ||
60 | : "=m" (v->counter) | ||
61 | : "er" (i), "m" (v->counter)); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * atomic64_sub_and_test - subtract value from variable and test result | ||
66 | * @i: integer value to subtract | ||
67 | * @v: pointer to type atomic64_t | ||
68 | * | ||
69 | * Atomically subtracts @i from @v and returns | ||
70 | * true if the result is zero, or false for all | ||
71 | * other cases. | ||
72 | */ | ||
73 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) | ||
74 | { | ||
75 | unsigned char c; | ||
76 | |||
77 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" | ||
78 | : "=m" (v->counter), "=qm" (c) | ||
79 | : "er" (i), "m" (v->counter) : "memory"); | ||
80 | return c; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * atomic64_inc - increment atomic64 variable | ||
85 | * @v: pointer to type atomic64_t | ||
86 | * | ||
87 | * Atomically increments @v by 1. | ||
88 | */ | ||
89 | static inline void atomic64_inc(atomic64_t *v) | ||
90 | { | ||
91 | asm volatile(LOCK_PREFIX "incq %0" | ||
92 | : "=m" (v->counter) | ||
93 | : "m" (v->counter)); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * atomic64_dec - decrement atomic64 variable | ||
98 | * @v: pointer to type atomic64_t | ||
99 | * | ||
100 | * Atomically decrements @v by 1. | ||
101 | */ | ||
102 | static inline void atomic64_dec(atomic64_t *v) | ||
103 | { | ||
104 | asm volatile(LOCK_PREFIX "decq %0" | ||
105 | : "=m" (v->counter) | ||
106 | : "m" (v->counter)); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * atomic64_dec_and_test - decrement and test | ||
111 | * @v: pointer to type atomic64_t | ||
112 | * | ||
113 | * Atomically decrements @v by 1 and | ||
114 | * returns true if the result is 0, or false for all other | ||
115 | * cases. | ||
116 | */ | ||
117 | static inline int atomic64_dec_and_test(atomic64_t *v) | ||
118 | { | ||
119 | unsigned char c; | ||
120 | |||
121 | asm volatile(LOCK_PREFIX "decq %0; sete %1" | ||
122 | : "=m" (v->counter), "=qm" (c) | ||
123 | : "m" (v->counter) : "memory"); | ||
124 | return c != 0; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * atomic64_inc_and_test - increment and test | ||
129 | * @v: pointer to type atomic64_t | ||
130 | * | ||
131 | * Atomically increments @v by 1 | ||
132 | * and returns true if the result is zero, or false for all | ||
133 | * other cases. | ||
134 | */ | ||
135 | static inline int atomic64_inc_and_test(atomic64_t *v) | ||
136 | { | ||
137 | unsigned char c; | ||
138 | |||
139 | asm volatile(LOCK_PREFIX "incq %0; sete %1" | ||
140 | : "=m" (v->counter), "=qm" (c) | ||
141 | : "m" (v->counter) : "memory"); | ||
142 | return c != 0; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * atomic64_add_negative - add and test if negative | ||
147 | * @i: integer value to add | ||
148 | * @v: pointer to type atomic64_t | ||
149 | * | ||
150 | * Atomically adds @i to @v and returns true | ||
151 | * if the result is negative, or false when | ||
152 | * result is greater than or equal to zero. | ||
153 | */ | ||
154 | static inline int atomic64_add_negative(long i, atomic64_t *v) | ||
155 | { | ||
156 | unsigned char c; | ||
157 | |||
158 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" | ||
159 | : "=m" (v->counter), "=qm" (c) | ||
160 | : "er" (i), "m" (v->counter) : "memory"); | ||
161 | return c; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * atomic64_add_return - add and return | ||
166 | * @i: integer value to add | ||
167 | * @v: pointer to type atomic64_t | ||
168 | * | ||
169 | * Atomically adds @i to @v and returns @i + @v | ||
170 | */ | ||
171 | static inline long atomic64_add_return(long i, atomic64_t *v) | ||
172 | { | ||
173 | long __i = i; | ||
174 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" | ||
175 | : "+r" (i), "+m" (v->counter) | ||
176 | : : "memory"); | ||
177 | return i + __i; | ||
178 | } | ||
179 | |||
180 | static inline long atomic64_sub_return(long i, atomic64_t *v) | ||
181 | { | ||
182 | return atomic64_add_return(-i, v); | ||
183 | } | ||
184 | |||
185 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | ||
186 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | ||
187 | |||
188 | static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | ||
189 | { | ||
190 | return cmpxchg(&v->counter, old, new); | ||
191 | } | ||
192 | |||
193 | static inline long atomic64_xchg(atomic64_t *v, long new) | ||
194 | { | ||
195 | return xchg(&v->counter, new); | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * atomic64_add_unless - add unless the number is a given value | ||
200 | * @v: pointer of type atomic64_t | ||
201 | * @a: the amount to add to v... | ||
202 | * @u: ...unless v is equal to u. | ||
203 | * | ||
204 | * Atomically adds @a to @v, so long as it was not @u. | ||
205 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
206 | */ | ||
207 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
208 | { | ||
209 | long c, old; | ||
210 | c = atomic64_read(v); | ||
211 | for (;;) { | ||
212 | if (unlikely(c == (u))) | ||
213 | break; | ||
214 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
215 | if (likely(old == c)) | ||
216 | break; | ||
217 | c = old; | ||
218 | } | ||
219 | return c != (u); | ||
220 | } | ||
221 | |||
222 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
223 | |||
224 | #endif /* _ASM_X86_ATOMIC64_64_H */ | ||
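A short illustration, not from the patch, of the xadd convention used by atomic64_add_return() above: after "lock xaddq" the register operand holds the counter's old value, so old + delta, i.e. the new value, is what gets returned:

```c
static void atomic64_return_example(void)	/* hypothetical */
{
	atomic64_t v = ATOMIC64_INIT(40);

	BUG_ON(atomic64_add_return(2, &v) != 42);	/* returns the new value */
	BUG_ON(atomic64_sub_return(2, &v) != 40);
	BUG_ON(atomic64_read(&v) != 40);
}
```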
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
deleted file mode 100644
index dc5a667ff791..000000000000
--- a/arch/x86/include/asm/atomic_32.h
+++ /dev/null
@@ -1,415 +0,0 @@
1 | #ifndef _ASM_X86_ATOMIC_32_H | ||
2 | #define _ASM_X86_ATOMIC_32_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/processor.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | |||
9 | /* | ||
10 | * Atomic operations that C can't guarantee us. Useful for | ||
11 | * resource counting etc.. | ||
12 | */ | ||
13 | |||
14 | #define ATOMIC_INIT(i) { (i) } | ||
15 | |||
16 | /** | ||
17 | * atomic_read - read atomic variable | ||
18 | * @v: pointer of type atomic_t | ||
19 | * | ||
20 | * Atomically reads the value of @v. | ||
21 | */ | ||
22 | static inline int atomic_read(const atomic_t *v) | ||
23 | { | ||
24 | return v->counter; | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * atomic_set - set atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * @i: required value | ||
31 | * | ||
32 | * Atomically sets the value of @v to @i. | ||
33 | */ | ||
34 | static inline void atomic_set(atomic_t *v, int i) | ||
35 | { | ||
36 | v->counter = i; | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * atomic_add - add integer to atomic variable | ||
41 | * @i: integer value to add | ||
42 | * @v: pointer of type atomic_t | ||
43 | * | ||
44 | * Atomically adds @i to @v. | ||
45 | */ | ||
46 | static inline void atomic_add(int i, atomic_t *v) | ||
47 | { | ||
48 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
49 | : "+m" (v->counter) | ||
50 | : "ir" (i)); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * atomic_sub - subtract integer from atomic variable | ||
55 | * @i: integer value to subtract | ||
56 | * @v: pointer of type atomic_t | ||
57 | * | ||
58 | * Atomically subtracts @i from @v. | ||
59 | */ | ||
60 | static inline void atomic_sub(int i, atomic_t *v) | ||
61 | { | ||
62 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
63 | : "+m" (v->counter) | ||
64 | : "ir" (i)); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * atomic_sub_and_test - subtract value from variable and test result | ||
69 | * @i: integer value to subtract | ||
70 | * @v: pointer of type atomic_t | ||
71 | * | ||
72 | * Atomically subtracts @i from @v and returns | ||
73 | * true if the result is zero, or false for all | ||
74 | * other cases. | ||
75 | */ | ||
76 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
77 | { | ||
78 | unsigned char c; | ||
79 | |||
80 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
81 | : "+m" (v->counter), "=qm" (c) | ||
82 | : "ir" (i) : "memory"); | ||
83 | return c; | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * atomic_inc - increment atomic variable | ||
88 | * @v: pointer of type atomic_t | ||
89 | * | ||
90 | * Atomically increments @v by 1. | ||
91 | */ | ||
92 | static inline void atomic_inc(atomic_t *v) | ||
93 | { | ||
94 | asm volatile(LOCK_PREFIX "incl %0" | ||
95 | : "+m" (v->counter)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_dec - decrement atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1. | ||
103 | */ | ||
104 | static inline void atomic_dec(atomic_t *v) | ||
105 | { | ||
106 | asm volatile(LOCK_PREFIX "decl %0" | ||
107 | : "+m" (v->counter)); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * atomic_dec_and_test - decrement and test | ||
112 | * @v: pointer of type atomic_t | ||
113 | * | ||
114 | * Atomically decrements @v by 1 and | ||
115 | * returns true if the result is 0, or false for all other | ||
116 | * cases. | ||
117 | */ | ||
118 | static inline int atomic_dec_and_test(atomic_t *v) | ||
119 | { | ||
120 | unsigned char c; | ||
121 | |||
122 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
123 | : "+m" (v->counter), "=qm" (c) | ||
124 | : : "memory"); | ||
125 | return c != 0; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * atomic_inc_and_test - increment and test | ||
130 | * @v: pointer of type atomic_t | ||
131 | * | ||
132 | * Atomically increments @v by 1 | ||
133 | * and returns true if the result is zero, or false for all | ||
134 | * other cases. | ||
135 | */ | ||
136 | static inline int atomic_inc_and_test(atomic_t *v) | ||
137 | { | ||
138 | unsigned char c; | ||
139 | |||
140 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
141 | : "+m" (v->counter), "=qm" (c) | ||
142 | : : "memory"); | ||
143 | return c != 0; | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * atomic_add_negative - add and test if negative | ||
148 | * @v: pointer of type atomic_t | ||
149 | * @i: integer value to add | ||
150 | * | ||
151 | * Atomically adds @i to @v and returns true | ||
152 | * if the result is negative, or false when | ||
153 | * result is greater than or equal to zero. | ||
154 | */ | ||
155 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
156 | { | ||
157 | unsigned char c; | ||
158 | |||
159 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
160 | : "+m" (v->counter), "=qm" (c) | ||
161 | : "ir" (i) : "memory"); | ||
162 | return c; | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * atomic_add_return - add integer and return | ||
167 | * @v: pointer of type atomic_t | ||
168 | * @i: integer value to add | ||
169 | * | ||
170 | * Atomically adds @i to @v and returns @i + @v | ||
171 | */ | ||
172 | static inline int atomic_add_return(int i, atomic_t *v) | ||
173 | { | ||
174 | int __i; | ||
175 | #ifdef CONFIG_M386 | ||
176 | unsigned long flags; | ||
177 | if (unlikely(boot_cpu_data.x86 <= 3)) | ||
178 | goto no_xadd; | ||
179 | #endif | ||
180 | /* Modern 486+ processor */ | ||
181 | __i = i; | ||
182 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
183 | : "+r" (i), "+m" (v->counter) | ||
184 | : : "memory"); | ||
185 | return i + __i; | ||
186 | |||
187 | #ifdef CONFIG_M386 | ||
188 | no_xadd: /* Legacy 386 processor */ | ||
189 | local_irq_save(flags); | ||
190 | __i = atomic_read(v); | ||
191 | atomic_set(v, i + __i); | ||
192 | local_irq_restore(flags); | ||
193 | return i + __i; | ||
194 | #endif | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * atomic_sub_return - subtract integer and return | ||
199 | * @v: pointer of type atomic_t | ||
200 | * @i: integer value to subtract | ||
201 | * | ||
202 | * Atomically subtracts @i from @v and returns @v - @i | ||
203 | */ | ||
204 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
205 | { | ||
206 | return atomic_add_return(-i, v); | ||
207 | } | ||
208 | |||
209 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
210 | { | ||
211 | return cmpxchg(&v->counter, old, new); | ||
212 | } | ||
213 | |||
214 | static inline int atomic_xchg(atomic_t *v, int new) | ||
215 | { | ||
216 | return xchg(&v->counter, new); | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * atomic_add_unless - add unless the number is already a given value | ||
221 | * @v: pointer of type atomic_t | ||
222 | * @a: the amount to add to v... | ||
223 | * @u: ...unless v is equal to u. | ||
224 | * | ||
225 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
226 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
227 | */ | ||
228 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
229 | { | ||
230 | int c, old; | ||
231 | c = atomic_read(v); | ||
232 | for (;;) { | ||
233 | if (unlikely(c == (u))) | ||
234 | break; | ||
235 | old = atomic_cmpxchg((v), c, c + (a)); | ||
236 | if (likely(old == c)) | ||
237 | break; | ||
238 | c = old; | ||
239 | } | ||
240 | return c != (u); | ||
241 | } | ||
242 | |||
243 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
244 | |||
245 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
246 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
247 | |||
248 | /* These are x86-specific, used by some header files */ | ||
249 | #define atomic_clear_mask(mask, addr) \ | ||
250 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
251 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
252 | |||
253 | #define atomic_set_mask(mask, addr) \ | ||
254 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
255 | : : "r" (mask), "m" (*(addr)) : "memory") | ||
256 | |||
257 | /* Atomic operations are already serializing on x86 */ | ||
258 | #define smp_mb__before_atomic_dec() barrier() | ||
259 | #define smp_mb__after_atomic_dec() barrier() | ||
260 | #define smp_mb__before_atomic_inc() barrier() | ||
261 | #define smp_mb__after_atomic_inc() barrier() | ||
262 | |||
263 | /* An 64bit atomic type */ | ||
264 | |||
265 | typedef struct { | ||
266 | u64 __aligned(8) counter; | ||
267 | } atomic64_t; | ||
268 | |||
269 | #define ATOMIC64_INIT(val) { (val) } | ||
270 | |||
271 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | ||
272 | |||
273 | /** | ||
274 | * atomic64_xchg - xchg atomic64 variable | ||
275 | * @ptr: pointer to type atomic64_t | ||
276 | * @new_val: value to assign | ||
277 | * | ||
278 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
279 | * the old value. | ||
280 | */ | ||
281 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | ||
282 | |||
283 | /** | ||
284 | * atomic64_set - set atomic64 variable | ||
285 | * @ptr: pointer to type atomic64_t | ||
286 | * @new_val: value to assign | ||
287 | * | ||
288 | * Atomically sets the value of @ptr to @new_val. | ||
289 | */ | ||
290 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | ||
291 | |||
292 | /** | ||
293 | * atomic64_read - read atomic64 variable | ||
294 | * @ptr: pointer to type atomic64_t | ||
295 | * | ||
296 | * Atomically reads the value of @ptr and returns it. | ||
297 | */ | ||
298 | static inline u64 atomic64_read(atomic64_t *ptr) | ||
299 | { | ||
300 | u64 res; | ||
301 | |||
302 | /* | ||
303 | * Note, we inline this atomic64_t primitive because | ||
304 | * it only clobbers EAX/EDX and leaves the others | ||
305 | * untouched. We also (somewhat subtly) rely on the | ||
306 | * fact that cmpxchg8b returns the current 64-bit value | ||
307 | * of the memory location we are touching: | ||
308 | */ | ||
309 | asm volatile( | ||
310 | "mov %%ebx, %%eax\n\t" | ||
311 | "mov %%ecx, %%edx\n\t" | ||
312 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
313 | : "=&A" (res) | ||
314 | : "m" (*ptr) | ||
315 | ); | ||
316 | |||
317 | return res; | ||
318 | } | ||
319 | |||
320 | extern u64 atomic64_read(atomic64_t *ptr); | ||
321 | |||
322 | /** | ||
323 | * atomic64_add_return - add and return | ||
324 | * @delta: integer value to add | ||
325 | * @ptr: pointer to type atomic64_t | ||
326 | * | ||
327 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
328 | */ | ||
329 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | ||
330 | |||
331 | /* | ||
332 | * Other variants with different arithmetic operators: | ||
333 | */ | ||
334 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | ||
335 | extern u64 atomic64_inc_return(atomic64_t *ptr); | ||
336 | extern u64 atomic64_dec_return(atomic64_t *ptr); | ||
337 | |||
338 | /** | ||
339 | * atomic64_add - add integer to atomic64 variable | ||
340 | * @delta: integer value to add | ||
341 | * @ptr: pointer to type atomic64_t | ||
342 | * | ||
343 | * Atomically adds @delta to @ptr. | ||
344 | */ | ||
345 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | ||
346 | |||
347 | /** | ||
348 | * atomic64_sub - subtract the atomic64 variable | ||
349 | * @delta: integer value to subtract | ||
350 | * @ptr: pointer to type atomic64_t | ||
351 | * | ||
352 | * Atomically subtracts @delta from @ptr. | ||
353 | */ | ||
354 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | ||
355 | |||
356 | /** | ||
357 | * atomic64_sub_and_test - subtract value from variable and test result | ||
358 | * @delta: integer value to subtract | ||
359 | * @ptr: pointer to type atomic64_t | ||
360 | * | ||
361 | * Atomically subtracts @delta from @ptr and returns | ||
362 | * true if the result is zero, or false for all | ||
363 | * other cases. | ||
364 | */ | ||
365 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | ||
366 | |||
367 | /** | ||
368 | * atomic64_inc - increment atomic64 variable | ||
369 | * @ptr: pointer to type atomic64_t | ||
370 | * | ||
371 | * Atomically increments @ptr by 1. | ||
372 | */ | ||
373 | extern void atomic64_inc(atomic64_t *ptr); | ||
374 | |||
375 | /** | ||
376 | * atomic64_dec - decrement atomic64 variable | ||
377 | * @ptr: pointer to type atomic64_t | ||
378 | * | ||
379 | * Atomically decrements @ptr by 1. | ||
380 | */ | ||
381 | extern void atomic64_dec(atomic64_t *ptr); | ||
382 | |||
383 | /** | ||
384 | * atomic64_dec_and_test - decrement and test | ||
385 | * @ptr: pointer to type atomic64_t | ||
386 | * | ||
387 | * Atomically decrements @ptr by 1 and | ||
388 | * returns true if the result is 0, or false for all other | ||
389 | * cases. | ||
390 | */ | ||
391 | extern int atomic64_dec_and_test(atomic64_t *ptr); | ||
392 | |||
393 | /** | ||
394 | * atomic64_inc_and_test - increment and test | ||
395 | * @ptr: pointer to type atomic64_t | ||
396 | * | ||
397 | * Atomically increments @ptr by 1 | ||
398 | * and returns true if the result is zero, or false for all | ||
399 | * other cases. | ||
400 | */ | ||
401 | extern int atomic64_inc_and_test(atomic64_t *ptr); | ||
402 | |||
403 | /** | ||
404 | * atomic64_add_negative - add and test if negative | ||
405 | * @delta: integer value to add | ||
406 | * @ptr: pointer to type atomic64_t | ||
407 | * | ||
408 | * Atomically adds @delta to @ptr and returns true | ||
409 | * if the result is negative, or false when | ||
410 | * result is greater than or equal to zero. | ||
411 | */ | ||
412 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | ||
413 | |||
414 | #include <asm-generic/atomic-long.h> | ||
415 | #endif /* _ASM_X86_ATOMIC_32_H */ | ||
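The atomic_cmpxchg() retry loop in atomic_add_unless() (kept verbatim in the merged header) generalizes to other lock-free updates. An illustrative sketch with a hypothetical helper, not part of this patch:

```c
static inline void atomic_max(atomic_t *v, int new)
{
	int c, old;

	c = atomic_read(v);
	while (c < new) {
		old = atomic_cmpxchg(v, c, new);
		if (old == c)
			break;		/* we installed the new maximum   */
		c = old;		/* lost a race; re-check the value */
	}
}
```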
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
deleted file mode 100644
index d605dc268e79..000000000000
--- a/arch/x86/include/asm/atomic_64.h
+++ /dev/null
@@ -1,485 +0,0 @@
1 | #ifndef _ASM_X86_ATOMIC_64_H | ||
2 | #define _ASM_X86_ATOMIC_64_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/alternative.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | /* | ||
9 | * Atomic operations that C can't guarantee us. Useful for | ||
10 | * resource counting etc.. | ||
11 | */ | ||
12 | |||
13 | #define ATOMIC_INIT(i) { (i) } | ||
14 | |||
15 | /** | ||
16 | * atomic_read - read atomic variable | ||
17 | * @v: pointer of type atomic_t | ||
18 | * | ||
19 | * Atomically reads the value of @v. | ||
20 | */ | ||
21 | static inline int atomic_read(const atomic_t *v) | ||
22 | { | ||
23 | return v->counter; | ||
24 | } | ||
25 | |||
26 | /** | ||
27 | * atomic_set - set atomic variable | ||
28 | * @v: pointer of type atomic_t | ||
29 | * @i: required value | ||
30 | * | ||
31 | * Atomically sets the value of @v to @i. | ||
32 | */ | ||
33 | static inline void atomic_set(atomic_t *v, int i) | ||
34 | { | ||
35 | v->counter = i; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_add - add integer to atomic variable | ||
40 | * @i: integer value to add | ||
41 | * @v: pointer of type atomic_t | ||
42 | * | ||
43 | * Atomically adds @i to @v. | ||
44 | */ | ||
45 | static inline void atomic_add(int i, atomic_t *v) | ||
46 | { | ||
47 | asm volatile(LOCK_PREFIX "addl %1,%0" | ||
48 | : "=m" (v->counter) | ||
49 | : "ir" (i), "m" (v->counter)); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * atomic_sub - subtract the atomic variable | ||
54 | * @i: integer value to subtract | ||
55 | * @v: pointer of type atomic_t | ||
56 | * | ||
57 | * Atomically subtracts @i from @v. | ||
58 | */ | ||
59 | static inline void atomic_sub(int i, atomic_t *v) | ||
60 | { | ||
61 | asm volatile(LOCK_PREFIX "subl %1,%0" | ||
62 | : "=m" (v->counter) | ||
63 | : "ir" (i), "m" (v->counter)); | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * atomic_sub_and_test - subtract value from variable and test result | ||
68 | * @i: integer value to subtract | ||
69 | * @v: pointer of type atomic_t | ||
70 | * | ||
71 | * Atomically subtracts @i from @v and returns | ||
72 | * true if the result is zero, or false for all | ||
73 | * other cases. | ||
74 | */ | ||
75 | static inline int atomic_sub_and_test(int i, atomic_t *v) | ||
76 | { | ||
77 | unsigned char c; | ||
78 | |||
79 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" | ||
80 | : "=m" (v->counter), "=qm" (c) | ||
81 | : "ir" (i), "m" (v->counter) : "memory"); | ||
82 | return c; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * atomic_inc - increment atomic variable | ||
87 | * @v: pointer of type atomic_t | ||
88 | * | ||
89 | * Atomically increments @v by 1. | ||
90 | */ | ||
91 | static inline void atomic_inc(atomic_t *v) | ||
92 | { | ||
93 | asm volatile(LOCK_PREFIX "incl %0" | ||
94 | : "=m" (v->counter) | ||
95 | : "m" (v->counter)); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_dec - decrement atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1. | ||
103 | */ | ||
104 | static inline void atomic_dec(atomic_t *v) | ||
105 | { | ||
106 | asm volatile(LOCK_PREFIX "decl %0" | ||
107 | : "=m" (v->counter) | ||
108 | : "m" (v->counter)); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * atomic_dec_and_test - decrement and test | ||
113 | * @v: pointer of type atomic_t | ||
114 | * | ||
115 | * Atomically decrements @v by 1 and | ||
116 | * returns true if the result is 0, or false for all other | ||
117 | * cases. | ||
118 | */ | ||
119 | static inline int atomic_dec_and_test(atomic_t *v) | ||
120 | { | ||
121 | unsigned char c; | ||
122 | |||
123 | asm volatile(LOCK_PREFIX "decl %0; sete %1" | ||
124 | : "=m" (v->counter), "=qm" (c) | ||
125 | : "m" (v->counter) : "memory"); | ||
126 | return c != 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * atomic_inc_and_test - increment and test | ||
131 | * @v: pointer of type atomic_t | ||
132 | * | ||
133 | * Atomically increments @v by 1 | ||
134 | * and returns true if the result is zero, or false for all | ||
135 | * other cases. | ||
136 | */ | ||
137 | static inline int atomic_inc_and_test(atomic_t *v) | ||
138 | { | ||
139 | unsigned char c; | ||
140 | |||
141 | asm volatile(LOCK_PREFIX "incl %0; sete %1" | ||
142 | : "=m" (v->counter), "=qm" (c) | ||
143 | : "m" (v->counter) : "memory"); | ||
144 | return c != 0; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * atomic_add_negative - add and test if negative | ||
149 | * @i: integer value to add | ||
150 | * @v: pointer of type atomic_t | ||
151 | * | ||
152 | * Atomically adds @i to @v and returns true | ||
153 | * if the result is negative, or false when | ||
154 | * result is greater than or equal to zero. | ||
155 | */ | ||
156 | static inline int atomic_add_negative(int i, atomic_t *v) | ||
157 | { | ||
158 | unsigned char c; | ||
159 | |||
160 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" | ||
161 | : "=m" (v->counter), "=qm" (c) | ||
162 | : "ir" (i), "m" (v->counter) : "memory"); | ||
163 | return c; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * atomic_add_return - add and return | ||
168 | * @i: integer value to add | ||
169 | * @v: pointer of type atomic_t | ||
170 | * | ||
171 | * Atomically adds @i to @v and returns @i + @v | ||
172 | */ | ||
173 | static inline int atomic_add_return(int i, atomic_t *v) | ||
174 | { | ||
175 | int __i = i; | ||
176 | asm volatile(LOCK_PREFIX "xaddl %0, %1" | ||
177 | : "+r" (i), "+m" (v->counter) | ||
178 | : : "memory"); | ||
179 | return i + __i; | ||
180 | } | ||
181 | |||
182 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
183 | { | ||
184 | return atomic_add_return(-i, v); | ||
185 | } | ||
186 | |||
187 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
188 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | ||
189 | |||
190 | /* The 64-bit atomic type */ | ||
191 | |||
192 | #define ATOMIC64_INIT(i) { (i) } | ||
193 | |||
194 | /** | ||
195 | * atomic64_read - read atomic64 variable | ||
196 | * @v: pointer of type atomic64_t | ||
197 | * | ||
198 | * Atomically reads the value of @v. | ||
199 | * Doesn't imply a read memory barrier. | ||
200 | */ | ||
201 | static inline long atomic64_read(const atomic64_t *v) | ||
202 | { | ||
203 | return v->counter; | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * atomic64_set - set atomic64 variable | ||
208 | * @v: pointer to type atomic64_t | ||
209 | * @i: required value | ||
210 | * | ||
211 | * Atomically sets the value of @v to @i. | ||
212 | */ | ||
213 | static inline void atomic64_set(atomic64_t *v, long i) | ||
214 | { | ||
215 | v->counter = i; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * atomic64_add - add integer to atomic64 variable | ||
220 | * @i: integer value to add | ||
221 | * @v: pointer to type atomic64_t | ||
222 | * | ||
223 | * Atomically adds @i to @v. | ||
224 | */ | ||
225 | static inline void atomic64_add(long i, atomic64_t *v) | ||
226 | { | ||
227 | asm volatile(LOCK_PREFIX "addq %1,%0" | ||
228 | : "=m" (v->counter) | ||
229 | : "er" (i), "m" (v->counter)); | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * atomic64_sub - subtract the atomic64 variable | ||
234 | * @i: integer value to subtract | ||
235 | * @v: pointer to type atomic64_t | ||
236 | * | ||
237 | * Atomically subtracts @i from @v. | ||
238 | */ | ||
239 | static inline void atomic64_sub(long i, atomic64_t *v) | ||
240 | { | ||
241 | asm volatile(LOCK_PREFIX "subq %1,%0" | ||
242 | : "=m" (v->counter) | ||
243 | : "er" (i), "m" (v->counter)); | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * atomic64_sub_and_test - subtract value from variable and test result | ||
248 | * @i: integer value to subtract | ||
249 | * @v: pointer to type atomic64_t | ||
250 | * | ||
251 | * Atomically subtracts @i from @v and returns | ||
252 | * true if the result is zero, or false for all | ||
253 | * other cases. | ||
254 | */ | ||
255 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) | ||
256 | { | ||
257 | unsigned char c; | ||
258 | |||
259 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" | ||
260 | : "=m" (v->counter), "=qm" (c) | ||
261 | : "er" (i), "m" (v->counter) : "memory"); | ||
262 | return c; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * atomic64_inc - increment atomic64 variable | ||
267 | * @v: pointer to type atomic64_t | ||
268 | * | ||
269 | * Atomically increments @v by 1. | ||
270 | */ | ||
271 | static inline void atomic64_inc(atomic64_t *v) | ||
272 | { | ||
273 | asm volatile(LOCK_PREFIX "incq %0" | ||
274 | : "=m" (v->counter) | ||
275 | : "m" (v->counter)); | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * atomic64_dec - decrement atomic64 variable | ||
280 | * @v: pointer to type atomic64_t | ||
281 | * | ||
282 | * Atomically decrements @v by 1. | ||
283 | */ | ||
284 | static inline void atomic64_dec(atomic64_t *v) | ||
285 | { | ||
286 | asm volatile(LOCK_PREFIX "decq %0" | ||
287 | : "=m" (v->counter) | ||
288 | : "m" (v->counter)); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * atomic64_dec_and_test - decrement and test | ||
293 | * @v: pointer to type atomic64_t | ||
294 | * | ||
295 | * Atomically decrements @v by 1 and | ||
296 | * returns true if the result is 0, or false for all other | ||
297 | * cases. | ||
298 | */ | ||
299 | static inline int atomic64_dec_and_test(atomic64_t *v) | ||
300 | { | ||
301 | unsigned char c; | ||
302 | |||
303 | asm volatile(LOCK_PREFIX "decq %0; sete %1" | ||
304 | : "=m" (v->counter), "=qm" (c) | ||
305 | : "m" (v->counter) : "memory"); | ||
306 | return c != 0; | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * atomic64_inc_and_test - increment and test | ||
311 | * @v: pointer to type atomic64_t | ||
312 | * | ||
313 | * Atomically increments @v by 1 | ||
314 | * and returns true if the result is zero, or false for all | ||
315 | * other cases. | ||
316 | */ | ||
317 | static inline int atomic64_inc_and_test(atomic64_t *v) | ||
318 | { | ||
319 | unsigned char c; | ||
320 | |||
321 | asm volatile(LOCK_PREFIX "incq %0; sete %1" | ||
322 | : "=m" (v->counter), "=qm" (c) | ||
323 | : "m" (v->counter) : "memory"); | ||
324 | return c != 0; | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * atomic64_add_negative - add and test if negative | ||
329 | * @i: integer value to add | ||
330 | * @v: pointer to type atomic64_t | ||
331 | * | ||
332 | * Atomically adds @i to @v and returns true | ||
333 | * if the result is negative, or false when | ||
334 | * result is greater than or equal to zero. | ||
335 | */ | ||
336 | static inline int atomic64_add_negative(long i, atomic64_t *v) | ||
337 | { | ||
338 | unsigned char c; | ||
339 | |||
340 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" | ||
341 | : "=m" (v->counter), "=qm" (c) | ||
342 | : "er" (i), "m" (v->counter) : "memory"); | ||
343 | return c; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * atomic64_add_return - add and return | ||
348 | * @i: integer value to add | ||
349 | * @v: pointer to type atomic64_t | ||
350 | * | ||
351 | * Atomically adds @i to @v and returns @i + @v | ||
352 | */ | ||
353 | static inline long atomic64_add_return(long i, atomic64_t *v) | ||
354 | { | ||
355 | long __i = i; | ||
356 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" | ||
357 | : "+r" (i), "+m" (v->counter) | ||
358 | : : "memory"); | ||
359 | return i + __i; | ||
360 | } | ||
361 | |||
362 | static inline long atomic64_sub_return(long i, atomic64_t *v) | ||
363 | { | ||
364 | return atomic64_add_return(-i, v); | ||
365 | } | ||
366 | |||
367 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | ||
368 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | ||
369 | |||
370 | static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | ||
371 | { | ||
372 | return cmpxchg(&v->counter, old, new); | ||
373 | } | ||
374 | |||
375 | static inline long atomic64_xchg(atomic64_t *v, long new) | ||
376 | { | ||
377 | return xchg(&v->counter, new); | ||
378 | } | ||
379 | |||
380 | static inline long atomic_cmpxchg(atomic_t *v, int old, int new) | ||
381 | { | ||
382 | return cmpxchg(&v->counter, old, new); | ||
383 | } | ||
384 | |||
385 | static inline long atomic_xchg(atomic_t *v, int new) | ||
386 | { | ||
387 | return xchg(&v->counter, new); | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * atomic_add_unless - add unless the number is a given value | ||
392 | * @v: pointer of type atomic_t | ||
393 | * @a: the amount to add to v... | ||
394 | * @u: ...unless v is equal to u. | ||
395 | * | ||
396 | * Atomically adds @a to @v, so long as it was not @u. | ||
397 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
398 | */ | ||
399 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
400 | { | ||
401 | int c, old; | ||
402 | c = atomic_read(v); | ||
403 | for (;;) { | ||
404 | if (unlikely(c == (u))) | ||
405 | break; | ||
406 | old = atomic_cmpxchg((v), c, c + (a)); | ||
407 | if (likely(old == c)) | ||
408 | break; | ||
409 | c = old; | ||
410 | } | ||
411 | return c != (u); | ||
412 | } | ||
413 | |||
414 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
415 | |||
416 | /** | ||
417 | * atomic64_add_unless - add unless the number is a given value | ||
418 | * @v: pointer of type atomic64_t | ||
419 | * @a: the amount to add to v... | ||
420 | * @u: ...unless v is equal to u. | ||
421 | * | ||
422 | * Atomically adds @a to @v, so long as it was not @u. | ||
423 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
424 | */ | ||
425 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
426 | { | ||
427 | long c, old; | ||
428 | c = atomic64_read(v); | ||
429 | for (;;) { | ||
430 | if (unlikely(c == (u))) | ||
431 | break; | ||
432 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
433 | if (likely(old == c)) | ||
434 | break; | ||
435 | c = old; | ||
436 | } | ||
437 | return c != (u); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * atomic_inc_short - increment of a short integer | ||
442 | * @v: pointer to type int | ||
443 | * | ||
444 | * Atomically adds 1 to @v | ||
445 | * Returns the new value of @u | ||
446 | */ | ||
447 | static inline short int atomic_inc_short(short int *v) | ||
448 | { | ||
449 | asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); | ||
450 | return *v; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * atomic_or_long - OR of two long integers | ||
455 | * @v1: pointer to type unsigned long | ||
456 | * @v2: pointer to type unsigned long | ||
457 | * | ||
458 | * Atomically ORs @v1 and @v2 | ||
459 | * Returns the result of the OR | ||
460 | */ | ||
461 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
462 | { | ||
463 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
464 | } | ||
465 | |||
466 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
467 | |||
468 | /* These are x86-specific, used by some header files */ | ||
469 | #define atomic_clear_mask(mask, addr) \ | ||
470 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | ||
471 | : : "r" (~(mask)), "m" (*(addr)) : "memory") | ||
472 | |||
473 | #define atomic_set_mask(mask, addr) \ | ||
474 | asm volatile(LOCK_PREFIX "orl %0,%1" \ | ||
475 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ | ||
476 | : "memory") | ||
477 | |||
478 | /* Atomic operations are already serializing on x86 */ | ||
479 | #define smp_mb__before_atomic_dec() barrier() | ||
480 | #define smp_mb__after_atomic_dec() barrier() | ||
481 | #define smp_mb__before_atomic_inc() barrier() | ||
482 | #define smp_mb__after_atomic_inc() barrier() | ||
483 | |||
484 | #include <asm-generic/atomic-long.h> | ||
485 | #endif /* _ASM_X86_ATOMIC_64_H */ | ||
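The x86-specific mask helpers atomic_clear_mask()/atomic_set_mask() are carried over unchanged into the merged <asm/atomic.h>. An illustrative sketch of their use on a plain 32-bit flags word; the names are hypothetical and not part of this patch:

```c
#define STATUS_PENDING	0x01
#define STATUS_MASKED	0x02

static unsigned int irq_status;			/* hypothetical flags word */

static void example_mask_ops(void)
{
	atomic_set_mask(STATUS_PENDING, &irq_status);	/* *addr |= mask  */
	atomic_clear_mask(STATUS_MASKED, &irq_status);	/* *addr &= ~mask */
}
```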
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 14f9890eb495..635f03bb4995 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -118,14 +118,20 @@ enum fixed_addresses {
118 | * 256 temporary boot-time mappings, used by early_ioremap(), | 118 | * 256 temporary boot-time mappings, used by early_ioremap(), |
119 | * before ioremap() is functional. | 119 | * before ioremap() is functional. |
120 | * | 120 | * |
121 | * We round it up to the next 256 pages boundary so that we | 121 | * If necessary we round it up to the next 256 pages boundary so |
122 | * can have a single pgd entry and a single pte table: | 122 | * that we can have a single pgd entry and a single pte table: |
123 | */ | 123 | */ |
124 | #define NR_FIX_BTMAPS 64 | 124 | #define NR_FIX_BTMAPS 64 |
125 | #define FIX_BTMAPS_SLOTS 4 | 125 | #define FIX_BTMAPS_SLOTS 4 |
126 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | 126 | #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) |
127 | (__end_of_permanent_fixed_addresses & 255), | 127 | FIX_BTMAP_END = |
128 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | 128 | (__end_of_permanent_fixed_addresses ^ |
129 | (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) & | ||
130 | -PTRS_PER_PTE | ||
131 | ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - | ||
132 | (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) | ||
133 | : __end_of_permanent_fixed_addresses, | ||
134 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, | ||
129 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | 135 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT |
130 | FIX_OHCI1394_BASE, | 136 | FIX_OHCI1394_BASE, |
131 | #endif | 137 | #endif |
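A worked example of the new FIX_BTMAP_END rounding above, with illustrative numbers (the real value of __end_of_permanent_fixed_addresses depends on the kernel configuration):

/*
 * Suppose __end_of_permanent_fixed_addresses == 400 and
 * TOTAL_FIX_BTMAPS == 64 * 4 == 256, with PTRS_PER_PTE == 512.
 * 400 ^ (400 + 256 - 1) == 400 ^ 655 has bit 9 set, so the two ends of the
 * boot-map range would land in different pte tables and the
 * (... & -PTRS_PER_PTE) test is non-zero.  FIX_BTMAP_END is then rounded up
 * to 400 + 256 - (400 & 255) == 512, after which the whole 256-slot range
 * (512..767) sits inside a single pte table, as the comment requires.
 */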
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index ca7517d33776..413620024768 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -105,7 +105,7 @@ do { \ | |||
105 | static inline void __down_read(struct rw_semaphore *sem) | 105 | static inline void __down_read(struct rw_semaphore *sem) |
106 | { | 106 | { |
107 | asm volatile("# beginning down_read\n\t" | 107 | asm volatile("# beginning down_read\n\t" |
108 | LOCK_PREFIX " incl (%%eax)\n\t" | 108 | LOCK_PREFIX " inc%z0 (%1)\n\t" |
109 | /* adds 0x00000001, returns the old value */ | 109 | /* adds 0x00000001, returns the old value */ |
110 | " jns 1f\n" | 110 | " jns 1f\n" |
111 | " call call_rwsem_down_read_failed\n" | 111 | " call call_rwsem_down_read_failed\n" |
@@ -123,12 +123,12 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
123 | { | 123 | { |
124 | __s32 result, tmp; | 124 | __s32 result, tmp; |
125 | asm volatile("# beginning __down_read_trylock\n\t" | 125 | asm volatile("# beginning __down_read_trylock\n\t" |
126 | " movl %0,%1\n\t" | 126 | " mov %0,%1\n\t" |
127 | "1:\n\t" | 127 | "1:\n\t" |
128 | " movl %1,%2\n\t" | 128 | " mov %1,%2\n\t" |
129 | " addl %3,%2\n\t" | 129 | " add %3,%2\n\t" |
130 | " jle 2f\n\t" | 130 | " jle 2f\n\t" |
131 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | 131 | LOCK_PREFIX " cmpxchg %2,%0\n\t" |
132 | " jnz 1b\n\t" | 132 | " jnz 1b\n\t" |
133 | "2:\n\t" | 133 | "2:\n\t" |
134 | "# ending __down_read_trylock\n\t" | 134 | "# ending __down_read_trylock\n\t" |
@@ -147,9 +147,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
147 | 147 | ||
148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
149 | asm volatile("# beginning down_write\n\t" | 149 | asm volatile("# beginning down_write\n\t" |
150 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 150 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
151 | /* subtract 0x0000ffff, returns the old value */ | 151 | /* subtract 0x0000ffff, returns the old value */ |
152 | " testl %%edx,%%edx\n\t" | 152 | " test %1,%1\n\t" |
153 | /* was the count 0 before? */ | 153 | /* was the count 0 before? */ |
154 | " jz 1f\n" | 154 | " jz 1f\n" |
155 | " call call_rwsem_down_write_failed\n" | 155 | " call call_rwsem_down_write_failed\n" |
@@ -185,7 +185,7 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
185 | { | 185 | { |
186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | 186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; |
187 | asm volatile("# beginning __up_read\n\t" | 187 | asm volatile("# beginning __up_read\n\t" |
188 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" | 188 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
189 | /* subtracts 1, returns the old value */ | 189 | /* subtracts 1, returns the old value */ |
190 | " jns 1f\n\t" | 190 | " jns 1f\n\t" |
191 | " call call_rwsem_wake\n" | 191 | " call call_rwsem_wake\n" |
@@ -201,18 +201,18 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
201 | */ | 201 | */ |
202 | static inline void __up_write(struct rw_semaphore *sem) | 202 | static inline void __up_write(struct rw_semaphore *sem) |
203 | { | 203 | { |
204 | unsigned long tmp; | ||
204 | asm volatile("# beginning __up_write\n\t" | 205 | asm volatile("# beginning __up_write\n\t" |
205 | " movl %2,%%edx\n\t" | 206 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
206 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" | ||
207 | /* tries to transition | 207 | /* tries to transition |
208 | 0xffff0001 -> 0x00000000 */ | 208 | 0xffff0001 -> 0x00000000 */ |
209 | " jz 1f\n" | 209 | " jz 1f\n" |
210 | " call call_rwsem_wake\n" | 210 | " call call_rwsem_wake\n" |
211 | "1:\n\t" | 211 | "1:\n\t" |
212 | "# ending __up_write\n" | 212 | "# ending __up_write\n" |
213 | : "+m" (sem->count) | 213 | : "+m" (sem->count), "=d" (tmp) |
214 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) | 214 | : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) |
215 | : "memory", "cc", "edx"); | 215 | : "memory", "cc"); |
216 | } | 216 | } |
217 | 217 | ||
218 | /* | 218 | /* |
@@ -221,7 +221,7 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
221 | static inline void __downgrade_write(struct rw_semaphore *sem) | 221 | static inline void __downgrade_write(struct rw_semaphore *sem) |
222 | { | 222 | { |
223 | asm volatile("# beginning __downgrade_write\n\t" | 223 | asm volatile("# beginning __downgrade_write\n\t" |
224 | LOCK_PREFIX " addl %2,(%%eax)\n\t" | 224 | LOCK_PREFIX " add%z0 %2,(%1)\n\t" |
225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | 225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ |
226 | " jns 1f\n\t" | 226 | " jns 1f\n\t" |
227 | " call call_rwsem_downgrade_wake\n" | 227 | " call call_rwsem_downgrade_wake\n" |
@@ -237,7 +237,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
237 | */ | 237 | */ |
238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) |
239 | { | 239 | { |
240 | asm volatile(LOCK_PREFIX "addl %1,%0" | 240 | asm volatile(LOCK_PREFIX "add%z0 %1,%0" |
241 | : "+m" (sem->count) | 241 | : "+m" (sem->count) |
242 | : "ir" (delta)); | 242 | : "ir" (delta)); |
243 | } | 243 | } |
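A standalone sketch of the %z operand modifier used in the rewritten rwsem asm; the function below is illustrative and not from this patch:

/* Mirrors rwsem_atomic_add() above: gcc substitutes the size suffix that
 * matches operand 0 for %z0 (here "l") and picks the addressing mode and
 * register itself, e.g. "addl $1,(%eax)", so neither the suffix nor %%eax
 * is hard-coded in the template any more. */
static inline void counter_add(int *counter, int delta)
{
	asm volatile(LOCK_PREFIX "add%z0 %1,%0"
		     : "+m" (*counter)
		     : "ir" (delta));
}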
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 535e421498f6..316708d5af92 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/prefetch.h> | 9 | #include <linux/prefetch.h> |
10 | #include <linux/lockdep.h> | 10 | #include <linux/lockdep.h> |
11 | #include <asm/alternative.h> | ||
12 | #include <asm/cpufeature.h> | ||
11 | #include <asm/page.h> | 13 | #include <asm/page.h> |
12 | 14 | ||
13 | /* | 15 | /* |
@@ -16,7 +18,24 @@ | |||
16 | 18 | ||
17 | /* Handles exceptions in both to and from, but doesn't do access_ok */ | 19 | /* Handles exceptions in both to and from, but doesn't do access_ok */ |
18 | __must_check unsigned long | 20 | __must_check unsigned long |
19 | copy_user_generic(void *to, const void *from, unsigned len); | 21 | copy_user_generic_string(void *to, const void *from, unsigned len); |
22 | __must_check unsigned long | ||
23 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); | ||
24 | |||
25 | static __always_inline __must_check unsigned long | ||
26 | copy_user_generic(void *to, const void *from, unsigned len) | ||
27 | { | ||
28 | unsigned ret; | ||
29 | |||
30 | alternative_call(copy_user_generic_unrolled, | ||
31 | copy_user_generic_string, | ||
32 | X86_FEATURE_REP_GOOD, | ||
33 | ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), | ||
34 | "=d" (len)), | ||
35 | "1" (to), "2" (from), "3" (len) | ||
36 | : "memory", "rcx", "r8", "r9", "r10", "r11"); | ||
37 | return ret; | ||
38 | } | ||
20 | 39 | ||
21 | __must_check unsigned long | 40 | __must_check unsigned long |
22 | _copy_to_user(void __user *to, const void *from, unsigned len); | 41 | _copy_to_user(void __user *to, const void *from, unsigned len); |
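A note on the register constraints in the new copy_user_generic() wrapper; this is a reading of the code above, not an addition to it:

/*
 * "D", "S" and "d" pin to/from/len to %rdi, %rsi and %rdx, the argument
 * registers the two assembly implementations expect per the x86-64 calling
 * convention, and "=a" (ret) collects the count of uncopied bytes returned
 * in %rax.  The clobber list names the scratch registers either
 * implementation may use, so the compiler keeps nothing live in them across
 * the patched call.
 */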
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index e63b80e5861c..e6ea0342c8f8 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -205,7 +205,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start, | |||
205 | struct alt_instr *end) | 205 | struct alt_instr *end) |
206 | { | 206 | { |
207 | struct alt_instr *a; | 207 | struct alt_instr *a; |
208 | char insnbuf[MAX_PATCH_LEN]; | 208 | u8 insnbuf[MAX_PATCH_LEN]; |
209 | 209 | ||
210 | DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); | 210 | DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); |
211 | for (a = start; a < end; a++) { | 211 | for (a = start; a < end; a++) { |
@@ -223,6 +223,8 @@ void __init_or_module apply_alternatives(struct alt_instr *start, | |||
223 | } | 223 | } |
224 | #endif | 224 | #endif |
225 | memcpy(insnbuf, a->replacement, a->replacementlen); | 225 | memcpy(insnbuf, a->replacement, a->replacementlen); |
226 | if (*insnbuf == 0xe8 && a->replacementlen == 5) | ||
227 | *(s32 *)(insnbuf + 1) += a->replacement - a->instr; | ||
226 | add_nops(insnbuf + a->replacementlen, | 228 | add_nops(insnbuf + a->replacementlen, |
227 | a->instrlen - a->replacementlen); | 229 | a->instrlen - a->replacementlen); |
228 | text_poke_early(instr, insnbuf, a->instrlen); | 230 | text_poke_early(instr, insnbuf, a->instrlen); |
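A worked example of the new rel32 fixup in apply_alternatives(), with made-up addresses:

/*
 * Say the instruction to patch is at instr == 0x1000, the replacement call
 * was assembled in .altinstr_replacement at replacement == 0x9000, and it
 * targets a function at 0xA000.  A 5-byte near call (opcode 0xe8) stores a
 * displacement relative to the next instruction, so as assembled the rel32
 * is 0xA000 - (0x9000 + 5) == 0x0ffb.  Once copied to 0x1000 it must read
 * 0xA000 - (0x1000 + 5) == 0x8ffb, i.e. the old value plus
 * (replacement - instr) == 0x8000 -- exactly the adjustment added above.
 */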
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fc6c8ef92dcc..c2b722d5a722 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -31,6 +31,8 @@ struct _cache_table { | |||
31 | short size; | 31 | short size; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | #define MB(x) ((x) * 1024) | ||
35 | |||
34 | /* All the cache descriptor types we care about (no TLB or | 36 | /* All the cache descriptor types we care about (no TLB or |
35 | trace cache entries) */ | 37 | trace cache entries) */ |
36 | 38 | ||
@@ -44,9 +46,9 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
44 | { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ | 46 | { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ |
45 | { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ | 47 | { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ |
46 | { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ | 48 | { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
47 | { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 49 | { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
48 | { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 50 | { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
49 | { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 51 | { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
50 | { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ | 52 | { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ |
51 | { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ | 53 | { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ |
52 | { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ | 54 | { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
@@ -59,16 +61,16 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
59 | { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ | 61 | { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ |
60 | { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ | 62 | { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ |
61 | { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ | 63 | { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ |
62 | { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */ | 64 | { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ |
63 | { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */ | 65 | { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ |
64 | { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */ | 66 | { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ |
65 | { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */ | 67 | { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ |
66 | { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ | 68 | { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ |
67 | { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */ | 69 | { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ |
68 | { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ | 70 | { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ |
69 | { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */ | 71 | { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ |
70 | { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */ | 72 | { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ |
71 | { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */ | 73 | { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ |
72 | { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 74 | { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
73 | { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ | 75 | { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
74 | { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ | 76 | { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
@@ -77,34 +79,34 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
77 | { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ | 79 | { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ |
78 | { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ | 80 | { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ |
79 | { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ | 81 | { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ |
80 | { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */ | 82 | { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ |
81 | { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 83 | { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
82 | { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 84 | { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
83 | { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 85 | { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
84 | { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ | 86 | { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
85 | { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */ | 87 | { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ |
86 | { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ | 88 | { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ |
87 | { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ | 89 | { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ |
88 | { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ | 90 | { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ |
89 | { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */ | 91 | { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ |
90 | { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ | 92 | { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ |
91 | { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ | 93 | { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ |
92 | { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ | 94 | { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ |
93 | { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ | 95 | { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ |
94 | { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */ | 96 | { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ |
95 | { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */ | 97 | { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ |
96 | { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */ | 98 | { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ |
97 | { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */ | 99 | { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ |
98 | { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | 100 | { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ |
99 | { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */ | 101 | { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ |
100 | { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | 102 | { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ |
101 | { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */ | 103 | { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ |
102 | { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */ | 104 | { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ |
103 | { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ | 105 | { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ |
104 | { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ | 106 | { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ |
105 | { 0xea, LVL_3, 12288 }, /* 24-way set assoc, 64 byte line size */ | 107 | { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ |
106 | { 0xeb, LVL_3, 18432 }, /* 24-way set assoc, 64 byte line size */ | 108 | { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ |
107 | { 0xec, LVL_3, 24576 }, /* 24-way set assoc, 64 byte line size */ | 109 | { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ |
108 | { 0x00, 0, 0} | 110 | { 0x00, 0, 0} |
109 | }; | 111 | }; |
110 | 112 | ||
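One unit detail behind the MB() macro introduced above:

/* The size column of cache_table is in KB, so MB(2) expands to 2048 (KB),
 * i.e. a 2 MB cache -- the macro only makes the larger entries readable. */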
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c9b3522b6b46..02d678065d7d 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -92,6 +92,13 @@ void exit_thread(void) | |||
92 | } | 92 | } |
93 | } | 93 | } |
94 | 94 | ||
95 | void show_regs(struct pt_regs *regs) | ||
96 | { | ||
97 | show_registers(regs); | ||
98 | show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), | ||
99 | regs->bp); | ||
100 | } | ||
101 | |||
95 | void show_regs_common(void) | 102 | void show_regs_common(void) |
96 | { | 103 | { |
97 | const char *board, *product; | 104 | const char *board, *product; |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 37ad1e046aae..f6c62667e30c 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -174,12 +174,6 @@ void __show_regs(struct pt_regs *regs, int all) | |||
174 | d6, d7); | 174 | d6, d7); |
175 | } | 175 | } |
176 | 176 | ||
177 | void show_regs(struct pt_regs *regs) | ||
178 | { | ||
179 | show_registers(regs); | ||
180 | show_trace(NULL, regs, ®s->sp, regs->bp); | ||
181 | } | ||
182 | |||
183 | void release_thread(struct task_struct *dead_task) | 177 | void release_thread(struct task_struct *dead_task) |
184 | { | 178 | { |
185 | BUG_ON(dead_task->mm); | 179 | BUG_ON(dead_task->mm); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 126f0b493d04..dc9690b4c4cc 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -211,12 +211,6 @@ void __show_regs(struct pt_regs *regs, int all) | |||
211 | printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); | 211 | printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); |
212 | } | 212 | } |
213 | 213 | ||
214 | void show_regs(struct pt_regs *regs) | ||
215 | { | ||
216 | show_registers(regs); | ||
217 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | ||
218 | } | ||
219 | |||
220 | void release_thread(struct task_struct *dead_task) | 214 | void release_thread(struct task_struct *dead_task) |
221 | { | 215 | { |
222 | if (dead_task->mm) { | 216 | if (dead_task->mm) { |
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 619f7f88b8cc..693920b22496 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -26,7 +26,8 @@ EXPORT_SYMBOL(__put_user_2); | |||
26 | EXPORT_SYMBOL(__put_user_4); | 26 | EXPORT_SYMBOL(__put_user_4); |
27 | EXPORT_SYMBOL(__put_user_8); | 27 | EXPORT_SYMBOL(__put_user_8); |
28 | 28 | ||
29 | EXPORT_SYMBOL(copy_user_generic); | 29 | EXPORT_SYMBOL(copy_user_generic_string); |
30 | EXPORT_SYMBOL(copy_user_generic_unrolled); | ||
30 | EXPORT_SYMBOL(__copy_user_nocache); | 31 | EXPORT_SYMBOL(__copy_user_nocache); |
31 | EXPORT_SYMBOL(_copy_from_user); | 32 | EXPORT_SYMBOL(_copy_from_user); |
32 | EXPORT_SYMBOL(_copy_to_user); | 33 | EXPORT_SYMBOL(_copy_to_user); |
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index cf889d4e076a..71100c98e337 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S | |||
@@ -90,12 +90,6 @@ ENTRY(_copy_from_user) | |||
90 | CFI_ENDPROC | 90 | CFI_ENDPROC |
91 | ENDPROC(_copy_from_user) | 91 | ENDPROC(_copy_from_user) |
92 | 92 | ||
93 | ENTRY(copy_user_generic) | ||
94 | CFI_STARTPROC | ||
95 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | ||
96 | CFI_ENDPROC | ||
97 | ENDPROC(copy_user_generic) | ||
98 | |||
99 | .section .fixup,"ax" | 93 | .section .fixup,"ax" |
100 | /* must zero dest */ | 94 | /* must zero dest */ |
101 | ENTRY(bad_from_user) | 95 | ENTRY(bad_from_user) |
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index ad5441ed1b57..f82e884928af 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S | |||
@@ -20,12 +20,11 @@ | |||
20 | /* | 20 | /* |
21 | * memcpy_c() - fast string ops (REP MOVSQ) based variant. | 21 | * memcpy_c() - fast string ops (REP MOVSQ) based variant. |
22 | * | 22 | * |
23 | * Calls to this get patched into the kernel image via the | 23 | * This gets patched over the unrolled variant (below) via the |
24 | * alternative instructions framework: | 24 | * alternative instructions framework: |
25 | */ | 25 | */ |
26 | ALIGN | 26 | .section .altinstr_replacement, "ax", @progbits |
27 | memcpy_c: | 27 | .Lmemcpy_c: |
28 | CFI_STARTPROC | ||
29 | movq %rdi, %rax | 28 | movq %rdi, %rax |
30 | 29 | ||
31 | movl %edx, %ecx | 30 | movl %edx, %ecx |
@@ -35,8 +34,8 @@ memcpy_c: | |||
35 | movl %edx, %ecx | 34 | movl %edx, %ecx |
36 | rep movsb | 35 | rep movsb |
37 | ret | 36 | ret |
38 | CFI_ENDPROC | 37 | .Lmemcpy_e: |
39 | ENDPROC(memcpy_c) | 38 | .previous |
40 | 39 | ||
41 | ENTRY(__memcpy) | 40 | ENTRY(__memcpy) |
42 | ENTRY(memcpy) | 41 | ENTRY(memcpy) |
@@ -128,16 +127,10 @@ ENDPROC(__memcpy) | |||
128 | * It is also a lot simpler. Use this when possible: | 127 | * It is also a lot simpler. Use this when possible: |
129 | */ | 128 | */ |
130 | 129 | ||
131 | .section .altinstr_replacement, "ax" | ||
132 | 1: .byte 0xeb /* jmp <disp8> */ | ||
133 | .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */ | ||
134 | 2: | ||
135 | .previous | ||
136 | |||
137 | .section .altinstructions, "a" | 130 | .section .altinstructions, "a" |
138 | .align 8 | 131 | .align 8 |
139 | .quad memcpy | 132 | .quad memcpy |
140 | .quad 1b | 133 | .quad .Lmemcpy_c |
141 | .byte X86_FEATURE_REP_GOOD | 134 | .byte X86_FEATURE_REP_GOOD |
142 | 135 | ||
143 | /* | 136 | /* |
@@ -145,6 +138,6 @@ ENDPROC(__memcpy) | |||
145 | * so it is silly to overwrite itself with nops - reboot is the | 138 | * so it is silly to overwrite itself with nops - reboot is the |
146 | * only outcome... | 139 | * only outcome... |
147 | */ | 140 | */ |
148 | .byte 2b - 1b | 141 | .byte .Lmemcpy_e - .Lmemcpy_c |
149 | .byte 2b - 1b | 142 | .byte .Lmemcpy_e - .Lmemcpy_c |
150 | .previous | 143 | .previous |
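For readers following the raw directives, a rough C rendering of the record the .altinstructions entry above emits (field names follow the kernel's struct alt_instr of this era; shown purely as a reading aid, and note that both length bytes here are deliberately set to the replacement length so only the start of memcpy is overwritten):

struct alt_instr_sketch {	/* illustrative only, u8 from <linux/types.h> */
	u8 *instr;		/* .quad memcpy: the site to patch           */
	u8 *replacement;	/* .quad .Lmemcpy_c: the REP MOVSQ variant   */
	u8  cpuid;		/* .byte X86_FEATURE_REP_GOOD                */
	u8  instrlen;		/* bytes available at the patch site         */
	u8  replacementlen;	/* .byte .Lmemcpy_e - .Lmemcpy_c             */
};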
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 2c5948116bd2..e88d3b81644a 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S | |||
@@ -12,9 +12,8 @@ | |||
12 | * | 12 | * |
13 | * rax original destination | 13 | * rax original destination |
14 | */ | 14 | */ |
15 | ALIGN | 15 | .section .altinstr_replacement, "ax", @progbits |
16 | memset_c: | 16 | .Lmemset_c: |
17 | CFI_STARTPROC | ||
18 | movq %rdi,%r9 | 17 | movq %rdi,%r9 |
19 | movl %edx,%r8d | 18 | movl %edx,%r8d |
20 | andl $7,%r8d | 19 | andl $7,%r8d |
@@ -29,8 +28,8 @@ memset_c: | |||
29 | rep stosb | 28 | rep stosb |
30 | movq %r9,%rax | 29 | movq %r9,%rax |
31 | ret | 30 | ret |
32 | CFI_ENDPROC | 31 | .Lmemset_e: |
33 | ENDPROC(memset_c) | 32 | .previous |
34 | 33 | ||
35 | ENTRY(memset) | 34 | ENTRY(memset) |
36 | ENTRY(__memset) | 35 | ENTRY(__memset) |
@@ -118,16 +117,11 @@ ENDPROC(__memset) | |||
118 | 117 | ||
119 | #include <asm/cpufeature.h> | 118 | #include <asm/cpufeature.h> |
120 | 119 | ||
121 | .section .altinstr_replacement,"ax" | ||
122 | 1: .byte 0xeb /* jmp <disp8> */ | ||
123 | .byte (memset_c - memset) - (2f - 1b) /* offset */ | ||
124 | 2: | ||
125 | .previous | ||
126 | .section .altinstructions,"a" | 120 | .section .altinstructions,"a" |
127 | .align 8 | 121 | .align 8 |
128 | .quad memset | 122 | .quad memset |
129 | .quad 1b | 123 | .quad .Lmemset_c |
130 | .byte X86_FEATURE_REP_GOOD | 124 | .byte X86_FEATURE_REP_GOOD |
131 | .byte .Lfinal - memset | 125 | .byte .Lfinal - memset |
132 | .byte 2b - 1b | 126 | .byte .Lmemset_e - .Lmemset_c |
133 | .previous | 127 | .previous |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index c246d259822d..03c75ffd5c2a 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -422,6 +422,10 @@ void __init early_ioremap_init(void) | |||
422 | * The boot-ioremap range spans multiple pmds, for which | 422 | * The boot-ioremap range spans multiple pmds, for which |
423 | * we are not prepared: | 423 | * we are not prepared: |
424 | */ | 424 | */ |
425 | #define __FIXADDR_TOP (-PAGE_SIZE) | ||
426 | BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) | ||
427 | != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); | ||
428 | #undef __FIXADDR_TOP | ||
425 | if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { | 429 | if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { |
426 | WARN_ON(1); | 430 | WARN_ON(1); |
427 | printk(KERN_WARNING "pmd %p != %p\n", | 431 | printk(KERN_WARNING "pmd %p != %p\n", |
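A hedged reading of why the BUILD_BUG_ON() above temporarily redefines __FIXADDR_TOP:

/*
 * On 32-bit, __FIXADDR_TOP is a variable, so __fix_to_virt() would not be a
 * compile-time constant and could not appear inside BUILD_BUG_ON().
 * #defining it to the default -PAGE_SIZE for the two lines of the check
 * makes both expressions constants, letting the "boot-ioremap slots must
 * not straddle a pmd" assertion run at build time, while the runtime
 * WARN_ON() below still checks the actual placement.
 */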
diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb129..63ee4eb1228d 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
@@ -11,11 +11,18 @@ | |||
11 | 11 | ||
12 | unsigned int hweight32(unsigned int w) | 12 | unsigned int hweight32(unsigned int w) |
13 | { | 13 | { |
14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | ||
15 | w -= (w >> 1) & 0x55555555; | ||
16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | ||
17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | ||
18 | return (w * 0x01010101) >> 24; | ||
19 | #else | ||
14 | unsigned int res = w - ((w >> 1) & 0x55555555); | 20 | unsigned int res = w - ((w >> 1) & 0x55555555); |
15 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | 21 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
16 | res = (res + (res >> 4)) & 0x0F0F0F0F; | 22 | res = (res + (res >> 4)) & 0x0F0F0F0F; |
17 | res = res + (res >> 8); | 23 | res = res + (res >> 8); |
18 | return (res + (res >> 16)) & 0x000000FF; | 24 | return (res + (res >> 16)) & 0x000000FF; |
25 | #endif | ||
19 | } | 26 | } |
20 | EXPORT_SYMBOL(hweight32); | 27 | EXPORT_SYMBOL(hweight32); |
21 | 28 | ||
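A worked example of the multiply-based accumulation in the new ARCH_HAS_FAST_MULTIPLIER path of hweight32(); values are for illustration only:

/*
 * w = 0xF0F0F0F0 (four bits set in each byte, 16 in total):
 *   w -= (w >> 1) & 0x55555555;                        w == 0xA0A0A0A0  (2-bit sums)
 *   w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);   w == 0x40404040  (4-bit sums)
 *   w  = (w + (w >> 4)) & 0x0f0f0f0f;                  w == 0x04040404  (byte sums)
 *   (w * 0x01010101) >> 24                             == 16
 * The multiply folds all four byte counts into the top byte in a single
 * instruction, which is why this path is gated on ARCH_HAS_FAST_MULTIPLIER.
 */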