author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2011-08-18 14:48:06 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>			2011-08-29 16:42:10 -0400
commit		e9826380d83d1bda3ee5663bf3fa4667a6fbe60a (patch)
tree		f7de160fc158e87f8bd78d67a4f0c43bf4356b4c
parent		00a41546e8008b9944382eed1841c785f4fc8d9c (diff)
x86, cmpxchg: Unify cmpxchg into cmpxchg.h
Everything that's actually common between 32 and 64-bit is moved into
cmpxchg.h.
xchg/cmpxchg will fail with a link error if they're passed an
unsupported size (which includes 64-bit args on 32-bit systems).
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
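
The link-time failure works because sizeof() is a compile-time constant: the optimizer folds the switch, deletes every non-matching arm as dead code, and a call to the never-defined error function survives only when no arm matches. A minimal standalone sketch of the same pattern in plain GNU C (the names here are illustrative, not the kernel's):

	/* Deliberately never defined; any surviving reference fails at link time. */
	extern void __store_wrong_size(void);

	#define store_once(ptr, val)						\
	({									\
		switch (sizeof(*(ptr))) {					\
		case 1: *(volatile unsigned char *)(ptr) = (val); break;	\
		case 4: *(volatile unsigned int *)(ptr)  = (val); break;	\
		default: __store_wrong_size();	/* unsupported width */		\
		}								\
	})

Calling store_once() on an int links cleanly; on a 64-bit type the build stops with an undefined reference to __store_wrong_size(), which is exactly how __xchg_wrong_size() and __cmpxchg_wrong_size() behave below. (This relies on the switch being folded at compile time, which any optimized kernel build does.)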
-rw-r--r--	arch/x86/include/asm/cmpxchg.h    | 155
-rw-r--r--	arch/x86/include/asm/cmpxchg_32.h | 113
-rw-r--r--	arch/x86/include/asm/cmpxchg_64.h | 131
3 files changed, 155 insertions(+), 244 deletions(-)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index a460fa088d4c..efe3ec778a58 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -1,5 +1,160 @@
+#ifndef ASM_X86_CMPXCHG_H
+#define ASM_X86_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+/* Non-existent functions to indicate usage errors at link time. */
+extern void __xchg_wrong_size(void);
+extern void __cmpxchg_wrong_size(void);
+
+/*
+ * Constants for operation sizes. On 32-bit, the 64-bit size is set to
+ * -1 because sizeof will never return -1, thereby making those switch
+ * case statements guaranteed dead code which the compiler will
+ * eliminate, and allowing the "missing symbol in the default case" to
+ * indicate a usage error.
+ */
+#define __X86_CASE_B	1
+#define __X86_CASE_W	2
+#define __X86_CASE_L	4
+#ifdef CONFIG_64BIT
+#define __X86_CASE_Q	8
+#else
+#define __X86_CASE_Q	-1	/* sizeof will never return -1 */
+#endif
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define __xchg(x, ptr, size)						\
+({									\
+	__typeof(*(ptr)) __x = (x);					\
+	switch (size) {							\
+	case __X86_CASE_B:						\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile("xchgb %0,%1"				\
+			     : "=q" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile("xchgw %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile("xchgl %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
+		asm volatile("xchgq %0,%1"				\
+			     : "=r" (__x), "+m" (*__ptr)		\
+			     : "0" (__x)				\
+			     : "memory");				\
+		break;							\
+	}								\
+	default:							\
+		__xchg_wrong_size();					\
+	}								\
+	__x;								\
+})
+
+#define xchg(ptr, v)							\
+	__xchg((v), (ptr), sizeof(*ptr))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock)			\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) {							\
+	case __X86_CASE_B:						\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
+		asm volatile(lock "cmpxchgb %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "q" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
+		asm volatile(lock "cmpxchgw %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
+		asm volatile(lock "cmpxchgl %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
+		asm volatile(lock "cmpxchgq %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	__ret;								\
+})
+
+#define __cmpxchg(ptr, old, new, size)					\
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "")
+
 #ifdef CONFIG_X86_32
 # include "cmpxchg_32.h"
 #else
 # include "cmpxchg_64.h"
 #endif
+
+#ifdef __HAVE_ARCH_CMPXCHG
+#define cmpxchg(ptr, old, new)						\
+	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define sync_cmpxchg(ptr, old, new)					\
+	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+
+#define cmpxchg_local(ptr, old, new)					\
+	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+#endif
+
+#endif	/* ASM_X86_CMPXCHG_H */
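
With everything above shared, callers see a single API on both word sizes. A brief usage sketch in plain C (the owner-handoff protocol here is invented for illustration):

	unsigned long owner = 0;

	/* xchg(): unconditionally swap in a new value and return the old one. */
	unsigned long prev = xchg(&owner, 1UL);

	/* cmpxchg(): store 2 only if owner still holds 1; success is detected
	 * by comparing the returned value against the expected old value. */
	if (cmpxchg(&owner, 1UL, 2UL) == 1UL) {
		/* we won the race and performed the update */
	}

On a 32-bit kernel the same calls on a u64 fall through to the __X86_CASE_Q == -1 default arm and fail to link, flagging the usage error described in the comment above.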
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 59d8e36d1eed..fbebb07dd80b 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -1,62 +1,11 @@
 #ifndef _ASM_X86_CMPXCHG_32_H
 #define _ASM_X86_CMPXCHG_32_H
 
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
 /*
  * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
  * you need to test for the feature in boot_cpu_data.
  */
 
-extern void __xchg_wrong_size(void);
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
-
 /*
  * CMPXCHG8B only writes to the target if we had the previous
  * value in registers, otherwise it acts as a read and gives us the
@@ -85,70 +34,8 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }
 
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock)			\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (old);				\
-	__typeof__(*(ptr)) __new = (new);				\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile(lock "cmpxchgb %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "q" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile(lock "cmpxchgw %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile(lock "cmpxchgl %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__cmpxchg_wrong_size();					\
-	}								\
-	__ret;								\
-})
-
-#define __cmpxchg(ptr, old, new, size)					\
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, old, new)						\
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new)					\
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new)					\
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
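
The CMPXCHG8B note kept above is what makes lock-free 64-bit stores possible on 32-bit: when the comparison fails, the instruction behaves as an atomic read and hands back the current contents to retry with. A sketch of that retry loop in plain C, written against the cmpxchg64() macro this header goes on to define (an illustration of the idea, not the kernel's actual asm implementation of set_64bit()):

	static inline void set_64bit_sketch(volatile u64 *ptr, u64 value)
	{
		u64 old = *ptr;	/* a racy first guess is fine */
		u64 seen;

		/* cmpxchg64() returns what was in memory; on a mismatch that
		 * becomes the new expected value and the loop tries again. */
		while ((seen = cmpxchg64(ptr, old, value)) != old)
			old = seen;
	}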
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 5bfa560ef14e..285da02c38fa 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -1,66 +1,6 @@
 #ifndef _ASM_X86_CMPXCHG_64_H
 #define _ASM_X86_CMPXCHG_64_H
 
-#include <asm/alternative.h> /* Provides LOCK_PREFIX */
-
-extern void __xchg_wrong_size(void);
-extern void __cmpxchg_wrong_size(void);
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
- */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 8:								\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
-
 static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
 	*ptr = val;
@@ -68,77 +8,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 
 #define __HAVE_ARCH_CMPXCHG 1
 
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#define __raw_cmpxchg(ptr, old, new, size, lock)			\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__typeof__(*(ptr)) __old = (old);				\
-	__typeof__(*(ptr)) __new = (new);				\
-	switch (size) {							\
-	case 1:								\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile(lock "cmpxchgb %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "q" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 2:								\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile(lock "cmpxchgw %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 4:								\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile(lock "cmpxchgl %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	case 8:								\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile(lock "cmpxchgq %2,%1"			\
-			     : "=a" (__ret), "+m" (*__ptr)		\
-			     : "r" (__new), "0" (__old)			\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__cmpxchg_wrong_size();					\
-	}								\
-	__ret;								\
-})
-
-#define __cmpxchg(ptr, old, new, size)					\
-	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
-
-#define __sync_cmpxchg(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
-
-#define __cmpxchg_local(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "")
-
-#define cmpxchg(ptr, old, new)						\
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define sync_cmpxchg(ptr, old, new)					\
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
-
-#define cmpxchg_local(ptr, old, new)					\
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
-
 #define cmpxchg64(ptr, o, n)						\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
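
Note the complementary error strategy in this last hunk: cmpxchg64() supports exactly one operand width, so BUILD_BUG_ON() can reject a wrong size at compile time, whereas the multi-width macros above must defer the check to the linker. The kernel's BUILD_BUG_ON family reduces to a negative-width bitfield; a stripped-down GNU C sketch of the mechanism (the helper name is hypothetical):

	/* Compiles away entirely when sizeof(*(ptr)) == 8; otherwise the
	 * unnamed bitfield gets width -1 and compilation stops on the spot. */
	#define CHECK_SIZE_IS_8(ptr)						\
		((void)sizeof(struct { int : -!!(sizeof(*(ptr)) != 8); }))

So a mis-sized cmpxchg64() dies in the compiler, while a mis-sized cmpxchg() only dies in the linker.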