diff options
author | Peter Zijlstra <peterz@infradead.org> | 2015-03-26 12:45:37 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-03-27 04:42:01 -0400 |
commit | 7bd3e239d6c6d1cad276e8f130b386df4234dcd7 (patch) | |
tree | 260eb35894afb72ab32e860f37960b2dd6114e12 /include/linux/compiler.h | |
parent | e6beaa363d56d7fc2f8cd6f7291e4d93911a428a (diff) |
locking: Remove atomicity checks from {READ,WRITE}_ONCE
The fact that volatile allows for atomic load/stores is a special case,
not a requirement for {READ,WRITE}_ONCE(). Their primary purpose is to
force the compiler to emit load/stores _once_.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r-- | include/linux/compiler.h | 16 |
1 file changed, 0 insertions, 16 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 1b45e4a0519b..0e41ca0e5927 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -192,29 +192,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
192 | 192 | ||
193 | #include <uapi/linux/types.h> | 193 | #include <uapi/linux/types.h> |
194 | 194 | ||
195 | static __always_inline void data_access_exceeds_word_size(void) | ||
196 | #ifdef __compiletime_warning | ||
197 | __compiletime_warning("data access exceeds word size and won't be atomic") | ||
198 | #endif | ||
199 | ; | ||
200 | |||
201 | static __always_inline void data_access_exceeds_word_size(void) | ||
202 | { | ||
203 | } | ||
204 | |||
205 | static __always_inline void __read_once_size(const volatile void *p, void *res, int size) | 195 | static __always_inline void __read_once_size(const volatile void *p, void *res, int size) |
206 | { | 196 | { |
207 | switch (size) { | 197 | switch (size) { |
208 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | 198 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; |
209 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | 199 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; |
210 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | 200 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; |
211 | #ifdef CONFIG_64BIT | ||
212 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | 201 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; |
213 | #endif | ||
214 | default: | 202 | default: |
215 | barrier(); | 203 | barrier(); |
216 | __builtin_memcpy((void *)res, (const void *)p, size); | 204 | __builtin_memcpy((void *)res, (const void *)p, size); |
217 | data_access_exceeds_word_size(); | ||
218 | barrier(); | 205 | barrier(); |
219 | } | 206 | } |
220 | } | 207 | } |
@@ -225,13 +212,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
225 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | 212 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
226 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; | 213 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; |
227 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; | 214 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; |
228 | #ifdef CONFIG_64BIT | ||
229 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | 215 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; |
230 | #endif | ||
231 | default: | 216 | default: |
232 | barrier(); | 217 | barrier(); |
233 | __builtin_memcpy((void *)p, (const void *)res, size); | 218 | __builtin_memcpy((void *)p, (const void *)res, size); |
234 | data_access_exceeds_word_size(); | ||
235 | barrier(); | 219 | barrier(); |
236 | } | 220 | } |
237 | } | 221 | } |