diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-20 19:48:59 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-20 19:48:59 -0500 |
| commit | 60815cf2e05057db5b78e398d9734c493560b11e (patch) | |
| tree | 23d7f55df13cc5a0c072cc8a6f361f8e7050b825 /include | |
| parent | bfc7249cc293deac8f2678b7ec3d2407b68c0a33 (diff) | |
| parent | 5de72a2247ac05bde7c89039631b3d0c6186fafb (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
"kernel: Provide READ_ONCE and ASSIGN_ONCE
As discussed on LKML http://marc.info/?i=54611D86.4040306%40de.ibm.com
ACCESS_ONCE might fail with specific compilers for non-scalar
accesses.
Here is a set of patches to tackle that problem.
The first patch introduces READ_ONCE and ASSIGN_ONCE. If the data
structure is larger than the machine word size, memcpy is used and a
warning is emitted. The next patches fix up several in-tree users of
ACCESS_ONCE on non-scalar types.
This does not yet contain a patch that forces ACCESS_ONCE to work only
on scalar types. This is targeted for the next merge window, as linux-next
already contains new offenders regarding ACCESS_ONCE vs.
non-scalar types"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
s390/kvm: REPLACE barrier fixup with READ_ONCE
arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
arm64/spinlock: Replace ACCESS_ONCE READ_ONCE
mips/gup: Replace ACCESS_ONCE with READ_ONCE
x86/gup: Replace ACCESS_ONCE with READ_ONCE
x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
mm: replace ACCESS_ONCE with READ_ONCE or barriers
kernel: Provide READ_ONCE and ASSIGN_ONCE
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/compiler.h | 74 |
1 files changed, 74 insertions, 0 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d5ad7b1118fc..a1c81f80978e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 186 | # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) | 186 | # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
| 187 | #endif | 187 | #endif |
| 188 | 188 | ||
| 189 | #include <uapi/linux/types.h> | ||
| 190 | |||
| 191 | static __always_inline void data_access_exceeds_word_size(void) | ||
| 192 | #ifdef __compiletime_warning | ||
| 193 | __compiletime_warning("data access exceeds word size and won't be atomic") | ||
| 194 | #endif | ||
| 195 | ; | ||
| 196 | |||
| 197 | static __always_inline void data_access_exceeds_word_size(void) | ||
| 198 | { | ||
| 199 | } | ||
| 200 | |||
| 201 | static __always_inline void __read_once_size(volatile void *p, void *res, int size) | ||
| 202 | { | ||
| 203 | switch (size) { | ||
| 204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | ||
| 205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | ||
| 206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | ||
| 207 | #ifdef CONFIG_64BIT | ||
| 208 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | ||
| 209 | #endif | ||
| 210 | default: | ||
| 211 | barrier(); | ||
| 212 | __builtin_memcpy((void *)res, (const void *)p, size); | ||
| 213 | data_access_exceeds_word_size(); | ||
| 214 | barrier(); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | static __always_inline void __assign_once_size(volatile void *p, void *res, int size) | ||
| 219 | { | ||
| 220 | switch (size) { | ||
| 221 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | ||
| 222 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; | ||
| 223 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; | ||
| 224 | #ifdef CONFIG_64BIT | ||
| 225 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | ||
| 226 | #endif | ||
| 227 | default: | ||
| 228 | barrier(); | ||
| 229 | __builtin_memcpy((void *)p, (const void *)res, size); | ||
| 230 | data_access_exceeds_word_size(); | ||
| 231 | barrier(); | ||
| 232 | } | ||
| 233 | } | ||
| 234 | |||
| 235 | /* | ||
| 236 | * Prevent the compiler from merging or refetching reads or writes. The | ||
| 237 | * compiler is also forbidden from reordering successive instances of | ||
| 238 | * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the | ||
| 239 | * compiler is aware of some particular ordering. One way to make the | ||
| 240 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | ||
| 241 | * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. | ||
| 242 | * | ||
| 243 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | ||
| 244 | * data types like structs or unions. If the size of the accessed data | ||
| 245 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | ||
| 246 | * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a | ||
| 247 | * compile-time warning. | ||
| 248 | * | ||
| 249 | * Their two major use cases are: (1) Mediating communication between | ||
| 250 | * process-level code and irq/NMI handlers, all running on the same CPU, | ||
| 251 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | ||
| 252 | * mutilate accesses that either do not require ordering or that interact | ||
| 253 | * with an explicit memory barrier or atomic instruction that provides the | ||
| 254 | * required ordering. | ||
| 255 | */ | ||
| 256 | |||
/*
 * READ_ONCE(x) - read x exactly once, including aggregate types.
 *
 * The argument is fully parenthesized before '&' is applied so the
 * macro stays hygienic for any lvalue expression passed in (standard
 * macro-argument parenthesization; later kernels use &(x) here too).
 */
#define READ_ONCE(x) \
	({ typeof(x) __val; __read_once_size(&(x), &__val, sizeof(__val)); __val; })
| 259 | |||
/*
 * ASSIGN_ONCE(val, x) - write val to x exactly once, including
 * aggregate types; evaluates to the value stored.
 *
 * Both macro arguments are parenthesized in the expansion so that
 * expression arguments (e.g. a comma expression for val, or any
 * lvalue expression for x) cannot mis-bind against '=' or '&'.
 */
#define ASSIGN_ONCE(val, x) \
	({ typeof(x) __val; __val = (val); __assign_once_size(&(x), &__val, sizeof(__val)); __val; })
| 262 | |||
| 189 | #endif /* __KERNEL__ */ | 263 | #endif /* __KERNEL__ */ |
| 190 | 264 | ||
| 191 | #endif /* __ASSEMBLY__ */ | 265 | #endif /* __ASSEMBLY__ */ |
