| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-04-14 11:51:33 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-04-14 11:51:33 -0400 |
| commit | 85a3685852d9ac7d92be9d824533c915a4597fa4 (patch) | |
| tree | b7c542e2061cf96c9f7ad500fa12567f9ff0b39f /include/linux/compiler.h | |
| parent | 92bac83dd79e60e65c475222e41a992a70434beb (diff) | |
| parent | 8b8a518ef16be2de27207991e32fc32b0475c767 (diff) | |
Merge branch 'next' into for-linus
Prepare first round of input updates for 4.1 merge window.
Diffstat (limited to 'include/linux/compiler.h')
| -rw-r--r-- | include/linux/compiler.h | 33 |
1 file changed, 24 insertions, 9 deletions
```diff
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f872ee3..1b45e4a0519b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -54,7 +54,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #include <linux/compiler-gcc.h>
 #endif
 
+#ifdef CC_USING_HOTPATCH
+#define notrace __attribute__((hotpatch(0,0)))
+#else
 #define notrace __attribute__((no_instrument_function))
+#endif
 
 /* Intel compiler defines __GNUC__. So we will overwrite implementations
  * coming from above header files here
```
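For context, a minimal sketch (not part of this diff, hypothetical function name) of how `notrace` is used by callers; the hunk above only changes what the macro expands to when the compiler was invoked with hotpatch support (`CC_USING_HOTPATCH`), callers are unaffected:

```c
/*
 * Hypothetical caller: a helper that must not be instrumented.
 * With a hotpatch-enabled compiler, 'notrace' now expands to
 * __attribute__((hotpatch(0,0))), i.e. no hotpatch NOP area for this
 * function; otherwise it remains __attribute__((no_instrument_function)).
 */
static notrace unsigned long sample_counter_read(void)
{
	return 0;	/* illustrative body only */
}
```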
```diff
@@ -198,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void)
 {
 }
 
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
```
```diff
@@ -255,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  */
 
 #define READ_ONCE(x) \
-	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-	({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
 
```
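The `const volatile` parameter in the previous hunk and the union-of-`char` temporary here work together: READ_ONCE() now fills its result through the non-const `__c` array instead of assigning to a possibly const-qualified `typeof(x)` temporary. A minimal sketch (hypothetical function names) of what that enables:

```c
/*
 * Sketch: reading through a pointer to const data. With the old
 * READ_ONCE() ('typeof(x) __val;' plus a non-const __read_once_size()
 * parameter) this produced const-qualifier warnings; with the version
 * above it builds cleanly.
 */
static int poll_ready_flag(const int *flag)
{
	return READ_ONCE(*flag);
}

/*
 * WRITE_ONCE() is only touched cosmetically: the temporary is now
 * initialized directly from '(val)' instead of declared and then assigned.
 */
static void publish_value(int *slot, int v)
{
	WRITE_ONCE(*slot, v);
}
```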
```diff
@@ -385,7 +389,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
```
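`__native_word()` now also accepts char- and short-sized types. Its consumer later in this header is the compile-time check behind the acquire/release helpers, roughly:

```c
/*
 * Roughly how __native_word() is consumed (definition as found elsewhere in
 * compiler.h): smp_store_release()/smp_load_acquire() assert their operand
 * is a native word size, so widening __native_word() lets them be used on
 * 1- and 2-byte types as well.
 */
#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
```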
```diff
@@ -447,12 +451,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  * to make the compiler aware of ordering is to put the two invocations of
  * ACCESS_ONCE() in different C statements.
  *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time. Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE/ASSIGN_ONCE instead.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define __ACCESS_ONCE(x) ({ \
+	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+	(volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
```
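The new `__ACCESS_ONCE()` wrapper is what enforces the "scalar types only" rule stated in the updated comment: the dummy `__var` is initialized by casting `0` to `typeof(x)`, which only compiles for scalar (integer or pointer) types. A minimal sketch, with hypothetical names, of what still builds and what no longer does:

```c
/* Hypothetical example of the tightened ACCESS_ONCE() rules. */
struct sample_pair { int a, b; };

static void access_once_examples(unsigned long *word, struct sample_pair *p)
{
	unsigned long w = ACCESS_ONCE(*word);	/* scalar: still compiles */

	/*
	 * struct sample_pair v = ACCESS_ONCE(*p);
	 *
	 * ...would now fail to build, because '(__force typeof(*p)) 0' tries
	 * to cast 0 to a struct type. Per the updated comment, such accesses
	 * should use READ_ONCE()/ASSIGN_ONCE() instead.
	 */
	(void)w;
	(void)p;
}
```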
