aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/compiler.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--include/linux/compiler.h74
1 files changed, 74 insertions, 0 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d5ad7b1118fc..a1c81f80978e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) 186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
187#endif 187#endif
188 188
189#include <uapi/linux/types.h>
190
/*
 * Compile-time tripwire for over-wide accesses.  The declaration carries
 * __compiletime_warning (when the toolchain defines it), so any call that
 * survives dead-code elimination — i.e. any READ_ONCE()/ASSIGN_ONCE() on
 * data wider than the machine word — emits
 * "data access exceeds word size and won't be atomic" at build time.
 */
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;

/*
 * Empty definition: the call itself is optimized away entirely; only the
 * diagnostic attached to the declaration above matters.
 */
static __always_inline void data_access_exceeds_word_size(void)
{
}
200
/*
 * Copy *p into *res with exactly one volatile-qualified load when the
 * object fits in a machine word (1/2/4 bytes, plus 8 on CONFIG_64BIT).
 * The volatile access stops the compiler from tearing, merging, or
 * refetching the read.  Larger objects fall back to memcpy bracketed by
 * barrier() — still refetch-proof, but NOT atomic, hence the
 * compile-time warning raised via data_access_exceeds_word_size().
 * Backend for the READ_ONCE() macro below.
 */
static __always_inline void __read_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
	default:
		/* Over word size: warn, and fence the memcpy on both sides. */
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		data_access_exceeds_word_size();
		barrier();
	}
}
217
/*
 * Mirror of __read_once_size() for stores: copy *res into *p with exactly
 * one volatile-qualified store when the object fits in a machine word
 * (1/2/4 bytes, plus 8 on CONFIG_64BIT), preventing the compiler from
 * tearing or replaying the write.  Larger objects fall back to memcpy
 * bracketed by barrier() — not atomic, hence the compile-time warning via
 * data_access_exceeds_word_size().  Backend for ASSIGN_ONCE() below.
 */
static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
	default:
		/* Over word size: warn, and fence the memcpy on both sides. */
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		data_access_exceeds_word_size();
		barrier();
	}
}
234
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

/*
 * Read x exactly once.  x must be an lvalue; the value read is the
 * result of the statement expression.  Note the macro arguments are
 * fully parenthesized: without &(x), an argument such as a conditional
 * or comma expression would bind wrongly against the address-of.
 */
#define READ_ONCE(x) \
	({ typeof(x) __val; __read_once_size(&(x), &__val, sizeof(__val)); __val; })

/*
 * Store val into x exactly once.  x must be an lvalue; the stored value
 * is the result of the statement expression.  val is parenthesized so
 * that e.g. a comma expression is evaluated as a single initializer.
 */
#define ASSIGN_ONCE(val, x) \
	({ typeof(x) __val = (val); __assign_once_size(&(x), &__val, sizeof(__val)); __val; })
262
189#endif /* __KERNEL__ */ 263#endif /* __KERNEL__ */
190 264
191#endif /* __ASSEMBLY__ */ 265#endif /* __ASSEMBLY__ */