author     Ingo Molnar <mingo@kernel.org>  2019-02-11 08:27:05 -0500
committer  Ingo Molnar <mingo@kernel.org>  2019-02-11 08:27:05 -0500
commit     41b8687191cfd0326db03b0e82fb09d8c98ca641 (patch)
tree       a214babca0a181c67c25a615d608465a5f92f8c4
parent     49262de2270e09882d7bd8866a691cdd69ab32f6 (diff)
parent     bdf37b4dd35d2517cadc10735cd33022da7df133 (diff)
Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
Signed-off-by: Ingo Molnar <mingo@kernel.org>
36 files changed, 5894 insertions, 1812 deletions
@@ -6,7 +6,8 @@ | |||
6 | # 2) Generate timeconst.h | 6 | # 2) Generate timeconst.h |
7 | # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h) | 7 | # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h) |
8 | # 4) Check for missing system calls | 8 | # 4) Check for missing system calls |
9 | # 5) Generate constants.py (may need bounds.h) | 9 | # 5) check atomics headers are up-to-date |
10 | # 6) Generate constants.py (may need bounds.h) | ||
10 | 11 | ||
11 | ##### | 12 | ##### |
12 | # 1) Generate bounds.h | 13 | # 1) Generate bounds.h |
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE | |||
59 | $(call cmd,syscalls) | 60 | $(call cmd,syscalls) |
60 | 61 | ||
61 | ##### | 62 | ##### |
62 | # 5) Generate constants for Python GDB integration | 63 | # 5) Check atomic headers are up-to-date |
64 | # | ||
65 | |||
66 | always += old-atomics | ||
67 | targets += old-atomics | ||
68 | |||
69 | quiet_cmd_atomics = CALL $< | ||
70 | cmd_atomics = $(CONFIG_SHELL) $< | ||
71 | |||
72 | old-atomics: scripts/atomic/check-atomics.sh FORCE | ||
73 | $(call cmd,atomics) | ||
74 | |||
75 | ##### | ||
76 | # 6) Generate constants for Python GDB integration | ||
63 | # | 77 | # |
64 | 78 | ||
65 | extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py | 79 | extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py |
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c68de3cfd80..8b95aa5363dd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2609,6 +2609,7 @@ L: linux-kernel@vger.kernel.org | |||
2609 | S: Maintained | 2609 | S: Maintained |
2610 | F: arch/*/include/asm/atomic*.h | 2610 | F: arch/*/include/asm/atomic*.h |
2611 | F: include/*/atomic*.h | 2611 | F: include/*/atomic*.h |
2612 | F: scripts/atomic/ | ||
2612 | 2613 | ||
2613 | ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER | 2614 | ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER |
2614 | M: Bradley Grove <linuxdrivers@attotech.com> | 2615 | M: Bradley Grove <linuxdrivers@attotech.com> |
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9bca54dda75c..1f4e9ee641c9 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -42,124 +42,131 @@ | |||
42 | 42 | ||
43 | #define ATOMIC_INIT(i) { (i) } | 43 | #define ATOMIC_INIT(i) { (i) } |
44 | 44 | ||
45 | #define atomic_read(v) READ_ONCE((v)->counter) | 45 | #define arch_atomic_read(v) READ_ONCE((v)->counter) |
46 | #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) | 46 | #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) |
47 | 47 | ||
48 | #define atomic_add_return_relaxed atomic_add_return_relaxed | 48 | #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed |
49 | #define atomic_add_return_acquire atomic_add_return_acquire | 49 | #define arch_atomic_add_return_acquire arch_atomic_add_return_acquire |
50 | #define atomic_add_return_release atomic_add_return_release | 50 | #define arch_atomic_add_return_release arch_atomic_add_return_release |
51 | #define atomic_add_return atomic_add_return | 51 | #define arch_atomic_add_return arch_atomic_add_return |
52 | 52 | ||
53 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed | 53 | #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed |
54 | #define atomic_sub_return_acquire atomic_sub_return_acquire | 54 | #define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire |
55 | #define atomic_sub_return_release atomic_sub_return_release | 55 | #define arch_atomic_sub_return_release arch_atomic_sub_return_release |
56 | #define atomic_sub_return atomic_sub_return | 56 | #define arch_atomic_sub_return arch_atomic_sub_return |
57 | 57 | ||
58 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | 58 | #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed |
59 | #define atomic_fetch_add_acquire atomic_fetch_add_acquire | 59 | #define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire |
60 | #define atomic_fetch_add_release atomic_fetch_add_release | 60 | #define arch_atomic_fetch_add_release arch_atomic_fetch_add_release |
61 | #define atomic_fetch_add atomic_fetch_add | 61 | #define arch_atomic_fetch_add arch_atomic_fetch_add |
62 | 62 | ||
63 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | 63 | #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed |
64 | #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire | 64 | #define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire |
65 | #define atomic_fetch_sub_release atomic_fetch_sub_release | 65 | #define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release |
66 | #define atomic_fetch_sub atomic_fetch_sub | 66 | #define arch_atomic_fetch_sub arch_atomic_fetch_sub |
67 | 67 | ||
68 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | 68 | #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed |
69 | #define atomic_fetch_and_acquire atomic_fetch_and_acquire | 69 | #define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire |
70 | #define atomic_fetch_and_release atomic_fetch_and_release | 70 | #define arch_atomic_fetch_and_release arch_atomic_fetch_and_release |
71 | #define atomic_fetch_and atomic_fetch_and | 71 | #define arch_atomic_fetch_and arch_atomic_fetch_and |
72 | 72 | ||
73 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | 73 | #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed |
74 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire | 74 | #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire |
75 | #define atomic_fetch_andnot_release atomic_fetch_andnot_release | 75 | #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release |
76 | #define atomic_fetch_andnot atomic_fetch_andnot | 76 | #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot |
77 | 77 | ||
78 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | 78 | #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed |
79 | #define atomic_fetch_or_acquire atomic_fetch_or_acquire | 79 | #define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire |
80 | #define atomic_fetch_or_release atomic_fetch_or_release | 80 | #define arch_atomic_fetch_or_release arch_atomic_fetch_or_release |
81 | #define atomic_fetch_or atomic_fetch_or | 81 | #define arch_atomic_fetch_or arch_atomic_fetch_or |
82 | 82 | ||
83 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | 83 | #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed |
84 | #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire | 84 | #define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire |
85 | #define atomic_fetch_xor_release atomic_fetch_xor_release | 85 | #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release |
86 | #define atomic_fetch_xor atomic_fetch_xor | 86 | #define arch_atomic_fetch_xor arch_atomic_fetch_xor |
87 | 87 | ||
88 | #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) | 88 | #define arch_atomic_xchg_relaxed(v, new) \ |
89 | #define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new)) | 89 | arch_xchg_relaxed(&((v)->counter), (new)) |
90 | #define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new)) | 90 | #define arch_atomic_xchg_acquire(v, new) \ |
91 | #define atomic_xchg(v, new) xchg(&((v)->counter), (new)) | 91 | arch_xchg_acquire(&((v)->counter), (new)) |
92 | 92 | #define arch_atomic_xchg_release(v, new) \ | |
93 | #define atomic_cmpxchg_relaxed(v, old, new) \ | 93 | arch_xchg_release(&((v)->counter), (new)) |
94 | cmpxchg_relaxed(&((v)->counter), (old), (new)) | 94 | #define arch_atomic_xchg(v, new) \ |
95 | #define atomic_cmpxchg_acquire(v, old, new) \ | 95 | arch_xchg(&((v)->counter), (new)) |
96 | cmpxchg_acquire(&((v)->counter), (old), (new)) | 96 | |
97 | #define atomic_cmpxchg_release(v, old, new) \ | 97 | #define arch_atomic_cmpxchg_relaxed(v, old, new) \ |
98 | cmpxchg_release(&((v)->counter), (old), (new)) | 98 | arch_cmpxchg_relaxed(&((v)->counter), (old), (new)) |
99 | #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) | 99 | #define arch_atomic_cmpxchg_acquire(v, old, new) \ |
100 | 100 | arch_cmpxchg_acquire(&((v)->counter), (old), (new)) | |
101 | #define atomic_andnot atomic_andnot | 101 | #define arch_atomic_cmpxchg_release(v, old, new) \ |
102 | arch_cmpxchg_release(&((v)->counter), (old), (new)) | ||
103 | #define arch_atomic_cmpxchg(v, old, new) \ | ||
104 | arch_cmpxchg(&((v)->counter), (old), (new)) | ||
105 | |||
106 | #define arch_atomic_andnot arch_atomic_andnot | ||
102 | 107 | ||
103 | /* | 108 | /* |
104 | * 64-bit atomic operations. | 109 | * 64-bit arch_atomic operations. |
105 | */ | 110 | */ |
106 | #define ATOMIC64_INIT ATOMIC_INIT | 111 | #define ATOMIC64_INIT ATOMIC_INIT |
107 | #define atomic64_read atomic_read | 112 | #define arch_atomic64_read arch_atomic_read |
108 | #define atomic64_set atomic_set | 113 | #define arch_atomic64_set arch_atomic_set |
109 | 114 | ||
110 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed | 115 | #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed |
111 | #define atomic64_add_return_acquire atomic64_add_return_acquire | 116 | #define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire |
112 | #define atomic64_add_return_release atomic64_add_return_release | 117 | #define arch_atomic64_add_return_release arch_atomic64_add_return_release |
113 | #define atomic64_add_return atomic64_add_return | 118 | #define arch_atomic64_add_return arch_atomic64_add_return |
114 | 119 | ||
115 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed | 120 | #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed |
116 | #define atomic64_sub_return_acquire atomic64_sub_return_acquire | 121 | #define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire |
117 | #define atomic64_sub_return_release atomic64_sub_return_release | 122 | #define arch_atomic64_sub_return_release arch_atomic64_sub_return_release |
118 | #define atomic64_sub_return atomic64_sub_return | 123 | #define arch_atomic64_sub_return arch_atomic64_sub_return |
119 | 124 | ||
120 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | 125 | #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed |
121 | #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire | 126 | #define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire |
122 | #define atomic64_fetch_add_release atomic64_fetch_add_release | 127 | #define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release |
123 | #define atomic64_fetch_add atomic64_fetch_add | 128 | #define arch_atomic64_fetch_add arch_atomic64_fetch_add |
124 | 129 | ||
125 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | 130 | #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed |
126 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire | 131 | #define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire |
127 | #define atomic64_fetch_sub_release atomic64_fetch_sub_release | 132 | #define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release |
128 | #define atomic64_fetch_sub atomic64_fetch_sub | 133 | #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub |
129 | 134 | ||
130 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | 135 | #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed |
131 | #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire | 136 | #define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire |
132 | #define atomic64_fetch_and_release atomic64_fetch_and_release | 137 | #define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release |
133 | #define atomic64_fetch_and atomic64_fetch_and | 138 | #define arch_atomic64_fetch_and arch_atomic64_fetch_and |
134 | 139 | ||
135 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | 140 | #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed |
136 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire | 141 | #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire |
137 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release | 142 | #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release |
138 | #define atomic64_fetch_andnot atomic64_fetch_andnot | 143 | #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot |
139 | 144 | ||
140 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | 145 | #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed |
141 | #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire | 146 | #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire |
142 | #define atomic64_fetch_or_release atomic64_fetch_or_release | 147 | #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release |
143 | #define atomic64_fetch_or atomic64_fetch_or | 148 | #define arch_atomic64_fetch_or arch_atomic64_fetch_or |
144 | 149 | ||
145 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | 150 | #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed |
146 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire | 151 | #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire |
147 | #define atomic64_fetch_xor_release atomic64_fetch_xor_release | 152 | #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release |
148 | #define atomic64_fetch_xor atomic64_fetch_xor | 153 | #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor |
149 | 154 | ||
150 | #define atomic64_xchg_relaxed atomic_xchg_relaxed | 155 | #define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed |
151 | #define atomic64_xchg_acquire atomic_xchg_acquire | 156 | #define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire |
152 | #define atomic64_xchg_release atomic_xchg_release | 157 | #define arch_atomic64_xchg_release arch_atomic_xchg_release |
153 | #define atomic64_xchg atomic_xchg | 158 | #define arch_atomic64_xchg arch_atomic_xchg |
154 | 159 | ||
155 | #define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed | 160 | #define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed |
156 | #define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire | 161 | #define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire |
157 | #define atomic64_cmpxchg_release atomic_cmpxchg_release | 162 | #define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release |
158 | #define atomic64_cmpxchg atomic_cmpxchg | 163 | #define arch_atomic64_cmpxchg arch_atomic_cmpxchg |
159 | 164 | ||
160 | #define atomic64_andnot atomic64_andnot | 165 | #define arch_atomic64_andnot arch_atomic64_andnot |
161 | 166 | ||
162 | #define atomic64_dec_if_positive atomic64_dec_if_positive | 167 | #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive |
168 | |||
169 | #include <asm-generic/atomic-instrumented.h> | ||
163 | 170 | ||
164 | #endif | 171 | #endif |
165 | #endif | 172 | #endif |
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b99005453..e321293e0c89 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | #define ATOMIC_OP(op, asm_op) \ | 40 | #define ATOMIC_OP(op, asm_op) \ |
41 | __LL_SC_INLINE void \ | 41 | __LL_SC_INLINE void \ |
42 | __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ | 42 | __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ |
43 | { \ | 43 | { \ |
44 | unsigned long tmp; \ | 44 | unsigned long tmp; \ |
45 | int result; \ | 45 | int result; \ |
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ | |||
53 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ | 53 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ |
54 | : "Ir" (i)); \ | 54 | : "Ir" (i)); \ |
55 | } \ | 55 | } \ |
56 | __LL_SC_EXPORT(atomic_##op); | 56 | __LL_SC_EXPORT(arch_atomic_##op); |
57 | 57 | ||
58 | #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ | 58 | #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ |
59 | __LL_SC_INLINE int \ | 59 | __LL_SC_INLINE int \ |
60 | __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ | 60 | __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ |
61 | { \ | 61 | { \ |
62 | unsigned long tmp; \ | 62 | unsigned long tmp; \ |
63 | int result; \ | 63 | int result; \ |
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ | |||
75 | \ | 75 | \ |
76 | return result; \ | 76 | return result; \ |
77 | } \ | 77 | } \ |
78 | __LL_SC_EXPORT(atomic_##op##_return##name); | 78 | __LL_SC_EXPORT(arch_atomic_##op##_return##name); |
79 | 79 | ||
80 | #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ | 80 | #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ |
81 | __LL_SC_INLINE int \ | 81 | __LL_SC_INLINE int \ |
82 | __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ | 82 | __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ |
83 | { \ | 83 | { \ |
84 | unsigned long tmp; \ | 84 | unsigned long tmp; \ |
85 | int val, result; \ | 85 | int val, result; \ |
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ | |||
97 | \ | 97 | \ |
98 | return result; \ | 98 | return result; \ |
99 | } \ | 99 | } \ |
100 | __LL_SC_EXPORT(atomic_fetch_##op##name); | 100 | __LL_SC_EXPORT(arch_atomic_fetch_##op##name); |
101 | 101 | ||
102 | #define ATOMIC_OPS(...) \ | 102 | #define ATOMIC_OPS(...) \ |
103 | ATOMIC_OP(__VA_ARGS__) \ | 103 | ATOMIC_OP(__VA_ARGS__) \ |
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor) | |||
133 | 133 | ||
134 | #define ATOMIC64_OP(op, asm_op) \ | 134 | #define ATOMIC64_OP(op, asm_op) \ |
135 | __LL_SC_INLINE void \ | 135 | __LL_SC_INLINE void \ |
136 | __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ | 136 | __LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \ |
137 | { \ | 137 | { \ |
138 | long result; \ | 138 | long result; \ |
139 | unsigned long tmp; \ | 139 | unsigned long tmp; \ |
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ | |||
147 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ | 147 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ |
148 | : "Ir" (i)); \ | 148 | : "Ir" (i)); \ |
149 | } \ | 149 | } \ |
150 | __LL_SC_EXPORT(atomic64_##op); | 150 | __LL_SC_EXPORT(arch_atomic64_##op); |
151 | 151 | ||
152 | #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ | 152 | #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ |
153 | __LL_SC_INLINE long \ | 153 | __LL_SC_INLINE long \ |
154 | __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ | 154 | __LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\ |
155 | { \ | 155 | { \ |
156 | long result; \ | 156 | long result; \ |
157 | unsigned long tmp; \ | 157 | unsigned long tmp; \ |
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ | |||
169 | \ | 169 | \ |
170 | return result; \ | 170 | return result; \ |
171 | } \ | 171 | } \ |
172 | __LL_SC_EXPORT(atomic64_##op##_return##name); | 172 | __LL_SC_EXPORT(arch_atomic64_##op##_return##name); |
173 | 173 | ||
174 | #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ | 174 | #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ |
175 | __LL_SC_INLINE long \ | 175 | __LL_SC_INLINE long \ |
176 | __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ | 176 | __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \ |
177 | { \ | 177 | { \ |
178 | long result, val; \ | 178 | long result, val; \ |
179 | unsigned long tmp; \ | 179 | unsigned long tmp; \ |
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ | |||
191 | \ | 191 | \ |
192 | return result; \ | 192 | return result; \ |
193 | } \ | 193 | } \ |
194 | __LL_SC_EXPORT(atomic64_fetch_##op##name); | 194 | __LL_SC_EXPORT(arch_atomic64_fetch_##op##name); |
195 | 195 | ||
196 | #define ATOMIC64_OPS(...) \ | 196 | #define ATOMIC64_OPS(...) \ |
197 | ATOMIC64_OP(__VA_ARGS__) \ | 197 | ATOMIC64_OP(__VA_ARGS__) \ |
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor) | |||
226 | #undef ATOMIC64_OP | 226 | #undef ATOMIC64_OP |
227 | 227 | ||
228 | __LL_SC_INLINE long | 228 | __LL_SC_INLINE long |
229 | __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) | 229 | __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) |
230 | { | 230 | { |
231 | long result; | 231 | long result; |
232 | unsigned long tmp; | 232 | unsigned long tmp; |
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) | |||
246 | 246 | ||
247 | return result; | 247 | return result; |
248 | } | 248 | } |
249 | __LL_SC_EXPORT(atomic64_dec_if_positive); | 249 | __LL_SC_EXPORT(arch_atomic64_dec_if_positive); |
250 | 250 | ||
251 | #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ | 251 | #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ |
252 | __LL_SC_INLINE u##sz \ | 252 | __LL_SC_INLINE u##sz \ |
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a424355240c5..9256a3921e4b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -25,9 +25,9 @@ | |||
25 | #error "please don't include this file directly" | 25 | #error "please don't include this file directly" |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) | 28 | #define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op) |
29 | #define ATOMIC_OP(op, asm_op) \ | 29 | #define ATOMIC_OP(op, asm_op) \ |
30 | static inline void atomic_##op(int i, atomic_t *v) \ | 30 | static inline void arch_atomic_##op(int i, atomic_t *v) \ |
31 | { \ | 31 | { \ |
32 | register int w0 asm ("w0") = i; \ | 32 | register int w0 asm ("w0") = i; \ |
33 | register atomic_t *x1 asm ("x1") = v; \ | 33 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd) | |||
47 | #undef ATOMIC_OP | 47 | #undef ATOMIC_OP |
48 | 48 | ||
49 | #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ | 49 | #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ |
50 | static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ | 50 | static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \ |
51 | { \ | 51 | { \ |
52 | register int w0 asm ("w0") = i; \ | 52 | register int w0 asm ("w0") = i; \ |
53 | register atomic_t *x1 asm ("x1") = v; \ | 53 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd) | |||
79 | #undef ATOMIC_FETCH_OPS | 79 | #undef ATOMIC_FETCH_OPS |
80 | 80 | ||
81 | #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ | 81 | #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ |
82 | static inline int atomic_add_return##name(int i, atomic_t *v) \ | 82 | static inline int arch_atomic_add_return##name(int i, atomic_t *v) \ |
83 | { \ | 83 | { \ |
84 | register int w0 asm ("w0") = i; \ | 84 | register int w0 asm ("w0") = i; \ |
85 | register atomic_t *x1 asm ("x1") = v; \ | 85 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") | |||
105 | 105 | ||
106 | #undef ATOMIC_OP_ADD_RETURN | 106 | #undef ATOMIC_OP_ADD_RETURN |
107 | 107 | ||
108 | static inline void atomic_and(int i, atomic_t *v) | 108 | static inline void arch_atomic_and(int i, atomic_t *v) |
109 | { | 109 | { |
110 | register int w0 asm ("w0") = i; | 110 | register int w0 asm ("w0") = i; |
111 | register atomic_t *x1 asm ("x1") = v; | 111 | register atomic_t *x1 asm ("x1") = v; |
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v) | |||
123 | } | 123 | } |
124 | 124 | ||
125 | #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ | 125 | #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ |
126 | static inline int atomic_fetch_and##name(int i, atomic_t *v) \ | 126 | static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \ |
127 | { \ | 127 | { \ |
128 | register int w0 asm ("w0") = i; \ | 128 | register int w0 asm ("w0") = i; \ |
129 | register atomic_t *x1 asm ("x1") = v; \ | 129 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") | |||
149 | 149 | ||
150 | #undef ATOMIC_FETCH_OP_AND | 150 | #undef ATOMIC_FETCH_OP_AND |
151 | 151 | ||
152 | static inline void atomic_sub(int i, atomic_t *v) | 152 | static inline void arch_atomic_sub(int i, atomic_t *v) |
153 | { | 153 | { |
154 | register int w0 asm ("w0") = i; | 154 | register int w0 asm ("w0") = i; |
155 | register atomic_t *x1 asm ("x1") = v; | 155 | register atomic_t *x1 asm ("x1") = v; |
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
167 | } | 167 | } |
168 | 168 | ||
169 | #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ | 169 | #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ |
170 | static inline int atomic_sub_return##name(int i, atomic_t *v) \ | 170 | static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \ |
171 | { \ | 171 | { \ |
172 | register int w0 asm ("w0") = i; \ | 172 | register int w0 asm ("w0") = i; \ |
173 | register atomic_t *x1 asm ("x1") = v; \ | 173 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") | |||
195 | #undef ATOMIC_OP_SUB_RETURN | 195 | #undef ATOMIC_OP_SUB_RETURN |
196 | 196 | ||
197 | #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ | 197 | #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ |
198 | static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ | 198 | static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \ |
199 | { \ | 199 | { \ |
200 | register int w0 asm ("w0") = i; \ | 200 | register int w0 asm ("w0") = i; \ |
201 | register atomic_t *x1 asm ("x1") = v; \ | 201 | register atomic_t *x1 asm ("x1") = v; \ |
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") | |||
222 | #undef ATOMIC_FETCH_OP_SUB | 222 | #undef ATOMIC_FETCH_OP_SUB |
223 | #undef __LL_SC_ATOMIC | 223 | #undef __LL_SC_ATOMIC |
224 | 224 | ||
225 | #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) | 225 | #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op) |
226 | #define ATOMIC64_OP(op, asm_op) \ | 226 | #define ATOMIC64_OP(op, asm_op) \ |
227 | static inline void atomic64_##op(long i, atomic64_t *v) \ | 227 | static inline void arch_atomic64_##op(long i, atomic64_t *v) \ |
228 | { \ | 228 | { \ |
229 | register long x0 asm ("x0") = i; \ | 229 | register long x0 asm ("x0") = i; \ |
230 | register atomic64_t *x1 asm ("x1") = v; \ | 230 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd) | |||
244 | #undef ATOMIC64_OP | 244 | #undef ATOMIC64_OP |
245 | 245 | ||
246 | #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ | 246 | #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ |
247 | static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ | 247 | static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\ |
248 | { \ | 248 | { \ |
249 | register long x0 asm ("x0") = i; \ | 249 | register long x0 asm ("x0") = i; \ |
250 | register atomic64_t *x1 asm ("x1") = v; \ | 250 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd) | |||
276 | #undef ATOMIC64_FETCH_OPS | 276 | #undef ATOMIC64_FETCH_OPS |
277 | 277 | ||
278 | #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ | 278 | #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ |
279 | static inline long atomic64_add_return##name(long i, atomic64_t *v) \ | 279 | static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\ |
280 | { \ | 280 | { \ |
281 | register long x0 asm ("x0") = i; \ | 281 | register long x0 asm ("x0") = i; \ |
282 | register atomic64_t *x1 asm ("x1") = v; \ | 282 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") | |||
302 | 302 | ||
303 | #undef ATOMIC64_OP_ADD_RETURN | 303 | #undef ATOMIC64_OP_ADD_RETURN |
304 | 304 | ||
305 | static inline void atomic64_and(long i, atomic64_t *v) | 305 | static inline void arch_atomic64_and(long i, atomic64_t *v) |
306 | { | 306 | { |
307 | register long x0 asm ("x0") = i; | 307 | register long x0 asm ("x0") = i; |
308 | register atomic64_t *x1 asm ("x1") = v; | 308 | register atomic64_t *x1 asm ("x1") = v; |
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v) | |||
320 | } | 320 | } |
321 | 321 | ||
322 | #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ | 322 | #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ |
323 | static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ | 323 | static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \ |
324 | { \ | 324 | { \ |
325 | register long x0 asm ("x0") = i; \ | 325 | register long x0 asm ("x0") = i; \ |
326 | register atomic64_t *x1 asm ("x1") = v; \ | 326 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") | |||
346 | 346 | ||
347 | #undef ATOMIC64_FETCH_OP_AND | 347 | #undef ATOMIC64_FETCH_OP_AND |
348 | 348 | ||
349 | static inline void atomic64_sub(long i, atomic64_t *v) | 349 | static inline void arch_atomic64_sub(long i, atomic64_t *v) |
350 | { | 350 | { |
351 | register long x0 asm ("x0") = i; | 351 | register long x0 asm ("x0") = i; |
352 | register atomic64_t *x1 asm ("x1") = v; | 352 | register atomic64_t *x1 asm ("x1") = v; |
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) | |||
364 | } | 364 | } |
365 | 365 | ||
366 | #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ | 366 | #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ |
367 | static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ | 367 | static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\ |
368 | { \ | 368 | { \ |
369 | register long x0 asm ("x0") = i; \ | 369 | register long x0 asm ("x0") = i; \ |
370 | register atomic64_t *x1 asm ("x1") = v; \ | 370 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") | |||
392 | #undef ATOMIC64_OP_SUB_RETURN | 392 | #undef ATOMIC64_OP_SUB_RETURN |
393 | 393 | ||
394 | #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ | 394 | #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ |
395 | static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ | 395 | static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \ |
396 | { \ | 396 | { \ |
397 | register long x0 asm ("x0") = i; \ | 397 | register long x0 asm ("x0") = i; \ |
398 | register atomic64_t *x1 asm ("x1") = v; \ | 398 | register atomic64_t *x1 asm ("x1") = v; \ |
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") | |||
418 | 418 | ||
419 | #undef ATOMIC64_FETCH_OP_SUB | 419 | #undef ATOMIC64_FETCH_OP_SUB |
420 | 420 | ||
421 | static inline long atomic64_dec_if_positive(atomic64_t *v) | 421 | static inline long arch_atomic64_dec_if_positive(atomic64_t *v) |
422 | { | 422 | { |
423 | register long x0 asm ("x0") = (long)v; | 423 | register long x0 asm ("x0") = (long)v; |
424 | 424 | ||
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3f9376f1c409..e6ea0f42e097 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb) | |||
110 | }) | 110 | }) |
111 | 111 | ||
112 | /* xchg */ | 112 | /* xchg */ |
113 | #define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) | 113 | #define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) |
114 | #define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) | 114 | #define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) |
115 | #define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) | 115 | #define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) |
116 | #define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) | 116 | #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) |
117 | 117 | ||
118 | #define __CMPXCHG_GEN(sfx) \ | 118 | #define __CMPXCHG_GEN(sfx) \ |
119 | static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ | 119 | static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ |
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb) | |||
154 | }) | 154 | }) |
155 | 155 | ||
156 | /* cmpxchg */ | 156 | /* cmpxchg */ |
157 | #define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) | 157 | #define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) |
158 | #define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) | 158 | #define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) |
159 | #define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) | 159 | #define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) |
160 | #define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__) | 160 | #define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__) |
161 | #define cmpxchg_local cmpxchg_relaxed | 161 | #define arch_cmpxchg_local arch_cmpxchg_relaxed |
162 | 162 | ||
163 | /* cmpxchg64 */ | 163 | /* cmpxchg64 */ |
164 | #define cmpxchg64_relaxed cmpxchg_relaxed | 164 | #define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed |
165 | #define cmpxchg64_acquire cmpxchg_acquire | 165 | #define arch_cmpxchg64_acquire arch_cmpxchg_acquire |
166 | #define cmpxchg64_release cmpxchg_release | 166 | #define arch_cmpxchg64_release arch_cmpxchg_release |
167 | #define cmpxchg64 cmpxchg | 167 | #define arch_cmpxchg64 arch_cmpxchg |
168 | #define cmpxchg64_local cmpxchg_local | 168 | #define arch_cmpxchg64_local arch_cmpxchg_local |
169 | 169 | ||
170 | /* cmpxchg_double */ | 170 | /* cmpxchg_double */ |
171 | #define system_has_cmpxchg_double() 1 | 171 | #define system_has_cmpxchg_double() 1 |
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb) | |||
177 | VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ | 177 | VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ |
178 | }) | 178 | }) |
179 | 179 | ||
180 | #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ | 180 | #define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ |
181 | ({\ | 181 | ({ \ |
182 | int __ret;\ | 182 | int __ret; \ |
183 | __cmpxchg_double_check(ptr1, ptr2); \ | 183 | __cmpxchg_double_check(ptr1, ptr2); \ |
184 | __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ | 184 | __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ |
185 | (unsigned long)(n1), (unsigned long)(n2), \ | 185 | (unsigned long)(n1), (unsigned long)(n2), \ |
186 | ptr1); \ | 186 | ptr1); \ |
187 | __ret; \ | 187 | __ret; \ |
188 | }) | 188 | }) |
189 | 189 | ||
190 | #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ | 190 | #define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ |
191 | ({\ | 191 | ({ \ |
192 | int __ret;\ | 192 | int __ret; \ |
193 | __cmpxchg_double_check(ptr1, ptr2); \ | 193 | __cmpxchg_double_check(ptr1, ptr2); \ |
194 | __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ | 194 | __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ |
195 | (unsigned long)(n1), (unsigned long)(n2), \ | 195 | (unsigned long)(n1), (unsigned long)(n2), \ |
196 | ptr1); \ | 196 | ptr1); \ |
197 | __ret; \ | 197 | __ret; \ |
198 | }) | 198 | }) |
199 | 199 | ||
200 | #define __CMPWAIT_CASE(w, sfx, sz) \ | 200 | #define __CMPWAIT_CASE(w, sfx, sz) \ |
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
index eee31a9f72a5..e9c1a02c2154 100644
--- a/arch/arm64/include/asm/sync_bitops.h
+++ b/arch/arm64/include/asm/sync_bitops.h
@@ -15,13 +15,13 @@ | |||
15 | * ops which are SMP safe even on a UP kernel. | 15 | * ops which are SMP safe even on a UP kernel. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define sync_set_bit(nr, p) set_bit(nr, p) | 18 | #define sync_set_bit(nr, p) set_bit(nr, p) |
19 | #define sync_clear_bit(nr, p) clear_bit(nr, p) | 19 | #define sync_clear_bit(nr, p) clear_bit(nr, p) |
20 | #define sync_change_bit(nr, p) change_bit(nr, p) | 20 | #define sync_change_bit(nr, p) change_bit(nr, p) |
21 | #define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) | 21 | #define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) |
22 | #define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) | 22 | #define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) |
23 | #define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) | 23 | #define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) |
24 | #define sync_test_bit(nr, addr) test_bit(nr, addr) | 24 | #define sync_test_bit(nr, addr) test_bit(nr, addr) |
25 | #define sync_cmpxchg cmpxchg | 25 | #define arch_sync_cmpxchg arch_cmpxchg |
26 | 26 | ||
27 | #endif | 27 | #endif |
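The remaining hunks replace include/asm-generic/atomic-instrumented.h with a version generated by scripts/atomic/gen-atomic-instrumented.sh. As the file's own comment below explains, each architecture supplies the arch_-prefixed operations (arch_atomic_read(), arch_atomic_cmpxchg(), ...) and the generated header wraps each of them with a KASAN check before exposing the plain atomic_*() name. A minimal, hand-written C sketch of that wrapping pattern (illustrative only; the real header is generated and covers every operation and ordering variant):

```c
#include <linux/kasan-checks.h>

/*
 * Illustrative sketch, not the generated file.  The architecture provides
 * the raw operation under the arch_ prefix, e.g. on arm64:
 *
 *     #define arch_atomic_read(v)	READ_ONCE((v)->counter)
 *
 * The generated wrapper then checks the access with KASAN and forwards
 * to the arch_ implementation:
 */
static inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}
#define atomic_read atomic_read
```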
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 0d4b1d3dbc1e..b8f5b35216e1 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | // Generated by scripts/atomic/gen-atomic-instrumented.sh | ||
4 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
5 | |||
1 | /* | 6 | /* |
2 | * This file provides wrappers with KASAN instrumentation for atomic operations. | 7 | * This file provides wrappers with KASAN instrumentation for atomic operations. |
3 | * To use this functionality an arch's atomic.h file needs to define all | 8 | * To use this functionality an arch's atomic.h file needs to define all |
@@ -9,459 +14,1774 @@ | |||
9 | * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid | 14 | * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid |
10 | * double instrumentation. | 15 | * double instrumentation. |
11 | */ | 16 | */ |
12 | 17 | #ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H | |
13 | #ifndef _LINUX_ATOMIC_INSTRUMENTED_H | 18 | #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H |
14 | #define _LINUX_ATOMIC_INSTRUMENTED_H | ||
15 | 19 | ||
16 | #include <linux/build_bug.h> | 20 | #include <linux/build_bug.h> |
17 | #include <linux/kasan-checks.h> | 21 | #include <linux/kasan-checks.h> |
18 | 22 | ||
19 | static __always_inline int atomic_read(const atomic_t *v) | 23 | static inline int |
24 | atomic_read(const atomic_t *v) | ||
20 | { | 25 | { |
21 | kasan_check_read(v, sizeof(*v)); | 26 | kasan_check_read(v, sizeof(*v)); |
22 | return arch_atomic_read(v); | 27 | return arch_atomic_read(v); |
23 | } | 28 | } |
29 | #define atomic_read atomic_read | ||
24 | 30 | ||
25 | static __always_inline s64 atomic64_read(const atomic64_t *v) | 31 | #if defined(arch_atomic_read_acquire) |
32 | static inline int | ||
33 | atomic_read_acquire(const atomic_t *v) | ||
26 | { | 34 | { |
27 | kasan_check_read(v, sizeof(*v)); | 35 | kasan_check_read(v, sizeof(*v)); |
28 | return arch_atomic64_read(v); | 36 | return arch_atomic_read_acquire(v); |
29 | } | 37 | } |
38 | #define atomic_read_acquire atomic_read_acquire | ||
39 | #endif | ||
30 | 40 | ||
31 | static __always_inline void atomic_set(atomic_t *v, int i) | 41 | static inline void |
42 | atomic_set(atomic_t *v, int i) | ||
32 | { | 43 | { |
33 | kasan_check_write(v, sizeof(*v)); | 44 | kasan_check_write(v, sizeof(*v)); |
34 | arch_atomic_set(v, i); | 45 | arch_atomic_set(v, i); |
35 | } | 46 | } |
47 | #define atomic_set atomic_set | ||
36 | 48 | ||
37 | static __always_inline void atomic64_set(atomic64_t *v, s64 i) | 49 | #if defined(arch_atomic_set_release) |
50 | static inline void | ||
51 | atomic_set_release(atomic_t *v, int i) | ||
38 | { | 52 | { |
39 | kasan_check_write(v, sizeof(*v)); | 53 | kasan_check_write(v, sizeof(*v)); |
40 | arch_atomic64_set(v, i); | 54 | arch_atomic_set_release(v, i); |
41 | } | 55 | } |
56 | #define atomic_set_release atomic_set_release | ||
57 | #endif | ||
42 | 58 | ||
43 | static __always_inline int atomic_xchg(atomic_t *v, int i) | 59 | static inline void |
60 | atomic_add(int i, atomic_t *v) | ||
44 | { | 61 | { |
45 | kasan_check_write(v, sizeof(*v)); | 62 | kasan_check_write(v, sizeof(*v)); |
46 | return arch_atomic_xchg(v, i); | 63 | arch_atomic_add(i, v); |
47 | } | 64 | } |
65 | #define atomic_add atomic_add | ||
48 | 66 | ||
49 | static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) | 67 | #if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) |
68 | static inline int | ||
69 | atomic_add_return(int i, atomic_t *v) | ||
50 | { | 70 | { |
51 | kasan_check_write(v, sizeof(*v)); | 71 | kasan_check_write(v, sizeof(*v)); |
52 | return arch_atomic64_xchg(v, i); | 72 | return arch_atomic_add_return(i, v); |
53 | } | 73 | } |
74 | #define atomic_add_return atomic_add_return | ||
75 | #endif | ||
54 | 76 | ||
55 | static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) | 77 | #if defined(arch_atomic_add_return_acquire) |
78 | static inline int | ||
79 | atomic_add_return_acquire(int i, atomic_t *v) | ||
56 | { | 80 | { |
57 | kasan_check_write(v, sizeof(*v)); | 81 | kasan_check_write(v, sizeof(*v)); |
58 | return arch_atomic_cmpxchg(v, old, new); | 82 | return arch_atomic_add_return_acquire(i, v); |
59 | } | 83 | } |
84 | #define atomic_add_return_acquire atomic_add_return_acquire | ||
85 | #endif | ||
60 | 86 | ||
61 | static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) | 87 | #if defined(arch_atomic_add_return_release) |
88 | static inline int | ||
89 | atomic_add_return_release(int i, atomic_t *v) | ||
62 | { | 90 | { |
63 | kasan_check_write(v, sizeof(*v)); | 91 | kasan_check_write(v, sizeof(*v)); |
64 | return arch_atomic64_cmpxchg(v, old, new); | 92 | return arch_atomic_add_return_release(i, v); |
65 | } | 93 | } |
94 | #define atomic_add_return_release atomic_add_return_release | ||
95 | #endif | ||
66 | 96 | ||
67 | #ifdef arch_atomic_try_cmpxchg | 97 | #if defined(arch_atomic_add_return_relaxed) |
68 | #define atomic_try_cmpxchg atomic_try_cmpxchg | 98 | static inline int |
69 | static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) | 99 | atomic_add_return_relaxed(int i, atomic_t *v) |
70 | { | 100 | { |
71 | kasan_check_write(v, sizeof(*v)); | 101 | kasan_check_write(v, sizeof(*v)); |
72 | kasan_check_read(old, sizeof(*old)); | 102 | return arch_atomic_add_return_relaxed(i, v); |
73 | return arch_atomic_try_cmpxchg(v, old, new); | ||
74 | } | 103 | } |
104 | #define atomic_add_return_relaxed atomic_add_return_relaxed | ||
75 | #endif | 105 | #endif |
76 | 106 | ||
77 | #ifdef arch_atomic64_try_cmpxchg | 107 | #if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) |
78 | #define atomic64_try_cmpxchg atomic64_try_cmpxchg | 108 | static inline int |
79 | static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) | 109 | atomic_fetch_add(int i, atomic_t *v) |
80 | { | 110 | { |
81 | kasan_check_write(v, sizeof(*v)); | 111 | kasan_check_write(v, sizeof(*v)); |
82 | kasan_check_read(old, sizeof(*old)); | 112 | return arch_atomic_fetch_add(i, v); |
83 | return arch_atomic64_try_cmpxchg(v, old, new); | ||
84 | } | 113 | } |
114 | #define atomic_fetch_add atomic_fetch_add | ||
85 | #endif | 115 | #endif |
86 | 116 | ||
87 | #ifdef arch_atomic_fetch_add_unless | 117 | #if defined(arch_atomic_fetch_add_acquire) |
88 | #define atomic_fetch_add_unless atomic_fetch_add_unless | 118 | static inline int |
89 | static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) | 119 | atomic_fetch_add_acquire(int i, atomic_t *v) |
90 | { | 120 | { |
91 | kasan_check_write(v, sizeof(*v)); | 121 | kasan_check_write(v, sizeof(*v)); |
92 | return arch_atomic_fetch_add_unless(v, a, u); | 122 | return arch_atomic_fetch_add_acquire(i, v); |
93 | } | 123 | } |
124 | #define atomic_fetch_add_acquire atomic_fetch_add_acquire | ||
94 | #endif | 125 | #endif |
95 | 126 | ||
96 | #ifdef arch_atomic64_fetch_add_unless | 127 | #if defined(arch_atomic_fetch_add_release) |
97 | #define atomic64_fetch_add_unless atomic64_fetch_add_unless | 128 | static inline int |
98 | static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) | 129 | atomic_fetch_add_release(int i, atomic_t *v) |
99 | { | 130 | { |
100 | kasan_check_write(v, sizeof(*v)); | 131 | kasan_check_write(v, sizeof(*v)); |
101 | return arch_atomic64_fetch_add_unless(v, a, u); | 132 | return arch_atomic_fetch_add_release(i, v); |
102 | } | 133 | } |
134 | #define atomic_fetch_add_release atomic_fetch_add_release | ||
103 | #endif | 135 | #endif |
104 | 136 | ||
105 | #ifdef arch_atomic_inc | 137 | #if defined(arch_atomic_fetch_add_relaxed) |
106 | #define atomic_inc atomic_inc | 138 | static inline int |
107 | static __always_inline void atomic_inc(atomic_t *v) | 139 | atomic_fetch_add_relaxed(int i, atomic_t *v) |
140 | { | ||
141 | kasan_check_write(v, sizeof(*v)); | ||
142 | return arch_atomic_fetch_add_relaxed(i, v); | ||
143 | } | ||
144 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | ||
145 | #endif | ||
146 | |||
147 | static inline void | ||
148 | atomic_sub(int i, atomic_t *v) | ||
149 | { | ||
150 | kasan_check_write(v, sizeof(*v)); | ||
151 | arch_atomic_sub(i, v); | ||
152 | } | ||
153 | #define atomic_sub atomic_sub | ||
154 | |||
155 | #if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) | ||
156 | static inline int | ||
157 | atomic_sub_return(int i, atomic_t *v) | ||
158 | { | ||
159 | kasan_check_write(v, sizeof(*v)); | ||
160 | return arch_atomic_sub_return(i, v); | ||
161 | } | ||
162 | #define atomic_sub_return atomic_sub_return | ||
163 | #endif | ||
164 | |||
165 | #if defined(arch_atomic_sub_return_acquire) | ||
166 | static inline int | ||
167 | atomic_sub_return_acquire(int i, atomic_t *v) | ||
168 | { | ||
169 | kasan_check_write(v, sizeof(*v)); | ||
170 | return arch_atomic_sub_return_acquire(i, v); | ||
171 | } | ||
172 | #define atomic_sub_return_acquire atomic_sub_return_acquire | ||
173 | #endif | ||
174 | |||
175 | #if defined(arch_atomic_sub_return_release) | ||
176 | static inline int | ||
177 | atomic_sub_return_release(int i, atomic_t *v) | ||
178 | { | ||
179 | kasan_check_write(v, sizeof(*v)); | ||
180 | return arch_atomic_sub_return_release(i, v); | ||
181 | } | ||
182 | #define atomic_sub_return_release atomic_sub_return_release | ||
183 | #endif | ||
184 | |||
185 | #if defined(arch_atomic_sub_return_relaxed) | ||
186 | static inline int | ||
187 | atomic_sub_return_relaxed(int i, atomic_t *v) | ||
188 | { | ||
189 | kasan_check_write(v, sizeof(*v)); | ||
190 | return arch_atomic_sub_return_relaxed(i, v); | ||
191 | } | ||
192 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed | ||
193 | #endif | ||
194 | |||
195 | #if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) | ||
196 | static inline int | ||
197 | atomic_fetch_sub(int i, atomic_t *v) | ||
198 | { | ||
199 | kasan_check_write(v, sizeof(*v)); | ||
200 | return arch_atomic_fetch_sub(i, v); | ||
201 | } | ||
202 | #define atomic_fetch_sub atomic_fetch_sub | ||
203 | #endif | ||
204 | |||
205 | #if defined(arch_atomic_fetch_sub_acquire) | ||
206 | static inline int | ||
207 | atomic_fetch_sub_acquire(int i, atomic_t *v) | ||
208 | { | ||
209 | kasan_check_write(v, sizeof(*v)); | ||
210 | return arch_atomic_fetch_sub_acquire(i, v); | ||
211 | } | ||
212 | #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire | ||
213 | #endif | ||
214 | |||
215 | #if defined(arch_atomic_fetch_sub_release) | ||
216 | static inline int | ||
217 | atomic_fetch_sub_release(int i, atomic_t *v) | ||
218 | { | ||
219 | kasan_check_write(v, sizeof(*v)); | ||
220 | return arch_atomic_fetch_sub_release(i, v); | ||
221 | } | ||
222 | #define atomic_fetch_sub_release atomic_fetch_sub_release | ||
223 | #endif | ||
224 | |||
225 | #if defined(arch_atomic_fetch_sub_relaxed) | ||
226 | static inline int | ||
227 | atomic_fetch_sub_relaxed(int i, atomic_t *v) | ||
228 | { | ||
229 | kasan_check_write(v, sizeof(*v)); | ||
230 | return arch_atomic_fetch_sub_relaxed(i, v); | ||
231 | } | ||
232 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
233 | #endif | ||
234 | |||
235 | #if defined(arch_atomic_inc) | ||
236 | static inline void | ||
237 | atomic_inc(atomic_t *v) | ||
108 | { | 238 | { |
109 | kasan_check_write(v, sizeof(*v)); | 239 | kasan_check_write(v, sizeof(*v)); |
110 | arch_atomic_inc(v); | 240 | arch_atomic_inc(v); |
111 | } | 241 | } |
242 | #define atomic_inc atomic_inc | ||
112 | #endif | 243 | #endif |
113 | 244 | ||
114 | #ifdef arch_atomic64_inc | 245 | #if defined(arch_atomic_inc_return) |
115 | #define atomic64_inc atomic64_inc | 246 | static inline int |
116 | static __always_inline void atomic64_inc(atomic64_t *v) | 247 | atomic_inc_return(atomic_t *v) |
117 | { | 248 | { |
118 | kasan_check_write(v, sizeof(*v)); | 249 | kasan_check_write(v, sizeof(*v)); |
119 | arch_atomic64_inc(v); | 250 | return arch_atomic_inc_return(v); |
120 | } | 251 | } |
252 | #define atomic_inc_return atomic_inc_return | ||
121 | #endif | 253 | #endif |
122 | 254 | ||
123 | #ifdef arch_atomic_dec | 255 | #if defined(arch_atomic_inc_return_acquire) |
124 | #define atomic_dec atomic_dec | 256 | static inline int |
125 | static __always_inline void atomic_dec(atomic_t *v) | 257 | atomic_inc_return_acquire(atomic_t *v) |
258 | { | ||
259 | kasan_check_write(v, sizeof(*v)); | ||
260 | return arch_atomic_inc_return_acquire(v); | ||
261 | } | ||
262 | #define atomic_inc_return_acquire atomic_inc_return_acquire | ||
263 | #endif | ||
264 | |||
265 | #if defined(arch_atomic_inc_return_release) | ||
266 | static inline int | ||
267 | atomic_inc_return_release(atomic_t *v) | ||
268 | { | ||
269 | kasan_check_write(v, sizeof(*v)); | ||
270 | return arch_atomic_inc_return_release(v); | ||
271 | } | ||
272 | #define atomic_inc_return_release atomic_inc_return_release | ||
273 | #endif | ||
274 | |||
275 | #if defined(arch_atomic_inc_return_relaxed) | ||
276 | static inline int | ||
277 | atomic_inc_return_relaxed(atomic_t *v) | ||
278 | { | ||
279 | kasan_check_write(v, sizeof(*v)); | ||
280 | return arch_atomic_inc_return_relaxed(v); | ||
281 | } | ||
282 | #define atomic_inc_return_relaxed atomic_inc_return_relaxed | ||
283 | #endif | ||
284 | |||
285 | #if defined(arch_atomic_fetch_inc) | ||
286 | static inline int | ||
287 | atomic_fetch_inc(atomic_t *v) | ||
288 | { | ||
289 | kasan_check_write(v, sizeof(*v)); | ||
290 | return arch_atomic_fetch_inc(v); | ||
291 | } | ||
292 | #define atomic_fetch_inc atomic_fetch_inc | ||
293 | #endif | ||
294 | |||
295 | #if defined(arch_atomic_fetch_inc_acquire) | ||
296 | static inline int | ||
297 | atomic_fetch_inc_acquire(atomic_t *v) | ||
298 | { | ||
299 | kasan_check_write(v, sizeof(*v)); | ||
300 | return arch_atomic_fetch_inc_acquire(v); | ||
301 | } | ||
302 | #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire | ||
303 | #endif | ||
304 | |||
305 | #if defined(arch_atomic_fetch_inc_release) | ||
306 | static inline int | ||
307 | atomic_fetch_inc_release(atomic_t *v) | ||
308 | { | ||
309 | kasan_check_write(v, sizeof(*v)); | ||
310 | return arch_atomic_fetch_inc_release(v); | ||
311 | } | ||
312 | #define atomic_fetch_inc_release atomic_fetch_inc_release | ||
313 | #endif | ||
314 | |||
315 | #if defined(arch_atomic_fetch_inc_relaxed) | ||
316 | static inline int | ||
317 | atomic_fetch_inc_relaxed(atomic_t *v) | ||
318 | { | ||
319 | kasan_check_write(v, sizeof(*v)); | ||
320 | return arch_atomic_fetch_inc_relaxed(v); | ||
321 | } | ||
322 | #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed | ||
323 | #endif | ||
324 | |||
325 | #if defined(arch_atomic_dec) | ||
326 | static inline void | ||
327 | atomic_dec(atomic_t *v) | ||
126 | { | 328 | { |
127 | kasan_check_write(v, sizeof(*v)); | 329 | kasan_check_write(v, sizeof(*v)); |
128 | arch_atomic_dec(v); | 330 | arch_atomic_dec(v); |
129 | } | 331 | } |
332 | #define atomic_dec atomic_dec | ||
130 | #endif | 333 | #endif |
131 | 334 | ||
132 | #ifdef atch_atomic64_dec | 335 | #if defined(arch_atomic_dec_return) |
133 | #define atomic64_dec | 336 | static inline int |
134 | static __always_inline void atomic64_dec(atomic64_t *v) | 337 | atomic_dec_return(atomic_t *v) |
135 | { | 338 | { |
136 | kasan_check_write(v, sizeof(*v)); | 339 | kasan_check_write(v, sizeof(*v)); |
137 | arch_atomic64_dec(v); | 340 | return arch_atomic_dec_return(v); |
138 | } | 341 | } |
342 | #define atomic_dec_return atomic_dec_return | ||
139 | #endif | 343 | #endif |
140 | 344 | ||
141 | static __always_inline void atomic_add(int i, atomic_t *v) | 345 | #if defined(arch_atomic_dec_return_acquire) |
346 | static inline int | ||
347 | atomic_dec_return_acquire(atomic_t *v) | ||
142 | { | 348 | { |
143 | kasan_check_write(v, sizeof(*v)); | 349 | kasan_check_write(v, sizeof(*v)); |
144 | arch_atomic_add(i, v); | 350 | return arch_atomic_dec_return_acquire(v); |
145 | } | 351 | } |
352 | #define atomic_dec_return_acquire atomic_dec_return_acquire | ||
353 | #endif | ||
146 | 354 | ||
147 | static __always_inline void atomic64_add(s64 i, atomic64_t *v) | 355 | #if defined(arch_atomic_dec_return_release) |
356 | static inline int | ||
357 | atomic_dec_return_release(atomic_t *v) | ||
148 | { | 358 | { |
149 | kasan_check_write(v, sizeof(*v)); | 359 | kasan_check_write(v, sizeof(*v)); |
150 | arch_atomic64_add(i, v); | 360 | return arch_atomic_dec_return_release(v); |
151 | } | 361 | } |
362 | #define atomic_dec_return_release atomic_dec_return_release | ||
363 | #endif | ||
152 | 364 | ||
153 | static __always_inline void atomic_sub(int i, atomic_t *v) | 365 | #if defined(arch_atomic_dec_return_relaxed) |
366 | static inline int | ||
367 | atomic_dec_return_relaxed(atomic_t *v) | ||
154 | { | 368 | { |
155 | kasan_check_write(v, sizeof(*v)); | 369 | kasan_check_write(v, sizeof(*v)); |
156 | arch_atomic_sub(i, v); | 370 | return arch_atomic_dec_return_relaxed(v); |
157 | } | 371 | } |
372 | #define atomic_dec_return_relaxed atomic_dec_return_relaxed | ||
373 | #endif | ||
158 | 374 | ||
159 | static __always_inline void atomic64_sub(s64 i, atomic64_t *v) | 375 | #if defined(arch_atomic_fetch_dec) |
376 | static inline int | ||
377 | atomic_fetch_dec(atomic_t *v) | ||
160 | { | 378 | { |
161 | kasan_check_write(v, sizeof(*v)); | 379 | kasan_check_write(v, sizeof(*v)); |
162 | arch_atomic64_sub(i, v); | 380 | return arch_atomic_fetch_dec(v); |
163 | } | 381 | } |
382 | #define atomic_fetch_dec atomic_fetch_dec | ||
383 | #endif | ||
164 | 384 | ||
165 | static __always_inline void atomic_and(int i, atomic_t *v) | 385 | #if defined(arch_atomic_fetch_dec_acquire) |
386 | static inline int | ||
387 | atomic_fetch_dec_acquire(atomic_t *v) | ||
388 | { | ||
389 | kasan_check_write(v, sizeof(*v)); | ||
390 | return arch_atomic_fetch_dec_acquire(v); | ||
391 | } | ||
392 | #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire | ||
393 | #endif | ||
394 | |||
395 | #if defined(arch_atomic_fetch_dec_release) | ||
396 | static inline int | ||
397 | atomic_fetch_dec_release(atomic_t *v) | ||
398 | { | ||
399 | kasan_check_write(v, sizeof(*v)); | ||
400 | return arch_atomic_fetch_dec_release(v); | ||
401 | } | ||
402 | #define atomic_fetch_dec_release atomic_fetch_dec_release | ||
403 | #endif | ||
404 | |||
405 | #if defined(arch_atomic_fetch_dec_relaxed) | ||
406 | static inline int | ||
407 | atomic_fetch_dec_relaxed(atomic_t *v) | ||
408 | { | ||
409 | kasan_check_write(v, sizeof(*v)); | ||
410 | return arch_atomic_fetch_dec_relaxed(v); | ||
411 | } | ||
412 | #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed | ||
413 | #endif | ||
414 | |||
415 | static inline void | ||
416 | atomic_and(int i, atomic_t *v) | ||
166 | { | 417 | { |
167 | kasan_check_write(v, sizeof(*v)); | 418 | kasan_check_write(v, sizeof(*v)); |
168 | arch_atomic_and(i, v); | 419 | arch_atomic_and(i, v); |
169 | } | 420 | } |
421 | #define atomic_and atomic_and | ||
170 | 422 | ||
171 | static __always_inline void atomic64_and(s64 i, atomic64_t *v) | 423 | #if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) |
424 | static inline int | ||
425 | atomic_fetch_and(int i, atomic_t *v) | ||
172 | { | 426 | { |
173 | kasan_check_write(v, sizeof(*v)); | 427 | kasan_check_write(v, sizeof(*v)); |
174 | arch_atomic64_and(i, v); | 428 | return arch_atomic_fetch_and(i, v); |
429 | } | ||
430 | #define atomic_fetch_and atomic_fetch_and | ||
431 | #endif | ||
432 | |||
433 | #if defined(arch_atomic_fetch_and_acquire) | ||
434 | static inline int | ||
435 | atomic_fetch_and_acquire(int i, atomic_t *v) | ||
436 | { | ||
437 | kasan_check_write(v, sizeof(*v)); | ||
438 | return arch_atomic_fetch_and_acquire(i, v); | ||
439 | } | ||
440 | #define atomic_fetch_and_acquire atomic_fetch_and_acquire | ||
441 | #endif | ||
442 | |||
443 | #if defined(arch_atomic_fetch_and_release) | ||
444 | static inline int | ||
445 | atomic_fetch_and_release(int i, atomic_t *v) | ||
446 | { | ||
447 | kasan_check_write(v, sizeof(*v)); | ||
448 | return arch_atomic_fetch_and_release(i, v); | ||
449 | } | ||
450 | #define atomic_fetch_and_release atomic_fetch_and_release | ||
451 | #endif | ||
452 | |||
453 | #if defined(arch_atomic_fetch_and_relaxed) | ||
454 | static inline int | ||
455 | atomic_fetch_and_relaxed(int i, atomic_t *v) | ||
456 | { | ||
457 | kasan_check_write(v, sizeof(*v)); | ||
458 | return arch_atomic_fetch_and_relaxed(i, v); | ||
175 | } | 459 | } |
460 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
461 | #endif | ||
462 | |||
463 | #if defined(arch_atomic_andnot) | ||
464 | static inline void | ||
465 | atomic_andnot(int i, atomic_t *v) | ||
466 | { | ||
467 | kasan_check_write(v, sizeof(*v)); | ||
468 | arch_atomic_andnot(i, v); | ||
469 | } | ||
470 | #define atomic_andnot atomic_andnot | ||
471 | #endif | ||
176 | 472 | ||
177 | static __always_inline void atomic_or(int i, atomic_t *v) | 473 | #if defined(arch_atomic_fetch_andnot) |
474 | static inline int | ||
475 | atomic_fetch_andnot(int i, atomic_t *v) | ||
476 | { | ||
477 | kasan_check_write(v, sizeof(*v)); | ||
478 | return arch_atomic_fetch_andnot(i, v); | ||
479 | } | ||
480 | #define atomic_fetch_andnot atomic_fetch_andnot | ||
481 | #endif | ||
482 | |||
483 | #if defined(arch_atomic_fetch_andnot_acquire) | ||
484 | static inline int | ||
485 | atomic_fetch_andnot_acquire(int i, atomic_t *v) | ||
486 | { | ||
487 | kasan_check_write(v, sizeof(*v)); | ||
488 | return arch_atomic_fetch_andnot_acquire(i, v); | ||
489 | } | ||
490 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire | ||
491 | #endif | ||
492 | |||
493 | #if defined(arch_atomic_fetch_andnot_release) | ||
494 | static inline int | ||
495 | atomic_fetch_andnot_release(int i, atomic_t *v) | ||
496 | { | ||
497 | kasan_check_write(v, sizeof(*v)); | ||
498 | return arch_atomic_fetch_andnot_release(i, v); | ||
499 | } | ||
500 | #define atomic_fetch_andnot_release atomic_fetch_andnot_release | ||
501 | #endif | ||
502 | |||
503 | #if defined(arch_atomic_fetch_andnot_relaxed) | ||
504 | static inline int | ||
505 | atomic_fetch_andnot_relaxed(int i, atomic_t *v) | ||
506 | { | ||
507 | kasan_check_write(v, sizeof(*v)); | ||
508 | return arch_atomic_fetch_andnot_relaxed(i, v); | ||
509 | } | ||
510 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | ||
511 | #endif | ||
512 | |||
513 | static inline void | ||
514 | atomic_or(int i, atomic_t *v) | ||
178 | { | 515 | { |
179 | kasan_check_write(v, sizeof(*v)); | 516 | kasan_check_write(v, sizeof(*v)); |
180 | arch_atomic_or(i, v); | 517 | arch_atomic_or(i, v); |
181 | } | 518 | } |
519 | #define atomic_or atomic_or | ||
182 | 520 | ||
183 | static __always_inline void atomic64_or(s64 i, atomic64_t *v) | 521 | #if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) |
522 | static inline int | ||
523 | atomic_fetch_or(int i, atomic_t *v) | ||
184 | { | 524 | { |
185 | kasan_check_write(v, sizeof(*v)); | 525 | kasan_check_write(v, sizeof(*v)); |
186 | arch_atomic64_or(i, v); | 526 | return arch_atomic_fetch_or(i, v); |
527 | } | ||
528 | #define atomic_fetch_or atomic_fetch_or | ||
529 | #endif | ||
530 | |||
531 | #if defined(arch_atomic_fetch_or_acquire) | ||
532 | static inline int | ||
533 | atomic_fetch_or_acquire(int i, atomic_t *v) | ||
534 | { | ||
535 | kasan_check_write(v, sizeof(*v)); | ||
536 | return arch_atomic_fetch_or_acquire(i, v); | ||
537 | } | ||
538 | #define atomic_fetch_or_acquire atomic_fetch_or_acquire | ||
539 | #endif | ||
540 | |||
541 | #if defined(arch_atomic_fetch_or_release) | ||
542 | static inline int | ||
543 | atomic_fetch_or_release(int i, atomic_t *v) | ||
544 | { | ||
545 | kasan_check_write(v, sizeof(*v)); | ||
546 | return arch_atomic_fetch_or_release(i, v); | ||
547 | } | ||
548 | #define atomic_fetch_or_release atomic_fetch_or_release | ||
549 | #endif | ||
550 | |||
551 | #if defined(arch_atomic_fetch_or_relaxed) | ||
552 | static inline int | ||
553 | atomic_fetch_or_relaxed(int i, atomic_t *v) | ||
554 | { | ||
555 | kasan_check_write(v, sizeof(*v)); | ||
556 | return arch_atomic_fetch_or_relaxed(i, v); | ||
187 | } | 557 | } |
558 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
559 | #endif | ||
188 | 560 | ||
189 | static __always_inline void atomic_xor(int i, atomic_t *v) | 561 | static inline void |
562 | atomic_xor(int i, atomic_t *v) | ||
190 | { | 563 | { |
191 | kasan_check_write(v, sizeof(*v)); | 564 | kasan_check_write(v, sizeof(*v)); |
192 | arch_atomic_xor(i, v); | 565 | arch_atomic_xor(i, v); |
193 | } | 566 | } |
567 | #define atomic_xor atomic_xor | ||
194 | 568 | ||
195 | static __always_inline void atomic64_xor(s64 i, atomic64_t *v) | 569 | #if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) |
570 | static inline int | ||
571 | atomic_fetch_xor(int i, atomic_t *v) | ||
196 | { | 572 | { |
197 | kasan_check_write(v, sizeof(*v)); | 573 | kasan_check_write(v, sizeof(*v)); |
198 | arch_atomic64_xor(i, v); | 574 | return arch_atomic_fetch_xor(i, v); |
199 | } | 575 | } |
576 | #define atomic_fetch_xor atomic_fetch_xor | ||
577 | #endif | ||
200 | 578 | ||
201 | #ifdef arch_atomic_inc_return | 579 | #if defined(arch_atomic_fetch_xor_acquire) |
202 | #define atomic_inc_return atomic_inc_return | 580 | static inline int |
203 | static __always_inline int atomic_inc_return(atomic_t *v) | 581 | atomic_fetch_xor_acquire(int i, atomic_t *v) |
204 | { | 582 | { |
205 | kasan_check_write(v, sizeof(*v)); | 583 | kasan_check_write(v, sizeof(*v)); |
206 | return arch_atomic_inc_return(v); | 584 | return arch_atomic_fetch_xor_acquire(i, v); |
207 | } | 585 | } |
586 | #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire | ||
208 | #endif | 587 | #endif |
209 | 588 | ||
210 | #ifdef arch_atomic64_inc_return | 589 | #if defined(arch_atomic_fetch_xor_release) |
211 | #define atomic64_inc_return atomic64_inc_return | 590 | static inline int |
212 | static __always_inline s64 atomic64_inc_return(atomic64_t *v) | 591 | atomic_fetch_xor_release(int i, atomic_t *v) |
213 | { | 592 | { |
214 | kasan_check_write(v, sizeof(*v)); | 593 | kasan_check_write(v, sizeof(*v)); |
215 | return arch_atomic64_inc_return(v); | 594 | return arch_atomic_fetch_xor_release(i, v); |
216 | } | 595 | } |
596 | #define atomic_fetch_xor_release atomic_fetch_xor_release | ||
217 | #endif | 597 | #endif |
218 | 598 | ||
219 | #ifdef arch_atomic_dec_return | 599 | #if defined(arch_atomic_fetch_xor_relaxed) |
220 | #define atomic_dec_return atomic_dec_return | 600 | static inline int |
221 | static __always_inline int atomic_dec_return(atomic_t *v) | 601 | atomic_fetch_xor_relaxed(int i, atomic_t *v) |
222 | { | 602 | { |
223 | kasan_check_write(v, sizeof(*v)); | 603 | kasan_check_write(v, sizeof(*v)); |
224 | return arch_atomic_dec_return(v); | 604 | return arch_atomic_fetch_xor_relaxed(i, v); |
225 | } | 605 | } |
606 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
226 | #endif | 607 | #endif |
227 | 608 | ||
228 | #ifdef arch_atomic64_dec_return | 609 | #if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) |
229 | #define atomic64_dec_return atomic64_dec_return | 610 | static inline int |
230 | static __always_inline s64 atomic64_dec_return(atomic64_t *v) | 611 | atomic_xchg(atomic_t *v, int i) |
231 | { | 612 | { |
232 | kasan_check_write(v, sizeof(*v)); | 613 | kasan_check_write(v, sizeof(*v)); |
233 | return arch_atomic64_dec_return(v); | 614 | return arch_atomic_xchg(v, i); |
234 | } | 615 | } |
616 | #define atomic_xchg atomic_xchg | ||
235 | #endif | 617 | #endif |
236 | 618 | ||
237 | #ifdef arch_atomic64_inc_not_zero | 619 | #if defined(arch_atomic_xchg_acquire) |
238 | #define atomic64_inc_not_zero atomic64_inc_not_zero | 620 | static inline int |
239 | static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) | 621 | atomic_xchg_acquire(atomic_t *v, int i) |
240 | { | 622 | { |
241 | kasan_check_write(v, sizeof(*v)); | 623 | kasan_check_write(v, sizeof(*v)); |
242 | return arch_atomic64_inc_not_zero(v); | 624 | return arch_atomic_xchg_acquire(v, i); |
243 | } | 625 | } |
626 | #define atomic_xchg_acquire atomic_xchg_acquire | ||
244 | #endif | 627 | #endif |
245 | 628 | ||
246 | #ifdef arch_atomic64_dec_if_positive | 629 | #if defined(arch_atomic_xchg_release) |
247 | #define atomic64_dec_if_positive atomic64_dec_if_positive | 630 | static inline int |
248 | static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) | 631 | atomic_xchg_release(atomic_t *v, int i) |
249 | { | 632 | { |
250 | kasan_check_write(v, sizeof(*v)); | 633 | kasan_check_write(v, sizeof(*v)); |
251 | return arch_atomic64_dec_if_positive(v); | 634 | return arch_atomic_xchg_release(v, i); |
252 | } | 635 | } |
636 | #define atomic_xchg_release atomic_xchg_release | ||
253 | #endif | 637 | #endif |
254 | 638 | ||
255 | #ifdef arch_atomic_dec_and_test | 639 | #if defined(arch_atomic_xchg_relaxed) |
256 | #define atomic_dec_and_test atomic_dec_and_test | 640 | static inline int |
257 | static __always_inline bool atomic_dec_and_test(atomic_t *v) | 641 | atomic_xchg_relaxed(atomic_t *v, int i) |
258 | { | 642 | { |
259 | kasan_check_write(v, sizeof(*v)); | 643 | kasan_check_write(v, sizeof(*v)); |
260 | return arch_atomic_dec_and_test(v); | 644 | return arch_atomic_xchg_relaxed(v, i); |
261 | } | 645 | } |
646 | #define atomic_xchg_relaxed atomic_xchg_relaxed | ||
262 | #endif | 647 | #endif |
263 | 648 | ||
264 | #ifdef arch_atomic64_dec_and_test | 649 | #if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) |
265 | #define atomic64_dec_and_test atomic64_dec_and_test | 650 | static inline int |
266 | static __always_inline bool atomic64_dec_and_test(atomic64_t *v) | 651 | atomic_cmpxchg(atomic_t *v, int old, int new) |
267 | { | 652 | { |
268 | kasan_check_write(v, sizeof(*v)); | 653 | kasan_check_write(v, sizeof(*v)); |
269 | return arch_atomic64_dec_and_test(v); | 654 | return arch_atomic_cmpxchg(v, old, new); |
270 | } | 655 | } |
656 | #define atomic_cmpxchg atomic_cmpxchg | ||
271 | #endif | 657 | #endif |
272 | 658 | ||
273 | #ifdef arch_atomic_inc_and_test | 659 | #if defined(arch_atomic_cmpxchg_acquire) |
274 | #define atomic_inc_and_test atomic_inc_and_test | 660 | static inline int |
275 | static __always_inline bool atomic_inc_and_test(atomic_t *v) | 661 | atomic_cmpxchg_acquire(atomic_t *v, int old, int new) |
662 | { | ||
663 | kasan_check_write(v, sizeof(*v)); | ||
664 | return arch_atomic_cmpxchg_acquire(v, old, new); | ||
665 | } | ||
666 | #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire | ||
667 | #endif | ||
668 | |||
669 | #if defined(arch_atomic_cmpxchg_release) | ||
670 | static inline int | ||
671 | atomic_cmpxchg_release(atomic_t *v, int old, int new) | ||
672 | { | ||
673 | kasan_check_write(v, sizeof(*v)); | ||
674 | return arch_atomic_cmpxchg_release(v, old, new); | ||
675 | } | ||
676 | #define atomic_cmpxchg_release atomic_cmpxchg_release | ||
677 | #endif | ||
678 | |||
679 | #if defined(arch_atomic_cmpxchg_relaxed) | ||
680 | static inline int | ||
681 | atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) | ||
682 | { | ||
683 | kasan_check_write(v, sizeof(*v)); | ||
684 | return arch_atomic_cmpxchg_relaxed(v, old, new); | ||
685 | } | ||
686 | #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed | ||
687 | #endif | ||
688 | |||
689 | #if defined(arch_atomic_try_cmpxchg) | ||
690 | static inline bool | ||
691 | atomic_try_cmpxchg(atomic_t *v, int *old, int new) | ||
692 | { | ||
693 | kasan_check_write(v, sizeof(*v)); | ||
694 | kasan_check_write(old, sizeof(*old)); | ||
695 | return arch_atomic_try_cmpxchg(v, old, new); | ||
696 | } | ||
697 | #define atomic_try_cmpxchg atomic_try_cmpxchg | ||
698 | #endif | ||
699 | |||
700 | #if defined(arch_atomic_try_cmpxchg_acquire) | ||
701 | static inline bool | ||
702 | atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) | ||
703 | { | ||
704 | kasan_check_write(v, sizeof(*v)); | ||
705 | kasan_check_write(old, sizeof(*old)); | ||
706 | return arch_atomic_try_cmpxchg_acquire(v, old, new); | ||
707 | } | ||
708 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire | ||
709 | #endif | ||
710 | |||
711 | #if defined(arch_atomic_try_cmpxchg_release) | ||
712 | static inline bool | ||
713 | atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) | ||
714 | { | ||
715 | kasan_check_write(v, sizeof(*v)); | ||
716 | kasan_check_write(old, sizeof(*old)); | ||
717 | return arch_atomic_try_cmpxchg_release(v, old, new); | ||
718 | } | ||
719 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release | ||
720 | #endif | ||
721 | |||
722 | #if defined(arch_atomic_try_cmpxchg_relaxed) | ||
723 | static inline bool | ||
724 | atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) | ||
725 | { | ||
726 | kasan_check_write(v, sizeof(*v)); | ||
727 | kasan_check_write(old, sizeof(*old)); | ||
728 | return arch_atomic_try_cmpxchg_relaxed(v, old, new); | ||
729 | } | ||
730 | #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed | ||
731 | #endif | ||
732 | |||
733 | #if defined(arch_atomic_sub_and_test) | ||
734 | static inline bool | ||
735 | atomic_sub_and_test(int i, atomic_t *v) | ||
736 | { | ||
737 | kasan_check_write(v, sizeof(*v)); | ||
738 | return arch_atomic_sub_and_test(i, v); | ||
739 | } | ||
740 | #define atomic_sub_and_test atomic_sub_and_test | ||
741 | #endif | ||
742 | |||
743 | #if defined(arch_atomic_dec_and_test) | ||
744 | static inline bool | ||
745 | atomic_dec_and_test(atomic_t *v) | ||
746 | { | ||
747 | kasan_check_write(v, sizeof(*v)); | ||
748 | return arch_atomic_dec_and_test(v); | ||
749 | } | ||
750 | #define atomic_dec_and_test atomic_dec_and_test | ||
751 | #endif | ||
752 | |||
753 | #if defined(arch_atomic_inc_and_test) | ||
754 | static inline bool | ||
755 | atomic_inc_and_test(atomic_t *v) | ||
276 | { | 756 | { |
277 | kasan_check_write(v, sizeof(*v)); | 757 | kasan_check_write(v, sizeof(*v)); |
278 | return arch_atomic_inc_and_test(v); | 758 | return arch_atomic_inc_and_test(v); |
279 | } | 759 | } |
760 | #define atomic_inc_and_test atomic_inc_and_test | ||
280 | #endif | 761 | #endif |
281 | 762 | ||
282 | #ifdef arch_atomic64_inc_and_test | 763 | #if defined(arch_atomic_add_negative) |
283 | #define atomic64_inc_and_test atomic64_inc_and_test | 764 | static inline bool |
284 | static __always_inline bool atomic64_inc_and_test(atomic64_t *v) | 765 | atomic_add_negative(int i, atomic_t *v) |
285 | { | 766 | { |
286 | kasan_check_write(v, sizeof(*v)); | 767 | kasan_check_write(v, sizeof(*v)); |
287 | return arch_atomic64_inc_and_test(v); | 768 | return arch_atomic_add_negative(i, v); |
288 | } | 769 | } |
770 | #define atomic_add_negative atomic_add_negative | ||
289 | #endif | 771 | #endif |
290 | 772 | ||
291 | static __always_inline int atomic_add_return(int i, atomic_t *v) | 773 | #if defined(arch_atomic_fetch_add_unless) |
774 | static inline int | ||
775 | atomic_fetch_add_unless(atomic_t *v, int a, int u) | ||
292 | { | 776 | { |
293 | kasan_check_write(v, sizeof(*v)); | 777 | kasan_check_write(v, sizeof(*v)); |
294 | return arch_atomic_add_return(i, v); | 778 | return arch_atomic_fetch_add_unless(v, a, u); |
779 | } | ||
780 | #define atomic_fetch_add_unless atomic_fetch_add_unless | ||
781 | #endif | ||
782 | |||
783 | #if defined(arch_atomic_add_unless) | ||
784 | static inline bool | ||
785 | atomic_add_unless(atomic_t *v, int a, int u) | ||
786 | { | ||
787 | kasan_check_write(v, sizeof(*v)); | ||
788 | return arch_atomic_add_unless(v, a, u); | ||
789 | } | ||
790 | #define atomic_add_unless atomic_add_unless | ||
791 | #endif | ||
792 | |||
793 | #if defined(arch_atomic_inc_not_zero) | ||
794 | static inline bool | ||
795 | atomic_inc_not_zero(atomic_t *v) | ||
796 | { | ||
797 | kasan_check_write(v, sizeof(*v)); | ||
798 | return arch_atomic_inc_not_zero(v); | ||
799 | } | ||
800 | #define atomic_inc_not_zero atomic_inc_not_zero | ||
801 | #endif | ||
802 | |||
803 | #if defined(arch_atomic_inc_unless_negative) | ||
804 | static inline bool | ||
805 | atomic_inc_unless_negative(atomic_t *v) | ||
806 | { | ||
807 | kasan_check_write(v, sizeof(*v)); | ||
808 | return arch_atomic_inc_unless_negative(v); | ||
809 | } | ||
810 | #define atomic_inc_unless_negative atomic_inc_unless_negative | ||
811 | #endif | ||
812 | |||
813 | #if defined(arch_atomic_dec_unless_positive) | ||
814 | static inline bool | ||
815 | atomic_dec_unless_positive(atomic_t *v) | ||
816 | { | ||
817 | kasan_check_write(v, sizeof(*v)); | ||
818 | return arch_atomic_dec_unless_positive(v); | ||
819 | } | ||
820 | #define atomic_dec_unless_positive atomic_dec_unless_positive | ||
821 | #endif | ||
822 | |||
823 | #if defined(arch_atomic_dec_if_positive) | ||
824 | static inline int | ||
825 | atomic_dec_if_positive(atomic_t *v) | ||
826 | { | ||
827 | kasan_check_write(v, sizeof(*v)); | ||
828 | return arch_atomic_dec_if_positive(v); | ||
829 | } | ||
830 | #define atomic_dec_if_positive atomic_dec_if_positive | ||
831 | #endif | ||
832 | |||
833 | static inline s64 | ||
834 | atomic64_read(const atomic64_t *v) | ||
835 | { | ||
836 | kasan_check_read(v, sizeof(*v)); | ||
837 | return arch_atomic64_read(v); | ||
838 | } | ||
839 | #define atomic64_read atomic64_read | ||
840 | |||
841 | #if defined(arch_atomic64_read_acquire) | ||
842 | static inline s64 | ||
843 | atomic64_read_acquire(const atomic64_t *v) | ||
844 | { | ||
845 | kasan_check_read(v, sizeof(*v)); | ||
846 | return arch_atomic64_read_acquire(v); | ||
847 | } | ||
848 | #define atomic64_read_acquire atomic64_read_acquire | ||
849 | #endif | ||
850 | |||
851 | static inline void | ||
852 | atomic64_set(atomic64_t *v, s64 i) | ||
853 | { | ||
854 | kasan_check_write(v, sizeof(*v)); | ||
855 | arch_atomic64_set(v, i); | ||
856 | } | ||
857 | #define atomic64_set atomic64_set | ||
858 | |||
859 | #if defined(arch_atomic64_set_release) | ||
860 | static inline void | ||
861 | atomic64_set_release(atomic64_t *v, s64 i) | ||
862 | { | ||
863 | kasan_check_write(v, sizeof(*v)); | ||
864 | arch_atomic64_set_release(v, i); | ||
295 | } | 865 | } |
866 | #define atomic64_set_release atomic64_set_release | ||
867 | #endif | ||
296 | 868 | ||
297 | static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) | 869 | static inline void |
870 | atomic64_add(s64 i, atomic64_t *v) | ||
871 | { | ||
872 | kasan_check_write(v, sizeof(*v)); | ||
873 | arch_atomic64_add(i, v); | ||
874 | } | ||
875 | #define atomic64_add atomic64_add | ||
876 | |||
877 | #if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) | ||
878 | static inline s64 | ||
879 | atomic64_add_return(s64 i, atomic64_t *v) | ||
298 | { | 880 | { |
299 | kasan_check_write(v, sizeof(*v)); | 881 | kasan_check_write(v, sizeof(*v)); |
300 | return arch_atomic64_add_return(i, v); | 882 | return arch_atomic64_add_return(i, v); |
301 | } | 883 | } |
884 | #define atomic64_add_return atomic64_add_return | ||
885 | #endif | ||
302 | 886 | ||
303 | static __always_inline int atomic_sub_return(int i, atomic_t *v) | 887 | #if defined(arch_atomic64_add_return_acquire) |
888 | static inline s64 | ||
889 | atomic64_add_return_acquire(s64 i, atomic64_t *v) | ||
304 | { | 890 | { |
305 | kasan_check_write(v, sizeof(*v)); | 891 | kasan_check_write(v, sizeof(*v)); |
306 | return arch_atomic_sub_return(i, v); | 892 | return arch_atomic64_add_return_acquire(i, v); |
307 | } | 893 | } |
894 | #define atomic64_add_return_acquire atomic64_add_return_acquire | ||
895 | #endif | ||
308 | 896 | ||
309 | static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) | 897 | #if defined(arch_atomic64_add_return_release) |
898 | static inline s64 | ||
899 | atomic64_add_return_release(s64 i, atomic64_t *v) | ||
310 | { | 900 | { |
311 | kasan_check_write(v, sizeof(*v)); | 901 | kasan_check_write(v, sizeof(*v)); |
312 | return arch_atomic64_sub_return(i, v); | 902 | return arch_atomic64_add_return_release(i, v); |
313 | } | 903 | } |
904 | #define atomic64_add_return_release atomic64_add_return_release | ||
905 | #endif | ||
314 | 906 | ||
315 | static __always_inline int atomic_fetch_add(int i, atomic_t *v) | 907 | #if defined(arch_atomic64_add_return_relaxed) |
908 | static inline s64 | ||
909 | atomic64_add_return_relaxed(s64 i, atomic64_t *v) | ||
316 | { | 910 | { |
317 | kasan_check_write(v, sizeof(*v)); | 911 | kasan_check_write(v, sizeof(*v)); |
318 | return arch_atomic_fetch_add(i, v); | 912 | return arch_atomic64_add_return_relaxed(i, v); |
319 | } | 913 | } |
914 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed | ||
915 | #endif | ||
320 | 916 | ||
321 | static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) | 917 | #if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) |
918 | static inline s64 | ||
919 | atomic64_fetch_add(s64 i, atomic64_t *v) | ||
322 | { | 920 | { |
323 | kasan_check_write(v, sizeof(*v)); | 921 | kasan_check_write(v, sizeof(*v)); |
324 | return arch_atomic64_fetch_add(i, v); | 922 | return arch_atomic64_fetch_add(i, v); |
325 | } | 923 | } |
924 | #define atomic64_fetch_add atomic64_fetch_add | ||
925 | #endif | ||
326 | 926 | ||
327 | static __always_inline int atomic_fetch_sub(int i, atomic_t *v) | 927 | #if defined(arch_atomic64_fetch_add_acquire) |
928 | static inline s64 | ||
929 | atomic64_fetch_add_acquire(s64 i, atomic64_t *v) | ||
328 | { | 930 | { |
329 | kasan_check_write(v, sizeof(*v)); | 931 | kasan_check_write(v, sizeof(*v)); |
330 | return arch_atomic_fetch_sub(i, v); | 932 | return arch_atomic64_fetch_add_acquire(i, v); |
331 | } | 933 | } |
934 | #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire | ||
935 | #endif | ||
936 | |||
937 | #if defined(arch_atomic64_fetch_add_release) | ||
938 | static inline s64 | ||
939 | atomic64_fetch_add_release(s64 i, atomic64_t *v) | ||
940 | { | ||
941 | kasan_check_write(v, sizeof(*v)); | ||
942 | return arch_atomic64_fetch_add_release(i, v); | ||
943 | } | ||
944 | #define atomic64_fetch_add_release atomic64_fetch_add_release | ||
945 | #endif | ||
332 | 946 | ||
333 | static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) | 947 | #if defined(arch_atomic64_fetch_add_relaxed) |
948 | static inline s64 | ||
949 | atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) | ||
950 | { | ||
951 | kasan_check_write(v, sizeof(*v)); | ||
952 | return arch_atomic64_fetch_add_relaxed(i, v); | ||
953 | } | ||
954 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
955 | #endif | ||
956 | |||
957 | static inline void | ||
958 | atomic64_sub(s64 i, atomic64_t *v) | ||
959 | { | ||
960 | kasan_check_write(v, sizeof(*v)); | ||
961 | arch_atomic64_sub(i, v); | ||
962 | } | ||
963 | #define atomic64_sub atomic64_sub | ||
964 | |||
965 | #if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) | ||
966 | static inline s64 | ||
967 | atomic64_sub_return(s64 i, atomic64_t *v) | ||
968 | { | ||
969 | kasan_check_write(v, sizeof(*v)); | ||
970 | return arch_atomic64_sub_return(i, v); | ||
971 | } | ||
972 | #define atomic64_sub_return atomic64_sub_return | ||
973 | #endif | ||
974 | |||
975 | #if defined(arch_atomic64_sub_return_acquire) | ||
976 | static inline s64 | ||
977 | atomic64_sub_return_acquire(s64 i, atomic64_t *v) | ||
978 | { | ||
979 | kasan_check_write(v, sizeof(*v)); | ||
980 | return arch_atomic64_sub_return_acquire(i, v); | ||
981 | } | ||
982 | #define atomic64_sub_return_acquire atomic64_sub_return_acquire | ||
983 | #endif | ||
984 | |||
985 | #if defined(arch_atomic64_sub_return_release) | ||
986 | static inline s64 | ||
987 | atomic64_sub_return_release(s64 i, atomic64_t *v) | ||
988 | { | ||
989 | kasan_check_write(v, sizeof(*v)); | ||
990 | return arch_atomic64_sub_return_release(i, v); | ||
991 | } | ||
992 | #define atomic64_sub_return_release atomic64_sub_return_release | ||
993 | #endif | ||
994 | |||
995 | #if defined(arch_atomic64_sub_return_relaxed) | ||
996 | static inline s64 | ||
997 | atomic64_sub_return_relaxed(s64 i, atomic64_t *v) | ||
998 | { | ||
999 | kasan_check_write(v, sizeof(*v)); | ||
1000 | return arch_atomic64_sub_return_relaxed(i, v); | ||
1001 | } | ||
1002 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed | ||
1003 | #endif | ||
1004 | |||
1005 | #if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) | ||
1006 | static inline s64 | ||
1007 | atomic64_fetch_sub(s64 i, atomic64_t *v) | ||
334 | { | 1008 | { |
335 | kasan_check_write(v, sizeof(*v)); | 1009 | kasan_check_write(v, sizeof(*v)); |
336 | return arch_atomic64_fetch_sub(i, v); | 1010 | return arch_atomic64_fetch_sub(i, v); |
337 | } | 1011 | } |
1012 | #define atomic64_fetch_sub atomic64_fetch_sub | ||
1013 | #endif | ||
338 | 1014 | ||
339 | static __always_inline int atomic_fetch_and(int i, atomic_t *v) | 1015 | #if defined(arch_atomic64_fetch_sub_acquire) |
1016 | static inline s64 | ||
1017 | atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) | ||
340 | { | 1018 | { |
341 | kasan_check_write(v, sizeof(*v)); | 1019 | kasan_check_write(v, sizeof(*v)); |
342 | return arch_atomic_fetch_and(i, v); | 1020 | return arch_atomic64_fetch_sub_acquire(i, v); |
1021 | } | ||
1022 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire | ||
1023 | #endif | ||
1024 | |||
1025 | #if defined(arch_atomic64_fetch_sub_release) | ||
1026 | static inline s64 | ||
1027 | atomic64_fetch_sub_release(s64 i, atomic64_t *v) | ||
1028 | { | ||
1029 | kasan_check_write(v, sizeof(*v)); | ||
1030 | return arch_atomic64_fetch_sub_release(i, v); | ||
343 | } | 1031 | } |
1032 | #define atomic64_fetch_sub_release atomic64_fetch_sub_release | ||
1033 | #endif | ||
344 | 1034 | ||
345 | static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) | 1035 | #if defined(arch_atomic64_fetch_sub_relaxed) |
1036 | static inline s64 | ||
1037 | atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) | ||
1038 | { | ||
1039 | kasan_check_write(v, sizeof(*v)); | ||
1040 | return arch_atomic64_fetch_sub_relaxed(i, v); | ||
1041 | } | ||
1042 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
1043 | #endif | ||
1044 | |||
1045 | #if defined(arch_atomic64_inc) | ||
1046 | static inline void | ||
1047 | atomic64_inc(atomic64_t *v) | ||
1048 | { | ||
1049 | kasan_check_write(v, sizeof(*v)); | ||
1050 | arch_atomic64_inc(v); | ||
1051 | } | ||
1052 | #define atomic64_inc atomic64_inc | ||
1053 | #endif | ||
1054 | |||
1055 | #if defined(arch_atomic64_inc_return) | ||
1056 | static inline s64 | ||
1057 | atomic64_inc_return(atomic64_t *v) | ||
1058 | { | ||
1059 | kasan_check_write(v, sizeof(*v)); | ||
1060 | return arch_atomic64_inc_return(v); | ||
1061 | } | ||
1062 | #define atomic64_inc_return atomic64_inc_return | ||
1063 | #endif | ||
1064 | |||
1065 | #if defined(arch_atomic64_inc_return_acquire) | ||
1066 | static inline s64 | ||
1067 | atomic64_inc_return_acquire(atomic64_t *v) | ||
1068 | { | ||
1069 | kasan_check_write(v, sizeof(*v)); | ||
1070 | return arch_atomic64_inc_return_acquire(v); | ||
1071 | } | ||
1072 | #define atomic64_inc_return_acquire atomic64_inc_return_acquire | ||
1073 | #endif | ||
1074 | |||
1075 | #if defined(arch_atomic64_inc_return_release) | ||
1076 | static inline s64 | ||
1077 | atomic64_inc_return_release(atomic64_t *v) | ||
1078 | { | ||
1079 | kasan_check_write(v, sizeof(*v)); | ||
1080 | return arch_atomic64_inc_return_release(v); | ||
1081 | } | ||
1082 | #define atomic64_inc_return_release atomic64_inc_return_release | ||
1083 | #endif | ||
1084 | |||
1085 | #if defined(arch_atomic64_inc_return_relaxed) | ||
1086 | static inline s64 | ||
1087 | atomic64_inc_return_relaxed(atomic64_t *v) | ||
1088 | { | ||
1089 | kasan_check_write(v, sizeof(*v)); | ||
1090 | return arch_atomic64_inc_return_relaxed(v); | ||
1091 | } | ||
1092 | #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed | ||
1093 | #endif | ||
1094 | |||
1095 | #if defined(arch_atomic64_fetch_inc) | ||
1096 | static inline s64 | ||
1097 | atomic64_fetch_inc(atomic64_t *v) | ||
1098 | { | ||
1099 | kasan_check_write(v, sizeof(*v)); | ||
1100 | return arch_atomic64_fetch_inc(v); | ||
1101 | } | ||
1102 | #define atomic64_fetch_inc atomic64_fetch_inc | ||
1103 | #endif | ||
1104 | |||
1105 | #if defined(arch_atomic64_fetch_inc_acquire) | ||
1106 | static inline s64 | ||
1107 | atomic64_fetch_inc_acquire(atomic64_t *v) | ||
1108 | { | ||
1109 | kasan_check_write(v, sizeof(*v)); | ||
1110 | return arch_atomic64_fetch_inc_acquire(v); | ||
1111 | } | ||
1112 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire | ||
1113 | #endif | ||
1114 | |||
1115 | #if defined(arch_atomic64_fetch_inc_release) | ||
1116 | static inline s64 | ||
1117 | atomic64_fetch_inc_release(atomic64_t *v) | ||
1118 | { | ||
1119 | kasan_check_write(v, sizeof(*v)); | ||
1120 | return arch_atomic64_fetch_inc_release(v); | ||
1121 | } | ||
1122 | #define atomic64_fetch_inc_release atomic64_fetch_inc_release | ||
1123 | #endif | ||
1124 | |||
1125 | #if defined(arch_atomic64_fetch_inc_relaxed) | ||
1126 | static inline s64 | ||
1127 | atomic64_fetch_inc_relaxed(atomic64_t *v) | ||
1128 | { | ||
1129 | kasan_check_write(v, sizeof(*v)); | ||
1130 | return arch_atomic64_fetch_inc_relaxed(v); | ||
1131 | } | ||
1132 | #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed | ||
1133 | #endif | ||
1134 | |||
1135 | #if defined(arch_atomic64_dec) | ||
1136 | static inline void | ||
1137 | atomic64_dec(atomic64_t *v) | ||
1138 | { | ||
1139 | kasan_check_write(v, sizeof(*v)); | ||
1140 | arch_atomic64_dec(v); | ||
1141 | } | ||
1142 | #define atomic64_dec atomic64_dec | ||
1143 | #endif | ||
1144 | |||
1145 | #if defined(arch_atomic64_dec_return) | ||
1146 | static inline s64 | ||
1147 | atomic64_dec_return(atomic64_t *v) | ||
1148 | { | ||
1149 | kasan_check_write(v, sizeof(*v)); | ||
1150 | return arch_atomic64_dec_return(v); | ||
1151 | } | ||
1152 | #define atomic64_dec_return atomic64_dec_return | ||
1153 | #endif | ||
1154 | |||
1155 | #if defined(arch_atomic64_dec_return_acquire) | ||
1156 | static inline s64 | ||
1157 | atomic64_dec_return_acquire(atomic64_t *v) | ||
1158 | { | ||
1159 | kasan_check_write(v, sizeof(*v)); | ||
1160 | return arch_atomic64_dec_return_acquire(v); | ||
1161 | } | ||
1162 | #define atomic64_dec_return_acquire atomic64_dec_return_acquire | ||
1163 | #endif | ||
1164 | |||
1165 | #if defined(arch_atomic64_dec_return_release) | ||
1166 | static inline s64 | ||
1167 | atomic64_dec_return_release(atomic64_t *v) | ||
1168 | { | ||
1169 | kasan_check_write(v, sizeof(*v)); | ||
1170 | return arch_atomic64_dec_return_release(v); | ||
1171 | } | ||
1172 | #define atomic64_dec_return_release atomic64_dec_return_release | ||
1173 | #endif | ||
1174 | |||
1175 | #if defined(arch_atomic64_dec_return_relaxed) | ||
1176 | static inline s64 | ||
1177 | atomic64_dec_return_relaxed(atomic64_t *v) | ||
1178 | { | ||
1179 | kasan_check_write(v, sizeof(*v)); | ||
1180 | return arch_atomic64_dec_return_relaxed(v); | ||
1181 | } | ||
1182 | #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed | ||
1183 | #endif | ||
1184 | |||
1185 | #if defined(arch_atomic64_fetch_dec) | ||
1186 | static inline s64 | ||
1187 | atomic64_fetch_dec(atomic64_t *v) | ||
1188 | { | ||
1189 | kasan_check_write(v, sizeof(*v)); | ||
1190 | return arch_atomic64_fetch_dec(v); | ||
1191 | } | ||
1192 | #define atomic64_fetch_dec atomic64_fetch_dec | ||
1193 | #endif | ||
1194 | |||
1195 | #if defined(arch_atomic64_fetch_dec_acquire) | ||
1196 | static inline s64 | ||
1197 | atomic64_fetch_dec_acquire(atomic64_t *v) | ||
1198 | { | ||
1199 | kasan_check_write(v, sizeof(*v)); | ||
1200 | return arch_atomic64_fetch_dec_acquire(v); | ||
1201 | } | ||
1202 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire | ||
1203 | #endif | ||
1204 | |||
1205 | #if defined(arch_atomic64_fetch_dec_release) | ||
1206 | static inline s64 | ||
1207 | atomic64_fetch_dec_release(atomic64_t *v) | ||
1208 | { | ||
1209 | kasan_check_write(v, sizeof(*v)); | ||
1210 | return arch_atomic64_fetch_dec_release(v); | ||
1211 | } | ||
1212 | #define atomic64_fetch_dec_release atomic64_fetch_dec_release | ||
1213 | #endif | ||
1214 | |||
1215 | #if defined(arch_atomic64_fetch_dec_relaxed) | ||
1216 | static inline s64 | ||
1217 | atomic64_fetch_dec_relaxed(atomic64_t *v) | ||
1218 | { | ||
1219 | kasan_check_write(v, sizeof(*v)); | ||
1220 | return arch_atomic64_fetch_dec_relaxed(v); | ||
1221 | } | ||
1222 | #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed | ||
1223 | #endif | ||
1224 | |||
1225 | static inline void | ||
1226 | atomic64_and(s64 i, atomic64_t *v) | ||
1227 | { | ||
1228 | kasan_check_write(v, sizeof(*v)); | ||
1229 | arch_atomic64_and(i, v); | ||
1230 | } | ||
1231 | #define atomic64_and atomic64_and | ||
1232 | |||
1233 | #if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) | ||
1234 | static inline s64 | ||
1235 | atomic64_fetch_and(s64 i, atomic64_t *v) | ||
346 | { | 1236 | { |
347 | kasan_check_write(v, sizeof(*v)); | 1237 | kasan_check_write(v, sizeof(*v)); |
348 | return arch_atomic64_fetch_and(i, v); | 1238 | return arch_atomic64_fetch_and(i, v); |
349 | } | 1239 | } |
1240 | #define atomic64_fetch_and atomic64_fetch_and | ||
1241 | #endif | ||
350 | 1242 | ||
351 | static __always_inline int atomic_fetch_or(int i, atomic_t *v) | 1243 | #if defined(arch_atomic64_fetch_and_acquire) |
1244 | static inline s64 | ||
1245 | atomic64_fetch_and_acquire(s64 i, atomic64_t *v) | ||
352 | { | 1246 | { |
353 | kasan_check_write(v, sizeof(*v)); | 1247 | kasan_check_write(v, sizeof(*v)); |
354 | return arch_atomic_fetch_or(i, v); | 1248 | return arch_atomic64_fetch_and_acquire(i, v); |
1249 | } | ||
1250 | #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire | ||
1251 | #endif | ||
1252 | |||
1253 | #if defined(arch_atomic64_fetch_and_release) | ||
1254 | static inline s64 | ||
1255 | atomic64_fetch_and_release(s64 i, atomic64_t *v) | ||
1256 | { | ||
1257 | kasan_check_write(v, sizeof(*v)); | ||
1258 | return arch_atomic64_fetch_and_release(i, v); | ||
1259 | } | ||
1260 | #define atomic64_fetch_and_release atomic64_fetch_and_release | ||
1261 | #endif | ||
1262 | |||
1263 | #if defined(arch_atomic64_fetch_and_relaxed) | ||
1264 | static inline s64 | ||
1265 | atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) | ||
1266 | { | ||
1267 | kasan_check_write(v, sizeof(*v)); | ||
1268 | return arch_atomic64_fetch_and_relaxed(i, v); | ||
1269 | } | ||
1270 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
1271 | #endif | ||
1272 | |||
1273 | #if defined(arch_atomic64_andnot) | ||
1274 | static inline void | ||
1275 | atomic64_andnot(s64 i, atomic64_t *v) | ||
1276 | { | ||
1277 | kasan_check_write(v, sizeof(*v)); | ||
1278 | arch_atomic64_andnot(i, v); | ||
1279 | } | ||
1280 | #define atomic64_andnot atomic64_andnot | ||
1281 | #endif | ||
1282 | |||
1283 | #if defined(arch_atomic64_fetch_andnot) | ||
1284 | static inline s64 | ||
1285 | atomic64_fetch_andnot(s64 i, atomic64_t *v) | ||
1286 | { | ||
1287 | kasan_check_write(v, sizeof(*v)); | ||
1288 | return arch_atomic64_fetch_andnot(i, v); | ||
1289 | } | ||
1290 | #define atomic64_fetch_andnot atomic64_fetch_andnot | ||
1291 | #endif | ||
1292 | |||
1293 | #if defined(arch_atomic64_fetch_andnot_acquire) | ||
1294 | static inline s64 | ||
1295 | atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) | ||
1296 | { | ||
1297 | kasan_check_write(v, sizeof(*v)); | ||
1298 | return arch_atomic64_fetch_andnot_acquire(i, v); | ||
1299 | } | ||
1300 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire | ||
1301 | #endif | ||
1302 | |||
1303 | #if defined(arch_atomic64_fetch_andnot_release) | ||
1304 | static inline s64 | ||
1305 | atomic64_fetch_andnot_release(s64 i, atomic64_t *v) | ||
1306 | { | ||
1307 | kasan_check_write(v, sizeof(*v)); | ||
1308 | return arch_atomic64_fetch_andnot_release(i, v); | ||
1309 | } | ||
1310 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release | ||
1311 | #endif | ||
1312 | |||
1313 | #if defined(arch_atomic64_fetch_andnot_relaxed) | ||
1314 | static inline s64 | ||
1315 | atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) | ||
1316 | { | ||
1317 | kasan_check_write(v, sizeof(*v)); | ||
1318 | return arch_atomic64_fetch_andnot_relaxed(i, v); | ||
1319 | } | ||
1320 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | ||
1321 | #endif | ||
1322 | |||
1323 | static inline void | ||
1324 | atomic64_or(s64 i, atomic64_t *v) | ||
1325 | { | ||
1326 | kasan_check_write(v, sizeof(*v)); | ||
1327 | arch_atomic64_or(i, v); | ||
355 | } | 1328 | } |
1329 | #define atomic64_or atomic64_or | ||
356 | 1330 | ||
357 | static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) | 1331 | #if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) |
1332 | static inline s64 | ||
1333 | atomic64_fetch_or(s64 i, atomic64_t *v) | ||
358 | { | 1334 | { |
359 | kasan_check_write(v, sizeof(*v)); | 1335 | kasan_check_write(v, sizeof(*v)); |
360 | return arch_atomic64_fetch_or(i, v); | 1336 | return arch_atomic64_fetch_or(i, v); |
361 | } | 1337 | } |
1338 | #define atomic64_fetch_or atomic64_fetch_or | ||
1339 | #endif | ||
362 | 1340 | ||
363 | static __always_inline int atomic_fetch_xor(int i, atomic_t *v) | 1341 | #if defined(arch_atomic64_fetch_or_acquire) |
1342 | static inline s64 | ||
1343 | atomic64_fetch_or_acquire(s64 i, atomic64_t *v) | ||
364 | { | 1344 | { |
365 | kasan_check_write(v, sizeof(*v)); | 1345 | kasan_check_write(v, sizeof(*v)); |
366 | return arch_atomic_fetch_xor(i, v); | 1346 | return arch_atomic64_fetch_or_acquire(i, v); |
1347 | } | ||
1348 | #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire | ||
1349 | #endif | ||
1350 | |||
1351 | #if defined(arch_atomic64_fetch_or_release) | ||
1352 | static inline s64 | ||
1353 | atomic64_fetch_or_release(s64 i, atomic64_t *v) | ||
1354 | { | ||
1355 | kasan_check_write(v, sizeof(*v)); | ||
1356 | return arch_atomic64_fetch_or_release(i, v); | ||
1357 | } | ||
1358 | #define atomic64_fetch_or_release atomic64_fetch_or_release | ||
1359 | #endif | ||
1360 | |||
1361 | #if defined(arch_atomic64_fetch_or_relaxed) | ||
1362 | static inline s64 | ||
1363 | atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) | ||
1364 | { | ||
1365 | kasan_check_write(v, sizeof(*v)); | ||
1366 | return arch_atomic64_fetch_or_relaxed(i, v); | ||
1367 | } | ||
1368 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
1369 | #endif | ||
1370 | |||
1371 | static inline void | ||
1372 | atomic64_xor(s64 i, atomic64_t *v) | ||
1373 | { | ||
1374 | kasan_check_write(v, sizeof(*v)); | ||
1375 | arch_atomic64_xor(i, v); | ||
367 | } | 1376 | } |
1377 | #define atomic64_xor atomic64_xor | ||
368 | 1378 | ||
369 | static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) | 1379 | #if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) |
1380 | static inline s64 | ||
1381 | atomic64_fetch_xor(s64 i, atomic64_t *v) | ||
370 | { | 1382 | { |
371 | kasan_check_write(v, sizeof(*v)); | 1383 | kasan_check_write(v, sizeof(*v)); |
372 | return arch_atomic64_fetch_xor(i, v); | 1384 | return arch_atomic64_fetch_xor(i, v); |
373 | } | 1385 | } |
1386 | #define atomic64_fetch_xor atomic64_fetch_xor | ||
1387 | #endif | ||
374 | 1388 | ||
375 | #ifdef arch_atomic_sub_and_test | 1389 | #if defined(arch_atomic64_fetch_xor_acquire) |
376 | #define atomic_sub_and_test atomic_sub_and_test | 1390 | static inline s64 |
377 | static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) | 1391 | atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) |
378 | { | 1392 | { |
379 | kasan_check_write(v, sizeof(*v)); | 1393 | kasan_check_write(v, sizeof(*v)); |
380 | return arch_atomic_sub_and_test(i, v); | 1394 | return arch_atomic64_fetch_xor_acquire(i, v); |
381 | } | 1395 | } |
1396 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire | ||
382 | #endif | 1397 | #endif |
383 | 1398 | ||
384 | #ifdef arch_atomic64_sub_and_test | 1399 | #if defined(arch_atomic64_fetch_xor_release) |
385 | #define atomic64_sub_and_test atomic64_sub_and_test | 1400 | static inline s64 |
386 | static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) | 1401 | atomic64_fetch_xor_release(s64 i, atomic64_t *v) |
1402 | { | ||
1403 | kasan_check_write(v, sizeof(*v)); | ||
1404 | return arch_atomic64_fetch_xor_release(i, v); | ||
1405 | } | ||
1406 | #define atomic64_fetch_xor_release atomic64_fetch_xor_release | ||
1407 | #endif | ||
1408 | |||
1409 | #if defined(arch_atomic64_fetch_xor_relaxed) | ||
1410 | static inline s64 | ||
1411 | atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) | ||
1412 | { | ||
1413 | kasan_check_write(v, sizeof(*v)); | ||
1414 | return arch_atomic64_fetch_xor_relaxed(i, v); | ||
1415 | } | ||
1416 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
1417 | #endif | ||
1418 | |||
1419 | #if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) | ||
1420 | static inline s64 | ||
1421 | atomic64_xchg(atomic64_t *v, s64 i) | ||
1422 | { | ||
1423 | kasan_check_write(v, sizeof(*v)); | ||
1424 | return arch_atomic64_xchg(v, i); | ||
1425 | } | ||
1426 | #define atomic64_xchg atomic64_xchg | ||
1427 | #endif | ||
1428 | |||
1429 | #if defined(arch_atomic64_xchg_acquire) | ||
1430 | static inline s64 | ||
1431 | atomic64_xchg_acquire(atomic64_t *v, s64 i) | ||
1432 | { | ||
1433 | kasan_check_write(v, sizeof(*v)); | ||
1434 | return arch_atomic64_xchg_acquire(v, i); | ||
1435 | } | ||
1436 | #define atomic64_xchg_acquire atomic64_xchg_acquire | ||
1437 | #endif | ||
1438 | |||
1439 | #if defined(arch_atomic64_xchg_release) | ||
1440 | static inline s64 | ||
1441 | atomic64_xchg_release(atomic64_t *v, s64 i) | ||
1442 | { | ||
1443 | kasan_check_write(v, sizeof(*v)); | ||
1444 | return arch_atomic64_xchg_release(v, i); | ||
1445 | } | ||
1446 | #define atomic64_xchg_release atomic64_xchg_release | ||
1447 | #endif | ||
1448 | |||
1449 | #if defined(arch_atomic64_xchg_relaxed) | ||
1450 | static inline s64 | ||
1451 | atomic64_xchg_relaxed(atomic64_t *v, s64 i) | ||
1452 | { | ||
1453 | kasan_check_write(v, sizeof(*v)); | ||
1454 | return arch_atomic64_xchg_relaxed(v, i); | ||
1455 | } | ||
1456 | #define atomic64_xchg_relaxed atomic64_xchg_relaxed | ||
1457 | #endif | ||
1458 | |||
1459 | #if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) | ||
1460 | static inline s64 | ||
1461 | atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) | ||
1462 | { | ||
1463 | kasan_check_write(v, sizeof(*v)); | ||
1464 | return arch_atomic64_cmpxchg(v, old, new); | ||
1465 | } | ||
1466 | #define atomic64_cmpxchg atomic64_cmpxchg | ||
1467 | #endif | ||
1468 | |||
1469 | #if defined(arch_atomic64_cmpxchg_acquire) | ||
1470 | static inline s64 | ||
1471 | atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) | ||
1472 | { | ||
1473 | kasan_check_write(v, sizeof(*v)); | ||
1474 | return arch_atomic64_cmpxchg_acquire(v, old, new); | ||
1475 | } | ||
1476 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire | ||
1477 | #endif | ||
1478 | |||
1479 | #if defined(arch_atomic64_cmpxchg_release) | ||
1480 | static inline s64 | ||
1481 | atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) | ||
1482 | { | ||
1483 | kasan_check_write(v, sizeof(*v)); | ||
1484 | return arch_atomic64_cmpxchg_release(v, old, new); | ||
1485 | } | ||
1486 | #define atomic64_cmpxchg_release atomic64_cmpxchg_release | ||
1487 | #endif | ||
1488 | |||
1489 | #if defined(arch_atomic64_cmpxchg_relaxed) | ||
1490 | static inline s64 | ||
1491 | atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) | ||
1492 | { | ||
1493 | kasan_check_write(v, sizeof(*v)); | ||
1494 | return arch_atomic64_cmpxchg_relaxed(v, old, new); | ||
1495 | } | ||
1496 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed | ||
1497 | #endif | ||
1498 | |||
1499 | #if defined(arch_atomic64_try_cmpxchg) | ||
1500 | static inline bool | ||
1501 | atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) | ||
1502 | { | ||
1503 | kasan_check_write(v, sizeof(*v)); | ||
1504 | kasan_check_write(old, sizeof(*old)); | ||
1505 | return arch_atomic64_try_cmpxchg(v, old, new); | ||
1506 | } | ||
1507 | #define atomic64_try_cmpxchg atomic64_try_cmpxchg | ||
1508 | #endif | ||
1509 | |||
1510 | #if defined(arch_atomic64_try_cmpxchg_acquire) | ||
1511 | static inline bool | ||
1512 | atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) | ||
1513 | { | ||
1514 | kasan_check_write(v, sizeof(*v)); | ||
1515 | kasan_check_write(old, sizeof(*old)); | ||
1516 | return arch_atomic64_try_cmpxchg_acquire(v, old, new); | ||
1517 | } | ||
1518 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire | ||
1519 | #endif | ||
1520 | |||
1521 | #if defined(arch_atomic64_try_cmpxchg_release) | ||
1522 | static inline bool | ||
1523 | atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) | ||
1524 | { | ||
1525 | kasan_check_write(v, sizeof(*v)); | ||
1526 | kasan_check_write(old, sizeof(*old)); | ||
1527 | return arch_atomic64_try_cmpxchg_release(v, old, new); | ||
1528 | } | ||
1529 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release | ||
1530 | #endif | ||
1531 | |||
1532 | #if defined(arch_atomic64_try_cmpxchg_relaxed) | ||
1533 | static inline bool | ||
1534 | atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) | ||
1535 | { | ||
1536 | kasan_check_write(v, sizeof(*v)); | ||
1537 | kasan_check_write(old, sizeof(*old)); | ||
1538 | return arch_atomic64_try_cmpxchg_relaxed(v, old, new); | ||
1539 | } | ||
1540 | #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed | ||
1541 | #endif | ||
1542 | |||
1543 | #if defined(arch_atomic64_sub_and_test) | ||
1544 | static inline bool | ||
1545 | atomic64_sub_and_test(s64 i, atomic64_t *v) | ||
387 | { | 1546 | { |
388 | kasan_check_write(v, sizeof(*v)); | 1547 | kasan_check_write(v, sizeof(*v)); |
389 | return arch_atomic64_sub_and_test(i, v); | 1548 | return arch_atomic64_sub_and_test(i, v); |
390 | } | 1549 | } |
1550 | #define atomic64_sub_and_test atomic64_sub_and_test | ||
391 | #endif | 1551 | #endif |
392 | 1552 | ||
393 | #ifdef arch_atomic_add_negative | 1553 | #if defined(arch_atomic64_dec_and_test) |
394 | #define atomic_add_negative atomic_add_negative | 1554 | static inline bool |
395 | static __always_inline bool atomic_add_negative(int i, atomic_t *v) | 1555 | atomic64_dec_and_test(atomic64_t *v) |
396 | { | 1556 | { |
397 | kasan_check_write(v, sizeof(*v)); | 1557 | kasan_check_write(v, sizeof(*v)); |
398 | return arch_atomic_add_negative(i, v); | 1558 | return arch_atomic64_dec_and_test(v); |
399 | } | 1559 | } |
1560 | #define atomic64_dec_and_test atomic64_dec_and_test | ||
400 | #endif | 1561 | #endif |
401 | 1562 | ||
402 | #ifdef arch_atomic64_add_negative | 1563 | #if defined(arch_atomic64_inc_and_test) |
403 | #define atomic64_add_negative atomic64_add_negative | 1564 | static inline bool |
404 | static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) | 1565 | atomic64_inc_and_test(atomic64_t *v) |
1566 | { | ||
1567 | kasan_check_write(v, sizeof(*v)); | ||
1568 | return arch_atomic64_inc_and_test(v); | ||
1569 | } | ||
1570 | #define atomic64_inc_and_test atomic64_inc_and_test | ||
1571 | #endif | ||
1572 | |||
1573 | #if defined(arch_atomic64_add_negative) | ||
1574 | static inline bool | ||
1575 | atomic64_add_negative(s64 i, atomic64_t *v) | ||
405 | { | 1576 | { |
406 | kasan_check_write(v, sizeof(*v)); | 1577 | kasan_check_write(v, sizeof(*v)); |
407 | return arch_atomic64_add_negative(i, v); | 1578 | return arch_atomic64_add_negative(i, v); |
408 | } | 1579 | } |
1580 | #define atomic64_add_negative atomic64_add_negative | ||
1581 | #endif | ||
1582 | |||
1583 | #if defined(arch_atomic64_fetch_add_unless) | ||
1584 | static inline s64 | ||
1585 | atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) | ||
1586 | { | ||
1587 | kasan_check_write(v, sizeof(*v)); | ||
1588 | return arch_atomic64_fetch_add_unless(v, a, u); | ||
1589 | } | ||
1590 | #define atomic64_fetch_add_unless atomic64_fetch_add_unless | ||
1591 | #endif | ||
1592 | |||
1593 | #if defined(arch_atomic64_add_unless) | ||
1594 | static inline bool | ||
1595 | atomic64_add_unless(atomic64_t *v, s64 a, s64 u) | ||
1596 | { | ||
1597 | kasan_check_write(v, sizeof(*v)); | ||
1598 | return arch_atomic64_add_unless(v, a, u); | ||
1599 | } | ||
1600 | #define atomic64_add_unless atomic64_add_unless | ||
1601 | #endif | ||
1602 | |||
1603 | #if defined(arch_atomic64_inc_not_zero) | ||
1604 | static inline bool | ||
1605 | atomic64_inc_not_zero(atomic64_t *v) | ||
1606 | { | ||
1607 | kasan_check_write(v, sizeof(*v)); | ||
1608 | return arch_atomic64_inc_not_zero(v); | ||
1609 | } | ||
1610 | #define atomic64_inc_not_zero atomic64_inc_not_zero | ||
1611 | #endif | ||
1612 | |||
1613 | #if defined(arch_atomic64_inc_unless_negative) | ||
1614 | static inline bool | ||
1615 | atomic64_inc_unless_negative(atomic64_t *v) | ||
1616 | { | ||
1617 | kasan_check_write(v, sizeof(*v)); | ||
1618 | return arch_atomic64_inc_unless_negative(v); | ||
1619 | } | ||
1620 | #define atomic64_inc_unless_negative atomic64_inc_unless_negative | ||
1621 | #endif | ||
1622 | |||
1623 | #if defined(arch_atomic64_dec_unless_positive) | ||
1624 | static inline bool | ||
1625 | atomic64_dec_unless_positive(atomic64_t *v) | ||
1626 | { | ||
1627 | kasan_check_write(v, sizeof(*v)); | ||
1628 | return arch_atomic64_dec_unless_positive(v); | ||
1629 | } | ||
1630 | #define atomic64_dec_unless_positive atomic64_dec_unless_positive | ||
1631 | #endif | ||
1632 | |||
1633 | #if defined(arch_atomic64_dec_if_positive) | ||
1634 | static inline s64 | ||
1635 | atomic64_dec_if_positive(atomic64_t *v) | ||
1636 | { | ||
1637 | kasan_check_write(v, sizeof(*v)); | ||
1638 | return arch_atomic64_dec_if_positive(v); | ||
1639 | } | ||
1640 | #define atomic64_dec_if_positive atomic64_dec_if_positive | ||
1641 | #endif | ||
1642 | |||
1643 | #if !defined(arch_xchg_relaxed) || defined(arch_xchg) | ||
1644 | #define xchg(ptr, ...) \ | ||
1645 | ({ \ | ||
1646 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1647 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1648 | arch_xchg(__ai_ptr, __VA_ARGS__); \ | ||
1649 | }) | ||
1650 | #endif | ||
1651 | |||
1652 | #if defined(arch_xchg_acquire) | ||
1653 | #define xchg_acquire(ptr, ...) \ | ||
1654 | ({ \ | ||
1655 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1656 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1657 | arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ | ||
1658 | }) | ||
1659 | #endif | ||
1660 | |||
1661 | #if defined(arch_xchg_release) | ||
1662 | #define xchg_release(ptr, ...) \ | ||
1663 | ({ \ | ||
1664 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1665 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1666 | arch_xchg_release(__ai_ptr, __VA_ARGS__); \ | ||
1667 | }) | ||
1668 | #endif | ||
1669 | |||
1670 | #if defined(arch_xchg_relaxed) | ||
1671 | #define xchg_relaxed(ptr, ...) \ | ||
1672 | ({ \ | ||
1673 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1674 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1675 | arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ | ||
1676 | }) | ||
1677 | #endif | ||
1678 | |||
1679 | #if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) | ||
1680 | #define cmpxchg(ptr, ...) \ | ||
1681 | ({ \ | ||
1682 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1683 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1684 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ | ||
1685 | }) | ||
1686 | #endif | ||
1687 | |||
1688 | #if defined(arch_cmpxchg_acquire) | ||
1689 | #define cmpxchg_acquire(ptr, ...) \ | ||
1690 | ({ \ | ||
1691 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1692 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1693 | arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ | ||
1694 | }) | ||
1695 | #endif | ||
1696 | |||
1697 | #if defined(arch_cmpxchg_release) | ||
1698 | #define cmpxchg_release(ptr, ...) \ | ||
1699 | ({ \ | ||
1700 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1701 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | ||
1702 | arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ | ||
1703 | }) | ||
409 | #endif | 1704 | #endif |
410 | 1705 | ||
411 | #define xchg(ptr, new) \ | 1706 | #if defined(arch_cmpxchg_relaxed) |
1707 | #define cmpxchg_relaxed(ptr, ...) \ | ||
412 | ({ \ | 1708 | ({ \ |
413 | typeof(ptr) __ai_ptr = (ptr); \ | 1709 | typeof(ptr) __ai_ptr = (ptr); \ |
414 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1710 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
415 | arch_xchg(__ai_ptr, (new)); \ | 1711 | arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ |
416 | }) | 1712 | }) |
1713 | #endif | ||
417 | 1714 | ||
418 | #define cmpxchg(ptr, old, new) \ | 1715 | #if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) |
1716 | #define cmpxchg64(ptr, ...) \ | ||
419 | ({ \ | 1717 | ({ \ |
420 | typeof(ptr) __ai_ptr = (ptr); \ | 1718 | typeof(ptr) __ai_ptr = (ptr); \ |
421 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1719 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
422 | arch_cmpxchg(__ai_ptr, (old), (new)); \ | 1720 | arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ |
423 | }) | 1721 | }) |
1722 | #endif | ||
424 | 1723 | ||
425 | #define sync_cmpxchg(ptr, old, new) \ | 1724 | #if defined(arch_cmpxchg64_acquire) |
1725 | #define cmpxchg64_acquire(ptr, ...) \ | ||
426 | ({ \ | 1726 | ({ \ |
427 | typeof(ptr) __ai_ptr = (ptr); \ | 1727 | typeof(ptr) __ai_ptr = (ptr); \ |
428 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1728 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
429 | arch_sync_cmpxchg(__ai_ptr, (old), (new)); \ | 1729 | arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ |
430 | }) | 1730 | }) |
1731 | #endif | ||
431 | 1732 | ||
432 | #define cmpxchg_local(ptr, old, new) \ | 1733 | #if defined(arch_cmpxchg64_release) |
1734 | #define cmpxchg64_release(ptr, ...) \ | ||
433 | ({ \ | 1735 | ({ \ |
434 | typeof(ptr) __ai_ptr = (ptr); \ | 1736 | typeof(ptr) __ai_ptr = (ptr); \ |
435 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1737 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
436 | arch_cmpxchg_local(__ai_ptr, (old), (new)); \ | 1738 | arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ |
437 | }) | 1739 | }) |
1740 | #endif | ||
438 | 1741 | ||
439 | #define cmpxchg64(ptr, old, new) \ | 1742 | #if defined(arch_cmpxchg64_relaxed) |
1743 | #define cmpxchg64_relaxed(ptr, ...) \ | ||
440 | ({ \ | 1744 | ({ \ |
441 | typeof(ptr) __ai_ptr = (ptr); \ | 1745 | typeof(ptr) __ai_ptr = (ptr); \ |
442 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1746 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
443 | arch_cmpxchg64(__ai_ptr, (old), (new)); \ | 1747 | arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ |
444 | }) | 1748 | }) |
1749 | #endif | ||
445 | 1750 | ||
446 | #define cmpxchg64_local(ptr, old, new) \ | 1751 | #define cmpxchg_local(ptr, ...) \ |
447 | ({ \ | 1752 | ({ \ |
448 | typeof(ptr) __ai_ptr = (ptr); \ | 1753 | typeof(ptr) __ai_ptr = (ptr); \ |
449 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ | 1754 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
450 | arch_cmpxchg64_local(__ai_ptr, (old), (new)); \ | 1755 | arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ |
451 | }) | 1756 | }) |
452 | 1757 | ||
453 | #define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ | 1758 | #define cmpxchg64_local(ptr, ...) \ |
454 | ({ \ | 1759 | ({ \ |
455 | typeof(p1) __ai_p1 = (p1); \ | 1760 | typeof(ptr) __ai_ptr = (ptr); \ |
456 | kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ | 1761 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
457 | arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ | 1762 | arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ |
458 | }) | 1763 | }) |
459 | 1764 | ||
460 | #define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ | 1765 | #define sync_cmpxchg(ptr, ...) \ |
461 | ({ \ | 1766 | ({ \ |
462 | typeof(p1) __ai_p1 = (p1); \ | 1767 | typeof(ptr) __ai_ptr = (ptr); \ |
463 | kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ | 1768 | kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ |
464 | arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ | 1769 | arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ |
1770 | }) | ||
1771 | |||
1772 | #define cmpxchg_double(ptr, ...) \ | ||
1773 | ({ \ | ||
1774 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1775 | kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ | ||
1776 | arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ | ||
1777 | }) | ||
1778 | |||
1779 | |||
1780 | #define cmpxchg_double_local(ptr, ...) \ | ||
1781 | ({ \ | ||
1782 | typeof(ptr) __ai_ptr = (ptr); \ | ||
1783 | kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ | ||
1784 | arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ | ||
465 | }) | 1785 | }) |
466 | 1786 | ||
467 | #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ | 1787 | #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ |
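For context, every instrumented wrapper above follows the same three-step shape: evaluate the pointer exactly once into __ai_ptr, let KASAN validate the access with kasan_check_write(), then forward to the corresponding arch_*() primitive. Callers are unaffected; a minimal usage sketch follows (the struct and function names are hypothetical, and <linux/atomic.h> is assumed to pull this header in on instrumented architectures):

	/* Hypothetical example - not part of this patch. */
	#include <linux/atomic.h>

	struct foo {
		int state;	/* 0 = idle, 1 = claimed */
	};

	static bool foo_try_claim(struct foo *f)
	{
		/*
		 * Expands to the wrapper above: __ai_ptr = &f->state,
		 * kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)), then
		 * arch_cmpxchg(__ai_ptr, 0, 1). cmpxchg() returns the value
		 * that was observed, so reading 0 means the claim succeeded.
		 */
		return cmpxchg(&f->state, 0, 1) == 0;
	}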
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 87d14476edc2..a833d385a70b 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h | |||
@@ -1,269 +1,1012 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | |||
3 | // Generated by scripts/atomic/gen-atomic-long.sh | ||
4 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
5 | |||
2 | #ifndef _ASM_GENERIC_ATOMIC_LONG_H | 6 | #ifndef _ASM_GENERIC_ATOMIC_LONG_H |
3 | #define _ASM_GENERIC_ATOMIC_LONG_H | 7 | #define _ASM_GENERIC_ATOMIC_LONG_H |
4 | /* | ||
5 | * Copyright (C) 2005 Silicon Graphics, Inc. | ||
6 | * Christoph Lameter | ||
7 | * | ||
8 | * Allows one to provide arch-independent atomic definitions without the need | ||
9 | * to edit all arch-specific atomic.h files. | ||
10 | */ | ||
11 | 8 | ||
12 | #include <asm/types.h> | 9 | #include <asm/types.h> |
13 | 10 | ||
14 | /* | 11 | #ifdef CONFIG_64BIT |
15 | * Support for atomic_long_t | 12 | typedef atomic64_t atomic_long_t; |
16 | * | 13 | #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) |
17 | * Casts for parameters are avoided for existing atomic functions in order to | 14 | #define atomic_long_cond_read_acquire atomic64_cond_read_acquire |
18 | * avoid issues with cast-as-lval under gcc 4.x and other limitations that the | 15 | #define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed |
19 | * macros of a platform may have. | 16 | #else |
20 | */ | 17 | typedef atomic_t atomic_long_t; |
18 | #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) | ||
19 | #define atomic_long_cond_read_acquire atomic_cond_read_acquire | ||
20 | #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed | ||
21 | #endif | ||
21 | 22 | ||
22 | #if BITS_PER_LONG == 64 | 23 | #ifdef CONFIG_64BIT |
23 | 24 | ||
24 | typedef atomic64_t atomic_long_t; | 25 | static inline long |
26 | atomic_long_read(const atomic_long_t *v) | ||
27 | { | ||
28 | return atomic64_read(v); | ||
29 | } | ||
25 | 30 | ||
26 | #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) | 31 | static inline long |
27 | #define ATOMIC_LONG_PFX(x) atomic64 ## x | 32 | atomic_long_read_acquire(const atomic_long_t *v) |
28 | #define ATOMIC_LONG_TYPE s64 | 33 | { |
34 | return atomic64_read_acquire(v); | ||
35 | } | ||
29 | 36 | ||
30 | #else | 37 | static inline void |
38 | atomic_long_set(atomic_long_t *v, long i) | ||
39 | { | ||
40 | atomic64_set(v, i); | ||
41 | } | ||
31 | 42 | ||
32 | typedef atomic_t atomic_long_t; | 43 | static inline void |
44 | atomic_long_set_release(atomic_long_t *v, long i) | ||
45 | { | ||
46 | atomic64_set_release(v, i); | ||
47 | } | ||
33 | 48 | ||
34 | #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) | 49 | static inline void |
35 | #define ATOMIC_LONG_PFX(x) atomic ## x | 50 | atomic_long_add(long i, atomic_long_t *v) |
36 | #define ATOMIC_LONG_TYPE int | 51 | { |
52 | atomic64_add(i, v); | ||
53 | } | ||
37 | 54 | ||
38 | #endif | 55 | static inline long |
56 | atomic_long_add_return(long i, atomic_long_t *v) | ||
57 | { | ||
58 | return atomic64_add_return(i, v); | ||
59 | } | ||
60 | |||
61 | static inline long | ||
62 | atomic_long_add_return_acquire(long i, atomic_long_t *v) | ||
63 | { | ||
64 | return atomic64_add_return_acquire(i, v); | ||
65 | } | ||
66 | |||
67 | static inline long | ||
68 | atomic_long_add_return_release(long i, atomic_long_t *v) | ||
69 | { | ||
70 | return atomic64_add_return_release(i, v); | ||
71 | } | ||
72 | |||
73 | static inline long | ||
74 | atomic_long_add_return_relaxed(long i, atomic_long_t *v) | ||
75 | { | ||
76 | return atomic64_add_return_relaxed(i, v); | ||
77 | } | ||
78 | |||
79 | static inline long | ||
80 | atomic_long_fetch_add(long i, atomic_long_t *v) | ||
81 | { | ||
82 | return atomic64_fetch_add(i, v); | ||
83 | } | ||
84 | |||
85 | static inline long | ||
86 | atomic_long_fetch_add_acquire(long i, atomic_long_t *v) | ||
87 | { | ||
88 | return atomic64_fetch_add_acquire(i, v); | ||
89 | } | ||
90 | |||
91 | static inline long | ||
92 | atomic_long_fetch_add_release(long i, atomic_long_t *v) | ||
93 | { | ||
94 | return atomic64_fetch_add_release(i, v); | ||
95 | } | ||
96 | |||
97 | static inline long | ||
98 | atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) | ||
99 | { | ||
100 | return atomic64_fetch_add_relaxed(i, v); | ||
101 | } | ||
102 | |||
103 | static inline void | ||
104 | atomic_long_sub(long i, atomic_long_t *v) | ||
105 | { | ||
106 | atomic64_sub(i, v); | ||
107 | } | ||
108 | |||
109 | static inline long | ||
110 | atomic_long_sub_return(long i, atomic_long_t *v) | ||
111 | { | ||
112 | return atomic64_sub_return(i, v); | ||
113 | } | ||
114 | |||
115 | static inline long | ||
116 | atomic_long_sub_return_acquire(long i, atomic_long_t *v) | ||
117 | { | ||
118 | return atomic64_sub_return_acquire(i, v); | ||
119 | } | ||
120 | |||
121 | static inline long | ||
122 | atomic_long_sub_return_release(long i, atomic_long_t *v) | ||
123 | { | ||
124 | return atomic64_sub_return_release(i, v); | ||
125 | } | ||
126 | |||
127 | static inline long | ||
128 | atomic_long_sub_return_relaxed(long i, atomic_long_t *v) | ||
129 | { | ||
130 | return atomic64_sub_return_relaxed(i, v); | ||
131 | } | ||
132 | |||
133 | static inline long | ||
134 | atomic_long_fetch_sub(long i, atomic_long_t *v) | ||
135 | { | ||
136 | return atomic64_fetch_sub(i, v); | ||
137 | } | ||
138 | |||
139 | static inline long | ||
140 | atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) | ||
141 | { | ||
142 | return atomic64_fetch_sub_acquire(i, v); | ||
143 | } | ||
144 | |||
145 | static inline long | ||
146 | atomic_long_fetch_sub_release(long i, atomic_long_t *v) | ||
147 | { | ||
148 | return atomic64_fetch_sub_release(i, v); | ||
149 | } | ||
150 | |||
151 | static inline long | ||
152 | atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) | ||
153 | { | ||
154 | return atomic64_fetch_sub_relaxed(i, v); | ||
155 | } | ||
156 | |||
157 | static inline void | ||
158 | atomic_long_inc(atomic_long_t *v) | ||
159 | { | ||
160 | atomic64_inc(v); | ||
161 | } | ||
162 | |||
163 | static inline long | ||
164 | atomic_long_inc_return(atomic_long_t *v) | ||
165 | { | ||
166 | return atomic64_inc_return(v); | ||
167 | } | ||
168 | |||
169 | static inline long | ||
170 | atomic_long_inc_return_acquire(atomic_long_t *v) | ||
171 | { | ||
172 | return atomic64_inc_return_acquire(v); | ||
173 | } | ||
174 | |||
175 | static inline long | ||
176 | atomic_long_inc_return_release(atomic_long_t *v) | ||
177 | { | ||
178 | return atomic64_inc_return_release(v); | ||
179 | } | ||
180 | |||
181 | static inline long | ||
182 | atomic_long_inc_return_relaxed(atomic_long_t *v) | ||
183 | { | ||
184 | return atomic64_inc_return_relaxed(v); | ||
185 | } | ||
186 | |||
187 | static inline long | ||
188 | atomic_long_fetch_inc(atomic_long_t *v) | ||
189 | { | ||
190 | return atomic64_fetch_inc(v); | ||
191 | } | ||
192 | |||
193 | static inline long | ||
194 | atomic_long_fetch_inc_acquire(atomic_long_t *v) | ||
195 | { | ||
196 | return atomic64_fetch_inc_acquire(v); | ||
197 | } | ||
198 | |||
199 | static inline long | ||
200 | atomic_long_fetch_inc_release(atomic_long_t *v) | ||
201 | { | ||
202 | return atomic64_fetch_inc_release(v); | ||
203 | } | ||
204 | |||
205 | static inline long | ||
206 | atomic_long_fetch_inc_relaxed(atomic_long_t *v) | ||
207 | { | ||
208 | return atomic64_fetch_inc_relaxed(v); | ||
209 | } | ||
210 | |||
211 | static inline void | ||
212 | atomic_long_dec(atomic_long_t *v) | ||
213 | { | ||
214 | atomic64_dec(v); | ||
215 | } | ||
216 | |||
217 | static inline long | ||
218 | atomic_long_dec_return(atomic_long_t *v) | ||
219 | { | ||
220 | return atomic64_dec_return(v); | ||
221 | } | ||
222 | |||
223 | static inline long | ||
224 | atomic_long_dec_return_acquire(atomic_long_t *v) | ||
225 | { | ||
226 | return atomic64_dec_return_acquire(v); | ||
227 | } | ||
228 | |||
229 | static inline long | ||
230 | atomic_long_dec_return_release(atomic_long_t *v) | ||
231 | { | ||
232 | return atomic64_dec_return_release(v); | ||
233 | } | ||
234 | |||
235 | static inline long | ||
236 | atomic_long_dec_return_relaxed(atomic_long_t *v) | ||
237 | { | ||
238 | return atomic64_dec_return_relaxed(v); | ||
239 | } | ||
240 | |||
241 | static inline long | ||
242 | atomic_long_fetch_dec(atomic_long_t *v) | ||
243 | { | ||
244 | return atomic64_fetch_dec(v); | ||
245 | } | ||
246 | |||
247 | static inline long | ||
248 | atomic_long_fetch_dec_acquire(atomic_long_t *v) | ||
249 | { | ||
250 | return atomic64_fetch_dec_acquire(v); | ||
251 | } | ||
252 | |||
253 | static inline long | ||
254 | atomic_long_fetch_dec_release(atomic_long_t *v) | ||
255 | { | ||
256 | return atomic64_fetch_dec_release(v); | ||
257 | } | ||
258 | |||
259 | static inline long | ||
260 | atomic_long_fetch_dec_relaxed(atomic_long_t *v) | ||
261 | { | ||
262 | return atomic64_fetch_dec_relaxed(v); | ||
263 | } | ||
264 | |||
265 | static inline void | ||
266 | atomic_long_and(long i, atomic_long_t *v) | ||
267 | { | ||
268 | atomic64_and(i, v); | ||
269 | } | ||
270 | |||
271 | static inline long | ||
272 | atomic_long_fetch_and(long i, atomic_long_t *v) | ||
273 | { | ||
274 | return atomic64_fetch_and(i, v); | ||
275 | } | ||
276 | |||
277 | static inline long | ||
278 | atomic_long_fetch_and_acquire(long i, atomic_long_t *v) | ||
279 | { | ||
280 | return atomic64_fetch_and_acquire(i, v); | ||
281 | } | ||
282 | |||
283 | static inline long | ||
284 | atomic_long_fetch_and_release(long i, atomic_long_t *v) | ||
285 | { | ||
286 | return atomic64_fetch_and_release(i, v); | ||
287 | } | ||
288 | |||
289 | static inline long | ||
290 | atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) | ||
291 | { | ||
292 | return atomic64_fetch_and_relaxed(i, v); | ||
293 | } | ||
294 | |||
295 | static inline void | ||
296 | atomic_long_andnot(long i, atomic_long_t *v) | ||
297 | { | ||
298 | atomic64_andnot(i, v); | ||
299 | } | ||
300 | |||
301 | static inline long | ||
302 | atomic_long_fetch_andnot(long i, atomic_long_t *v) | ||
303 | { | ||
304 | return atomic64_fetch_andnot(i, v); | ||
305 | } | ||
306 | |||
307 | static inline long | ||
308 | atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) | ||
309 | { | ||
310 | return atomic64_fetch_andnot_acquire(i, v); | ||
311 | } | ||
312 | |||
313 | static inline long | ||
314 | atomic_long_fetch_andnot_release(long i, atomic_long_t *v) | ||
315 | { | ||
316 | return atomic64_fetch_andnot_release(i, v); | ||
317 | } | ||
318 | |||
319 | static inline long | ||
320 | atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) | ||
321 | { | ||
322 | return atomic64_fetch_andnot_relaxed(i, v); | ||
323 | } | ||
324 | |||
325 | static inline void | ||
326 | atomic_long_or(long i, atomic_long_t *v) | ||
327 | { | ||
328 | atomic64_or(i, v); | ||
329 | } | ||
330 | |||
331 | static inline long | ||
332 | atomic_long_fetch_or(long i, atomic_long_t *v) | ||
333 | { | ||
334 | return atomic64_fetch_or(i, v); | ||
335 | } | ||
336 | |||
337 | static inline long | ||
338 | atomic_long_fetch_or_acquire(long i, atomic_long_t *v) | ||
339 | { | ||
340 | return atomic64_fetch_or_acquire(i, v); | ||
341 | } | ||
342 | |||
343 | static inline long | ||
344 | atomic_long_fetch_or_release(long i, atomic_long_t *v) | ||
345 | { | ||
346 | return atomic64_fetch_or_release(i, v); | ||
347 | } | ||
348 | |||
349 | static inline long | ||
350 | atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) | ||
351 | { | ||
352 | return atomic64_fetch_or_relaxed(i, v); | ||
353 | } | ||
354 | |||
355 | static inline void | ||
356 | atomic_long_xor(long i, atomic_long_t *v) | ||
357 | { | ||
358 | atomic64_xor(i, v); | ||
359 | } | ||
360 | |||
361 | static inline long | ||
362 | atomic_long_fetch_xor(long i, atomic_long_t *v) | ||
363 | { | ||
364 | return atomic64_fetch_xor(i, v); | ||
365 | } | ||
366 | |||
367 | static inline long | ||
368 | atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) | ||
369 | { | ||
370 | return atomic64_fetch_xor_acquire(i, v); | ||
371 | } | ||
372 | |||
373 | static inline long | ||
374 | atomic_long_fetch_xor_release(long i, atomic_long_t *v) | ||
375 | { | ||
376 | return atomic64_fetch_xor_release(i, v); | ||
377 | } | ||
378 | |||
379 | static inline long | ||
380 | atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) | ||
381 | { | ||
382 | return atomic64_fetch_xor_relaxed(i, v); | ||
383 | } | ||
384 | |||
385 | static inline long | ||
386 | atomic_long_xchg(atomic_long_t *v, long i) | ||
387 | { | ||
388 | return atomic64_xchg(v, i); | ||
389 | } | ||
390 | |||
391 | static inline long | ||
392 | atomic_long_xchg_acquire(atomic_long_t *v, long i) | ||
393 | { | ||
394 | return atomic64_xchg_acquire(v, i); | ||
395 | } | ||
396 | |||
397 | static inline long | ||
398 | atomic_long_xchg_release(atomic_long_t *v, long i) | ||
399 | { | ||
400 | return atomic64_xchg_release(v, i); | ||
401 | } | ||
402 | |||
403 | static inline long | ||
404 | atomic_long_xchg_relaxed(atomic_long_t *v, long i) | ||
405 | { | ||
406 | return atomic64_xchg_relaxed(v, i); | ||
407 | } | ||
408 | |||
409 | static inline long | ||
410 | atomic_long_cmpxchg(atomic_long_t *v, long old, long new) | ||
411 | { | ||
412 | return atomic64_cmpxchg(v, old, new); | ||
413 | } | ||
414 | |||
415 | static inline long | ||
416 | atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) | ||
417 | { | ||
418 | return atomic64_cmpxchg_acquire(v, old, new); | ||
419 | } | ||
420 | |||
421 | static inline long | ||
422 | atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) | ||
423 | { | ||
424 | return atomic64_cmpxchg_release(v, old, new); | ||
425 | } | ||
426 | |||
427 | static inline long | ||
428 | atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) | ||
429 | { | ||
430 | return atomic64_cmpxchg_relaxed(v, old, new); | ||
431 | } | ||
432 | |||
433 | static inline bool | ||
434 | atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) | ||
435 | { | ||
436 | return atomic64_try_cmpxchg(v, (s64 *)old, new); | ||
437 | } | ||
438 | |||
439 | static inline bool | ||
440 | atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) | ||
441 | { | ||
442 | return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); | ||
443 | } | ||
444 | |||
445 | static inline bool | ||
446 | atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) | ||
447 | { | ||
448 | return atomic64_try_cmpxchg_release(v, (s64 *)old, new); | ||
449 | } | ||
450 | |||
451 | static inline bool | ||
452 | atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) | ||
453 | { | ||
454 | return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); | ||
455 | } | ||
456 | |||
457 | static inline bool | ||
458 | atomic_long_sub_and_test(long i, atomic_long_t *v) | ||
459 | { | ||
460 | return atomic64_sub_and_test(i, v); | ||
461 | } | ||
462 | |||
463 | static inline bool | ||
464 | atomic_long_dec_and_test(atomic_long_t *v) | ||
465 | { | ||
466 | return atomic64_dec_and_test(v); | ||
467 | } | ||
468 | |||
469 | static inline bool | ||
470 | atomic_long_inc_and_test(atomic_long_t *v) | ||
471 | { | ||
472 | return atomic64_inc_and_test(v); | ||
473 | } | ||
474 | |||
475 | static inline bool | ||
476 | atomic_long_add_negative(long i, atomic_long_t *v) | ||
477 | { | ||
478 | return atomic64_add_negative(i, v); | ||
479 | } | ||
480 | |||
481 | static inline long | ||
482 | atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) | ||
483 | { | ||
484 | return atomic64_fetch_add_unless(v, a, u); | ||
485 | } | ||
486 | |||
487 | static inline bool | ||
488 | atomic_long_add_unless(atomic_long_t *v, long a, long u) | ||
489 | { | ||
490 | return atomic64_add_unless(v, a, u); | ||
491 | } | ||
492 | |||
493 | static inline bool | ||
494 | atomic_long_inc_not_zero(atomic_long_t *v) | ||
495 | { | ||
496 | return atomic64_inc_not_zero(v); | ||
497 | } | ||
498 | |||
499 | static inline bool | ||
500 | atomic_long_inc_unless_negative(atomic_long_t *v) | ||
501 | { | ||
502 | return atomic64_inc_unless_negative(v); | ||
503 | } | ||
504 | |||
505 | static inline bool | ||
506 | atomic_long_dec_unless_positive(atomic_long_t *v) | ||
507 | { | ||
508 | return atomic64_dec_unless_positive(v); | ||
509 | } | ||
510 | |||
511 | static inline long | ||
512 | atomic_long_dec_if_positive(atomic_long_t *v) | ||
513 | { | ||
514 | return atomic64_dec_if_positive(v); | ||
515 | } | ||
516 | |||
517 | #else /* CONFIG_64BIT */ | ||
518 | |||
519 | static inline long | ||
520 | atomic_long_read(const atomic_long_t *v) | ||
521 | { | ||
522 | return atomic_read(v); | ||
523 | } | ||
524 | |||
525 | static inline long | ||
526 | atomic_long_read_acquire(const atomic_long_t *v) | ||
527 | { | ||
528 | return atomic_read_acquire(v); | ||
529 | } | ||
530 | |||
531 | static inline void | ||
532 | atomic_long_set(atomic_long_t *v, long i) | ||
533 | { | ||
534 | atomic_set(v, i); | ||
535 | } | ||
536 | |||
537 | static inline void | ||
538 | atomic_long_set_release(atomic_long_t *v, long i) | ||
539 | { | ||
540 | atomic_set_release(v, i); | ||
541 | } | ||
542 | |||
543 | static inline void | ||
544 | atomic_long_add(long i, atomic_long_t *v) | ||
545 | { | ||
546 | atomic_add(i, v); | ||
547 | } | ||
548 | |||
549 | static inline long | ||
550 | atomic_long_add_return(long i, atomic_long_t *v) | ||
551 | { | ||
552 | return atomic_add_return(i, v); | ||
553 | } | ||
554 | |||
555 | static inline long | ||
556 | atomic_long_add_return_acquire(long i, atomic_long_t *v) | ||
557 | { | ||
558 | return atomic_add_return_acquire(i, v); | ||
559 | } | ||
560 | |||
561 | static inline long | ||
562 | atomic_long_add_return_release(long i, atomic_long_t *v) | ||
563 | { | ||
564 | return atomic_add_return_release(i, v); | ||
565 | } | ||
566 | |||
567 | static inline long | ||
568 | atomic_long_add_return_relaxed(long i, atomic_long_t *v) | ||
569 | { | ||
570 | return atomic_add_return_relaxed(i, v); | ||
571 | } | ||
572 | |||
573 | static inline long | ||
574 | atomic_long_fetch_add(long i, atomic_long_t *v) | ||
575 | { | ||
576 | return atomic_fetch_add(i, v); | ||
577 | } | ||
578 | |||
579 | static inline long | ||
580 | atomic_long_fetch_add_acquire(long i, atomic_long_t *v) | ||
581 | { | ||
582 | return atomic_fetch_add_acquire(i, v); | ||
583 | } | ||
584 | |||
585 | static inline long | ||
586 | atomic_long_fetch_add_release(long i, atomic_long_t *v) | ||
587 | { | ||
588 | return atomic_fetch_add_release(i, v); | ||
589 | } | ||
590 | |||
591 | static inline long | ||
592 | atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) | ||
593 | { | ||
594 | return atomic_fetch_add_relaxed(i, v); | ||
595 | } | ||
596 | |||
597 | static inline void | ||
598 | atomic_long_sub(long i, atomic_long_t *v) | ||
599 | { | ||
600 | atomic_sub(i, v); | ||
601 | } | ||
602 | |||
603 | static inline long | ||
604 | atomic_long_sub_return(long i, atomic_long_t *v) | ||
605 | { | ||
606 | return atomic_sub_return(i, v); | ||
607 | } | ||
608 | |||
609 | static inline long | ||
610 | atomic_long_sub_return_acquire(long i, atomic_long_t *v) | ||
611 | { | ||
612 | return atomic_sub_return_acquire(i, v); | ||
613 | } | ||
614 | |||
615 | static inline long | ||
616 | atomic_long_sub_return_release(long i, atomic_long_t *v) | ||
617 | { | ||
618 | return atomic_sub_return_release(i, v); | ||
619 | } | ||
620 | |||
621 | static inline long | ||
622 | atomic_long_sub_return_relaxed(long i, atomic_long_t *v) | ||
623 | { | ||
624 | return atomic_sub_return_relaxed(i, v); | ||
625 | } | ||
626 | |||
627 | static inline long | ||
628 | atomic_long_fetch_sub(long i, atomic_long_t *v) | ||
629 | { | ||
630 | return atomic_fetch_sub(i, v); | ||
631 | } | ||
632 | |||
633 | static inline long | ||
634 | atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) | ||
635 | { | ||
636 | return atomic_fetch_sub_acquire(i, v); | ||
637 | } | ||
638 | |||
639 | static inline long | ||
640 | atomic_long_fetch_sub_release(long i, atomic_long_t *v) | ||
641 | { | ||
642 | return atomic_fetch_sub_release(i, v); | ||
643 | } | ||
644 | |||
645 | static inline long | ||
646 | atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) | ||
647 | { | ||
648 | return atomic_fetch_sub_relaxed(i, v); | ||
649 | } | ||
650 | |||
651 | static inline void | ||
652 | atomic_long_inc(atomic_long_t *v) | ||
653 | { | ||
654 | atomic_inc(v); | ||
655 | } | ||
656 | |||
657 | static inline long | ||
658 | atomic_long_inc_return(atomic_long_t *v) | ||
659 | { | ||
660 | return atomic_inc_return(v); | ||
661 | } | ||
662 | |||
663 | static inline long | ||
664 | atomic_long_inc_return_acquire(atomic_long_t *v) | ||
665 | { | ||
666 | return atomic_inc_return_acquire(v); | ||
667 | } | ||
668 | |||
669 | static inline long | ||
670 | atomic_long_inc_return_release(atomic_long_t *v) | ||
671 | { | ||
672 | return atomic_inc_return_release(v); | ||
673 | } | ||
674 | |||
675 | static inline long | ||
676 | atomic_long_inc_return_relaxed(atomic_long_t *v) | ||
677 | { | ||
678 | return atomic_inc_return_relaxed(v); | ||
679 | } | ||
680 | |||
681 | static inline long | ||
682 | atomic_long_fetch_inc(atomic_long_t *v) | ||
683 | { | ||
684 | return atomic_fetch_inc(v); | ||
685 | } | ||
686 | |||
687 | static inline long | ||
688 | atomic_long_fetch_inc_acquire(atomic_long_t *v) | ||
689 | { | ||
690 | return atomic_fetch_inc_acquire(v); | ||
691 | } | ||
692 | |||
693 | static inline long | ||
694 | atomic_long_fetch_inc_release(atomic_long_t *v) | ||
695 | { | ||
696 | return atomic_fetch_inc_release(v); | ||
697 | } | ||
698 | |||
699 | static inline long | ||
700 | atomic_long_fetch_inc_relaxed(atomic_long_t *v) | ||
701 | { | ||
702 | return atomic_fetch_inc_relaxed(v); | ||
703 | } | ||
704 | |||
705 | static inline void | ||
706 | atomic_long_dec(atomic_long_t *v) | ||
707 | { | ||
708 | atomic_dec(v); | ||
709 | } | ||
710 | |||
711 | static inline long | ||
712 | atomic_long_dec_return(atomic_long_t *v) | ||
713 | { | ||
714 | return atomic_dec_return(v); | ||
715 | } | ||
716 | |||
717 | static inline long | ||
718 | atomic_long_dec_return_acquire(atomic_long_t *v) | ||
719 | { | ||
720 | return atomic_dec_return_acquire(v); | ||
721 | } | ||
722 | |||
723 | static inline long | ||
724 | atomic_long_dec_return_release(atomic_long_t *v) | ||
725 | { | ||
726 | return atomic_dec_return_release(v); | ||
727 | } | ||
728 | |||
729 | static inline long | ||
730 | atomic_long_dec_return_relaxed(atomic_long_t *v) | ||
731 | { | ||
732 | return atomic_dec_return_relaxed(v); | ||
733 | } | ||
734 | |||
735 | static inline long | ||
736 | atomic_long_fetch_dec(atomic_long_t *v) | ||
737 | { | ||
738 | return atomic_fetch_dec(v); | ||
739 | } | ||
740 | |||
741 | static inline long | ||
742 | atomic_long_fetch_dec_acquire(atomic_long_t *v) | ||
743 | { | ||
744 | return atomic_fetch_dec_acquire(v); | ||
745 | } | ||
746 | |||
747 | static inline long | ||
748 | atomic_long_fetch_dec_release(atomic_long_t *v) | ||
749 | { | ||
750 | return atomic_fetch_dec_release(v); | ||
751 | } | ||
752 | |||
753 | static inline long | ||
754 | atomic_long_fetch_dec_relaxed(atomic_long_t *v) | ||
755 | { | ||
756 | return atomic_fetch_dec_relaxed(v); | ||
757 | } | ||
758 | |||
759 | static inline void | ||
760 | atomic_long_and(long i, atomic_long_t *v) | ||
761 | { | ||
762 | atomic_and(i, v); | ||
763 | } | ||
764 | |||
765 | static inline long | ||
766 | atomic_long_fetch_and(long i, atomic_long_t *v) | ||
767 | { | ||
768 | return atomic_fetch_and(i, v); | ||
769 | } | ||
770 | |||
771 | static inline long | ||
772 | atomic_long_fetch_and_acquire(long i, atomic_long_t *v) | ||
773 | { | ||
774 | return atomic_fetch_and_acquire(i, v); | ||
775 | } | ||
776 | |||
777 | static inline long | ||
778 | atomic_long_fetch_and_release(long i, atomic_long_t *v) | ||
779 | { | ||
780 | return atomic_fetch_and_release(i, v); | ||
781 | } | ||
782 | |||
783 | static inline long | ||
784 | atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) | ||
785 | { | ||
786 | return atomic_fetch_and_relaxed(i, v); | ||
787 | } | ||
788 | |||
789 | static inline void | ||
790 | atomic_long_andnot(long i, atomic_long_t *v) | ||
791 | { | ||
792 | atomic_andnot(i, v); | ||
793 | } | ||
794 | |||
795 | static inline long | ||
796 | atomic_long_fetch_andnot(long i, atomic_long_t *v) | ||
797 | { | ||
798 | return atomic_fetch_andnot(i, v); | ||
799 | } | ||
800 | |||
801 | static inline long | ||
802 | atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) | ||
803 | { | ||
804 | return atomic_fetch_andnot_acquire(i, v); | ||
805 | } | ||
806 | |||
807 | static inline long | ||
808 | atomic_long_fetch_andnot_release(long i, atomic_long_t *v) | ||
809 | { | ||
810 | return atomic_fetch_andnot_release(i, v); | ||
811 | } | ||
812 | |||
813 | static inline long | ||
814 | atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) | ||
815 | { | ||
816 | return atomic_fetch_andnot_relaxed(i, v); | ||
817 | } | ||
39 | 818 | ||
40 | #define ATOMIC_LONG_READ_OP(mo) \ | 819 | static inline void |
41 | static inline long atomic_long_read##mo(const atomic_long_t *l) \ | 820 | atomic_long_or(long i, atomic_long_t *v) |
42 | { \ | ||
43 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
44 | \ | ||
45 | return (long)ATOMIC_LONG_PFX(_read##mo)(v); \ | ||
46 | } | ||
47 | ATOMIC_LONG_READ_OP() | ||
48 | ATOMIC_LONG_READ_OP(_acquire) | ||
49 | |||
50 | #undef ATOMIC_LONG_READ_OP | ||
51 | |||
52 | #define ATOMIC_LONG_SET_OP(mo) \ | ||
53 | static inline void atomic_long_set##mo(atomic_long_t *l, long i) \ | ||
54 | { \ | ||
55 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
56 | \ | ||
57 | ATOMIC_LONG_PFX(_set##mo)(v, i); \ | ||
58 | } | ||
59 | ATOMIC_LONG_SET_OP() | ||
60 | ATOMIC_LONG_SET_OP(_release) | ||
61 | |||
62 | #undef ATOMIC_LONG_SET_OP | ||
63 | |||
64 | #define ATOMIC_LONG_ADD_SUB_OP(op, mo) \ | ||
65 | static inline long \ | ||
66 | atomic_long_##op##_return##mo(long i, atomic_long_t *l) \ | ||
67 | { \ | ||
68 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
69 | \ | ||
70 | return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \ | ||
71 | } | ||
72 | ATOMIC_LONG_ADD_SUB_OP(add,) | ||
73 | ATOMIC_LONG_ADD_SUB_OP(add, _relaxed) | ||
74 | ATOMIC_LONG_ADD_SUB_OP(add, _acquire) | ||
75 | ATOMIC_LONG_ADD_SUB_OP(add, _release) | ||
76 | ATOMIC_LONG_ADD_SUB_OP(sub,) | ||
77 | ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed) | ||
78 | ATOMIC_LONG_ADD_SUB_OP(sub, _acquire) | ||
79 | ATOMIC_LONG_ADD_SUB_OP(sub, _release) | ||
80 | |||
81 | #undef ATOMIC_LONG_ADD_SUB_OP | ||
82 | |||
83 | #define atomic_long_cmpxchg_relaxed(l, old, new) \ | ||
84 | (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
85 | (old), (new))) | ||
86 | #define atomic_long_cmpxchg_acquire(l, old, new) \ | ||
87 | (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
88 | (old), (new))) | ||
89 | #define atomic_long_cmpxchg_release(l, old, new) \ | ||
90 | (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
91 | (old), (new))) | ||
92 | #define atomic_long_cmpxchg(l, old, new) \ | ||
93 | (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) | ||
94 | |||
95 | |||
96 | #define atomic_long_try_cmpxchg_relaxed(l, old, new) \ | ||
97 | (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
98 | (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) | ||
99 | #define atomic_long_try_cmpxchg_acquire(l, old, new) \ | ||
100 | (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
101 | (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) | ||
102 | #define atomic_long_try_cmpxchg_release(l, old, new) \ | ||
103 | (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
104 | (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) | ||
105 | #define atomic_long_try_cmpxchg(l, old, new) \ | ||
106 | (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \ | ||
107 | (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) | ||
108 | |||
109 | |||
110 | #define atomic_long_xchg_relaxed(v, new) \ | ||
111 | (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) | ||
112 | #define atomic_long_xchg_acquire(v, new) \ | ||
113 | (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new))) | ||
114 | #define atomic_long_xchg_release(v, new) \ | ||
115 | (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new))) | ||
116 | #define atomic_long_xchg(v, new) \ | ||
117 | (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) | ||
118 | |||
119 | static __always_inline void atomic_long_inc(atomic_long_t *l) | ||
120 | { | ||
121 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | ||
122 | |||
123 | ATOMIC_LONG_PFX(_inc)(v); | ||
124 | } | ||
125 | |||
126 | static __always_inline void atomic_long_dec(atomic_long_t *l) | ||
127 | { | 821 | { |
128 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 822 | atomic_or(i, v); |
823 | } | ||
129 | 824 | ||
130 | ATOMIC_LONG_PFX(_dec)(v); | 825 | static inline long |
826 | atomic_long_fetch_or(long i, atomic_long_t *v) | ||
827 | { | ||
828 | return atomic_fetch_or(i, v); | ||
131 | } | 829 | } |
132 | 830 | ||
133 | #define ATOMIC_LONG_FETCH_OP(op, mo) \ | 831 | static inline long |
134 | static inline long \ | 832 | atomic_long_fetch_or_acquire(long i, atomic_long_t *v) |
135 | atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ | 833 | { |
136 | { \ | 834 | return atomic_fetch_or_acquire(i, v); |
137 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
138 | \ | ||
139 | return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ | ||
140 | } | 835 | } |
141 | 836 | ||
142 | ATOMIC_LONG_FETCH_OP(add, ) | 837 | static inline long |
143 | ATOMIC_LONG_FETCH_OP(add, _relaxed) | 838 | atomic_long_fetch_or_release(long i, atomic_long_t *v) |
144 | ATOMIC_LONG_FETCH_OP(add, _acquire) | 839 | { |
145 | ATOMIC_LONG_FETCH_OP(add, _release) | 840 | return atomic_fetch_or_release(i, v); |
146 | ATOMIC_LONG_FETCH_OP(sub, ) | 841 | } |
147 | ATOMIC_LONG_FETCH_OP(sub, _relaxed) | ||
148 | ATOMIC_LONG_FETCH_OP(sub, _acquire) | ||
149 | ATOMIC_LONG_FETCH_OP(sub, _release) | ||
150 | ATOMIC_LONG_FETCH_OP(and, ) | ||
151 | ATOMIC_LONG_FETCH_OP(and, _relaxed) | ||
152 | ATOMIC_LONG_FETCH_OP(and, _acquire) | ||
153 | ATOMIC_LONG_FETCH_OP(and, _release) | ||
154 | ATOMIC_LONG_FETCH_OP(andnot, ) | ||
155 | ATOMIC_LONG_FETCH_OP(andnot, _relaxed) | ||
156 | ATOMIC_LONG_FETCH_OP(andnot, _acquire) | ||
157 | ATOMIC_LONG_FETCH_OP(andnot, _release) | ||
158 | ATOMIC_LONG_FETCH_OP(or, ) | ||
159 | ATOMIC_LONG_FETCH_OP(or, _relaxed) | ||
160 | ATOMIC_LONG_FETCH_OP(or, _acquire) | ||
161 | ATOMIC_LONG_FETCH_OP(or, _release) | ||
162 | ATOMIC_LONG_FETCH_OP(xor, ) | ||
163 | ATOMIC_LONG_FETCH_OP(xor, _relaxed) | ||
164 | ATOMIC_LONG_FETCH_OP(xor, _acquire) | ||
165 | ATOMIC_LONG_FETCH_OP(xor, _release) | ||
166 | 842 | ||
167 | #undef ATOMIC_LONG_FETCH_OP | 843 | static inline long |
844 | atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) | ||
845 | { | ||
846 | return atomic_fetch_or_relaxed(i, v); | ||
847 | } | ||
168 | 848 | ||
169 | #define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \ | 849 | static inline void |
170 | static inline long \ | 850 | atomic_long_xor(long i, atomic_long_t *v) |
171 | atomic_long_fetch_##op##mo(atomic_long_t *l) \ | 851 | { |
172 | { \ | 852 | atomic_xor(i, v); |
173 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
174 | \ | ||
175 | return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \ | ||
176 | } | 853 | } |
177 | 854 | ||
178 | ATOMIC_LONG_FETCH_INC_DEC_OP(inc,) | 855 | static inline long |
179 | ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed) | 856 | atomic_long_fetch_xor(long i, atomic_long_t *v) |
180 | ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire) | 857 | { |
181 | ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release) | 858 | return atomic_fetch_xor(i, v); |
182 | ATOMIC_LONG_FETCH_INC_DEC_OP(dec,) | 859 | } |
183 | ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed) | ||
184 | ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire) | ||
185 | ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release) | ||
186 | 860 | ||
187 | #undef ATOMIC_LONG_FETCH_INC_DEC_OP | 861 | static inline long |
862 | atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) | ||
863 | { | ||
864 | return atomic_fetch_xor_acquire(i, v); | ||
865 | } | ||
188 | 866 | ||
189 | #define ATOMIC_LONG_OP(op) \ | 867 | static inline long |
190 | static __always_inline void \ | 868 | atomic_long_fetch_xor_release(long i, atomic_long_t *v) |
191 | atomic_long_##op(long i, atomic_long_t *l) \ | 869 | { |
192 | { \ | 870 | return atomic_fetch_xor_release(i, v); |
193 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
194 | \ | ||
195 | ATOMIC_LONG_PFX(_##op)(i, v); \ | ||
196 | } | 871 | } |
197 | 872 | ||
198 | ATOMIC_LONG_OP(add) | 873 | static inline long |
199 | ATOMIC_LONG_OP(sub) | 874 | atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) |
200 | ATOMIC_LONG_OP(and) | 875 | { |
201 | ATOMIC_LONG_OP(andnot) | 876 | return atomic_fetch_xor_relaxed(i, v); |
202 | ATOMIC_LONG_OP(or) | 877 | } |
203 | ATOMIC_LONG_OP(xor) | ||
204 | 878 | ||
205 | #undef ATOMIC_LONG_OP | 879 | static inline long |
880 | atomic_long_xchg(atomic_long_t *v, long i) | ||
881 | { | ||
882 | return atomic_xchg(v, i); | ||
883 | } | ||
206 | 884 | ||
207 | static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) | 885 | static inline long |
886 | atomic_long_xchg_acquire(atomic_long_t *v, long i) | ||
208 | { | 887 | { |
209 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 888 | return atomic_xchg_acquire(v, i); |
889 | } | ||
210 | 890 | ||
211 | return ATOMIC_LONG_PFX(_sub_and_test)(i, v); | 891 | static inline long |
892 | atomic_long_xchg_release(atomic_long_t *v, long i) | ||
893 | { | ||
894 | return atomic_xchg_release(v, i); | ||
212 | } | 895 | } |
213 | 896 | ||
214 | static inline int atomic_long_dec_and_test(atomic_long_t *l) | 897 | static inline long |
898 | atomic_long_xchg_relaxed(atomic_long_t *v, long i) | ||
215 | { | 899 | { |
216 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 900 | return atomic_xchg_relaxed(v, i); |
901 | } | ||
217 | 902 | ||
218 | return ATOMIC_LONG_PFX(_dec_and_test)(v); | 903 | static inline long |
904 | atomic_long_cmpxchg(atomic_long_t *v, long old, long new) | ||
905 | { | ||
906 | return atomic_cmpxchg(v, old, new); | ||
219 | } | 907 | } |
220 | 908 | ||
221 | static inline int atomic_long_inc_and_test(atomic_long_t *l) | 909 | static inline long |
910 | atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) | ||
222 | { | 911 | { |
223 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 912 | return atomic_cmpxchg_acquire(v, old, new); |
913 | } | ||
224 | 914 | ||
225 | return ATOMIC_LONG_PFX(_inc_and_test)(v); | 915 | static inline long |
916 | atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) | ||
917 | { | ||
918 | return atomic_cmpxchg_release(v, old, new); | ||
226 | } | 919 | } |
227 | 920 | ||
228 | static inline int atomic_long_add_negative(long i, atomic_long_t *l) | 921 | static inline long |
922 | atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) | ||
229 | { | 923 | { |
230 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 924 | return atomic_cmpxchg_relaxed(v, old, new); |
925 | } | ||
231 | 926 | ||
232 | return ATOMIC_LONG_PFX(_add_negative)(i, v); | 927 | static inline bool |
928 | atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) | ||
929 | { | ||
930 | return atomic_try_cmpxchg(v, (int *)old, new); | ||
233 | } | 931 | } |
234 | 932 | ||
235 | #define ATOMIC_LONG_INC_DEC_OP(op, mo) \ | 933 | static inline bool |
236 | static inline long \ | 934 | atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) |
237 | atomic_long_##op##_return##mo(atomic_long_t *l) \ | 935 | { |
238 | { \ | 936 | return atomic_try_cmpxchg_acquire(v, (int *)old, new); |
239 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
240 | \ | ||
241 | return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \ | ||
242 | } | 937 | } |
243 | ATOMIC_LONG_INC_DEC_OP(inc,) | ||
244 | ATOMIC_LONG_INC_DEC_OP(inc, _relaxed) | ||
245 | ATOMIC_LONG_INC_DEC_OP(inc, _acquire) | ||
246 | ATOMIC_LONG_INC_DEC_OP(inc, _release) | ||
247 | ATOMIC_LONG_INC_DEC_OP(dec,) | ||
248 | ATOMIC_LONG_INC_DEC_OP(dec, _relaxed) | ||
249 | ATOMIC_LONG_INC_DEC_OP(dec, _acquire) | ||
250 | ATOMIC_LONG_INC_DEC_OP(dec, _release) | ||
251 | 938 | ||
252 | #undef ATOMIC_LONG_INC_DEC_OP | 939 | static inline bool |
940 | atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) | ||
941 | { | ||
942 | return atomic_try_cmpxchg_release(v, (int *)old, new); | ||
943 | } | ||
944 | |||
945 | static inline bool | ||
946 | atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) | ||
947 | { | ||
948 | return atomic_try_cmpxchg_relaxed(v, (int *)old, new); | ||
949 | } | ||
950 | |||
951 | static inline bool | ||
952 | atomic_long_sub_and_test(long i, atomic_long_t *v) | ||
953 | { | ||
954 | return atomic_sub_and_test(i, v); | ||
955 | } | ||
956 | |||
957 | static inline bool | ||
958 | atomic_long_dec_and_test(atomic_long_t *v) | ||
959 | { | ||
960 | return atomic_dec_and_test(v); | ||
961 | } | ||
962 | |||
963 | static inline bool | ||
964 | atomic_long_inc_and_test(atomic_long_t *v) | ||
965 | { | ||
966 | return atomic_inc_and_test(v); | ||
967 | } | ||
968 | |||
969 | static inline bool | ||
970 | atomic_long_add_negative(long i, atomic_long_t *v) | ||
971 | { | ||
972 | return atomic_add_negative(i, v); | ||
973 | } | ||
974 | |||
975 | static inline long | ||
976 | atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) | ||
977 | { | ||
978 | return atomic_fetch_add_unless(v, a, u); | ||
979 | } | ||
980 | |||
981 | static inline bool | ||
982 | atomic_long_add_unless(atomic_long_t *v, long a, long u) | ||
983 | { | ||
984 | return atomic_add_unless(v, a, u); | ||
985 | } | ||
253 | 986 | ||
254 | static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) | 987 | static inline bool |
988 | atomic_long_inc_not_zero(atomic_long_t *v) | ||
255 | { | 989 | { |
256 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; | 990 | return atomic_inc_not_zero(v); |
991 | } | ||
257 | 992 | ||
258 | return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u); | 993 | static inline bool |
994 | atomic_long_inc_unless_negative(atomic_long_t *v) | ||
995 | { | ||
996 | return atomic_inc_unless_negative(v); | ||
259 | } | 997 | } |
260 | 998 | ||
261 | #define atomic_long_inc_not_zero(l) \ | 999 | static inline bool |
262 | ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) | 1000 | atomic_long_dec_unless_positive(atomic_long_t *v) |
1001 | { | ||
1002 | return atomic_dec_unless_positive(v); | ||
1003 | } | ||
263 | 1004 | ||
264 | #define atomic_long_cond_read_relaxed(v, c) \ | 1005 | static inline long |
265 | ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c)) | 1006 | atomic_long_dec_if_positive(atomic_long_t *v) |
266 | #define atomic_long_cond_read_acquire(v, c) \ | 1007 | { |
267 | ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) | 1008 | return atomic_dec_if_positive(v); |
1009 | } | ||
268 | 1010 | ||
269 | #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ | 1011 | #endif /* CONFIG_64BIT */ |
1012 | #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ | ||
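The regenerated atomic-long.h keeps the atomic_long_*() API identical on both word sizes; only the backing type switches between atomic64_t and atomic_t with CONFIG_64BIT. A short usage sketch (the counter and function names are made up for illustration):

	/* Hypothetical example - not part of this patch. */
	#include <linux/atomic.h>

	static atomic_long_t nr_widgets = ATOMIC_LONG_INIT(0);

	static void widget_created(void)
	{
		/* atomic64_inc() under CONFIG_64BIT, atomic_inc() otherwise. */
		atomic_long_inc(&nr_widgets);
	}

	static long widgets_outstanding(void)
	{
		return atomic_long_read(&nr_widgets);
	}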
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h new file mode 100644 index 000000000000..1c02c0112fbb --- /dev/null +++ b/include/linux/atomic-fallback.h | |||
@@ -0,0 +1,2294 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | // Generated by scripts/atomic/gen-atomic-fallback.sh | ||
4 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
5 | |||
6 | #ifndef _LINUX_ATOMIC_FALLBACK_H | ||
7 | #define _LINUX_ATOMIC_FALLBACK_H | ||
8 | |||
9 | #ifndef xchg_relaxed | ||
10 | #define xchg_relaxed xchg | ||
11 | #define xchg_acquire xchg | ||
12 | #define xchg_release xchg | ||
13 | #else /* xchg_relaxed */ | ||
14 | |||
15 | #ifndef xchg_acquire | ||
16 | #define xchg_acquire(...) \ | ||
17 | __atomic_op_acquire(xchg, __VA_ARGS__) | ||
18 | #endif | ||
19 | |||
20 | #ifndef xchg_release | ||
21 | #define xchg_release(...) \ | ||
22 | __atomic_op_release(xchg, __VA_ARGS__) | ||
23 | #endif | ||
24 | |||
25 | #ifndef xchg | ||
26 | #define xchg(...) \ | ||
27 | __atomic_op_fence(xchg, __VA_ARGS__) | ||
28 | #endif | ||
29 | |||
30 | #endif /* xchg_relaxed */ | ||
31 | |||
32 | #ifndef cmpxchg_relaxed | ||
33 | #define cmpxchg_relaxed cmpxchg | ||
34 | #define cmpxchg_acquire cmpxchg | ||
35 | #define cmpxchg_release cmpxchg | ||
36 | #else /* cmpxchg_relaxed */ | ||
37 | |||
38 | #ifndef cmpxchg_acquire | ||
39 | #define cmpxchg_acquire(...) \ | ||
40 | __atomic_op_acquire(cmpxchg, __VA_ARGS__) | ||
41 | #endif | ||
42 | |||
43 | #ifndef cmpxchg_release | ||
44 | #define cmpxchg_release(...) \ | ||
45 | __atomic_op_release(cmpxchg, __VA_ARGS__) | ||
46 | #endif | ||
47 | |||
48 | #ifndef cmpxchg | ||
49 | #define cmpxchg(...) \ | ||
50 | __atomic_op_fence(cmpxchg, __VA_ARGS__) | ||
51 | #endif | ||
52 | |||
53 | #endif /* cmpxchg_relaxed */ | ||
54 | |||
55 | #ifndef cmpxchg64_relaxed | ||
56 | #define cmpxchg64_relaxed cmpxchg64 | ||
57 | #define cmpxchg64_acquire cmpxchg64 | ||
58 | #define cmpxchg64_release cmpxchg64 | ||
59 | #else /* cmpxchg64_relaxed */ | ||
60 | |||
61 | #ifndef cmpxchg64_acquire | ||
62 | #define cmpxchg64_acquire(...) \ | ||
63 | __atomic_op_acquire(cmpxchg64, __VA_ARGS__) | ||
64 | #endif | ||
65 | |||
66 | #ifndef cmpxchg64_release | ||
67 | #define cmpxchg64_release(...) \ | ||
68 | __atomic_op_release(cmpxchg64, __VA_ARGS__) | ||
69 | #endif | ||
70 | |||
71 | #ifndef cmpxchg64 | ||
72 | #define cmpxchg64(...) \ | ||
73 | __atomic_op_fence(cmpxchg64, __VA_ARGS__) | ||
74 | #endif | ||
75 | |||
76 | #endif /* cmpxchg64_relaxed */ | ||
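The three blocks above cover xchg(), cmpxchg() and cmpxchg64(): when an architecture supplies only the _relaxed primitive, the acquire, release and fully ordered forms are synthesised through the __atomic_op_acquire/release/fence() helpers, which live elsewhere in <linux/atomic.h> rather than in this generated file. As a sketch of the intended expansion (the exact body is an assumption, since the helper is outside this patch):

	/*
	 * Assumed shape of __atomic_op_acquire(): run the _relaxed form of the
	 * named operation, then upgrade the ordering with an acquire fence, so
	 * cmpxchg_acquire(p, o, n) becomes cmpxchg_relaxed(p, o, n) followed
	 * by __atomic_acquire_fence().
	 */
	#define __atomic_op_acquire(op, args...)				\
	({									\
		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
		__atomic_acquire_fence();					\
		__ret;								\
	})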
77 | |||
78 | #ifndef atomic_read_acquire | ||
79 | static inline int | ||
80 | atomic_read_acquire(const atomic_t *v) | ||
81 | { | ||
82 | return smp_load_acquire(&(v)->counter); | ||
83 | } | ||
84 | #define atomic_read_acquire atomic_read_acquire | ||
85 | #endif | ||
86 | |||
87 | #ifndef atomic_set_release | ||
88 | static inline void | ||
89 | atomic_set_release(atomic_t *v, int i) | ||
90 | { | ||
91 | smp_store_release(&(v)->counter, i); | ||
92 | } | ||
93 | #define atomic_set_release atomic_set_release | ||
94 | #endif | ||
95 | |||
96 | #ifndef atomic_add_return_relaxed | ||
97 | #define atomic_add_return_acquire atomic_add_return | ||
98 | #define atomic_add_return_release atomic_add_return | ||
99 | #define atomic_add_return_relaxed atomic_add_return | ||
100 | #else /* atomic_add_return_relaxed */ | ||
101 | |||
102 | #ifndef atomic_add_return_acquire | ||
103 | static inline int | ||
104 | atomic_add_return_acquire(int i, atomic_t *v) | ||
105 | { | ||
106 | int ret = atomic_add_return_relaxed(i, v); | ||
107 | __atomic_acquire_fence(); | ||
108 | return ret; | ||
109 | } | ||
110 | #define atomic_add_return_acquire atomic_add_return_acquire | ||
111 | #endif | ||
112 | |||
113 | #ifndef atomic_add_return_release | ||
114 | static inline int | ||
115 | atomic_add_return_release(int i, atomic_t *v) | ||
116 | { | ||
117 | __atomic_release_fence(); | ||
118 | return atomic_add_return_relaxed(i, v); | ||
119 | } | ||
120 | #define atomic_add_return_release atomic_add_return_release | ||
121 | #endif | ||
122 | |||
123 | #ifndef atomic_add_return | ||
124 | static inline int | ||
125 | atomic_add_return(int i, atomic_t *v) | ||
126 | { | ||
127 | int ret; | ||
128 | __atomic_pre_full_fence(); | ||
129 | ret = atomic_add_return_relaxed(i, v); | ||
130 | __atomic_post_full_fence(); | ||
131 | return ret; | ||
132 | } | ||
133 | #define atomic_add_return atomic_add_return | ||
134 | #endif | ||
135 | |||
136 | #endif /* atomic_add_return_relaxed */ | ||
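The atomic_add_return_relaxed block above is the template the rest of this file repeats for each operation: given only the _relaxed primitive, _acquire is derived as relaxed-op-then-acquire-fence, _release as release-fence-then-relaxed-op, and the fully ordered form by bracketing the relaxed op with the pre/post full fences. A hypothetical architecture header that relies on these fallbacks would only need to provide the relaxed form, for example (all xyz_* names are invented):

	/*
	 * Hypothetical arch/xyz/include/asm/atomic.h fragment (illustration
	 * only): define just the relaxed primitive and let the fallbacks in
	 * this file generate _acquire, _release and atomic_add_return().
	 */
	static inline int atomic_add_return_relaxed(int i, atomic_t *v)
	{
		return xyz_insn_add_return(i, &v->counter);	/* invented asm helper */
	}
	#define atomic_add_return_relaxed atomic_add_return_relaxed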
137 | |||
138 | #ifndef atomic_fetch_add_relaxed | ||
139 | #define atomic_fetch_add_acquire atomic_fetch_add | ||
140 | #define atomic_fetch_add_release atomic_fetch_add | ||
141 | #define atomic_fetch_add_relaxed atomic_fetch_add | ||
142 | #else /* atomic_fetch_add_relaxed */ | ||
143 | |||
144 | #ifndef atomic_fetch_add_acquire | ||
145 | static inline int | ||
146 | atomic_fetch_add_acquire(int i, atomic_t *v) | ||
147 | { | ||
148 | int ret = atomic_fetch_add_relaxed(i, v); | ||
149 | __atomic_acquire_fence(); | ||
150 | return ret; | ||
151 | } | ||
152 | #define atomic_fetch_add_acquire atomic_fetch_add_acquire | ||
153 | #endif | ||
154 | |||
155 | #ifndef atomic_fetch_add_release | ||
156 | static inline int | ||
157 | atomic_fetch_add_release(int i, atomic_t *v) | ||
158 | { | ||
159 | __atomic_release_fence(); | ||
160 | return atomic_fetch_add_relaxed(i, v); | ||
161 | } | ||
162 | #define atomic_fetch_add_release atomic_fetch_add_release | ||
163 | #endif | ||
164 | |||
165 | #ifndef atomic_fetch_add | ||
166 | static inline int | ||
167 | atomic_fetch_add(int i, atomic_t *v) | ||
168 | { | ||
169 | int ret; | ||
170 | __atomic_pre_full_fence(); | ||
171 | ret = atomic_fetch_add_relaxed(i, v); | ||
172 | __atomic_post_full_fence(); | ||
173 | return ret; | ||
174 | } | ||
175 | #define atomic_fetch_add atomic_fetch_add | ||
176 | #endif | ||
177 | |||
178 | #endif /* atomic_fetch_add_relaxed */ | ||
179 | |||
180 | #ifndef atomic_sub_return_relaxed | ||
181 | #define atomic_sub_return_acquire atomic_sub_return | ||
182 | #define atomic_sub_return_release atomic_sub_return | ||
183 | #define atomic_sub_return_relaxed atomic_sub_return | ||
184 | #else /* atomic_sub_return_relaxed */ | ||
185 | |||
186 | #ifndef atomic_sub_return_acquire | ||
187 | static inline int | ||
188 | atomic_sub_return_acquire(int i, atomic_t *v) | ||
189 | { | ||
190 | int ret = atomic_sub_return_relaxed(i, v); | ||
191 | __atomic_acquire_fence(); | ||
192 | return ret; | ||
193 | } | ||
194 | #define atomic_sub_return_acquire atomic_sub_return_acquire | ||
195 | #endif | ||
196 | |||
197 | #ifndef atomic_sub_return_release | ||
198 | static inline int | ||
199 | atomic_sub_return_release(int i, atomic_t *v) | ||
200 | { | ||
201 | __atomic_release_fence(); | ||
202 | return atomic_sub_return_relaxed(i, v); | ||
203 | } | ||
204 | #define atomic_sub_return_release atomic_sub_return_release | ||
205 | #endif | ||
206 | |||
207 | #ifndef atomic_sub_return | ||
208 | static inline int | ||
209 | atomic_sub_return(int i, atomic_t *v) | ||
210 | { | ||
211 | int ret; | ||
212 | __atomic_pre_full_fence(); | ||
213 | ret = atomic_sub_return_relaxed(i, v); | ||
214 | __atomic_post_full_fence(); | ||
215 | return ret; | ||
216 | } | ||
217 | #define atomic_sub_return atomic_sub_return | ||
218 | #endif | ||
219 | |||
220 | #endif /* atomic_sub_return_relaxed */ | ||
221 | |||
222 | #ifndef atomic_fetch_sub_relaxed | ||
223 | #define atomic_fetch_sub_acquire atomic_fetch_sub | ||
224 | #define atomic_fetch_sub_release atomic_fetch_sub | ||
225 | #define atomic_fetch_sub_relaxed atomic_fetch_sub | ||
226 | #else /* atomic_fetch_sub_relaxed */ | ||
227 | |||
228 | #ifndef atomic_fetch_sub_acquire | ||
229 | static inline int | ||
230 | atomic_fetch_sub_acquire(int i, atomic_t *v) | ||
231 | { | ||
232 | int ret = atomic_fetch_sub_relaxed(i, v); | ||
233 | __atomic_acquire_fence(); | ||
234 | return ret; | ||
235 | } | ||
236 | #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire | ||
237 | #endif | ||
238 | |||
239 | #ifndef atomic_fetch_sub_release | ||
240 | static inline int | ||
241 | atomic_fetch_sub_release(int i, atomic_t *v) | ||
242 | { | ||
243 | __atomic_release_fence(); | ||
244 | return atomic_fetch_sub_relaxed(i, v); | ||
245 | } | ||
246 | #define atomic_fetch_sub_release atomic_fetch_sub_release | ||
247 | #endif | ||
248 | |||
249 | #ifndef atomic_fetch_sub | ||
250 | static inline int | ||
251 | atomic_fetch_sub(int i, atomic_t *v) | ||
252 | { | ||
253 | int ret; | ||
254 | __atomic_pre_full_fence(); | ||
255 | ret = atomic_fetch_sub_relaxed(i, v); | ||
256 | __atomic_post_full_fence(); | ||
257 | return ret; | ||
258 | } | ||
259 | #define atomic_fetch_sub atomic_fetch_sub | ||
260 | #endif | ||
261 | |||
262 | #endif /* atomic_fetch_sub_relaxed */ | ||
263 | |||
264 | #ifndef atomic_inc | ||
265 | static inline void | ||
266 | atomic_inc(atomic_t *v) | ||
267 | { | ||
268 | atomic_add(1, v); | ||
269 | } | ||
270 | #define atomic_inc atomic_inc | ||
271 | #endif | ||
272 | |||
273 | #ifndef atomic_inc_return_relaxed | ||
274 | #ifdef atomic_inc_return | ||
275 | #define atomic_inc_return_acquire atomic_inc_return | ||
276 | #define atomic_inc_return_release atomic_inc_return | ||
277 | #define atomic_inc_return_relaxed atomic_inc_return | ||
278 | #endif /* atomic_inc_return */ | ||
279 | |||
280 | #ifndef atomic_inc_return | ||
281 | static inline int | ||
282 | atomic_inc_return(atomic_t *v) | ||
283 | { | ||
284 | return atomic_add_return(1, v); | ||
285 | } | ||
286 | #define atomic_inc_return atomic_inc_return | ||
287 | #endif | ||
288 | |||
289 | #ifndef atomic_inc_return_acquire | ||
290 | static inline int | ||
291 | atomic_inc_return_acquire(atomic_t *v) | ||
292 | { | ||
293 | return atomic_add_return_acquire(1, v); | ||
294 | } | ||
295 | #define atomic_inc_return_acquire atomic_inc_return_acquire | ||
296 | #endif | ||
297 | |||
298 | #ifndef atomic_inc_return_release | ||
299 | static inline int | ||
300 | atomic_inc_return_release(atomic_t *v) | ||
301 | { | ||
302 | return atomic_add_return_release(1, v); | ||
303 | } | ||
304 | #define atomic_inc_return_release atomic_inc_return_release | ||
305 | #endif | ||
306 | |||
307 | #ifndef atomic_inc_return_relaxed | ||
308 | static inline int | ||
309 | atomic_inc_return_relaxed(atomic_t *v) | ||
310 | { | ||
311 | return atomic_add_return_relaxed(1, v); | ||
312 | } | ||
313 | #define atomic_inc_return_relaxed atomic_inc_return_relaxed | ||
314 | #endif | ||
315 | |||
316 | #else /* atomic_inc_return_relaxed */ | ||
317 | |||
318 | #ifndef atomic_inc_return_acquire | ||
319 | static inline int | ||
320 | atomic_inc_return_acquire(atomic_t *v) | ||
321 | { | ||
322 | int ret = atomic_inc_return_relaxed(v); | ||
323 | __atomic_acquire_fence(); | ||
324 | return ret; | ||
325 | } | ||
326 | #define atomic_inc_return_acquire atomic_inc_return_acquire | ||
327 | #endif | ||
328 | |||
329 | #ifndef atomic_inc_return_release | ||
330 | static inline int | ||
331 | atomic_inc_return_release(atomic_t *v) | ||
332 | { | ||
333 | __atomic_release_fence(); | ||
334 | return atomic_inc_return_relaxed(v); | ||
335 | } | ||
336 | #define atomic_inc_return_release atomic_inc_return_release | ||
337 | #endif | ||
338 | |||
339 | #ifndef atomic_inc_return | ||
340 | static inline int | ||
341 | atomic_inc_return(atomic_t *v) | ||
342 | { | ||
343 | int ret; | ||
344 | __atomic_pre_full_fence(); | ||
345 | ret = atomic_inc_return_relaxed(v); | ||
346 | __atomic_post_full_fence(); | ||
347 | return ret; | ||
348 | } | ||
349 | #define atomic_inc_return atomic_inc_return | ||
350 | #endif | ||
351 | |||
352 | #endif /* atomic_inc_return_relaxed */ | ||
353 | |||
354 | #ifndef atomic_fetch_inc_relaxed | ||
355 | #ifdef atomic_fetch_inc | ||
356 | #define atomic_fetch_inc_acquire atomic_fetch_inc | ||
357 | #define atomic_fetch_inc_release atomic_fetch_inc | ||
358 | #define atomic_fetch_inc_relaxed atomic_fetch_inc | ||
359 | #endif /* atomic_fetch_inc */ | ||
360 | |||
361 | #ifndef atomic_fetch_inc | ||
362 | static inline int | ||
363 | atomic_fetch_inc(atomic_t *v) | ||
364 | { | ||
365 | return atomic_fetch_add(1, v); | ||
366 | } | ||
367 | #define atomic_fetch_inc atomic_fetch_inc | ||
368 | #endif | ||
369 | |||
370 | #ifndef atomic_fetch_inc_acquire | ||
371 | static inline int | ||
372 | atomic_fetch_inc_acquire(atomic_t *v) | ||
373 | { | ||
374 | return atomic_fetch_add_acquire(1, v); | ||
375 | } | ||
376 | #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire | ||
377 | #endif | ||
378 | |||
379 | #ifndef atomic_fetch_inc_release | ||
380 | static inline int | ||
381 | atomic_fetch_inc_release(atomic_t *v) | ||
382 | { | ||
383 | return atomic_fetch_add_release(1, v); | ||
384 | } | ||
385 | #define atomic_fetch_inc_release atomic_fetch_inc_release | ||
386 | #endif | ||
387 | |||
388 | #ifndef atomic_fetch_inc_relaxed | ||
389 | static inline int | ||
390 | atomic_fetch_inc_relaxed(atomic_t *v) | ||
391 | { | ||
392 | return atomic_fetch_add_relaxed(1, v); | ||
393 | } | ||
394 | #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed | ||
395 | #endif | ||
396 | |||
397 | #else /* atomic_fetch_inc_relaxed */ | ||
398 | |||
399 | #ifndef atomic_fetch_inc_acquire | ||
400 | static inline int | ||
401 | atomic_fetch_inc_acquire(atomic_t *v) | ||
402 | { | ||
403 | int ret = atomic_fetch_inc_relaxed(v); | ||
404 | __atomic_acquire_fence(); | ||
405 | return ret; | ||
406 | } | ||
407 | #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire | ||
408 | #endif | ||
409 | |||
410 | #ifndef atomic_fetch_inc_release | ||
411 | static inline int | ||
412 | atomic_fetch_inc_release(atomic_t *v) | ||
413 | { | ||
414 | __atomic_release_fence(); | ||
415 | return atomic_fetch_inc_relaxed(v); | ||
416 | } | ||
417 | #define atomic_fetch_inc_release atomic_fetch_inc_release | ||
418 | #endif | ||
419 | |||
420 | #ifndef atomic_fetch_inc | ||
421 | static inline int | ||
422 | atomic_fetch_inc(atomic_t *v) | ||
423 | { | ||
424 | int ret; | ||
425 | __atomic_pre_full_fence(); | ||
426 | ret = atomic_fetch_inc_relaxed(v); | ||
427 | __atomic_post_full_fence(); | ||
428 | return ret; | ||
429 | } | ||
430 | #define atomic_fetch_inc atomic_fetch_inc | ||
431 | #endif | ||
432 | |||
433 | #endif /* atomic_fetch_inc_relaxed */ | ||
434 | |||
435 | #ifndef atomic_dec | ||
436 | static inline void | ||
437 | atomic_dec(atomic_t *v) | ||
438 | { | ||
439 | atomic_sub(1, v); | ||
440 | } | ||
441 | #define atomic_dec atomic_dec | ||
442 | #endif | ||
443 | |||
444 | #ifndef atomic_dec_return_relaxed | ||
445 | #ifdef atomic_dec_return | ||
446 | #define atomic_dec_return_acquire atomic_dec_return | ||
447 | #define atomic_dec_return_release atomic_dec_return | ||
448 | #define atomic_dec_return_relaxed atomic_dec_return | ||
449 | #endif /* atomic_dec_return */ | ||
450 | |||
451 | #ifndef atomic_dec_return | ||
452 | static inline int | ||
453 | atomic_dec_return(atomic_t *v) | ||
454 | { | ||
455 | return atomic_sub_return(1, v); | ||
456 | } | ||
457 | #define atomic_dec_return atomic_dec_return | ||
458 | #endif | ||
459 | |||
460 | #ifndef atomic_dec_return_acquire | ||
461 | static inline int | ||
462 | atomic_dec_return_acquire(atomic_t *v) | ||
463 | { | ||
464 | return atomic_sub_return_acquire(1, v); | ||
465 | } | ||
466 | #define atomic_dec_return_acquire atomic_dec_return_acquire | ||
467 | #endif | ||
468 | |||
469 | #ifndef atomic_dec_return_release | ||
470 | static inline int | ||
471 | atomic_dec_return_release(atomic_t *v) | ||
472 | { | ||
473 | return atomic_sub_return_release(1, v); | ||
474 | } | ||
475 | #define atomic_dec_return_release atomic_dec_return_release | ||
476 | #endif | ||
477 | |||
478 | #ifndef atomic_dec_return_relaxed | ||
479 | static inline int | ||
480 | atomic_dec_return_relaxed(atomic_t *v) | ||
481 | { | ||
482 | return atomic_sub_return_relaxed(1, v); | ||
483 | } | ||
484 | #define atomic_dec_return_relaxed atomic_dec_return_relaxed | ||
485 | #endif | ||
486 | |||
487 | #else /* atomic_dec_return_relaxed */ | ||
488 | |||
489 | #ifndef atomic_dec_return_acquire | ||
490 | static inline int | ||
491 | atomic_dec_return_acquire(atomic_t *v) | ||
492 | { | ||
493 | int ret = atomic_dec_return_relaxed(v); | ||
494 | __atomic_acquire_fence(); | ||
495 | return ret; | ||
496 | } | ||
497 | #define atomic_dec_return_acquire atomic_dec_return_acquire | ||
498 | #endif | ||
499 | |||
500 | #ifndef atomic_dec_return_release | ||
501 | static inline int | ||
502 | atomic_dec_return_release(atomic_t *v) | ||
503 | { | ||
504 | __atomic_release_fence(); | ||
505 | return atomic_dec_return_relaxed(v); | ||
506 | } | ||
507 | #define atomic_dec_return_release atomic_dec_return_release | ||
508 | #endif | ||
509 | |||
510 | #ifndef atomic_dec_return | ||
511 | static inline int | ||
512 | atomic_dec_return(atomic_t *v) | ||
513 | { | ||
514 | int ret; | ||
515 | __atomic_pre_full_fence(); | ||
516 | ret = atomic_dec_return_relaxed(v); | ||
517 | __atomic_post_full_fence(); | ||
518 | return ret; | ||
519 | } | ||
520 | #define atomic_dec_return atomic_dec_return | ||
521 | #endif | ||
522 | |||
523 | #endif /* atomic_dec_return_relaxed */ | ||
524 | |||
525 | #ifndef atomic_fetch_dec_relaxed | ||
526 | #ifdef atomic_fetch_dec | ||
527 | #define atomic_fetch_dec_acquire atomic_fetch_dec | ||
528 | #define atomic_fetch_dec_release atomic_fetch_dec | ||
529 | #define atomic_fetch_dec_relaxed atomic_fetch_dec | ||
530 | #endif /* atomic_fetch_dec */ | ||
531 | |||
532 | #ifndef atomic_fetch_dec | ||
533 | static inline int | ||
534 | atomic_fetch_dec(atomic_t *v) | ||
535 | { | ||
536 | return atomic_fetch_sub(1, v); | ||
537 | } | ||
538 | #define atomic_fetch_dec atomic_fetch_dec | ||
539 | #endif | ||
540 | |||
541 | #ifndef atomic_fetch_dec_acquire | ||
542 | static inline int | ||
543 | atomic_fetch_dec_acquire(atomic_t *v) | ||
544 | { | ||
545 | return atomic_fetch_sub_acquire(1, v); | ||
546 | } | ||
547 | #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire | ||
548 | #endif | ||
549 | |||
550 | #ifndef atomic_fetch_dec_release | ||
551 | static inline int | ||
552 | atomic_fetch_dec_release(atomic_t *v) | ||
553 | { | ||
554 | return atomic_fetch_sub_release(1, v); | ||
555 | } | ||
556 | #define atomic_fetch_dec_release atomic_fetch_dec_release | ||
557 | #endif | ||
558 | |||
559 | #ifndef atomic_fetch_dec_relaxed | ||
560 | static inline int | ||
561 | atomic_fetch_dec_relaxed(atomic_t *v) | ||
562 | { | ||
563 | return atomic_fetch_sub_relaxed(1, v); | ||
564 | } | ||
565 | #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed | ||
566 | #endif | ||
567 | |||
568 | #else /* atomic_fetch_dec_relaxed */ | ||
569 | |||
570 | #ifndef atomic_fetch_dec_acquire | ||
571 | static inline int | ||
572 | atomic_fetch_dec_acquire(atomic_t *v) | ||
573 | { | ||
574 | int ret = atomic_fetch_dec_relaxed(v); | ||
575 | __atomic_acquire_fence(); | ||
576 | return ret; | ||
577 | } | ||
578 | #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire | ||
579 | #endif | ||
580 | |||
581 | #ifndef atomic_fetch_dec_release | ||
582 | static inline int | ||
583 | atomic_fetch_dec_release(atomic_t *v) | ||
584 | { | ||
585 | __atomic_release_fence(); | ||
586 | return atomic_fetch_dec_relaxed(v); | ||
587 | } | ||
588 | #define atomic_fetch_dec_release atomic_fetch_dec_release | ||
589 | #endif | ||
590 | |||
591 | #ifndef atomic_fetch_dec | ||
592 | static inline int | ||
593 | atomic_fetch_dec(atomic_t *v) | ||
594 | { | ||
595 | int ret; | ||
596 | __atomic_pre_full_fence(); | ||
597 | ret = atomic_fetch_dec_relaxed(v); | ||
598 | __atomic_post_full_fence(); | ||
599 | return ret; | ||
600 | } | ||
601 | #define atomic_fetch_dec atomic_fetch_dec | ||
602 | #endif | ||
603 | |||
604 | #endif /* atomic_fetch_dec_relaxed */ | ||
605 | |||
606 | #ifndef atomic_fetch_and_relaxed | ||
607 | #define atomic_fetch_and_acquire atomic_fetch_and | ||
608 | #define atomic_fetch_and_release atomic_fetch_and | ||
609 | #define atomic_fetch_and_relaxed atomic_fetch_and | ||
610 | #else /* atomic_fetch_and_relaxed */ | ||
611 | |||
612 | #ifndef atomic_fetch_and_acquire | ||
613 | static inline int | ||
614 | atomic_fetch_and_acquire(int i, atomic_t *v) | ||
615 | { | ||
616 | int ret = atomic_fetch_and_relaxed(i, v); | ||
617 | __atomic_acquire_fence(); | ||
618 | return ret; | ||
619 | } | ||
620 | #define atomic_fetch_and_acquire atomic_fetch_and_acquire | ||
621 | #endif | ||
622 | |||
623 | #ifndef atomic_fetch_and_release | ||
624 | static inline int | ||
625 | atomic_fetch_and_release(int i, atomic_t *v) | ||
626 | { | ||
627 | __atomic_release_fence(); | ||
628 | return atomic_fetch_and_relaxed(i, v); | ||
629 | } | ||
630 | #define atomic_fetch_and_release atomic_fetch_and_release | ||
631 | #endif | ||
632 | |||
633 | #ifndef atomic_fetch_and | ||
634 | static inline int | ||
635 | atomic_fetch_and(int i, atomic_t *v) | ||
636 | { | ||
637 | int ret; | ||
638 | __atomic_pre_full_fence(); | ||
639 | ret = atomic_fetch_and_relaxed(i, v); | ||
640 | __atomic_post_full_fence(); | ||
641 | return ret; | ||
642 | } | ||
643 | #define atomic_fetch_and atomic_fetch_and | ||
644 | #endif | ||
645 | |||
646 | #endif /* atomic_fetch_and_relaxed */ | ||
647 | |||
648 | #ifndef atomic_andnot | ||
649 | static inline void | ||
650 | atomic_andnot(int i, atomic_t *v) | ||
651 | { | ||
652 | atomic_and(~i, v); | ||
653 | } | ||
654 | #define atomic_andnot atomic_andnot | ||
655 | #endif | ||
656 | |||
657 | #ifndef atomic_fetch_andnot_relaxed | ||
658 | #ifdef atomic_fetch_andnot | ||
659 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot | ||
660 | #define atomic_fetch_andnot_release atomic_fetch_andnot | ||
661 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot | ||
662 | #endif /* atomic_fetch_andnot */ | ||
663 | |||
664 | #ifndef atomic_fetch_andnot | ||
665 | static inline int | ||
666 | atomic_fetch_andnot(int i, atomic_t *v) | ||
667 | { | ||
668 | return atomic_fetch_and(~i, v); | ||
669 | } | ||
670 | #define atomic_fetch_andnot atomic_fetch_andnot | ||
671 | #endif | ||
672 | |||
673 | #ifndef atomic_fetch_andnot_acquire | ||
674 | static inline int | ||
675 | atomic_fetch_andnot_acquire(int i, atomic_t *v) | ||
676 | { | ||
677 | return atomic_fetch_and_acquire(~i, v); | ||
678 | } | ||
679 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire | ||
680 | #endif | ||
681 | |||
682 | #ifndef atomic_fetch_andnot_release | ||
683 | static inline int | ||
684 | atomic_fetch_andnot_release(int i, atomic_t *v) | ||
685 | { | ||
686 | return atomic_fetch_and_release(~i, v); | ||
687 | } | ||
688 | #define atomic_fetch_andnot_release atomic_fetch_andnot_release | ||
689 | #endif | ||
690 | |||
691 | #ifndef atomic_fetch_andnot_relaxed | ||
692 | static inline int | ||
693 | atomic_fetch_andnot_relaxed(int i, atomic_t *v) | ||
694 | { | ||
695 | return atomic_fetch_and_relaxed(~i, v); | ||
696 | } | ||
697 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | ||
698 | #endif | ||
699 | |||
700 | #else /* atomic_fetch_andnot_relaxed */ | ||
701 | |||
702 | #ifndef atomic_fetch_andnot_acquire | ||
703 | static inline int | ||
704 | atomic_fetch_andnot_acquire(int i, atomic_t *v) | ||
705 | { | ||
706 | int ret = atomic_fetch_andnot_relaxed(i, v); | ||
707 | __atomic_acquire_fence(); | ||
708 | return ret; | ||
709 | } | ||
710 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire | ||
711 | #endif | ||
712 | |||
713 | #ifndef atomic_fetch_andnot_release | ||
714 | static inline int | ||
715 | atomic_fetch_andnot_release(int i, atomic_t *v) | ||
716 | { | ||
717 | __atomic_release_fence(); | ||
718 | return atomic_fetch_andnot_relaxed(i, v); | ||
719 | } | ||
720 | #define atomic_fetch_andnot_release atomic_fetch_andnot_release | ||
721 | #endif | ||
722 | |||
723 | #ifndef atomic_fetch_andnot | ||
724 | static inline int | ||
725 | atomic_fetch_andnot(int i, atomic_t *v) | ||
726 | { | ||
727 | int ret; | ||
728 | __atomic_pre_full_fence(); | ||
729 | ret = atomic_fetch_andnot_relaxed(i, v); | ||
730 | __atomic_post_full_fence(); | ||
731 | return ret; | ||
732 | } | ||
733 | #define atomic_fetch_andnot atomic_fetch_andnot | ||
734 | #endif | ||
735 | |||
736 | #endif /* atomic_fetch_andnot_relaxed */ | ||
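The andnot family clears whichever bits are set in @i; the fetch_ variants additionally report the prior value, so a caller can learn whether a flag was set at the moment it was cleared. A small sketch under assumed names (MY_PENDING and test_and_clear_pending() are not from this header):

#define MY_PENDING      0x1

/* clear MY_PENDING and report whether it had been set */
static bool test_and_clear_pending(atomic_t *flags)
{
        return atomic_fetch_andnot(MY_PENDING, flags) & MY_PENDING;
}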
737 | |||
738 | #ifndef atomic_fetch_or_relaxed | ||
739 | #define atomic_fetch_or_acquire atomic_fetch_or | ||
740 | #define atomic_fetch_or_release atomic_fetch_or | ||
741 | #define atomic_fetch_or_relaxed atomic_fetch_or | ||
742 | #else /* atomic_fetch_or_relaxed */ | ||
743 | |||
744 | #ifndef atomic_fetch_or_acquire | ||
745 | static inline int | ||
746 | atomic_fetch_or_acquire(int i, atomic_t *v) | ||
747 | { | ||
748 | int ret = atomic_fetch_or_relaxed(i, v); | ||
749 | __atomic_acquire_fence(); | ||
750 | return ret; | ||
751 | } | ||
752 | #define atomic_fetch_or_acquire atomic_fetch_or_acquire | ||
753 | #endif | ||
754 | |||
755 | #ifndef atomic_fetch_or_release | ||
756 | static inline int | ||
757 | atomic_fetch_or_release(int i, atomic_t *v) | ||
758 | { | ||
759 | __atomic_release_fence(); | ||
760 | return atomic_fetch_or_relaxed(i, v); | ||
761 | } | ||
762 | #define atomic_fetch_or_release atomic_fetch_or_release | ||
763 | #endif | ||
764 | |||
765 | #ifndef atomic_fetch_or | ||
766 | static inline int | ||
767 | atomic_fetch_or(int i, atomic_t *v) | ||
768 | { | ||
769 | int ret; | ||
770 | __atomic_pre_full_fence(); | ||
771 | ret = atomic_fetch_or_relaxed(i, v); | ||
772 | __atomic_post_full_fence(); | ||
773 | return ret; | ||
774 | } | ||
775 | #define atomic_fetch_or atomic_fetch_or | ||
776 | #endif | ||
777 | |||
778 | #endif /* atomic_fetch_or_relaxed */ | ||
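Because atomic_fetch_or() hands back the prior value, it can serve as a simple test-and-set on a flag word. A sketch with hypothetical names (MY_BUSY, try_claim()):

#define MY_BUSY         0x2

/* returns true only for the caller that actually set the flag */
static bool try_claim(atomic_t *flags)
{
        return !(atomic_fetch_or(MY_BUSY, flags) & MY_BUSY);
}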
779 | |||
780 | #ifndef atomic_fetch_xor_relaxed | ||
781 | #define atomic_fetch_xor_acquire atomic_fetch_xor | ||
782 | #define atomic_fetch_xor_release atomic_fetch_xor | ||
783 | #define atomic_fetch_xor_relaxed atomic_fetch_xor | ||
784 | #else /* atomic_fetch_xor_relaxed */ | ||
785 | |||
786 | #ifndef atomic_fetch_xor_acquire | ||
787 | static inline int | ||
788 | atomic_fetch_xor_acquire(int i, atomic_t *v) | ||
789 | { | ||
790 | int ret = atomic_fetch_xor_relaxed(i, v); | ||
791 | __atomic_acquire_fence(); | ||
792 | return ret; | ||
793 | } | ||
794 | #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire | ||
795 | #endif | ||
796 | |||
797 | #ifndef atomic_fetch_xor_release | ||
798 | static inline int | ||
799 | atomic_fetch_xor_release(int i, atomic_t *v) | ||
800 | { | ||
801 | __atomic_release_fence(); | ||
802 | return atomic_fetch_xor_relaxed(i, v); | ||
803 | } | ||
804 | #define atomic_fetch_xor_release atomic_fetch_xor_release | ||
805 | #endif | ||
806 | |||
807 | #ifndef atomic_fetch_xor | ||
808 | static inline int | ||
809 | atomic_fetch_xor(int i, atomic_t *v) | ||
810 | { | ||
811 | int ret; | ||
812 | __atomic_pre_full_fence(); | ||
813 | ret = atomic_fetch_xor_relaxed(i, v); | ||
814 | __atomic_post_full_fence(); | ||
815 | return ret; | ||
816 | } | ||
817 | #define atomic_fetch_xor atomic_fetch_xor | ||
818 | #endif | ||
819 | |||
820 | #endif /* atomic_fetch_xor_relaxed */ | ||
821 | |||
822 | #ifndef atomic_xchg_relaxed | ||
823 | #define atomic_xchg_acquire atomic_xchg | ||
824 | #define atomic_xchg_release atomic_xchg | ||
825 | #define atomic_xchg_relaxed atomic_xchg | ||
826 | #else /* atomic_xchg_relaxed */ | ||
827 | |||
828 | #ifndef atomic_xchg_acquire | ||
829 | static inline int | ||
830 | atomic_xchg_acquire(atomic_t *v, int i) | ||
831 | { | ||
832 | int ret = atomic_xchg_relaxed(v, i); | ||
833 | __atomic_acquire_fence(); | ||
834 | return ret; | ||
835 | } | ||
836 | #define atomic_xchg_acquire atomic_xchg_acquire | ||
837 | #endif | ||
838 | |||
839 | #ifndef atomic_xchg_release | ||
840 | static inline int | ||
841 | atomic_xchg_release(atomic_t *v, int i) | ||
842 | { | ||
843 | __atomic_release_fence(); | ||
844 | return atomic_xchg_relaxed(v, i); | ||
845 | } | ||
846 | #define atomic_xchg_release atomic_xchg_release | ||
847 | #endif | ||
848 | |||
849 | #ifndef atomic_xchg | ||
850 | static inline int | ||
851 | atomic_xchg(atomic_t *v, int i) | ||
852 | { | ||
853 | int ret; | ||
854 | __atomic_pre_full_fence(); | ||
855 | ret = atomic_xchg_relaxed(v, i); | ||
856 | __atomic_post_full_fence(); | ||
857 | return ret; | ||
858 | } | ||
859 | #define atomic_xchg atomic_xchg | ||
860 | #endif | ||
861 | |||
862 | #endif /* atomic_xchg_relaxed */ | ||
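atomic_xchg() installs a new value and returns the old one in a single step; a common use is draining an accumulated count so that no events are lost or double-counted. A sketch, with drain_events() as a hypothetical caller:

/* take whatever has accumulated and reset the counter atomically */
static int drain_events(atomic_t *nr_events)
{
        return atomic_xchg(nr_events, 0);
}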
863 | |||
864 | #ifndef atomic_cmpxchg_relaxed | ||
865 | #define atomic_cmpxchg_acquire atomic_cmpxchg | ||
866 | #define atomic_cmpxchg_release atomic_cmpxchg | ||
867 | #define atomic_cmpxchg_relaxed atomic_cmpxchg | ||
868 | #else /* atomic_cmpxchg_relaxed */ | ||
869 | |||
870 | #ifndef atomic_cmpxchg_acquire | ||
871 | static inline int | ||
872 | atomic_cmpxchg_acquire(atomic_t *v, int old, int new) | ||
873 | { | ||
874 | int ret = atomic_cmpxchg_relaxed(v, old, new); | ||
875 | __atomic_acquire_fence(); | ||
876 | return ret; | ||
877 | } | ||
878 | #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire | ||
879 | #endif | ||
880 | |||
881 | #ifndef atomic_cmpxchg_release | ||
882 | static inline int | ||
883 | atomic_cmpxchg_release(atomic_t *v, int old, int new) | ||
884 | { | ||
885 | __atomic_release_fence(); | ||
886 | return atomic_cmpxchg_relaxed(v, old, new); | ||
887 | } | ||
888 | #define atomic_cmpxchg_release atomic_cmpxchg_release | ||
889 | #endif | ||
890 | |||
891 | #ifndef atomic_cmpxchg | ||
892 | static inline int | ||
893 | atomic_cmpxchg(atomic_t *v, int old, int new) | ||
894 | { | ||
895 | int ret; | ||
896 | __atomic_pre_full_fence(); | ||
897 | ret = atomic_cmpxchg_relaxed(v, old, new); | ||
898 | __atomic_post_full_fence(); | ||
899 | return ret; | ||
900 | } | ||
901 | #define atomic_cmpxchg atomic_cmpxchg | ||
902 | #endif | ||
903 | |||
904 | #endif /* atomic_cmpxchg_relaxed */ | ||
905 | |||
906 | #ifndef atomic_try_cmpxchg_relaxed | ||
907 | #ifdef atomic_try_cmpxchg | ||
908 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg | ||
909 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg | ||
910 | #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg | ||
911 | #endif /* atomic_try_cmpxchg */ | ||
912 | |||
913 | #ifndef atomic_try_cmpxchg | ||
914 | static inline bool | ||
915 | atomic_try_cmpxchg(atomic_t *v, int *old, int new) | ||
916 | { | ||
917 | int r, o = *old; | ||
918 | r = atomic_cmpxchg(v, o, new); | ||
919 | if (unlikely(r != o)) | ||
920 | *old = r; | ||
921 | return likely(r == o); | ||
922 | } | ||
923 | #define atomic_try_cmpxchg atomic_try_cmpxchg | ||
924 | #endif | ||
925 | |||
926 | #ifndef atomic_try_cmpxchg_acquire | ||
927 | static inline bool | ||
928 | atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) | ||
929 | { | ||
930 | int r, o = *old; | ||
931 | r = atomic_cmpxchg_acquire(v, o, new); | ||
932 | if (unlikely(r != o)) | ||
933 | *old = r; | ||
934 | return likely(r == o); | ||
935 | } | ||
936 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire | ||
937 | #endif | ||
938 | |||
939 | #ifndef atomic_try_cmpxchg_release | ||
940 | static inline bool | ||
941 | atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) | ||
942 | { | ||
943 | int r, o = *old; | ||
944 | r = atomic_cmpxchg_release(v, o, new); | ||
945 | if (unlikely(r != o)) | ||
946 | *old = r; | ||
947 | return likely(r == o); | ||
948 | } | ||
949 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release | ||
950 | #endif | ||
951 | |||
952 | #ifndef atomic_try_cmpxchg_relaxed | ||
953 | static inline bool | ||
954 | atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) | ||
955 | { | ||
956 | int r, o = *old; | ||
957 | r = atomic_cmpxchg_relaxed(v, o, new); | ||
958 | if (unlikely(r != o)) | ||
959 | *old = r; | ||
960 | return likely(r == o); | ||
961 | } | ||
962 | #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed | ||
963 | #endif | ||
964 | |||
965 | #else /* atomic_try_cmpxchg_relaxed */ | ||
966 | |||
967 | #ifndef atomic_try_cmpxchg_acquire | ||
968 | static inline bool | ||
969 | atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) | ||
970 | { | ||
971 | bool ret = atomic_try_cmpxchg_relaxed(v, old, new); | ||
972 | __atomic_acquire_fence(); | ||
973 | return ret; | ||
974 | } | ||
975 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire | ||
976 | #endif | ||
977 | |||
978 | #ifndef atomic_try_cmpxchg_release | ||
979 | static inline bool | ||
980 | atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) | ||
981 | { | ||
982 | __atomic_release_fence(); | ||
983 | return atomic_try_cmpxchg_relaxed(v, old, new); | ||
984 | } | ||
985 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release | ||
986 | #endif | ||
987 | |||
988 | #ifndef atomic_try_cmpxchg | ||
989 | static inline bool | ||
990 | atomic_try_cmpxchg(atomic_t *v, int *old, int new) | ||
991 | { | ||
992 | bool ret; | ||
993 | __atomic_pre_full_fence(); | ||
994 | ret = atomic_try_cmpxchg_relaxed(v, old, new); | ||
995 | __atomic_post_full_fence(); | ||
996 | return ret; | ||
997 | } | ||
998 | #define atomic_try_cmpxchg atomic_try_cmpxchg | ||
999 | #endif | ||
1000 | |||
1001 | #endif /* atomic_try_cmpxchg_relaxed */ | ||
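The try_cmpxchg() form writes the value it actually observed back into *old on failure, so retry loops do not need a separate re-read. A typical loop, sketched with a hypothetical MY_LIMIT bound (not part of the patch):

#define MY_LIMIT        1024

/* increment v, but never past MY_LIMIT; returns false if already at the limit */
static bool inc_below_limit(atomic_t *v)
{
        int cur = atomic_read(v);

        do {
                if (cur >= MY_LIMIT)
                        return false;
        } while (!atomic_try_cmpxchg(v, &cur, cur + 1));

        return true;
}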
1002 | |||
1003 | #ifndef atomic_sub_and_test | ||
1004 | /** | ||
1005 | * atomic_sub_and_test - subtract value from variable and test result | ||
1006 | * @i: integer value to subtract | ||
1007 | * @v: pointer of type atomic_t | ||
1008 | * | ||
1009 | * Atomically subtracts @i from @v and returns | ||
1010 | * true if the result is zero, or false for all | ||
1011 | * other cases. | ||
1012 | */ | ||
1013 | static inline bool | ||
1014 | atomic_sub_and_test(int i, atomic_t *v) | ||
1015 | { | ||
1016 | return atomic_sub_return(i, v) == 0; | ||
1017 | } | ||
1018 | #define atomic_sub_and_test atomic_sub_and_test | ||
1019 | #endif | ||
1020 | |||
1021 | #ifndef atomic_dec_and_test | ||
1022 | /** | ||
1023 | * atomic_dec_and_test - decrement and test | ||
1024 | * @v: pointer of type atomic_t | ||
1025 | * | ||
1026 | * Atomically decrements @v by 1 and | ||
1027 | * returns true if the result is 0, or false for all other | ||
1028 | * cases. | ||
1029 | */ | ||
1030 | static inline bool | ||
1031 | atomic_dec_and_test(atomic_t *v) | ||
1032 | { | ||
1033 | return atomic_dec_return(v) == 0; | ||
1034 | } | ||
1035 | #define atomic_dec_and_test atomic_dec_and_test | ||
1036 | #endif | ||
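atomic_dec_and_test() returns true only for the caller whose decrement reaches zero, i.e. the one responsible for releasing the object. A bare-bones put path as a sketch; struct my_object and my_object_free() are hypothetical and not part of the patch:

struct my_object {
        atomic_t refcount;
        /* ... payload ... */
};

void my_object_free(struct my_object *obj);     /* hypothetical */

static void my_object_put(struct my_object *obj)
{
        /* only the caller that drops the count to zero frees the object */
        if (atomic_dec_and_test(&obj->refcount))
                my_object_free(obj);
}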
1037 | |||
1038 | #ifndef atomic_inc_and_test | ||
1039 | /** | ||
1040 | * atomic_inc_and_test - increment and test | ||
1041 | * @v: pointer of type atomic_t | ||
1042 | * | ||
1043 | * Atomically increments @v by 1 | ||
1044 | * and returns true if the result is zero, or false for all | ||
1045 | * other cases. | ||
1046 | */ | ||
1047 | static inline bool | ||
1048 | atomic_inc_and_test(atomic_t *v) | ||
1049 | { | ||
1050 | return atomic_inc_return(v) == 0; | ||
1051 | } | ||
1052 | #define atomic_inc_and_test atomic_inc_and_test | ||
1053 | #endif | ||
1054 | |||
1055 | #ifndef atomic_add_negative | ||
1056 | /** | ||
1057 | * atomic_add_negative - add and test if negative | ||
1058 | * @i: integer value to add | ||
1059 | * @v: pointer of type atomic_t | ||
1060 | * | ||
1061 | * Atomically adds @i to @v and returns true | ||
1062 | * if the result is negative, or false when | ||
1063 | * the result is greater than or equal to zero. | ||
1064 | */ | ||
1065 | static inline bool | ||
1066 | atomic_add_negative(int i, atomic_t *v) | ||
1067 | { | ||
1068 | return atomic_add_return(i, v) < 0; | ||
1069 | } | ||
1070 | #define atomic_add_negative atomic_add_negative | ||
1071 | #endif | ||
1072 | |||
1073 | #ifndef atomic_fetch_add_unless | ||
1074 | /** | ||
1075 | * atomic_fetch_add_unless - add unless the number is already a given value | ||
1076 | * @v: pointer of type atomic_t | ||
1077 | * @a: the amount to add to v... | ||
1078 | * @u: ...unless v is equal to u. | ||
1079 | * | ||
1080 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
1081 | * Returns the original value of @v. | ||
1082 | */ | ||
1083 | static inline int | ||
1084 | atomic_fetch_add_unless(atomic_t *v, int a, int u) | ||
1085 | { | ||
1086 | int c = atomic_read(v); | ||
1087 | |||
1088 | do { | ||
1089 | if (unlikely(c == u)) | ||
1090 | break; | ||
1091 | } while (!atomic_try_cmpxchg(v, &c, c + a)); | ||
1092 | |||
1093 | return c; | ||
1094 | } | ||
1095 | #define atomic_fetch_add_unless atomic_fetch_add_unless | ||
1096 | #endif | ||
1097 | |||
1098 | #ifndef atomic_add_unless | ||
1099 | /** | ||
1100 | * atomic_add_unless - add unless the number is already a given value | ||
1101 | * @v: pointer of type atomic_t | ||
1102 | * @a: the amount to add to v... | ||
1103 | * @u: ...unless v is equal to u. | ||
1104 | * | ||
1105 | * Atomically adds @a to @v, if @v was not already @u. | ||
1106 | * Returns true if the addition was done. | ||
1107 | */ | ||
1108 | static inline bool | ||
1109 | atomic_add_unless(atomic_t *v, int a, int u) | ||
1110 | { | ||
1111 | return atomic_fetch_add_unless(v, a, u) != u; | ||
1112 | } | ||
1113 | #define atomic_add_unless atomic_add_unless | ||
1114 | #endif | ||
1115 | |||
1116 | #ifndef atomic_inc_not_zero | ||
1117 | /** | ||
1118 | * atomic_inc_not_zero - increment unless the number is zero | ||
1119 | * @v: pointer of type atomic_t | ||
1120 | * | ||
1121 | * Atomically increments @v by 1, if @v is non-zero. | ||
1122 | * Returns true if the increment was done. | ||
1123 | */ | ||
1124 | static inline bool | ||
1125 | atomic_inc_not_zero(atomic_t *v) | ||
1126 | { | ||
1127 | return atomic_add_unless(v, 1, 0); | ||
1128 | } | ||
1129 | #define atomic_inc_not_zero atomic_inc_not_zero | ||
1130 | #endif | ||
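atomic_inc_not_zero() only takes a reference while the count is still non-zero, the usual pattern for lookups racing against a final put. A sketch reusing the hypothetical struct my_object from the put example above:

/* returns true if a reference was taken, false if the object is already dying */
static bool my_object_get_if_live(struct my_object *obj)
{
        return atomic_inc_not_zero(&obj->refcount);
}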
1131 | |||
1132 | #ifndef atomic_inc_unless_negative | ||
1133 | static inline bool | ||
1134 | atomic_inc_unless_negative(atomic_t *v) | ||
1135 | { | ||
1136 | int c = atomic_read(v); | ||
1137 | |||
1138 | do { | ||
1139 | if (unlikely(c < 0)) | ||
1140 | return false; | ||
1141 | } while (!atomic_try_cmpxchg(v, &c, c + 1)); | ||
1142 | |||
1143 | return true; | ||
1144 | } | ||
1145 | #define atomic_inc_unless_negative atomic_inc_unless_negative | ||
1146 | #endif | ||
1147 | |||
1148 | #ifndef atomic_dec_unless_positive | ||
1149 | static inline bool | ||
1150 | atomic_dec_unless_positive(atomic_t *v) | ||
1151 | { | ||
1152 | int c = atomic_read(v); | ||
1153 | |||
1154 | do { | ||
1155 | if (unlikely(c > 0)) | ||
1156 | return false; | ||
1157 | } while (!atomic_try_cmpxchg(v, &c, c - 1)); | ||
1158 | |||
1159 | return true; | ||
1160 | } | ||
1161 | #define atomic_dec_unless_positive atomic_dec_unless_positive | ||
1162 | #endif | ||
1163 | |||
1164 | #ifndef atomic_dec_if_positive | ||
1165 | static inline int | ||
1166 | atomic_dec_if_positive(atomic_t *v) | ||
1167 | { | ||
1168 | int dec, c = atomic_read(v); | ||
1169 | |||
1170 | do { | ||
1171 | dec = c - 1; | ||
1172 | if (unlikely(dec < 0)) | ||
1173 | break; | ||
1174 | } while (!atomic_try_cmpxchg(v, &c, dec)); | ||
1175 | |||
1176 | return dec; | ||
1177 | } | ||
1178 | #define atomic_dec_if_positive atomic_dec_if_positive | ||
1179 | #endif | ||
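atomic_dec_if_positive() decrements only when the result stays non-negative and returns that result (or the would-be negative value, without storing it), so it behaves like a non-blocking "take one token if available". A sketch with a hypothetical token counter:

/* consume one token without ever pushing the counter below zero */
static bool try_take_token(atomic_t *tokens)
{
        /* non-negative return means a token was taken */
        return atomic_dec_if_positive(tokens) >= 0;
}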
1180 | |||
1181 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
1182 | #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
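atomic_cond_read_acquire() spins via smp_cond_load_acquire() until the supplied condition on the loaded value (available as VAL) holds, then returns that value with acquire ordering. A sketch of waiting for a published state; wait_for_state() is a hypothetical helper, not part of the patch:

/* wait until another CPU stores a nonzero state, with acquire ordering */
static int wait_for_state(atomic_t *state)
{
        return atomic_cond_read_acquire(state, VAL != 0);
}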
1183 | |||
1184 | #ifdef CONFIG_GENERIC_ATOMIC64 | ||
1185 | #include <asm-generic/atomic64.h> | ||
1186 | #endif | ||
1187 | |||
1188 | #ifndef atomic64_read_acquire | ||
1189 | static inline s64 | ||
1190 | atomic64_read_acquire(const atomic64_t *v) | ||
1191 | { | ||
1192 | return smp_load_acquire(&(v)->counter); | ||
1193 | } | ||
1194 | #define atomic64_read_acquire atomic64_read_acquire | ||
1195 | #endif | ||
1196 | |||
1197 | #ifndef atomic64_set_release | ||
1198 | static inline void | ||
1199 | atomic64_set_release(atomic64_t *v, s64 i) | ||
1200 | { | ||
1201 | smp_store_release(&(v)->counter, i); | ||
1202 | } | ||
1203 | #define atomic64_set_release atomic64_set_release | ||
1204 | #endif | ||
1205 | |||
1206 | #ifndef atomic64_add_return_relaxed | ||
1207 | #define atomic64_add_return_acquire atomic64_add_return | ||
1208 | #define atomic64_add_return_release atomic64_add_return | ||
1209 | #define atomic64_add_return_relaxed atomic64_add_return | ||
1210 | #else /* atomic64_add_return_relaxed */ | ||
1211 | |||
1212 | #ifndef atomic64_add_return_acquire | ||
1213 | static inline s64 | ||
1214 | atomic64_add_return_acquire(s64 i, atomic64_t *v) | ||
1215 | { | ||
1216 | s64 ret = atomic64_add_return_relaxed(i, v); | ||
1217 | __atomic_acquire_fence(); | ||
1218 | return ret; | ||
1219 | } | ||
1220 | #define atomic64_add_return_acquire atomic64_add_return_acquire | ||
1221 | #endif | ||
1222 | |||
1223 | #ifndef atomic64_add_return_release | ||
1224 | static inline s64 | ||
1225 | atomic64_add_return_release(s64 i, atomic64_t *v) | ||
1226 | { | ||
1227 | __atomic_release_fence(); | ||
1228 | return atomic64_add_return_relaxed(i, v); | ||
1229 | } | ||
1230 | #define atomic64_add_return_release atomic64_add_return_release | ||
1231 | #endif | ||
1232 | |||
1233 | #ifndef atomic64_add_return | ||
1234 | static inline s64 | ||
1235 | atomic64_add_return(s64 i, atomic64_t *v) | ||
1236 | { | ||
1237 | s64 ret; | ||
1238 | __atomic_pre_full_fence(); | ||
1239 | ret = atomic64_add_return_relaxed(i, v); | ||
1240 | __atomic_post_full_fence(); | ||
1241 | return ret; | ||
1242 | } | ||
1243 | #define atomic64_add_return atomic64_add_return | ||
1244 | #endif | ||
1245 | |||
1246 | #endif /* atomic64_add_return_relaxed */ | ||
1247 | |||
1248 | #ifndef atomic64_fetch_add_relaxed | ||
1249 | #define atomic64_fetch_add_acquire atomic64_fetch_add | ||
1250 | #define atomic64_fetch_add_release atomic64_fetch_add | ||
1251 | #define atomic64_fetch_add_relaxed atomic64_fetch_add | ||
1252 | #else /* atomic64_fetch_add_relaxed */ | ||
1253 | |||
1254 | #ifndef atomic64_fetch_add_acquire | ||
1255 | static inline s64 | ||
1256 | atomic64_fetch_add_acquire(s64 i, atomic64_t *v) | ||
1257 | { | ||
1258 | s64 ret = atomic64_fetch_add_relaxed(i, v); | ||
1259 | __atomic_acquire_fence(); | ||
1260 | return ret; | ||
1261 | } | ||
1262 | #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire | ||
1263 | #endif | ||
1264 | |||
1265 | #ifndef atomic64_fetch_add_release | ||
1266 | static inline s64 | ||
1267 | atomic64_fetch_add_release(s64 i, atomic64_t *v) | ||
1268 | { | ||
1269 | __atomic_release_fence(); | ||
1270 | return atomic64_fetch_add_relaxed(i, v); | ||
1271 | } | ||
1272 | #define atomic64_fetch_add_release atomic64_fetch_add_release | ||
1273 | #endif | ||
1274 | |||
1275 | #ifndef atomic64_fetch_add | ||
1276 | static inline s64 | ||
1277 | atomic64_fetch_add(s64 i, atomic64_t *v) | ||
1278 | { | ||
1279 | s64 ret; | ||
1280 | __atomic_pre_full_fence(); | ||
1281 | ret = atomic64_fetch_add_relaxed(i, v); | ||
1282 | __atomic_post_full_fence(); | ||
1283 | return ret; | ||
1284 | } | ||
1285 | #define atomic64_fetch_add atomic64_fetch_add | ||
1286 | #endif | ||
1287 | |||
1288 | #endif /* atomic64_fetch_add_relaxed */ | ||
1289 | |||
1290 | #ifndef atomic64_sub_return_relaxed | ||
1291 | #define atomic64_sub_return_acquire atomic64_sub_return | ||
1292 | #define atomic64_sub_return_release atomic64_sub_return | ||
1293 | #define atomic64_sub_return_relaxed atomic64_sub_return | ||
1294 | #else /* atomic64_sub_return_relaxed */ | ||
1295 | |||
1296 | #ifndef atomic64_sub_return_acquire | ||
1297 | static inline s64 | ||
1298 | atomic64_sub_return_acquire(s64 i, atomic64_t *v) | ||
1299 | { | ||
1300 | s64 ret = atomic64_sub_return_relaxed(i, v); | ||
1301 | __atomic_acquire_fence(); | ||
1302 | return ret; | ||
1303 | } | ||
1304 | #define atomic64_sub_return_acquire atomic64_sub_return_acquire | ||
1305 | #endif | ||
1306 | |||
1307 | #ifndef atomic64_sub_return_release | ||
1308 | static inline s64 | ||
1309 | atomic64_sub_return_release(s64 i, atomic64_t *v) | ||
1310 | { | ||
1311 | __atomic_release_fence(); | ||
1312 | return atomic64_sub_return_relaxed(i, v); | ||
1313 | } | ||
1314 | #define atomic64_sub_return_release atomic64_sub_return_release | ||
1315 | #endif | ||
1316 | |||
1317 | #ifndef atomic64_sub_return | ||
1318 | static inline s64 | ||
1319 | atomic64_sub_return(s64 i, atomic64_t *v) | ||
1320 | { | ||
1321 | s64 ret; | ||
1322 | __atomic_pre_full_fence(); | ||
1323 | ret = atomic64_sub_return_relaxed(i, v); | ||
1324 | __atomic_post_full_fence(); | ||
1325 | return ret; | ||
1326 | } | ||
1327 | #define atomic64_sub_return atomic64_sub_return | ||
1328 | #endif | ||
1329 | |||
1330 | #endif /* atomic64_sub_return_relaxed */ | ||
1331 | |||
1332 | #ifndef atomic64_fetch_sub_relaxed | ||
1333 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub | ||
1334 | #define atomic64_fetch_sub_release atomic64_fetch_sub | ||
1335 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub | ||
1336 | #else /* atomic64_fetch_sub_relaxed */ | ||
1337 | |||
1338 | #ifndef atomic64_fetch_sub_acquire | ||
1339 | static inline s64 | ||
1340 | atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) | ||
1341 | { | ||
1342 | s64 ret = atomic64_fetch_sub_relaxed(i, v); | ||
1343 | __atomic_acquire_fence(); | ||
1344 | return ret; | ||
1345 | } | ||
1346 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire | ||
1347 | #endif | ||
1348 | |||
1349 | #ifndef atomic64_fetch_sub_release | ||
1350 | static inline s64 | ||
1351 | atomic64_fetch_sub_release(s64 i, atomic64_t *v) | ||
1352 | { | ||
1353 | __atomic_release_fence(); | ||
1354 | return atomic64_fetch_sub_relaxed(i, v); | ||
1355 | } | ||
1356 | #define atomic64_fetch_sub_release atomic64_fetch_sub_release | ||
1357 | #endif | ||
1358 | |||
1359 | #ifndef atomic64_fetch_sub | ||
1360 | static inline s64 | ||
1361 | atomic64_fetch_sub(s64 i, atomic64_t *v) | ||
1362 | { | ||
1363 | s64 ret; | ||
1364 | __atomic_pre_full_fence(); | ||
1365 | ret = atomic64_fetch_sub_relaxed(i, v); | ||
1366 | __atomic_post_full_fence(); | ||
1367 | return ret; | ||
1368 | } | ||
1369 | #define atomic64_fetch_sub atomic64_fetch_sub | ||
1370 | #endif | ||
1371 | |||
1372 | #endif /* atomic64_fetch_sub_relaxed */ | ||
1373 | |||
1374 | #ifndef atomic64_inc | ||
1375 | static inline void | ||
1376 | atomic64_inc(atomic64_t *v) | ||
1377 | { | ||
1378 | atomic64_add(1, v); | ||
1379 | } | ||
1380 | #define atomic64_inc atomic64_inc | ||
1381 | #endif | ||
1382 | |||
1383 | #ifndef atomic64_inc_return_relaxed | ||
1384 | #ifdef atomic64_inc_return | ||
1385 | #define atomic64_inc_return_acquire atomic64_inc_return | ||
1386 | #define atomic64_inc_return_release atomic64_inc_return | ||
1387 | #define atomic64_inc_return_relaxed atomic64_inc_return | ||
1388 | #endif /* atomic64_inc_return */ | ||
1389 | |||
1390 | #ifndef atomic64_inc_return | ||
1391 | static inline s64 | ||
1392 | atomic64_inc_return(atomic64_t *v) | ||
1393 | { | ||
1394 | return atomic64_add_return(1, v); | ||
1395 | } | ||
1396 | #define atomic64_inc_return atomic64_inc_return | ||
1397 | #endif | ||
1398 | |||
1399 | #ifndef atomic64_inc_return_acquire | ||
1400 | static inline s64 | ||
1401 | atomic64_inc_return_acquire(atomic64_t *v) | ||
1402 | { | ||
1403 | return atomic64_add_return_acquire(1, v); | ||
1404 | } | ||
1405 | #define atomic64_inc_return_acquire atomic64_inc_return_acquire | ||
1406 | #endif | ||
1407 | |||
1408 | #ifndef atomic64_inc_return_release | ||
1409 | static inline s64 | ||
1410 | atomic64_inc_return_release(atomic64_t *v) | ||
1411 | { | ||
1412 | return atomic64_add_return_release(1, v); | ||
1413 | } | ||
1414 | #define atomic64_inc_return_release atomic64_inc_return_release | ||
1415 | #endif | ||
1416 | |||
1417 | #ifndef atomic64_inc_return_relaxed | ||
1418 | static inline s64 | ||
1419 | atomic64_inc_return_relaxed(atomic64_t *v) | ||
1420 | { | ||
1421 | return atomic64_add_return_relaxed(1, v); | ||
1422 | } | ||
1423 | #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed | ||
1424 | #endif | ||
1425 | |||
1426 | #else /* atomic64_inc_return_relaxed */ | ||
1427 | |||
1428 | #ifndef atomic64_inc_return_acquire | ||
1429 | static inline s64 | ||
1430 | atomic64_inc_return_acquire(atomic64_t *v) | ||
1431 | { | ||
1432 | s64 ret = atomic64_inc_return_relaxed(v); | ||
1433 | __atomic_acquire_fence(); | ||
1434 | return ret; | ||
1435 | } | ||
1436 | #define atomic64_inc_return_acquire atomic64_inc_return_acquire | ||
1437 | #endif | ||
1438 | |||
1439 | #ifndef atomic64_inc_return_release | ||
1440 | static inline s64 | ||
1441 | atomic64_inc_return_release(atomic64_t *v) | ||
1442 | { | ||
1443 | __atomic_release_fence(); | ||
1444 | return atomic64_inc_return_relaxed(v); | ||
1445 | } | ||
1446 | #define atomic64_inc_return_release atomic64_inc_return_release | ||
1447 | #endif | ||
1448 | |||
1449 | #ifndef atomic64_inc_return | ||
1450 | static inline s64 | ||
1451 | atomic64_inc_return(atomic64_t *v) | ||
1452 | { | ||
1453 | s64 ret; | ||
1454 | __atomic_pre_full_fence(); | ||
1455 | ret = atomic64_inc_return_relaxed(v); | ||
1456 | __atomic_post_full_fence(); | ||
1457 | return ret; | ||
1458 | } | ||
1459 | #define atomic64_inc_return atomic64_inc_return | ||
1460 | #endif | ||
1461 | |||
1462 | #endif /* atomic64_inc_return_relaxed */ | ||
1463 | |||
1464 | #ifndef atomic64_fetch_inc_relaxed | ||
1465 | #ifdef atomic64_fetch_inc | ||
1466 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc | ||
1467 | #define atomic64_fetch_inc_release atomic64_fetch_inc | ||
1468 | #define atomic64_fetch_inc_relaxed atomic64_fetch_inc | ||
1469 | #endif /* atomic64_fetch_inc */ | ||
1470 | |||
1471 | #ifndef atomic64_fetch_inc | ||
1472 | static inline s64 | ||
1473 | atomic64_fetch_inc(atomic64_t *v) | ||
1474 | { | ||
1475 | return atomic64_fetch_add(1, v); | ||
1476 | } | ||
1477 | #define atomic64_fetch_inc atomic64_fetch_inc | ||
1478 | #endif | ||
1479 | |||
1480 | #ifndef atomic64_fetch_inc_acquire | ||
1481 | static inline s64 | ||
1482 | atomic64_fetch_inc_acquire(atomic64_t *v) | ||
1483 | { | ||
1484 | return atomic64_fetch_add_acquire(1, v); | ||
1485 | } | ||
1486 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire | ||
1487 | #endif | ||
1488 | |||
1489 | #ifndef atomic64_fetch_inc_release | ||
1490 | static inline s64 | ||
1491 | atomic64_fetch_inc_release(atomic64_t *v) | ||
1492 | { | ||
1493 | return atomic64_fetch_add_release(1, v); | ||
1494 | } | ||
1495 | #define atomic64_fetch_inc_release atomic64_fetch_inc_release | ||
1496 | #endif | ||
1497 | |||
1498 | #ifndef atomic64_fetch_inc_relaxed | ||
1499 | static inline s64 | ||
1500 | atomic64_fetch_inc_relaxed(atomic64_t *v) | ||
1501 | { | ||
1502 | return atomic64_fetch_add_relaxed(1, v); | ||
1503 | } | ||
1504 | #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed | ||
1505 | #endif | ||
1506 | |||
1507 | #else /* atomic64_fetch_inc_relaxed */ | ||
1508 | |||
1509 | #ifndef atomic64_fetch_inc_acquire | ||
1510 | static inline s64 | ||
1511 | atomic64_fetch_inc_acquire(atomic64_t *v) | ||
1512 | { | ||
1513 | s64 ret = atomic64_fetch_inc_relaxed(v); | ||
1514 | __atomic_acquire_fence(); | ||
1515 | return ret; | ||
1516 | } | ||
1517 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire | ||
1518 | #endif | ||
1519 | |||
1520 | #ifndef atomic64_fetch_inc_release | ||
1521 | static inline s64 | ||
1522 | atomic64_fetch_inc_release(atomic64_t *v) | ||
1523 | { | ||
1524 | __atomic_release_fence(); | ||
1525 | return atomic64_fetch_inc_relaxed(v); | ||
1526 | } | ||
1527 | #define atomic64_fetch_inc_release atomic64_fetch_inc_release | ||
1528 | #endif | ||
1529 | |||
1530 | #ifndef atomic64_fetch_inc | ||
1531 | static inline s64 | ||
1532 | atomic64_fetch_inc(atomic64_t *v) | ||
1533 | { | ||
1534 | s64 ret; | ||
1535 | __atomic_pre_full_fence(); | ||
1536 | ret = atomic64_fetch_inc_relaxed(v); | ||
1537 | __atomic_post_full_fence(); | ||
1538 | return ret; | ||
1539 | } | ||
1540 | #define atomic64_fetch_inc atomic64_fetch_inc | ||
1541 | #endif | ||
1542 | |||
1543 | #endif /* atomic64_fetch_inc_relaxed */ | ||
1544 | |||
1545 | #ifndef atomic64_dec | ||
1546 | static inline void | ||
1547 | atomic64_dec(atomic64_t *v) | ||
1548 | { | ||
1549 | atomic64_sub(1, v); | ||
1550 | } | ||
1551 | #define atomic64_dec atomic64_dec | ||
1552 | #endif | ||
1553 | |||
1554 | #ifndef atomic64_dec_return_relaxed | ||
1555 | #ifdef atomic64_dec_return | ||
1556 | #define atomic64_dec_return_acquire atomic64_dec_return | ||
1557 | #define atomic64_dec_return_release atomic64_dec_return | ||
1558 | #define atomic64_dec_return_relaxed atomic64_dec_return | ||
1559 | #endif /* atomic64_dec_return */ | ||
1560 | |||
1561 | #ifndef atomic64_dec_return | ||
1562 | static inline s64 | ||
1563 | atomic64_dec_return(atomic64_t *v) | ||
1564 | { | ||
1565 | return atomic64_sub_return(1, v); | ||
1566 | } | ||
1567 | #define atomic64_dec_return atomic64_dec_return | ||
1568 | #endif | ||
1569 | |||
1570 | #ifndef atomic64_dec_return_acquire | ||
1571 | static inline s64 | ||
1572 | atomic64_dec_return_acquire(atomic64_t *v) | ||
1573 | { | ||
1574 | return atomic64_sub_return_acquire(1, v); | ||
1575 | } | ||
1576 | #define atomic64_dec_return_acquire atomic64_dec_return_acquire | ||
1577 | #endif | ||
1578 | |||
1579 | #ifndef atomic64_dec_return_release | ||
1580 | static inline s64 | ||
1581 | atomic64_dec_return_release(atomic64_t *v) | ||
1582 | { | ||
1583 | return atomic64_sub_return_release(1, v); | ||
1584 | } | ||
1585 | #define atomic64_dec_return_release atomic64_dec_return_release | ||
1586 | #endif | ||
1587 | |||
1588 | #ifndef atomic64_dec_return_relaxed | ||
1589 | static inline s64 | ||
1590 | atomic64_dec_return_relaxed(atomic64_t *v) | ||
1591 | { | ||
1592 | return atomic64_sub_return_relaxed(1, v); | ||
1593 | } | ||
1594 | #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed | ||
1595 | #endif | ||
1596 | |||
1597 | #else /* atomic64_dec_return_relaxed */ | ||
1598 | |||
1599 | #ifndef atomic64_dec_return_acquire | ||
1600 | static inline s64 | ||
1601 | atomic64_dec_return_acquire(atomic64_t *v) | ||
1602 | { | ||
1603 | s64 ret = atomic64_dec_return_relaxed(v); | ||
1604 | __atomic_acquire_fence(); | ||
1605 | return ret; | ||
1606 | } | ||
1607 | #define atomic64_dec_return_acquire atomic64_dec_return_acquire | ||
1608 | #endif | ||
1609 | |||
1610 | #ifndef atomic64_dec_return_release | ||
1611 | static inline s64 | ||
1612 | atomic64_dec_return_release(atomic64_t *v) | ||
1613 | { | ||
1614 | __atomic_release_fence(); | ||
1615 | return atomic64_dec_return_relaxed(v); | ||
1616 | } | ||
1617 | #define atomic64_dec_return_release atomic64_dec_return_release | ||
1618 | #endif | ||
1619 | |||
1620 | #ifndef atomic64_dec_return | ||
1621 | static inline s64 | ||
1622 | atomic64_dec_return(atomic64_t *v) | ||
1623 | { | ||
1624 | s64 ret; | ||
1625 | __atomic_pre_full_fence(); | ||
1626 | ret = atomic64_dec_return_relaxed(v); | ||
1627 | __atomic_post_full_fence(); | ||
1628 | return ret; | ||
1629 | } | ||
1630 | #define atomic64_dec_return atomic64_dec_return | ||
1631 | #endif | ||
1632 | |||
1633 | #endif /* atomic64_dec_return_relaxed */ | ||
1634 | |||
1635 | #ifndef atomic64_fetch_dec_relaxed | ||
1636 | #ifdef atomic64_fetch_dec | ||
1637 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec | ||
1638 | #define atomic64_fetch_dec_release atomic64_fetch_dec | ||
1639 | #define atomic64_fetch_dec_relaxed atomic64_fetch_dec | ||
1640 | #endif /* atomic64_fetch_dec */ | ||
1641 | |||
1642 | #ifndef atomic64_fetch_dec | ||
1643 | static inline s64 | ||
1644 | atomic64_fetch_dec(atomic64_t *v) | ||
1645 | { | ||
1646 | return atomic64_fetch_sub(1, v); | ||
1647 | } | ||
1648 | #define atomic64_fetch_dec atomic64_fetch_dec | ||
1649 | #endif | ||
1650 | |||
1651 | #ifndef atomic64_fetch_dec_acquire | ||
1652 | static inline s64 | ||
1653 | atomic64_fetch_dec_acquire(atomic64_t *v) | ||
1654 | { | ||
1655 | return atomic64_fetch_sub_acquire(1, v); | ||
1656 | } | ||
1657 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire | ||
1658 | #endif | ||
1659 | |||
1660 | #ifndef atomic64_fetch_dec_release | ||
1661 | static inline s64 | ||
1662 | atomic64_fetch_dec_release(atomic64_t *v) | ||
1663 | { | ||
1664 | return atomic64_fetch_sub_release(1, v); | ||
1665 | } | ||
1666 | #define atomic64_fetch_dec_release atomic64_fetch_dec_release | ||
1667 | #endif | ||
1668 | |||
1669 | #ifndef atomic64_fetch_dec_relaxed | ||
1670 | static inline s64 | ||
1671 | atomic64_fetch_dec_relaxed(atomic64_t *v) | ||
1672 | { | ||
1673 | return atomic64_fetch_sub_relaxed(1, v); | ||
1674 | } | ||
1675 | #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed | ||
1676 | #endif | ||
1677 | |||
1678 | #else /* atomic64_fetch_dec_relaxed */ | ||
1679 | |||
1680 | #ifndef atomic64_fetch_dec_acquire | ||
1681 | static inline s64 | ||
1682 | atomic64_fetch_dec_acquire(atomic64_t *v) | ||
1683 | { | ||
1684 | s64 ret = atomic64_fetch_dec_relaxed(v); | ||
1685 | __atomic_acquire_fence(); | ||
1686 | return ret; | ||
1687 | } | ||
1688 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire | ||
1689 | #endif | ||
1690 | |||
1691 | #ifndef atomic64_fetch_dec_release | ||
1692 | static inline s64 | ||
1693 | atomic64_fetch_dec_release(atomic64_t *v) | ||
1694 | { | ||
1695 | __atomic_release_fence(); | ||
1696 | return atomic64_fetch_dec_relaxed(v); | ||
1697 | } | ||
1698 | #define atomic64_fetch_dec_release atomic64_fetch_dec_release | ||
1699 | #endif | ||
1700 | |||
1701 | #ifndef atomic64_fetch_dec | ||
1702 | static inline s64 | ||
1703 | atomic64_fetch_dec(atomic64_t *v) | ||
1704 | { | ||
1705 | s64 ret; | ||
1706 | __atomic_pre_full_fence(); | ||
1707 | ret = atomic64_fetch_dec_relaxed(v); | ||
1708 | __atomic_post_full_fence(); | ||
1709 | return ret; | ||
1710 | } | ||
1711 | #define atomic64_fetch_dec atomic64_fetch_dec | ||
1712 | #endif | ||
1713 | |||
1714 | #endif /* atomic64_fetch_dec_relaxed */ | ||
1715 | |||
1716 | #ifndef atomic64_fetch_and_relaxed | ||
1717 | #define atomic64_fetch_and_acquire atomic64_fetch_and | ||
1718 | #define atomic64_fetch_and_release atomic64_fetch_and | ||
1719 | #define atomic64_fetch_and_relaxed atomic64_fetch_and | ||
1720 | #else /* atomic64_fetch_and_relaxed */ | ||
1721 | |||
1722 | #ifndef atomic64_fetch_and_acquire | ||
1723 | static inline s64 | ||
1724 | atomic64_fetch_and_acquire(s64 i, atomic64_t *v) | ||
1725 | { | ||
1726 | s64 ret = atomic64_fetch_and_relaxed(i, v); | ||
1727 | __atomic_acquire_fence(); | ||
1728 | return ret; | ||
1729 | } | ||
1730 | #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire | ||
1731 | #endif | ||
1732 | |||
1733 | #ifndef atomic64_fetch_and_release | ||
1734 | static inline s64 | ||
1735 | atomic64_fetch_and_release(s64 i, atomic64_t *v) | ||
1736 | { | ||
1737 | __atomic_release_fence(); | ||
1738 | return atomic64_fetch_and_relaxed(i, v); | ||
1739 | } | ||
1740 | #define atomic64_fetch_and_release atomic64_fetch_and_release | ||
1741 | #endif | ||
1742 | |||
1743 | #ifndef atomic64_fetch_and | ||
1744 | static inline s64 | ||
1745 | atomic64_fetch_and(s64 i, atomic64_t *v) | ||
1746 | { | ||
1747 | s64 ret; | ||
1748 | __atomic_pre_full_fence(); | ||
1749 | ret = atomic64_fetch_and_relaxed(i, v); | ||
1750 | __atomic_post_full_fence(); | ||
1751 | return ret; | ||
1752 | } | ||
1753 | #define atomic64_fetch_and atomic64_fetch_and | ||
1754 | #endif | ||
1755 | |||
1756 | #endif /* atomic64_fetch_and_relaxed */ | ||
1757 | |||
1758 | #ifndef atomic64_andnot | ||
1759 | static inline void | ||
1760 | atomic64_andnot(s64 i, atomic64_t *v) | ||
1761 | { | ||
1762 | atomic64_and(~i, v); | ||
1763 | } | ||
1764 | #define atomic64_andnot atomic64_andnot | ||
1765 | #endif | ||
1766 | |||
1767 | #ifndef atomic64_fetch_andnot_relaxed | ||
1768 | #ifdef atomic64_fetch_andnot | ||
1769 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | ||
1770 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | ||
1771 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | ||
1772 | #endif /* atomic64_fetch_andnot */ | ||
1773 | |||
1774 | #ifndef atomic64_fetch_andnot | ||
1775 | static inline s64 | ||
1776 | atomic64_fetch_andnot(s64 i, atomic64_t *v) | ||
1777 | { | ||
1778 | return atomic64_fetch_and(~i, v); | ||
1779 | } | ||
1780 | #define atomic64_fetch_andnot atomic64_fetch_andnot | ||
1781 | #endif | ||
1782 | |||
1783 | #ifndef atomic64_fetch_andnot_acquire | ||
1784 | static inline s64 | ||
1785 | atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) | ||
1786 | { | ||
1787 | return atomic64_fetch_and_acquire(~i, v); | ||
1788 | } | ||
1789 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire | ||
1790 | #endif | ||
1791 | |||
1792 | #ifndef atomic64_fetch_andnot_release | ||
1793 | static inline s64 | ||
1794 | atomic64_fetch_andnot_release(s64 i, atomic64_t *v) | ||
1795 | { | ||
1796 | return atomic64_fetch_and_release(~i, v); | ||
1797 | } | ||
1798 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release | ||
1799 | #endif | ||
1800 | |||
1801 | #ifndef atomic64_fetch_andnot_relaxed | ||
1802 | static inline s64 | ||
1803 | atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) | ||
1804 | { | ||
1805 | return atomic64_fetch_and_relaxed(~i, v); | ||
1806 | } | ||
1807 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | ||
1808 | #endif | ||
1809 | |||
1810 | #else /* atomic64_fetch_andnot_relaxed */ | ||
1811 | |||
1812 | #ifndef atomic64_fetch_andnot_acquire | ||
1813 | static inline s64 | ||
1814 | atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) | ||
1815 | { | ||
1816 | s64 ret = atomic64_fetch_andnot_relaxed(i, v); | ||
1817 | __atomic_acquire_fence(); | ||
1818 | return ret; | ||
1819 | } | ||
1820 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire | ||
1821 | #endif | ||
1822 | |||
1823 | #ifndef atomic64_fetch_andnot_release | ||
1824 | static inline s64 | ||
1825 | atomic64_fetch_andnot_release(s64 i, atomic64_t *v) | ||
1826 | { | ||
1827 | __atomic_release_fence(); | ||
1828 | return atomic64_fetch_andnot_relaxed(i, v); | ||
1829 | } | ||
1830 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release | ||
1831 | #endif | ||
1832 | |||
1833 | #ifndef atomic64_fetch_andnot | ||
1834 | static inline s64 | ||
1835 | atomic64_fetch_andnot(s64 i, atomic64_t *v) | ||
1836 | { | ||
1837 | s64 ret; | ||
1838 | __atomic_pre_full_fence(); | ||
1839 | ret = atomic64_fetch_andnot_relaxed(i, v); | ||
1840 | __atomic_post_full_fence(); | ||
1841 | return ret; | ||
1842 | } | ||
1843 | #define atomic64_fetch_andnot atomic64_fetch_andnot | ||
1844 | #endif | ||
1845 | |||
1846 | #endif /* atomic64_fetch_andnot_relaxed */ | ||
1847 | |||
1848 | #ifndef atomic64_fetch_or_relaxed | ||
1849 | #define atomic64_fetch_or_acquire atomic64_fetch_or | ||
1850 | #define atomic64_fetch_or_release atomic64_fetch_or | ||
1851 | #define atomic64_fetch_or_relaxed atomic64_fetch_or | ||
1852 | #else /* atomic64_fetch_or_relaxed */ | ||
1853 | |||
1854 | #ifndef atomic64_fetch_or_acquire | ||
1855 | static inline s64 | ||
1856 | atomic64_fetch_or_acquire(s64 i, atomic64_t *v) | ||
1857 | { | ||
1858 | s64 ret = atomic64_fetch_or_relaxed(i, v); | ||
1859 | __atomic_acquire_fence(); | ||
1860 | return ret; | ||
1861 | } | ||
1862 | #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire | ||
1863 | #endif | ||
1864 | |||
1865 | #ifndef atomic64_fetch_or_release | ||
1866 | static inline s64 | ||
1867 | atomic64_fetch_or_release(s64 i, atomic64_t *v) | ||
1868 | { | ||
1869 | __atomic_release_fence(); | ||
1870 | return atomic64_fetch_or_relaxed(i, v); | ||
1871 | } | ||
1872 | #define atomic64_fetch_or_release atomic64_fetch_or_release | ||
1873 | #endif | ||
1874 | |||
1875 | #ifndef atomic64_fetch_or | ||
1876 | static inline s64 | ||
1877 | atomic64_fetch_or(s64 i, atomic64_t *v) | ||
1878 | { | ||
1879 | s64 ret; | ||
1880 | __atomic_pre_full_fence(); | ||
1881 | ret = atomic64_fetch_or_relaxed(i, v); | ||
1882 | __atomic_post_full_fence(); | ||
1883 | return ret; | ||
1884 | } | ||
1885 | #define atomic64_fetch_or atomic64_fetch_or | ||
1886 | #endif | ||
1887 | |||
1888 | #endif /* atomic64_fetch_or_relaxed */ | ||
1889 | |||
1890 | #ifndef atomic64_fetch_xor_relaxed | ||
1891 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor | ||
1892 | #define atomic64_fetch_xor_release atomic64_fetch_xor | ||
1893 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor | ||
1894 | #else /* atomic64_fetch_xor_relaxed */ | ||
1895 | |||
1896 | #ifndef atomic64_fetch_xor_acquire | ||
1897 | static inline s64 | ||
1898 | atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) | ||
1899 | { | ||
1900 | s64 ret = atomic64_fetch_xor_relaxed(i, v); | ||
1901 | __atomic_acquire_fence(); | ||
1902 | return ret; | ||
1903 | } | ||
1904 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire | ||
1905 | #endif | ||
1906 | |||
1907 | #ifndef atomic64_fetch_xor_release | ||
1908 | static inline s64 | ||
1909 | atomic64_fetch_xor_release(s64 i, atomic64_t *v) | ||
1910 | { | ||
1911 | __atomic_release_fence(); | ||
1912 | return atomic64_fetch_xor_relaxed(i, v); | ||
1913 | } | ||
1914 | #define atomic64_fetch_xor_release atomic64_fetch_xor_release | ||
1915 | #endif | ||
1916 | |||
1917 | #ifndef atomic64_fetch_xor | ||
1918 | static inline s64 | ||
1919 | atomic64_fetch_xor(s64 i, atomic64_t *v) | ||
1920 | { | ||
1921 | s64 ret; | ||
1922 | __atomic_pre_full_fence(); | ||
1923 | ret = atomic64_fetch_xor_relaxed(i, v); | ||
1924 | __atomic_post_full_fence(); | ||
1925 | return ret; | ||
1926 | } | ||
1927 | #define atomic64_fetch_xor atomic64_fetch_xor | ||
1928 | #endif | ||
1929 | |||
1930 | #endif /* atomic64_fetch_xor_relaxed */ | ||
1931 | |||
1932 | #ifndef atomic64_xchg_relaxed | ||
1933 | #define atomic64_xchg_acquire atomic64_xchg | ||
1934 | #define atomic64_xchg_release atomic64_xchg | ||
1935 | #define atomic64_xchg_relaxed atomic64_xchg | ||
1936 | #else /* atomic64_xchg_relaxed */ | ||
1937 | |||
1938 | #ifndef atomic64_xchg_acquire | ||
1939 | static inline s64 | ||
1940 | atomic64_xchg_acquire(atomic64_t *v, s64 i) | ||
1941 | { | ||
1942 | s64 ret = atomic64_xchg_relaxed(v, i); | ||
1943 | __atomic_acquire_fence(); | ||
1944 | return ret; | ||
1945 | } | ||
1946 | #define atomic64_xchg_acquire atomic64_xchg_acquire | ||
1947 | #endif | ||
1948 | |||
1949 | #ifndef atomic64_xchg_release | ||
1950 | static inline s64 | ||
1951 | atomic64_xchg_release(atomic64_t *v, s64 i) | ||
1952 | { | ||
1953 | __atomic_release_fence(); | ||
1954 | return atomic64_xchg_relaxed(v, i); | ||
1955 | } | ||
1956 | #define atomic64_xchg_release atomic64_xchg_release | ||
1957 | #endif | ||
1958 | |||
1959 | #ifndef atomic64_xchg | ||
1960 | static inline s64 | ||
1961 | atomic64_xchg(atomic64_t *v, s64 i) | ||
1962 | { | ||
1963 | s64 ret; | ||
1964 | __atomic_pre_full_fence(); | ||
1965 | ret = atomic64_xchg_relaxed(v, i); | ||
1966 | __atomic_post_full_fence(); | ||
1967 | return ret; | ||
1968 | } | ||
1969 | #define atomic64_xchg atomic64_xchg | ||
1970 | #endif | ||
1971 | |||
1972 | #endif /* atomic64_xchg_relaxed */ | ||
1973 | |||
1974 | #ifndef atomic64_cmpxchg_relaxed | ||
1975 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg | ||
1976 | #define atomic64_cmpxchg_release atomic64_cmpxchg | ||
1977 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg | ||
1978 | #else /* atomic64_cmpxchg_relaxed */ | ||
1979 | |||
1980 | #ifndef atomic64_cmpxchg_acquire | ||
1981 | static inline s64 | ||
1982 | atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) | ||
1983 | { | ||
1984 | s64 ret = atomic64_cmpxchg_relaxed(v, old, new); | ||
1985 | __atomic_acquire_fence(); | ||
1986 | return ret; | ||
1987 | } | ||
1988 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire | ||
1989 | #endif | ||
1990 | |||
1991 | #ifndef atomic64_cmpxchg_release | ||
1992 | static inline s64 | ||
1993 | atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) | ||
1994 | { | ||
1995 | __atomic_release_fence(); | ||
1996 | return atomic64_cmpxchg_relaxed(v, old, new); | ||
1997 | } | ||
1998 | #define atomic64_cmpxchg_release atomic64_cmpxchg_release | ||
1999 | #endif | ||
2000 | |||
2001 | #ifndef atomic64_cmpxchg | ||
2002 | static inline s64 | ||
2003 | atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) | ||
2004 | { | ||
2005 | s64 ret; | ||
2006 | __atomic_pre_full_fence(); | ||
2007 | ret = atomic64_cmpxchg_relaxed(v, old, new); | ||
2008 | __atomic_post_full_fence(); | ||
2009 | return ret; | ||
2010 | } | ||
2011 | #define atomic64_cmpxchg atomic64_cmpxchg | ||
2012 | #endif | ||
2013 | |||
2014 | #endif /* atomic64_cmpxchg_relaxed */ | ||
2015 | |||
2016 | #ifndef atomic64_try_cmpxchg_relaxed | ||
2017 | #ifdef atomic64_try_cmpxchg | ||
2018 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg | ||
2019 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg | ||
2020 | #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg | ||
2021 | #endif /* atomic64_try_cmpxchg */ | ||
2022 | |||
2023 | #ifndef atomic64_try_cmpxchg | ||
2024 | static inline bool | ||
2025 | atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) | ||
2026 | { | ||
2027 | s64 r, o = *old; | ||
2028 | r = atomic64_cmpxchg(v, o, new); | ||
2029 | if (unlikely(r != o)) | ||
2030 | *old = r; | ||
2031 | return likely(r == o); | ||
2032 | } | ||
2033 | #define atomic64_try_cmpxchg atomic64_try_cmpxchg | ||
2034 | #endif | ||
2035 | |||
2036 | #ifndef atomic64_try_cmpxchg_acquire | ||
2037 | static inline bool | ||
2038 | atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) | ||
2039 | { | ||
2040 | s64 r, o = *old; | ||
2041 | r = atomic64_cmpxchg_acquire(v, o, new); | ||
2042 | if (unlikely(r != o)) | ||
2043 | *old = r; | ||
2044 | return likely(r == o); | ||
2045 | } | ||
2046 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire | ||
2047 | #endif | ||
2048 | |||
2049 | #ifndef atomic64_try_cmpxchg_release | ||
2050 | static inline bool | ||
2051 | atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) | ||
2052 | { | ||
2053 | s64 r, o = *old; | ||
2054 | r = atomic64_cmpxchg_release(v, o, new); | ||
2055 | if (unlikely(r != o)) | ||
2056 | *old = r; | ||
2057 | return likely(r == o); | ||
2058 | } | ||
2059 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release | ||
2060 | #endif | ||
2061 | |||
2062 | #ifndef atomic64_try_cmpxchg_relaxed | ||
2063 | static inline bool | ||
2064 | atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) | ||
2065 | { | ||
2066 | s64 r, o = *old; | ||
2067 | r = atomic64_cmpxchg_relaxed(v, o, new); | ||
2068 | if (unlikely(r != o)) | ||
2069 | *old = r; | ||
2070 | return likely(r == o); | ||
2071 | } | ||
2072 | #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed | ||
2073 | #endif | ||
2074 | |||
2075 | #else /* atomic64_try_cmpxchg_relaxed */ | ||
2076 | |||
2077 | #ifndef atomic64_try_cmpxchg_acquire | ||
2078 | static inline bool | ||
2079 | atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) | ||
2080 | { | ||
2081 | bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); | ||
2082 | __atomic_acquire_fence(); | ||
2083 | return ret; | ||
2084 | } | ||
2085 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire | ||
2086 | #endif | ||
2087 | |||
2088 | #ifndef atomic64_try_cmpxchg_release | ||
2089 | static inline bool | ||
2090 | atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) | ||
2091 | { | ||
2092 | __atomic_release_fence(); | ||
2093 | return atomic64_try_cmpxchg_relaxed(v, old, new); | ||
2094 | } | ||
2095 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release | ||
2096 | #endif | ||
2097 | |||
2098 | #ifndef atomic64_try_cmpxchg | ||
2099 | static inline bool | ||
2100 | atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) | ||
2101 | { | ||
2102 | bool ret; | ||
2103 | __atomic_pre_full_fence(); | ||
2104 | ret = atomic64_try_cmpxchg_relaxed(v, old, new); | ||
2105 | __atomic_post_full_fence(); | ||
2106 | return ret; | ||
2107 | } | ||
2108 | #define atomic64_try_cmpxchg atomic64_try_cmpxchg | ||
2109 | #endif | ||
2110 | |||
2111 | #endif /* atomic64_try_cmpxchg_relaxed */ | ||
2112 | |||
2113 | #ifndef atomic64_sub_and_test | ||
2114 | /** | ||
2115 | * atomic64_sub_and_test - subtract value from variable and test result | ||
2116 | * @i: integer value to subtract | ||
2117 | * @v: pointer of type atomic64_t | ||
2118 | * | ||
2119 | * Atomically subtracts @i from @v and returns | ||
2120 | * true if the result is zero, or false for all | ||
2121 | * other cases. | ||
2122 | */ | ||
2123 | static inline bool | ||
2124 | atomic64_sub_and_test(s64 i, atomic64_t *v) | ||
2125 | { | ||
2126 | return atomic64_sub_return(i, v) == 0; | ||
2127 | } | ||
2128 | #define atomic64_sub_and_test atomic64_sub_and_test | ||
2129 | #endif | ||
2130 | |||
2131 | #ifndef atomic64_dec_and_test | ||
2132 | /** | ||
2133 | * atomic64_dec_and_test - decrement and test | ||
2134 | * @v: pointer of type atomic64_t | ||
2135 | * | ||
2136 | * Atomically decrements @v by 1 and | ||
2137 | * returns true if the result is 0, or false for all other | ||
2138 | * cases. | ||
2139 | */ | ||
2140 | static inline bool | ||
2141 | atomic64_dec_and_test(atomic64_t *v) | ||
2142 | { | ||
2143 | return atomic64_dec_return(v) == 0; | ||
2144 | } | ||
2145 | #define atomic64_dec_and_test atomic64_dec_and_test | ||
2146 | #endif | ||
2147 | |||
2148 | #ifndef atomic64_inc_and_test | ||
2149 | /** | ||
2150 | * atomic64_inc_and_test - increment and test | ||
2151 | * @v: pointer of type atomic64_t | ||
2152 | * | ||
2153 | * Atomically increments @v by 1 | ||
2154 | * and returns true if the result is zero, or false for all | ||
2155 | * other cases. | ||
2156 | */ | ||
2157 | static inline bool | ||
2158 | atomic64_inc_and_test(atomic64_t *v) | ||
2159 | { | ||
2160 | return atomic64_inc_return(v) == 0; | ||
2161 | } | ||
2162 | #define atomic64_inc_and_test atomic64_inc_and_test | ||
2163 | #endif | ||
2164 | |||
2165 | #ifndef atomic64_add_negative | ||
2166 | /** | ||
2167 | * atomic64_add_negative - add and test if negative | ||
2168 | * @i: integer value to add | ||
2169 | * @v: pointer of type atomic64_t | ||
2170 | * | ||
2171 | * Atomically adds @i to @v and returns true | ||
2172 | * if the result is negative, or false when | ||
2173 | * result is greater than or equal to zero. | ||
2174 | */ | ||
2175 | static inline bool | ||
2176 | atomic64_add_negative(s64 i, atomic64_t *v) | ||
2177 | { | ||
2178 | return atomic64_add_return(i, v) < 0; | ||
2179 | } | ||
2180 | #define atomic64_add_negative atomic64_add_negative | ||
2181 | #endif | ||
2182 | |||
2183 | #ifndef atomic64_fetch_add_unless | ||
2184 | /** | ||
2185 | * atomic64_fetch_add_unless - add unless the number is already a given value | ||
2186 | * @v: pointer of type atomic64_t | ||
2187 | * @a: the amount to add to v... | ||
2188 | * @u: ...unless v is equal to u. | ||
2189 | * | ||
2190 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
2191 | * Returns the original value of @v. | ||
2192 | */ | ||
2193 | static inline s64 | ||
2194 | atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) | ||
2195 | { | ||
2196 | s64 c = atomic64_read(v); | ||
2197 | |||
2198 | do { | ||
2199 | if (unlikely(c == u)) | ||
2200 | break; | ||
2201 | } while (!atomic64_try_cmpxchg(v, &c, c + a)); | ||
2202 | |||
2203 | return c; | ||
2204 | } | ||
2205 | #define atomic64_fetch_add_unless atomic64_fetch_add_unless | ||
2206 | #endif | ||
2207 | |||
2208 | #ifndef atomic64_add_unless | ||
2209 | /** | ||
2210 | * atomic64_add_unless - add unless the number is already a given value | ||
2211 | * @v: pointer of type atomic64_t | ||
2212 | * @a: the amount to add to v... | ||
2213 | * @u: ...unless v is equal to u. | ||
2214 | * | ||
2215 | * Atomically adds @a to @v, if @v was not already @u. | ||
2216 | * Returns true if the addition was done. | ||
2217 | */ | ||
2218 | static inline bool | ||
2219 | atomic64_add_unless(atomic64_t *v, s64 a, s64 u) | ||
2220 | { | ||
2221 | return atomic64_fetch_add_unless(v, a, u) != u; | ||
2222 | } | ||
2223 | #define atomic64_add_unless atomic64_add_unless | ||
2224 | #endif | ||
2225 | |||
2226 | #ifndef atomic64_inc_not_zero | ||
2227 | /** | ||
2228 | * atomic64_inc_not_zero - increment unless the number is zero | ||
2229 | * @v: pointer of type atomic64_t | ||
2230 | * | ||
2231 | * Atomically increments @v by 1, if @v is non-zero. | ||
2232 | * Returns true if the increment was done. | ||
2233 | */ | ||
2234 | static inline bool | ||
2235 | atomic64_inc_not_zero(atomic64_t *v) | ||
2236 | { | ||
2237 | return atomic64_add_unless(v, 1, 0); | ||
2238 | } | ||
2239 | #define atomic64_inc_not_zero atomic64_inc_not_zero | ||
2240 | #endif | ||
2241 | |||
2242 | #ifndef atomic64_inc_unless_negative | ||
2243 | static inline bool | ||
2244 | atomic64_inc_unless_negative(atomic64_t *v) | ||
2245 | { | ||
2246 | s64 c = atomic64_read(v); | ||
2247 | |||
2248 | do { | ||
2249 | if (unlikely(c < 0)) | ||
2250 | return false; | ||
2251 | } while (!atomic64_try_cmpxchg(v, &c, c + 1)); | ||
2252 | |||
2253 | return true; | ||
2254 | } | ||
2255 | #define atomic64_inc_unless_negative atomic64_inc_unless_negative | ||
2256 | #endif | ||
2257 | |||
2258 | #ifndef atomic64_dec_unless_positive | ||
2259 | static inline bool | ||
2260 | atomic64_dec_unless_positive(atomic64_t *v) | ||
2261 | { | ||
2262 | s64 c = atomic64_read(v); | ||
2263 | |||
2264 | do { | ||
2265 | if (unlikely(c > 0)) | ||
2266 | return false; | ||
2267 | } while (!atomic64_try_cmpxchg(v, &c, c - 1)); | ||
2268 | |||
2269 | return true; | ||
2270 | } | ||
2271 | #define atomic64_dec_unless_positive atomic64_dec_unless_positive | ||
2272 | #endif | ||
2273 | |||
2274 | #ifndef atomic64_dec_if_positive | ||
2275 | static inline s64 | ||
2276 | atomic64_dec_if_positive(atomic64_t *v) | ||
2277 | { | ||
2278 | s64 dec, c = atomic64_read(v); | ||
2279 | |||
2280 | do { | ||
2281 | dec = c - 1; | ||
2282 | if (unlikely(dec < 0)) | ||
2283 | break; | ||
2284 | } while (!atomic64_try_cmpxchg(v, &c, dec)); | ||
2285 | |||
2286 | return dec; | ||
2287 | } | ||
2288 | #define atomic64_dec_if_positive atomic64_dec_if_positive | ||
2289 | #endif | ||
2290 | |||
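Because atomic64_dec_if_positive() returns the old value minus one whether or not the decrement happened, a non-negative return tells the caller it successfully consumed one unit. A hedged usage sketch with hypothetical names:

/* Hypothetical example: claim one slot from a counted pool, failing when empty. */
static inline bool pool_claim_slot(atomic64_t *free_slots)
{
	/* >= 0 means the counter was positive and we decremented it. */
	return atomic64_dec_if_positive(free_slots) >= 0;
}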
2291 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
2292 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
2293 | |||
2294 | #endif /* _LINUX_ATOMIC_FALLBACK_H */ | ||
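The cond_read macros above simply wrap smp_cond_load_relaxed()/smp_cond_load_acquire(); they are used to spin until another CPU publishes a value, with the acquire form ordering subsequent loads after the observed store. A minimal sketch, assuming a hypothetical wait helper:

/* Hypothetical example: spin until 'flag' becomes non-zero, with acquire
 * ordering so loads after the wait see the flag-setter's earlier stores. */
static inline void wait_for_flag(atomic64_t *flag)
{
	atomic64_cond_read_acquire(flag, VAL != 0);
}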
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 1e8e88bdaf09..4c0d009a46f0 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
@@ -25,14 +25,6 @@ | |||
25 | * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. | 25 | * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #ifndef atomic_read_acquire | ||
29 | #define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) | ||
30 | #endif | ||
31 | |||
32 | #ifndef atomic_set_release | ||
33 | #define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) | ||
34 | #endif | ||
35 | |||
36 | /* | 28 | /* |
37 | * The idea here is to build acquire/release variants by adding explicit | 29 | * The idea here is to build acquire/release variants by adding explicit |
38 | * barriers on top of the relaxed variant. In the case where the relaxed | 30 | * barriers on top of the relaxed variant. In the case where the relaxed |
@@ -79,1238 +71,7 @@ | |||
79 | __ret; \ | 71 | __ret; \ |
80 | }) | 72 | }) |
81 | 73 | ||
82 | /* atomic_add_return_relaxed */ | 74 | #include <linux/atomic-fallback.h> |
83 | #ifndef atomic_add_return_relaxed | ||
84 | #define atomic_add_return_relaxed atomic_add_return | ||
85 | #define atomic_add_return_acquire atomic_add_return | ||
86 | #define atomic_add_return_release atomic_add_return | ||
87 | |||
88 | #else /* atomic_add_return_relaxed */ | ||
89 | |||
90 | #ifndef atomic_add_return_acquire | ||
91 | #define atomic_add_return_acquire(...) \ | ||
92 | __atomic_op_acquire(atomic_add_return, __VA_ARGS__) | ||
93 | #endif | ||
94 | |||
95 | #ifndef atomic_add_return_release | ||
96 | #define atomic_add_return_release(...) \ | ||
97 | __atomic_op_release(atomic_add_return, __VA_ARGS__) | ||
98 | #endif | ||
99 | |||
100 | #ifndef atomic_add_return | ||
101 | #define atomic_add_return(...) \ | ||
102 | __atomic_op_fence(atomic_add_return, __VA_ARGS__) | ||
103 | #endif | ||
104 | #endif /* atomic_add_return_relaxed */ | ||
105 | |||
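When an architecture supplies only the _relaxed form, the stronger orderings are built by the __atomic_op_*() wrappers defined just above this hunk, giving the same shape as the generated fallbacks earlier in this patch: the relaxed op plus an explicit fence. A hedged sketch of roughly what atomic_add_return_acquire() amounts to in that case (illustrative, not the literal macro expansion):

/* Roughly the effect of __atomic_op_acquire(atomic_add_return, i, v): */
static inline int example_add_return_acquire(int i, atomic_t *v)
{
	int ret = atomic_add_return_relaxed(i, v);

	__atomic_acquire_fence();
	return ret;
}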
106 | #ifndef atomic_inc | ||
107 | #define atomic_inc(v) atomic_add(1, (v)) | ||
108 | #endif | ||
109 | |||
110 | /* atomic_inc_return_relaxed */ | ||
111 | #ifndef atomic_inc_return_relaxed | ||
112 | |||
113 | #ifndef atomic_inc_return | ||
114 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
115 | #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) | ||
116 | #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) | ||
117 | #define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) | ||
118 | #else /* atomic_inc_return */ | ||
119 | #define atomic_inc_return_relaxed atomic_inc_return | ||
120 | #define atomic_inc_return_acquire atomic_inc_return | ||
121 | #define atomic_inc_return_release atomic_inc_return | ||
122 | #endif /* atomic_inc_return */ | ||
123 | |||
124 | #else /* atomic_inc_return_relaxed */ | ||
125 | |||
126 | #ifndef atomic_inc_return_acquire | ||
127 | #define atomic_inc_return_acquire(...) \ | ||
128 | __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) | ||
129 | #endif | ||
130 | |||
131 | #ifndef atomic_inc_return_release | ||
132 | #define atomic_inc_return_release(...) \ | ||
133 | __atomic_op_release(atomic_inc_return, __VA_ARGS__) | ||
134 | #endif | ||
135 | |||
136 | #ifndef atomic_inc_return | ||
137 | #define atomic_inc_return(...) \ | ||
138 | __atomic_op_fence(atomic_inc_return, __VA_ARGS__) | ||
139 | #endif | ||
140 | #endif /* atomic_inc_return_relaxed */ | ||
141 | |||
142 | /* atomic_sub_return_relaxed */ | ||
143 | #ifndef atomic_sub_return_relaxed | ||
144 | #define atomic_sub_return_relaxed atomic_sub_return | ||
145 | #define atomic_sub_return_acquire atomic_sub_return | ||
146 | #define atomic_sub_return_release atomic_sub_return | ||
147 | |||
148 | #else /* atomic_sub_return_relaxed */ | ||
149 | |||
150 | #ifndef atomic_sub_return_acquire | ||
151 | #define atomic_sub_return_acquire(...) \ | ||
152 | __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) | ||
153 | #endif | ||
154 | |||
155 | #ifndef atomic_sub_return_release | ||
156 | #define atomic_sub_return_release(...) \ | ||
157 | __atomic_op_release(atomic_sub_return, __VA_ARGS__) | ||
158 | #endif | ||
159 | |||
160 | #ifndef atomic_sub_return | ||
161 | #define atomic_sub_return(...) \ | ||
162 | __atomic_op_fence(atomic_sub_return, __VA_ARGS__) | ||
163 | #endif | ||
164 | #endif /* atomic_sub_return_relaxed */ | ||
165 | |||
166 | #ifndef atomic_dec | ||
167 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
168 | #endif | ||
169 | |||
170 | /* atomic_dec_return_relaxed */ | ||
171 | #ifndef atomic_dec_return_relaxed | ||
172 | |||
173 | #ifndef atomic_dec_return | ||
174 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
175 | #define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) | ||
176 | #define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) | ||
177 | #define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) | ||
178 | #else /* atomic_dec_return */ | ||
179 | #define atomic_dec_return_relaxed atomic_dec_return | ||
180 | #define atomic_dec_return_acquire atomic_dec_return | ||
181 | #define atomic_dec_return_release atomic_dec_return | ||
182 | #endif /* atomic_dec_return */ | ||
183 | |||
184 | #else /* atomic_dec_return_relaxed */ | ||
185 | |||
186 | #ifndef atomic_dec_return_acquire | ||
187 | #define atomic_dec_return_acquire(...) \ | ||
188 | __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) | ||
189 | #endif | ||
190 | |||
191 | #ifndef atomic_dec_return_release | ||
192 | #define atomic_dec_return_release(...) \ | ||
193 | __atomic_op_release(atomic_dec_return, __VA_ARGS__) | ||
194 | #endif | ||
195 | |||
196 | #ifndef atomic_dec_return | ||
197 | #define atomic_dec_return(...) \ | ||
198 | __atomic_op_fence(atomic_dec_return, __VA_ARGS__) | ||
199 | #endif | ||
200 | #endif /* atomic_dec_return_relaxed */ | ||
201 | |||
202 | |||
203 | /* atomic_fetch_add_relaxed */ | ||
204 | #ifndef atomic_fetch_add_relaxed | ||
205 | #define atomic_fetch_add_relaxed atomic_fetch_add | ||
206 | #define atomic_fetch_add_acquire atomic_fetch_add | ||
207 | #define atomic_fetch_add_release atomic_fetch_add | ||
208 | |||
209 | #else /* atomic_fetch_add_relaxed */ | ||
210 | |||
211 | #ifndef atomic_fetch_add_acquire | ||
212 | #define atomic_fetch_add_acquire(...) \ | ||
213 | __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) | ||
214 | #endif | ||
215 | |||
216 | #ifndef atomic_fetch_add_release | ||
217 | #define atomic_fetch_add_release(...) \ | ||
218 | __atomic_op_release(atomic_fetch_add, __VA_ARGS__) | ||
219 | #endif | ||
220 | |||
221 | #ifndef atomic_fetch_add | ||
222 | #define atomic_fetch_add(...) \ | ||
223 | __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) | ||
224 | #endif | ||
225 | #endif /* atomic_fetch_add_relaxed */ | ||
226 | |||
227 | /* atomic_fetch_inc_relaxed */ | ||
228 | #ifndef atomic_fetch_inc_relaxed | ||
229 | |||
230 | #ifndef atomic_fetch_inc | ||
231 | #define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) | ||
232 | #define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) | ||
233 | #define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) | ||
234 | #define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) | ||
235 | #else /* atomic_fetch_inc */ | ||
236 | #define atomic_fetch_inc_relaxed atomic_fetch_inc | ||
237 | #define atomic_fetch_inc_acquire atomic_fetch_inc | ||
238 | #define atomic_fetch_inc_release atomic_fetch_inc | ||
239 | #endif /* atomic_fetch_inc */ | ||
240 | |||
241 | #else /* atomic_fetch_inc_relaxed */ | ||
242 | |||
243 | #ifndef atomic_fetch_inc_acquire | ||
244 | #define atomic_fetch_inc_acquire(...) \ | ||
245 | __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) | ||
246 | #endif | ||
247 | |||
248 | #ifndef atomic_fetch_inc_release | ||
249 | #define atomic_fetch_inc_release(...) \ | ||
250 | __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) | ||
251 | #endif | ||
252 | |||
253 | #ifndef atomic_fetch_inc | ||
254 | #define atomic_fetch_inc(...) \ | ||
255 | __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) | ||
256 | #endif | ||
257 | #endif /* atomic_fetch_inc_relaxed */ | ||
258 | |||
259 | /* atomic_fetch_sub_relaxed */ | ||
260 | #ifndef atomic_fetch_sub_relaxed | ||
261 | #define atomic_fetch_sub_relaxed atomic_fetch_sub | ||
262 | #define atomic_fetch_sub_acquire atomic_fetch_sub | ||
263 | #define atomic_fetch_sub_release atomic_fetch_sub | ||
264 | |||
265 | #else /* atomic_fetch_sub_relaxed */ | ||
266 | |||
267 | #ifndef atomic_fetch_sub_acquire | ||
268 | #define atomic_fetch_sub_acquire(...) \ | ||
269 | __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) | ||
270 | #endif | ||
271 | |||
272 | #ifndef atomic_fetch_sub_release | ||
273 | #define atomic_fetch_sub_release(...) \ | ||
274 | __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) | ||
275 | #endif | ||
276 | |||
277 | #ifndef atomic_fetch_sub | ||
278 | #define atomic_fetch_sub(...) \ | ||
279 | __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) | ||
280 | #endif | ||
281 | #endif /* atomic_fetch_sub_relaxed */ | ||
282 | |||
283 | /* atomic_fetch_dec_relaxed */ | ||
284 | #ifndef atomic_fetch_dec_relaxed | ||
285 | |||
286 | #ifndef atomic_fetch_dec | ||
287 | #define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) | ||
288 | #define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) | ||
289 | #define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) | ||
290 | #define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) | ||
291 | #else /* atomic_fetch_dec */ | ||
292 | #define atomic_fetch_dec_relaxed atomic_fetch_dec | ||
293 | #define atomic_fetch_dec_acquire atomic_fetch_dec | ||
294 | #define atomic_fetch_dec_release atomic_fetch_dec | ||
295 | #endif /* atomic_fetch_dec */ | ||
296 | |||
297 | #else /* atomic_fetch_dec_relaxed */ | ||
298 | |||
299 | #ifndef atomic_fetch_dec_acquire | ||
300 | #define atomic_fetch_dec_acquire(...) \ | ||
301 | __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) | ||
302 | #endif | ||
303 | |||
304 | #ifndef atomic_fetch_dec_release | ||
305 | #define atomic_fetch_dec_release(...) \ | ||
306 | __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) | ||
307 | #endif | ||
308 | |||
309 | #ifndef atomic_fetch_dec | ||
310 | #define atomic_fetch_dec(...) \ | ||
311 | __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) | ||
312 | #endif | ||
313 | #endif /* atomic_fetch_dec_relaxed */ | ||
314 | |||
315 | /* atomic_fetch_or_relaxed */ | ||
316 | #ifndef atomic_fetch_or_relaxed | ||
317 | #define atomic_fetch_or_relaxed atomic_fetch_or | ||
318 | #define atomic_fetch_or_acquire atomic_fetch_or | ||
319 | #define atomic_fetch_or_release atomic_fetch_or | ||
320 | |||
321 | #else /* atomic_fetch_or_relaxed */ | ||
322 | |||
323 | #ifndef atomic_fetch_or_acquire | ||
324 | #define atomic_fetch_or_acquire(...) \ | ||
325 | __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) | ||
326 | #endif | ||
327 | |||
328 | #ifndef atomic_fetch_or_release | ||
329 | #define atomic_fetch_or_release(...) \ | ||
330 | __atomic_op_release(atomic_fetch_or, __VA_ARGS__) | ||
331 | #endif | ||
332 | |||
333 | #ifndef atomic_fetch_or | ||
334 | #define atomic_fetch_or(...) \ | ||
335 | __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) | ||
336 | #endif | ||
337 | #endif /* atomic_fetch_or_relaxed */ | ||
338 | |||
339 | /* atomic_fetch_and_relaxed */ | ||
340 | #ifndef atomic_fetch_and_relaxed | ||
341 | #define atomic_fetch_and_relaxed atomic_fetch_and | ||
342 | #define atomic_fetch_and_acquire atomic_fetch_and | ||
343 | #define atomic_fetch_and_release atomic_fetch_and | ||
344 | |||
345 | #else /* atomic_fetch_and_relaxed */ | ||
346 | |||
347 | #ifndef atomic_fetch_and_acquire | ||
348 | #define atomic_fetch_and_acquire(...) \ | ||
349 | __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) | ||
350 | #endif | ||
351 | |||
352 | #ifndef atomic_fetch_and_release | ||
353 | #define atomic_fetch_and_release(...) \ | ||
354 | __atomic_op_release(atomic_fetch_and, __VA_ARGS__) | ||
355 | #endif | ||
356 | |||
357 | #ifndef atomic_fetch_and | ||
358 | #define atomic_fetch_and(...) \ | ||
359 | __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) | ||
360 | #endif | ||
361 | #endif /* atomic_fetch_and_relaxed */ | ||
362 | |||
363 | #ifndef atomic_andnot | ||
364 | #define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) | ||
365 | #endif | ||
366 | |||
367 | #ifndef atomic_fetch_andnot_relaxed | ||
368 | |||
369 | #ifndef atomic_fetch_andnot | ||
370 | #define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) | ||
371 | #define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) | ||
372 | #define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) | ||
373 | #define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) | ||
374 | #else /* atomic_fetch_andnot */ | ||
375 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot | ||
376 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot | ||
377 | #define atomic_fetch_andnot_release atomic_fetch_andnot | ||
378 | #endif /* atomic_fetch_andnot */ | ||
379 | |||
380 | #else /* atomic_fetch_andnot_relaxed */ | ||
381 | |||
382 | #ifndef atomic_fetch_andnot_acquire | ||
383 | #define atomic_fetch_andnot_acquire(...) \ | ||
384 | __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) | ||
385 | #endif | ||
386 | |||
387 | #ifndef atomic_fetch_andnot_release | ||
388 | #define atomic_fetch_andnot_release(...) \ | ||
389 | __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) | ||
390 | #endif | ||
391 | |||
392 | #ifndef atomic_fetch_andnot | ||
393 | #define atomic_fetch_andnot(...) \ | ||
394 | __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) | ||
395 | #endif | ||
396 | #endif /* atomic_fetch_andnot_relaxed */ | ||
397 | |||
398 | /* atomic_fetch_xor_relaxed */ | ||
399 | #ifndef atomic_fetch_xor_relaxed | ||
400 | #define atomic_fetch_xor_relaxed atomic_fetch_xor | ||
401 | #define atomic_fetch_xor_acquire atomic_fetch_xor | ||
402 | #define atomic_fetch_xor_release atomic_fetch_xor | ||
403 | |||
404 | #else /* atomic_fetch_xor_relaxed */ | ||
405 | |||
406 | #ifndef atomic_fetch_xor_acquire | ||
407 | #define atomic_fetch_xor_acquire(...) \ | ||
408 | __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) | ||
409 | #endif | ||
410 | |||
411 | #ifndef atomic_fetch_xor_release | ||
412 | #define atomic_fetch_xor_release(...) \ | ||
413 | __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) | ||
414 | #endif | ||
415 | |||
416 | #ifndef atomic_fetch_xor | ||
417 | #define atomic_fetch_xor(...) \ | ||
418 | __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) | ||
419 | #endif | ||
420 | #endif /* atomic_fetch_xor_relaxed */ | ||
421 | |||
422 | |||
423 | /* atomic_xchg_relaxed */ | ||
424 | #ifndef atomic_xchg_relaxed | ||
425 | #define atomic_xchg_relaxed atomic_xchg | ||
426 | #define atomic_xchg_acquire atomic_xchg | ||
427 | #define atomic_xchg_release atomic_xchg | ||
428 | |||
429 | #else /* atomic_xchg_relaxed */ | ||
430 | |||
431 | #ifndef atomic_xchg_acquire | ||
432 | #define atomic_xchg_acquire(...) \ | ||
433 | __atomic_op_acquire(atomic_xchg, __VA_ARGS__) | ||
434 | #endif | ||
435 | |||
436 | #ifndef atomic_xchg_release | ||
437 | #define atomic_xchg_release(...) \ | ||
438 | __atomic_op_release(atomic_xchg, __VA_ARGS__) | ||
439 | #endif | ||
440 | |||
441 | #ifndef atomic_xchg | ||
442 | #define atomic_xchg(...) \ | ||
443 | __atomic_op_fence(atomic_xchg, __VA_ARGS__) | ||
444 | #endif | ||
445 | #endif /* atomic_xchg_relaxed */ | ||
446 | |||
447 | /* atomic_cmpxchg_relaxed */ | ||
448 | #ifndef atomic_cmpxchg_relaxed | ||
449 | #define atomic_cmpxchg_relaxed atomic_cmpxchg | ||
450 | #define atomic_cmpxchg_acquire atomic_cmpxchg | ||
451 | #define atomic_cmpxchg_release atomic_cmpxchg | ||
452 | |||
453 | #else /* atomic_cmpxchg_relaxed */ | ||
454 | |||
455 | #ifndef atomic_cmpxchg_acquire | ||
456 | #define atomic_cmpxchg_acquire(...) \ | ||
457 | __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) | ||
458 | #endif | ||
459 | |||
460 | #ifndef atomic_cmpxchg_release | ||
461 | #define atomic_cmpxchg_release(...) \ | ||
462 | __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) | ||
463 | #endif | ||
464 | |||
465 | #ifndef atomic_cmpxchg | ||
466 | #define atomic_cmpxchg(...) \ | ||
467 | __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) | ||
468 | #endif | ||
469 | #endif /* atomic_cmpxchg_relaxed */ | ||
470 | |||
471 | #ifndef atomic_try_cmpxchg | ||
472 | |||
473 | #define __atomic_try_cmpxchg(type, _p, _po, _n) \ | ||
474 | ({ \ | ||
475 | typeof(_po) __po = (_po); \ | ||
476 | typeof(*(_po)) __r, __o = *__po; \ | ||
477 | __r = atomic_cmpxchg##type((_p), __o, (_n)); \ | ||
478 | if (unlikely(__r != __o)) \ | ||
479 | *__po = __r; \ | ||
480 | likely(__r == __o); \ | ||
481 | }) | ||
482 | |||
483 | #define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) | ||
484 | #define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) | ||
485 | #define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) | ||
486 | #define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) | ||
487 | |||
488 | #else /* atomic_try_cmpxchg */ | ||
489 | #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg | ||
490 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg | ||
491 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg | ||
492 | #endif /* atomic_try_cmpxchg */ | ||
493 | |||
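The __atomic_try_cmpxchg() wrapper above exists so callers do not have to re-read and compare by hand after a failed cmpxchg(). A hedged sketch contrasting the two call-site styles (hypothetical helpers, not part of this patch):

/* Retry loop written against plain cmpxchg(): the caller tracks the
 * observed value itself. */
static inline void inc_with_cmpxchg(atomic_t *v)
{
	int old = atomic_read(v);
	int seen;

	for (;;) {
		seen = atomic_cmpxchg(v, old, old + 1);
		if (seen == old)
			break;
		old = seen;	/* retry with the value we actually saw */
	}
}

/* The same loop with atomic_try_cmpxchg(): failure refreshes 'old'. */
static inline void inc_with_try_cmpxchg(atomic_t *v)
{
	int old = atomic_read(v);

	while (!atomic_try_cmpxchg(v, &old, old + 1))
		;
}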
494 | /* cmpxchg_relaxed */ | ||
495 | #ifndef cmpxchg_relaxed | ||
496 | #define cmpxchg_relaxed cmpxchg | ||
497 | #define cmpxchg_acquire cmpxchg | ||
498 | #define cmpxchg_release cmpxchg | ||
499 | |||
500 | #else /* cmpxchg_relaxed */ | ||
501 | |||
502 | #ifndef cmpxchg_acquire | ||
503 | #define cmpxchg_acquire(...) \ | ||
504 | __atomic_op_acquire(cmpxchg, __VA_ARGS__) | ||
505 | #endif | ||
506 | |||
507 | #ifndef cmpxchg_release | ||
508 | #define cmpxchg_release(...) \ | ||
509 | __atomic_op_release(cmpxchg, __VA_ARGS__) | ||
510 | #endif | ||
511 | |||
512 | #ifndef cmpxchg | ||
513 | #define cmpxchg(...) \ | ||
514 | __atomic_op_fence(cmpxchg, __VA_ARGS__) | ||
515 | #endif | ||
516 | #endif /* cmpxchg_relaxed */ | ||
517 | |||
518 | /* cmpxchg64_relaxed */ | ||
519 | #ifndef cmpxchg64_relaxed | ||
520 | #define cmpxchg64_relaxed cmpxchg64 | ||
521 | #define cmpxchg64_acquire cmpxchg64 | ||
522 | #define cmpxchg64_release cmpxchg64 | ||
523 | |||
524 | #else /* cmpxchg64_relaxed */ | ||
525 | |||
526 | #ifndef cmpxchg64_acquire | ||
527 | #define cmpxchg64_acquire(...) \ | ||
528 | __atomic_op_acquire(cmpxchg64, __VA_ARGS__) | ||
529 | #endif | ||
530 | |||
531 | #ifndef cmpxchg64_release | ||
532 | #define cmpxchg64_release(...) \ | ||
533 | __atomic_op_release(cmpxchg64, __VA_ARGS__) | ||
534 | #endif | ||
535 | |||
536 | #ifndef cmpxchg64 | ||
537 | #define cmpxchg64(...) \ | ||
538 | __atomic_op_fence(cmpxchg64, __VA_ARGS__) | ||
539 | #endif | ||
540 | #endif /* cmpxchg64_relaxed */ | ||
541 | |||
542 | /* xchg_relaxed */ | ||
543 | #ifndef xchg_relaxed | ||
544 | #define xchg_relaxed xchg | ||
545 | #define xchg_acquire xchg | ||
546 | #define xchg_release xchg | ||
547 | |||
548 | #else /* xchg_relaxed */ | ||
549 | |||
550 | #ifndef xchg_acquire | ||
551 | #define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) | ||
552 | #endif | ||
553 | |||
554 | #ifndef xchg_release | ||
555 | #define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) | ||
556 | #endif | ||
557 | |||
558 | #ifndef xchg | ||
559 | #define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) | ||
560 | #endif | ||
561 | #endif /* xchg_relaxed */ | ||
562 | |||
563 | /** | ||
564 | * atomic_fetch_add_unless - add unless the number is already a given value | ||
565 | * @v: pointer of type atomic_t | ||
566 | * @a: the amount to add to v... | ||
567 | * @u: ...unless v is equal to u. | ||
568 | * | ||
569 | * Atomically adds @a to @v, if @v was not already @u. | ||
570 | * Returns the original value of @v. | ||
571 | */ | ||
572 | #ifndef atomic_fetch_add_unless | ||
573 | static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) | ||
574 | { | ||
575 | int c = atomic_read(v); | ||
576 | |||
577 | do { | ||
578 | if (unlikely(c == u)) | ||
579 | break; | ||
580 | } while (!atomic_try_cmpxchg(v, &c, c + a)); | ||
581 | |||
582 | return c; | ||
583 | } | ||
584 | #endif | ||
585 | |||
586 | /** | ||
587 | * atomic_add_unless - add unless the number is already a given value | ||
588 | * @v: pointer of type atomic_t | ||
589 | * @a: the amount to add to v... | ||
590 | * @u: ...unless v is equal to u. | ||
591 | * | ||
592 | * Atomically adds @a to @v, if @v was not already @u. | ||
593 | * Returns true if the addition was done. | ||
594 | */ | ||
595 | static inline bool atomic_add_unless(atomic_t *v, int a, int u) | ||
596 | { | ||
597 | return atomic_fetch_add_unless(v, a, u) != u; | ||
598 | } | ||
599 | |||
600 | /** | ||
601 | * atomic_inc_not_zero - increment unless the number is zero | ||
602 | * @v: pointer of type atomic_t | ||
603 | * | ||
604 | * Atomically increments @v by 1, if @v is non-zero. | ||
605 | * Returns true if the increment was done. | ||
606 | */ | ||
607 | #ifndef atomic_inc_not_zero | ||
608 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
609 | #endif | ||
610 | |||
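atomic_inc_not_zero() is the usual building block for taking a reference only while an object is still live, since a zero count means the object may already be on its way to being freed. A minimal sketch with a hypothetical object type:

/* Hypothetical example: take a reference only if the object is still live. */
struct obj {
	atomic_t refcount;
};

static inline bool obj_tryget(struct obj *o)
{
	return atomic_inc_not_zero(&o->refcount);
}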
611 | /** | ||
612 | * atomic_inc_and_test - increment and test | ||
613 | * @v: pointer of type atomic_t | ||
614 | * | ||
615 | * Atomically increments @v by 1 | ||
616 | * and returns true if the result is zero, or false for all | ||
617 | * other cases. | ||
618 | */ | ||
619 | #ifndef atomic_inc_and_test | ||
620 | static inline bool atomic_inc_and_test(atomic_t *v) | ||
621 | { | ||
622 | return atomic_inc_return(v) == 0; | ||
623 | } | ||
624 | #endif | ||
625 | |||
626 | /** | ||
627 | * atomic_dec_and_test - decrement and test | ||
628 | * @v: pointer of type atomic_t | ||
629 | * | ||
630 | * Atomically decrements @v by 1 and | ||
631 | * returns true if the result is 0, or false for all other | ||
632 | * cases. | ||
633 | */ | ||
634 | #ifndef atomic_dec_and_test | ||
635 | static inline bool atomic_dec_and_test(atomic_t *v) | ||
636 | { | ||
637 | return atomic_dec_return(v) == 0; | ||
638 | } | ||
639 | #endif | ||
640 | |||
641 | /** | ||
642 | * atomic_sub_and_test - subtract value from variable and test result | ||
643 | * @i: integer value to subtract | ||
644 | * @v: pointer of type atomic_t | ||
645 | * | ||
646 | * Atomically subtracts @i from @v and returns | ||
647 | * true if the result is zero, or false for all | ||
648 | * other cases. | ||
649 | */ | ||
650 | #ifndef atomic_sub_and_test | ||
651 | static inline bool atomic_sub_and_test(int i, atomic_t *v) | ||
652 | { | ||
653 | return atomic_sub_return(i, v) == 0; | ||
654 | } | ||
655 | #endif | ||
656 | |||
657 | /** | ||
658 | * atomic_add_negative - add and test if negative | ||
659 | * @i: integer value to add | ||
660 | * @v: pointer of type atomic_t | ||
661 | * | ||
662 | * Atomically adds @i to @v and returns true | ||
663 | * if the result is negative, or false when | ||
664 | * result is greater than or equal to zero. | ||
665 | */ | ||
666 | #ifndef atomic_add_negative | ||
667 | static inline bool atomic_add_negative(int i, atomic_t *v) | ||
668 | { | ||
669 | return atomic_add_return(i, v) < 0; | ||
670 | } | ||
671 | #endif | ||
672 | |||
673 | #ifndef atomic_inc_unless_negative | ||
674 | static inline bool atomic_inc_unless_negative(atomic_t *v) | ||
675 | { | ||
676 | int c = atomic_read(v); | ||
677 | |||
678 | do { | ||
679 | if (unlikely(c < 0)) | ||
680 | return false; | ||
681 | } while (!atomic_try_cmpxchg(v, &c, c + 1)); | ||
682 | |||
683 | return true; | ||
684 | } | ||
685 | #endif | ||
686 | |||
687 | #ifndef atomic_dec_unless_positive | ||
688 | static inline bool atomic_dec_unless_positive(atomic_t *v) | ||
689 | { | ||
690 | int c = atomic_read(v); | ||
691 | |||
692 | do { | ||
693 | if (unlikely(c > 0)) | ||
694 | return false; | ||
695 | } while (!atomic_try_cmpxchg(v, &c, c - 1)); | ||
696 | |||
697 | return true; | ||
698 | } | ||
699 | #endif | ||
700 | |||
701 | /* | ||
702 | * atomic_dec_if_positive - decrement by 1 if old value positive | ||
703 | * @v: pointer of type atomic_t | ||
704 | * | ||
705 | * The function returns the old value of *v minus 1, even if | ||
706 | * the atomic variable, v, was not decremented. | ||
707 | */ | ||
708 | #ifndef atomic_dec_if_positive | ||
709 | static inline int atomic_dec_if_positive(atomic_t *v) | ||
710 | { | ||
711 | int dec, c = atomic_read(v); | ||
712 | |||
713 | do { | ||
714 | dec = c - 1; | ||
715 | if (unlikely(dec < 0)) | ||
716 | break; | ||
717 | } while (!atomic_try_cmpxchg(v, &c, dec)); | ||
718 | |||
719 | return dec; | ||
720 | } | ||
721 | #endif | ||
722 | |||
723 | #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
724 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
725 | |||
726 | #ifdef CONFIG_GENERIC_ATOMIC64 | ||
727 | #include <asm-generic/atomic64.h> | ||
728 | #endif | ||
729 | |||
730 | #ifndef atomic64_read_acquire | ||
731 | #define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) | ||
732 | #endif | ||
733 | |||
734 | #ifndef atomic64_set_release | ||
735 | #define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) | ||
736 | #endif | ||
737 | |||
738 | /* atomic64_add_return_relaxed */ | ||
739 | #ifndef atomic64_add_return_relaxed | ||
740 | #define atomic64_add_return_relaxed atomic64_add_return | ||
741 | #define atomic64_add_return_acquire atomic64_add_return | ||
742 | #define atomic64_add_return_release atomic64_add_return | ||
743 | |||
744 | #else /* atomic64_add_return_relaxed */ | ||
745 | |||
746 | #ifndef atomic64_add_return_acquire | ||
747 | #define atomic64_add_return_acquire(...) \ | ||
748 | __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) | ||
749 | #endif | ||
750 | |||
751 | #ifndef atomic64_add_return_release | ||
752 | #define atomic64_add_return_release(...) \ | ||
753 | __atomic_op_release(atomic64_add_return, __VA_ARGS__) | ||
754 | #endif | ||
755 | |||
756 | #ifndef atomic64_add_return | ||
757 | #define atomic64_add_return(...) \ | ||
758 | __atomic_op_fence(atomic64_add_return, __VA_ARGS__) | ||
759 | #endif | ||
760 | #endif /* atomic64_add_return_relaxed */ | ||
761 | |||
762 | #ifndef atomic64_inc | ||
763 | #define atomic64_inc(v) atomic64_add(1, (v)) | ||
764 | #endif | ||
765 | |||
766 | /* atomic64_inc_return_relaxed */ | ||
767 | #ifndef atomic64_inc_return_relaxed | ||
768 | |||
769 | #ifndef atomic64_inc_return | ||
770 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) | ||
771 | #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) | ||
772 | #define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) | ||
773 | #define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) | ||
774 | #else /* atomic64_inc_return */ | ||
775 | #define atomic64_inc_return_relaxed atomic64_inc_return | ||
776 | #define atomic64_inc_return_acquire atomic64_inc_return | ||
777 | #define atomic64_inc_return_release atomic64_inc_return | ||
778 | #endif /* atomic64_inc_return */ | ||
779 | |||
780 | #else /* atomic64_inc_return_relaxed */ | ||
781 | |||
782 | #ifndef atomic64_inc_return_acquire | ||
783 | #define atomic64_inc_return_acquire(...) \ | ||
784 | __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) | ||
785 | #endif | ||
786 | |||
787 | #ifndef atomic64_inc_return_release | ||
788 | #define atomic64_inc_return_release(...) \ | ||
789 | __atomic_op_release(atomic64_inc_return, __VA_ARGS__) | ||
790 | #endif | ||
791 | |||
792 | #ifndef atomic64_inc_return | ||
793 | #define atomic64_inc_return(...) \ | ||
794 | __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) | ||
795 | #endif | ||
796 | #endif /* atomic64_inc_return_relaxed */ | ||
797 | |||
798 | |||
799 | /* atomic64_sub_return_relaxed */ | ||
800 | #ifndef atomic64_sub_return_relaxed | ||
801 | #define atomic64_sub_return_relaxed atomic64_sub_return | ||
802 | #define atomic64_sub_return_acquire atomic64_sub_return | ||
803 | #define atomic64_sub_return_release atomic64_sub_return | ||
804 | |||
805 | #else /* atomic64_sub_return_relaxed */ | ||
806 | |||
807 | #ifndef atomic64_sub_return_acquire | ||
808 | #define atomic64_sub_return_acquire(...) \ | ||
809 | __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) | ||
810 | #endif | ||
811 | |||
812 | #ifndef atomic64_sub_return_release | ||
813 | #define atomic64_sub_return_release(...) \ | ||
814 | __atomic_op_release(atomic64_sub_return, __VA_ARGS__) | ||
815 | #endif | ||
816 | |||
817 | #ifndef atomic64_sub_return | ||
818 | #define atomic64_sub_return(...) \ | ||
819 | __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) | ||
820 | #endif | ||
821 | #endif /* atomic64_sub_return_relaxed */ | ||
822 | |||
823 | #ifndef atomic64_dec | ||
824 | #define atomic64_dec(v) atomic64_sub(1, (v)) | ||
825 | #endif | ||
826 | |||
827 | /* atomic64_dec_return_relaxed */ | ||
828 | #ifndef atomic64_dec_return_relaxed | ||
829 | |||
830 | #ifndef atomic64_dec_return | ||
831 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | ||
832 | #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) | ||
833 | #define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) | ||
834 | #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) | ||
835 | #else /* atomic64_dec_return */ | ||
836 | #define atomic64_dec_return_relaxed atomic64_dec_return | ||
837 | #define atomic64_dec_return_acquire atomic64_dec_return | ||
838 | #define atomic64_dec_return_release atomic64_dec_return | ||
839 | #endif /* atomic64_dec_return */ | ||
840 | |||
841 | #else /* atomic64_dec_return_relaxed */ | ||
842 | |||
843 | #ifndef atomic64_dec_return_acquire | ||
844 | #define atomic64_dec_return_acquire(...) \ | ||
845 | __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) | ||
846 | #endif | ||
847 | |||
848 | #ifndef atomic64_dec_return_release | ||
849 | #define atomic64_dec_return_release(...) \ | ||
850 | __atomic_op_release(atomic64_dec_return, __VA_ARGS__) | ||
851 | #endif | ||
852 | |||
853 | #ifndef atomic64_dec_return | ||
854 | #define atomic64_dec_return(...) \ | ||
855 | __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) | ||
856 | #endif | ||
857 | #endif /* atomic64_dec_return_relaxed */ | ||
858 | |||
859 | |||
860 | /* atomic64_fetch_add_relaxed */ | ||
861 | #ifndef atomic64_fetch_add_relaxed | ||
862 | #define atomic64_fetch_add_relaxed atomic64_fetch_add | ||
863 | #define atomic64_fetch_add_acquire atomic64_fetch_add | ||
864 | #define atomic64_fetch_add_release atomic64_fetch_add | ||
865 | |||
866 | #else /* atomic64_fetch_add_relaxed */ | ||
867 | |||
868 | #ifndef atomic64_fetch_add_acquire | ||
869 | #define atomic64_fetch_add_acquire(...) \ | ||
870 | __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) | ||
871 | #endif | ||
872 | |||
873 | #ifndef atomic64_fetch_add_release | ||
874 | #define atomic64_fetch_add_release(...) \ | ||
875 | __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) | ||
876 | #endif | ||
877 | |||
878 | #ifndef atomic64_fetch_add | ||
879 | #define atomic64_fetch_add(...) \ | ||
880 | __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) | ||
881 | #endif | ||
882 | #endif /* atomic64_fetch_add_relaxed */ | ||
883 | |||
884 | /* atomic64_fetch_inc_relaxed */ | ||
885 | #ifndef atomic64_fetch_inc_relaxed | ||
886 | |||
887 | #ifndef atomic64_fetch_inc | ||
888 | #define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) | ||
889 | #define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) | ||
890 | #define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) | ||
891 | #define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) | ||
892 | #else /* atomic64_fetch_inc */ | ||
893 | #define atomic64_fetch_inc_relaxed atomic64_fetch_inc | ||
894 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc | ||
895 | #define atomic64_fetch_inc_release atomic64_fetch_inc | ||
896 | #endif /* atomic64_fetch_inc */ | ||
897 | |||
898 | #else /* atomic64_fetch_inc_relaxed */ | ||
899 | |||
900 | #ifndef atomic64_fetch_inc_acquire | ||
901 | #define atomic64_fetch_inc_acquire(...) \ | ||
902 | __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) | ||
903 | #endif | ||
904 | |||
905 | #ifndef atomic64_fetch_inc_release | ||
906 | #define atomic64_fetch_inc_release(...) \ | ||
907 | __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) | ||
908 | #endif | ||
909 | |||
910 | #ifndef atomic64_fetch_inc | ||
911 | #define atomic64_fetch_inc(...) \ | ||
912 | __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) | ||
913 | #endif | ||
914 | #endif /* atomic64_fetch_inc_relaxed */ | ||
915 | |||
916 | /* atomic64_fetch_sub_relaxed */ | ||
917 | #ifndef atomic64_fetch_sub_relaxed | ||
918 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub | ||
919 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub | ||
920 | #define atomic64_fetch_sub_release atomic64_fetch_sub | ||
921 | |||
922 | #else /* atomic64_fetch_sub_relaxed */ | ||
923 | |||
924 | #ifndef atomic64_fetch_sub_acquire | ||
925 | #define atomic64_fetch_sub_acquire(...) \ | ||
926 | __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) | ||
927 | #endif | ||
928 | |||
929 | #ifndef atomic64_fetch_sub_release | ||
930 | #define atomic64_fetch_sub_release(...) \ | ||
931 | __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) | ||
932 | #endif | ||
933 | |||
934 | #ifndef atomic64_fetch_sub | ||
935 | #define atomic64_fetch_sub(...) \ | ||
936 | __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) | ||
937 | #endif | ||
938 | #endif /* atomic64_fetch_sub_relaxed */ | ||
939 | |||
940 | /* atomic64_fetch_dec_relaxed */ | ||
941 | #ifndef atomic64_fetch_dec_relaxed | ||
942 | |||
943 | #ifndef atomic64_fetch_dec | ||
944 | #define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) | ||
945 | #define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) | ||
946 | #define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) | ||
947 | #define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) | ||
948 | #else /* atomic64_fetch_dec */ | ||
949 | #define atomic64_fetch_dec_relaxed atomic64_fetch_dec | ||
950 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec | ||
951 | #define atomic64_fetch_dec_release atomic64_fetch_dec | ||
952 | #endif /* atomic64_fetch_dec */ | ||
953 | |||
954 | #else /* atomic64_fetch_dec_relaxed */ | ||
955 | |||
956 | #ifndef atomic64_fetch_dec_acquire | ||
957 | #define atomic64_fetch_dec_acquire(...) \ | ||
958 | __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) | ||
959 | #endif | ||
960 | |||
961 | #ifndef atomic64_fetch_dec_release | ||
962 | #define atomic64_fetch_dec_release(...) \ | ||
963 | __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) | ||
964 | #endif | ||
965 | |||
966 | #ifndef atomic64_fetch_dec | ||
967 | #define atomic64_fetch_dec(...) \ | ||
968 | __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) | ||
969 | #endif | ||
970 | #endif /* atomic64_fetch_dec_relaxed */ | ||
971 | |||
972 | /* atomic64_fetch_or_relaxed */ | ||
973 | #ifndef atomic64_fetch_or_relaxed | ||
974 | #define atomic64_fetch_or_relaxed atomic64_fetch_or | ||
975 | #define atomic64_fetch_or_acquire atomic64_fetch_or | ||
976 | #define atomic64_fetch_or_release atomic64_fetch_or | ||
977 | |||
978 | #else /* atomic64_fetch_or_relaxed */ | ||
979 | |||
980 | #ifndef atomic64_fetch_or_acquire | ||
981 | #define atomic64_fetch_or_acquire(...) \ | ||
982 | __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) | ||
983 | #endif | ||
984 | |||
985 | #ifndef atomic64_fetch_or_release | ||
986 | #define atomic64_fetch_or_release(...) \ | ||
987 | __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) | ||
988 | #endif | ||
989 | |||
990 | #ifndef atomic64_fetch_or | ||
991 | #define atomic64_fetch_or(...) \ | ||
992 | __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) | ||
993 | #endif | ||
994 | #endif /* atomic64_fetch_or_relaxed */ | ||
995 | |||
996 | /* atomic64_fetch_and_relaxed */ | ||
997 | #ifndef atomic64_fetch_and_relaxed | ||
998 | #define atomic64_fetch_and_relaxed atomic64_fetch_and | ||
999 | #define atomic64_fetch_and_acquire atomic64_fetch_and | ||
1000 | #define atomic64_fetch_and_release atomic64_fetch_and | ||
1001 | |||
1002 | #else /* atomic64_fetch_and_relaxed */ | ||
1003 | |||
1004 | #ifndef atomic64_fetch_and_acquire | ||
1005 | #define atomic64_fetch_and_acquire(...) \ | ||
1006 | __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) | ||
1007 | #endif | ||
1008 | |||
1009 | #ifndef atomic64_fetch_and_release | ||
1010 | #define atomic64_fetch_and_release(...) \ | ||
1011 | __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) | ||
1012 | #endif | ||
1013 | |||
1014 | #ifndef atomic64_fetch_and | ||
1015 | #define atomic64_fetch_and(...) \ | ||
1016 | __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) | ||
1017 | #endif | ||
1018 | #endif /* atomic64_fetch_and_relaxed */ | ||
1019 | |||
1020 | #ifndef atomic64_andnot | ||
1021 | #define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) | ||
1022 | #endif | ||
1023 | |||
1024 | #ifndef atomic64_fetch_andnot_relaxed | ||
1025 | |||
1026 | #ifndef atomic64_fetch_andnot | ||
1027 | #define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) | ||
1028 | #define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) | ||
1029 | #define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) | ||
1030 | #define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) | ||
1031 | #else /* atomic64_fetch_andnot */ | ||
1032 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | ||
1033 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | ||
1034 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | ||
1035 | #endif /* atomic64_fetch_andnot */ | ||
1036 | |||
1037 | #else /* atomic64_fetch_andnot_relaxed */ | ||
1038 | |||
1039 | #ifndef atomic64_fetch_andnot_acquire | ||
1040 | #define atomic64_fetch_andnot_acquire(...) \ | ||
1041 | __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) | ||
1042 | #endif | ||
1043 | |||
1044 | #ifndef atomic64_fetch_andnot_release | ||
1045 | #define atomic64_fetch_andnot_release(...) \ | ||
1046 | __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) | ||
1047 | #endif | ||
1048 | |||
1049 | #ifndef atomic64_fetch_andnot | ||
1050 | #define atomic64_fetch_andnot(...) \ | ||
1051 | __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) | ||
1052 | #endif | ||
1053 | #endif /* atomic64_fetch_andnot_relaxed */ | ||
1054 | |||
1055 | /* atomic64_fetch_xor_relaxed */ | ||
1056 | #ifndef atomic64_fetch_xor_relaxed | ||
1057 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor | ||
1058 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor | ||
1059 | #define atomic64_fetch_xor_release atomic64_fetch_xor | ||
1060 | |||
1061 | #else /* atomic64_fetch_xor_relaxed */ | ||
1062 | |||
1063 | #ifndef atomic64_fetch_xor_acquire | ||
1064 | #define atomic64_fetch_xor_acquire(...) \ | ||
1065 | __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) | ||
1066 | #endif | ||
1067 | |||
1068 | #ifndef atomic64_fetch_xor_release | ||
1069 | #define atomic64_fetch_xor_release(...) \ | ||
1070 | __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) | ||
1071 | #endif | ||
1072 | |||
1073 | #ifndef atomic64_fetch_xor | ||
1074 | #define atomic64_fetch_xor(...) \ | ||
1075 | __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) | ||
1076 | #endif | ||
1077 | #endif /* atomic64_fetch_xor_relaxed */ | ||
1078 | |||
1079 | |||
1080 | /* atomic64_xchg_relaxed */ | ||
1081 | #ifndef atomic64_xchg_relaxed | ||
1082 | #define atomic64_xchg_relaxed atomic64_xchg | ||
1083 | #define atomic64_xchg_acquire atomic64_xchg | ||
1084 | #define atomic64_xchg_release atomic64_xchg | ||
1085 | |||
1086 | #else /* atomic64_xchg_relaxed */ | ||
1087 | |||
1088 | #ifndef atomic64_xchg_acquire | ||
1089 | #define atomic64_xchg_acquire(...) \ | ||
1090 | __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) | ||
1091 | #endif | ||
1092 | |||
1093 | #ifndef atomic64_xchg_release | ||
1094 | #define atomic64_xchg_release(...) \ | ||
1095 | __atomic_op_release(atomic64_xchg, __VA_ARGS__) | ||
1096 | #endif | ||
1097 | |||
1098 | #ifndef atomic64_xchg | ||
1099 | #define atomic64_xchg(...) \ | ||
1100 | __atomic_op_fence(atomic64_xchg, __VA_ARGS__) | ||
1101 | #endif | ||
1102 | #endif /* atomic64_xchg_relaxed */ | ||
1103 | |||
1104 | /* atomic64_cmpxchg_relaxed */ | ||
1105 | #ifndef atomic64_cmpxchg_relaxed | ||
1106 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg | ||
1107 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg | ||
1108 | #define atomic64_cmpxchg_release atomic64_cmpxchg | ||
1109 | |||
1110 | #else /* atomic64_cmpxchg_relaxed */ | ||
1111 | |||
1112 | #ifndef atomic64_cmpxchg_acquire | ||
1113 | #define atomic64_cmpxchg_acquire(...) \ | ||
1114 | __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) | ||
1115 | #endif | ||
1116 | |||
1117 | #ifndef atomic64_cmpxchg_release | ||
1118 | #define atomic64_cmpxchg_release(...) \ | ||
1119 | __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) | ||
1120 | #endif | ||
1121 | |||
1122 | #ifndef atomic64_cmpxchg | ||
1123 | #define atomic64_cmpxchg(...) \ | ||
1124 | __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) | ||
1125 | #endif | ||
1126 | #endif /* atomic64_cmpxchg_relaxed */ | ||
1127 | |||
1128 | #ifndef atomic64_try_cmpxchg | ||
1129 | |||
1130 | #define __atomic64_try_cmpxchg(type, _p, _po, _n) \ | ||
1131 | ({ \ | ||
1132 | typeof(_po) __po = (_po); \ | ||
1133 | typeof(*(_po)) __r, __o = *__po; \ | ||
1134 | __r = atomic64_cmpxchg##type((_p), __o, (_n)); \ | ||
1135 | if (unlikely(__r != __o)) \ | ||
1136 | *__po = __r; \ | ||
1137 | likely(__r == __o); \ | ||
1138 | }) | ||
1139 | |||
1140 | #define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n) | ||
1141 | #define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n) | ||
1142 | #define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n) | ||
1143 | #define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n) | ||
1144 | |||
1145 | #else /* atomic64_try_cmpxchg */ | ||
1146 | #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg | ||
1147 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg | ||
1148 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg | ||
1149 | #endif /* atomic64_try_cmpxchg */ | ||
1150 | |||
1151 | /** | ||
1152 | * atomic64_fetch_add_unless - add unless the number is already a given value | ||
1153 | * @v: pointer of type atomic64_t | ||
1154 | * @a: the amount to add to v... | ||
1155 | * @u: ...unless v is equal to u. | ||
1156 | * | ||
1157 | * Atomically adds @a to @v, if @v was not already @u. | ||
1158 | * Returns the original value of @v. | ||
1159 | */ | ||
1160 | #ifndef atomic64_fetch_add_unless | ||
1161 | static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, | ||
1162 | long long u) | ||
1163 | { | ||
1164 | long long c = atomic64_read(v); | ||
1165 | |||
1166 | do { | ||
1167 | if (unlikely(c == u)) | ||
1168 | break; | ||
1169 | } while (!atomic64_try_cmpxchg(v, &c, c + a)); | ||
1170 | |||
1171 | return c; | ||
1172 | } | ||
1173 | #endif | ||
1174 | |||
1175 | /** | ||
1176 | * atomic64_add_unless - add unless the number is already a given value | ||
1177 | * @v: pointer of type atomic64_t | ||
1178 | * @a: the amount to add to v... | ||
1179 | * @u: ...unless v is equal to u. | ||
1180 | * | ||
1181 | * Atomically adds @a to @v, if @v was not already @u. | ||
1182 | * Returns true if the addition was done. | ||
1183 | */ | ||
1184 | static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
1185 | { | ||
1186 | return atomic64_fetch_add_unless(v, a, u) != u; | ||
1187 | } | ||
1188 | |||
1189 | /** | ||
1190 | * atomic64_inc_not_zero - increment unless the number is zero | ||
1191 | * @v: pointer of type atomic64_t | ||
1192 | * | ||
1193 | * Atomically increments @v by 1, if @v is non-zero. | ||
1194 | * Returns true if the increment was done. | ||
1195 | */ | ||
1196 | #ifndef atomic64_inc_not_zero | ||
1197 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
1198 | #endif | ||
1199 | |||
1200 | /** | ||
1201 | * atomic64_inc_and_test - increment and test | ||
1202 | * @v: pointer of type atomic64_t | ||
1203 | * | ||
1204 | * Atomically increments @v by 1 | ||
1205 | * and returns true if the result is zero, or false for all | ||
1206 | * other cases. | ||
1207 | */ | ||
1208 | #ifndef atomic64_inc_and_test | ||
1209 | static inline bool atomic64_inc_and_test(atomic64_t *v) | ||
1210 | { | ||
1211 | return atomic64_inc_return(v) == 0; | ||
1212 | } | ||
1213 | #endif | ||
1214 | |||
1215 | /** | ||
1216 | * atomic64_dec_and_test - decrement and test | ||
1217 | * @v: pointer of type atomic64_t | ||
1218 | * | ||
1219 | * Atomically decrements @v by 1 and | ||
1220 | * returns true if the result is 0, or false for all other | ||
1221 | * cases. | ||
1222 | */ | ||
1223 | #ifndef atomic64_dec_and_test | ||
1224 | static inline bool atomic64_dec_and_test(atomic64_t *v) | ||
1225 | { | ||
1226 | return atomic64_dec_return(v) == 0; | ||
1227 | } | ||
1228 | #endif | ||
1229 | |||
1230 | /** | ||
1231 | * atomic64_sub_and_test - subtract value from variable and test result | ||
1232 | * @i: integer value to subtract | ||
1233 | * @v: pointer of type atomic64_t | ||
1234 | * | ||
1235 | * Atomically subtracts @i from @v and returns | ||
1236 | * true if the result is zero, or false for all | ||
1237 | * other cases. | ||
1238 | */ | ||
1239 | #ifndef atomic64_sub_and_test | ||
1240 | static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) | ||
1241 | { | ||
1242 | return atomic64_sub_return(i, v) == 0; | ||
1243 | } | ||
1244 | #endif | ||
1245 | |||
1246 | /** | ||
1247 | * atomic64_add_negative - add and test if negative | ||
1248 | * @i: integer value to add | ||
1249 | * @v: pointer of type atomic64_t | ||
1250 | * | ||
1251 | * Atomically adds @i to @v and returns true | ||
1252 | * if the result is negative, or false when | ||
1253 | * result is greater than or equal to zero. | ||
1254 | */ | ||
1255 | #ifndef atomic64_add_negative | ||
1256 | static inline bool atomic64_add_negative(long long i, atomic64_t *v) | ||
1257 | { | ||
1258 | return atomic64_add_return(i, v) < 0; | ||
1259 | } | ||
1260 | #endif | ||
1261 | |||
1262 | #ifndef atomic64_inc_unless_negative | ||
1263 | static inline bool atomic64_inc_unless_negative(atomic64_t *v) | ||
1264 | { | ||
1265 | long long c = atomic64_read(v); | ||
1266 | |||
1267 | do { | ||
1268 | if (unlikely(c < 0)) | ||
1269 | return false; | ||
1270 | } while (!atomic64_try_cmpxchg(v, &c, c + 1)); | ||
1271 | |||
1272 | return true; | ||
1273 | } | ||
1274 | #endif | ||
1275 | |||
1276 | #ifndef atomic64_dec_unless_positive | ||
1277 | static inline bool atomic64_dec_unless_positive(atomic64_t *v) | ||
1278 | { | ||
1279 | long long c = atomic64_read(v); | ||
1280 | |||
1281 | do { | ||
1282 | if (unlikely(c > 0)) | ||
1283 | return false; | ||
1284 | } while (!atomic64_try_cmpxchg(v, &c, c - 1)); | ||
1285 | |||
1286 | return true; | ||
1287 | } | ||
1288 | #endif | ||
1289 | |||
1290 | /* | ||
1291 | * atomic64_dec_if_positive - decrement by 1 if old value positive | ||
1292 | * @v: pointer of type atomic64_t | ||
1293 | * | ||
1294 | * The function returns the old value of *v minus 1, even if | ||
1295 | * the atomic64 variable, v, was not decremented. | ||
1296 | */ | ||
1297 | #ifndef atomic64_dec_if_positive | ||
1298 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
1299 | { | ||
1300 | long long dec, c = atomic64_read(v); | ||
1301 | |||
1302 | do { | ||
1303 | dec = c - 1; | ||
1304 | if (unlikely(dec < 0)) | ||
1305 | break; | ||
1306 | } while (!atomic64_try_cmpxchg(v, &c, dec)); | ||
1307 | |||
1308 | return dec; | ||
1309 | } | ||
1310 | #endif | ||
1311 | |||
1312 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
1313 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
1314 | 75 | ||
1315 | #include <asm-generic/atomic-long.h> | 76 | #include <asm-generic/atomic-long.h> |
1316 | 77 | ||
diff --git a/scripts/atomic/atomic-tbl.sh b/scripts/atomic/atomic-tbl.sh new file mode 100755 index 000000000000..9d6be538a987 --- /dev/null +++ b/scripts/atomic/atomic-tbl.sh | |||
@@ -0,0 +1,186 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | # helpers for dealing with atomics.tbl | ||
4 | |||
5 | #meta_in(meta, match) | ||
6 | meta_in() | ||
7 | { | ||
8 | case "$1" in | ||
9 | [$2]) return 0;; | ||
10 | esac | ||
11 | |||
12 | return 1 | ||
13 | } | ||
14 | |||
15 | #meta_has_ret(meta) | ||
16 | meta_has_ret() | ||
17 | { | ||
18 | meta_in "$1" "bBiIfFlR" | ||
19 | } | ||
20 | |||
21 | #meta_has_acquire(meta) | ||
22 | meta_has_acquire() | ||
23 | { | ||
24 | meta_in "$1" "BFIlR" | ||
25 | } | ||
26 | |||
27 | #meta_has_release(meta) | ||
28 | meta_has_release() | ||
29 | { | ||
30 | meta_in "$1" "BFIRs" | ||
31 | } | ||
32 | |||
33 | #meta_has_relaxed(meta) | ||
34 | meta_has_relaxed() | ||
35 | { | ||
36 | meta_in "$1" "BFIR" | ||
37 | } | ||
38 | |||
39 | #find_fallback_template(pfx, name, sfx, order) | ||
40 | find_fallback_template() | ||
41 | { | ||
42 | local pfx="$1"; shift | ||
43 | local name="$1"; shift | ||
44 | local sfx="$1"; shift | ||
45 | local order="$1"; shift | ||
46 | |||
47 | local base="" | ||
48 | local file="" | ||
49 | |||
50 | # We may have fallbacks for a specific case (e.g. read_acquire()), or | ||
51 | # an entire class, e.g. *inc*(). | ||
52 | # | ||
53 | # Start at the most specific, and fall back to the most general. Once | ||
54 | # we find a specific fallback, don't bother looking for more. | ||
55 | for base in "${pfx}${name}${sfx}${order}" "${name}"; do | ||
56 | file="${ATOMICDIR}/fallbacks/${base}" | ||
57 | |||
58 | if [ -f "${file}" ]; then | ||
59 | printf "${file}" | ||
60 | break | ||
61 | fi | ||
62 | done | ||
63 | } | ||
64 | |||
65 | #gen_ret_type(meta, int) | ||
66 | gen_ret_type() { | ||
67 | local meta="$1"; shift | ||
68 | local int="$1"; shift | ||
69 | |||
70 | case "${meta}" in | ||
71 | [sv]) printf "void";; | ||
72 | [bB]) printf "bool";; | ||
73 | [aiIfFlR]) printf "${int}";; | ||
74 | esac | ||
75 | } | ||
76 | |||
77 | #gen_ret_stmt(meta) | ||
78 | gen_ret_stmt() | ||
79 | { | ||
80 | if meta_has_ret "${meta}"; then | ||
81 | printf "return "; | ||
82 | fi | ||
83 | } | ||
84 | |||
85 | # gen_param_name(arg) | ||
86 | gen_param_name() | ||
87 | { | ||
88 | # strip off the leading 'c' for 'cv' | ||
89 | local name="${1#c}" | ||
90 | printf "${name#*:}" | ||
91 | } | ||
92 | |||
93 | # gen_param_type(arg, int, atomic) | ||
94 | gen_param_type() | ||
95 | { | ||
96 | local type="${1%%:*}"; shift | ||
97 | local int="$1"; shift | ||
98 | local atomic="$1"; shift | ||
99 | |||
100 | case "${type}" in | ||
101 | i) type="${int} ";; | ||
102 | p) type="${int} *";; | ||
103 | v) type="${atomic}_t *";; | ||
104 | cv) type="const ${atomic}_t *";; | ||
105 | esac | ||
106 | |||
107 | printf "${type}" | ||
108 | } | ||
109 | |||
110 | #gen_param(arg, int, atomic) | ||
111 | gen_param() | ||
112 | { | ||
113 | local arg="$1"; shift | ||
114 | local int="$1"; shift | ||
115 | local atomic="$1"; shift | ||
116 | local name="$(gen_param_name "${arg}")" | ||
117 | local type="$(gen_param_type "${arg}" "${int}" "${atomic}")" | ||
118 | |||
119 | printf "${type}${name}" | ||
120 | } | ||
121 | |||
122 | #gen_params(int, atomic, arg...) | ||
123 | gen_params() | ||
124 | { | ||
125 | local int="$1"; shift | ||
126 | local atomic="$1"; shift | ||
127 | |||
128 | while [ "$#" -gt 0 ]; do | ||
129 | gen_param "$1" "${int}" "${atomic}" | ||
130 | [ "$#" -gt 1 ] && printf ", " | ||
131 | shift; | ||
132 | done | ||
133 | } | ||
134 | |||
135 | #gen_args(arg...) | ||
136 | gen_args() | ||
137 | { | ||
138 | while [ "$#" -gt 0 ]; do | ||
139 | printf "$(gen_param_name "$1")" | ||
140 | [ "$#" -gt 1 ] && printf ", " | ||
141 | shift; | ||
142 | done | ||
143 | } | ||
144 | |||
145 | #gen_proto_order_variants(meta, pfx, name, sfx, ...) | ||
146 | gen_proto_order_variants() | ||
147 | { | ||
148 | local meta="$1"; shift | ||
149 | local pfx="$1"; shift | ||
150 | local name="$1"; shift | ||
151 | local sfx="$1"; shift | ||
152 | |||
153 | gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" | ||
154 | |||
155 | if meta_has_acquire "${meta}"; then | ||
156 | gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" | ||
157 | fi | ||
158 | if meta_has_release "${meta}"; then | ||
159 | gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" | ||
160 | fi | ||
161 | if meta_has_relaxed "${meta}"; then | ||
162 | gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@" | ||
163 | fi | ||
164 | } | ||
165 | |||
166 | #gen_proto_variants(meta, name, ...) | ||
167 | gen_proto_variants() | ||
168 | { | ||
169 | local meta="$1"; shift | ||
170 | local name="$1"; shift | ||
171 | local pfx="" | ||
172 | local sfx="" | ||
173 | |||
174 | meta_in "${meta}" "fF" && pfx="fetch_" | ||
175 | meta_in "${meta}" "R" && sfx="_return" | ||
176 | |||
177 | gen_proto_order_variants "${meta}" "${pfx}" "${name}" "${sfx}" "$@" | ||
178 | } | ||
179 | |||
180 | #gen_proto(meta, ...) | ||
181 | gen_proto() { | ||
182 | local meta="$1"; shift | ||
183 | for m in $(echo "${meta}" | fold -w1); do | ||
184 | gen_proto_variants "${m}" "$@" | ||
185 | done | ||
186 | } | ||
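For illustration, here is how the fallback-template lookup behaves for one op. atomic_fetch_add_unless() has a specific fallback file, so the first candidate wins; an op with no specific file falls through to the whole-class name, and if that is also absent no fallback template is used. This is a hypothetical call, as gen_proto_fallback() in the generators below would make it:

	# meta 'f' gives pfx="fetch_", sfx="", order=""
	find_fallback_template "fetch_" "add_unless" "" ""
	# candidates tried, in order:
	#   ${ATOMICDIR}/fallbacks/fetch_add_unless   <- exists, printed and used
	#   ${ATOMICDIR}/fallbacks/add_unless         <- only reached if the above is absent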
diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl new file mode 100755 index 000000000000..fbee2f6190d9 --- /dev/null +++ b/scripts/atomic/atomics.tbl | |||
@@ -0,0 +1,41 @@ | |||
1 | # name meta args... | ||
2 | # | ||
3 | # Where meta contains a string of variants to generate. | ||
4 | # Upper-case implies _{acquire,release,relaxed} variants. | ||
5 | # Valid meta values are: | ||
6 | # * B/b - bool: returns bool | ||
7 | # * v - void: returns void | ||
8 | # * I/i - int: returns base type | ||
9 | # * R - return: returns base type (has _return variants) | ||
10 | # * F/f - fetch: returns base type (has fetch_ variants) | ||
11 | # * l - load: returns base type (has _acquire order variant) | ||
12 | # * s - store: returns void (has _release order variant) | ||
13 | # | ||
14 | # Where args contains a list of type[:name], where type is: | ||
15 | # * cv - const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t) | ||
16 | # * v - pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t) | ||
17 | # * i - base type (int/s64/long) | ||
18 | # * p - pointer to base type (int/s64/long) | ||
19 | # | ||
20 | read l cv | ||
21 | set s v i | ||
22 | add vRF i v | ||
23 | sub vRF i v | ||
24 | inc vRF v | ||
25 | dec vRF v | ||
26 | and vF i v | ||
27 | andnot vF i v | ||
28 | or vF i v | ||
29 | xor vF i v | ||
30 | xchg I v i | ||
31 | cmpxchg I v i:old i:new | ||
32 | try_cmpxchg B v p:old i:new | ||
33 | sub_and_test b i v | ||
34 | dec_and_test b v | ||
35 | inc_and_test b v | ||
36 | add_negative b i v | ||
37 | add_unless fb v i:a i:u | ||
38 | inc_not_zero b v | ||
39 | inc_unless_negative b v | ||
40 | dec_unless_positive b v | ||
41 | dec_if_positive i v | ||
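As a sketch of how one row is interpreted, the line "add vRF i v" describes the following family (signatures shown for the int/atomic_t instantiation; the generators repeat the same set for atomic64_t/s64 and atomic_long_t/long):

	void atomic_add(int i, atomic_t *v);                 /* v: void, no return        */
	int  atomic_add_return(int i, atomic_t *v);          /* R: _return, fully ordered */
	int  atomic_add_return_acquire(int i, atomic_t *v);
	int  atomic_add_return_release(int i, atomic_t *v);
	int  atomic_add_return_relaxed(int i, atomic_t *v);
	int  atomic_fetch_add(int i, atomic_t *v);           /* F: fetch_, fully ordered  */
	int  atomic_fetch_add_acquire(int i, atomic_t *v);
	int  atomic_fetch_add_release(int i, atomic_t *v);
	int  atomic_fetch_add_relaxed(int i, atomic_t *v);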
diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh new file mode 100755 index 000000000000..c30101cddf2d --- /dev/null +++ b/scripts/atomic/check-atomics.sh | |||
@@ -0,0 +1,19 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | # | ||
4 | # Check if atomic headers are up-to-date | ||
5 | |||
6 | ATOMICDIR=$(dirname $0) | ||
7 | ATOMICTBL=${ATOMICDIR}/atomics.tbl | ||
8 | LINUXDIR=${ATOMICDIR}/../.. | ||
9 | |||
10 | cat <<EOF | | ||
11 | gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h | ||
12 | gen-atomic-long.sh asm-generic/atomic-long.h | ||
13 | gen-atomic-fallback.sh linux/atomic-fallback.h | ||
14 | EOF | ||
15 | while read script header; do | ||
16 | if ! (${ATOMICDIR}/${script} ${ATOMICTBL} | diff - ${LINUXDIR}/include/${header} > /dev/null); then | ||
17 | printf "warning: include/${header} is out-of-date.\n" | ||
18 | fi | ||
19 | done | ||
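Run from the top of the kernel tree, the check is silent when the generated headers match the scripts; a hypothetical stale header would produce output along these lines:

	$ ./scripts/atomic/check-atomics.sh
	warning: include/asm-generic/atomic-long.h is out-of-date.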
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire new file mode 100755 index 000000000000..e38871e64db6 --- /dev/null +++ b/scripts/atomic/fallbacks/acquire | |||
@@ -0,0 +1,9 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}${name}${sfx}_acquire(${params}) | ||
4 | { | ||
5 | ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args}); | ||
6 | __atomic_acquire_fence(); | ||
7 | return ret; | ||
8 | } | ||
9 | EOF | ||
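A sketch of the expansion this template produces, assuming an architecture that supplies only atomic_fetch_add_relaxed(); gen_template_fallback() in gen-atomic-fallback.sh adds the #ifndef guard and the #define marker around the heredoc body:

	#ifndef atomic_fetch_add_acquire
	static inline int
	atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = atomic_fetch_add_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}
	#define atomic_fetch_add_acquire atomic_fetch_add_acquire
	#endif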
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative new file mode 100755 index 000000000000..e6f4815637de --- /dev/null +++ b/scripts/atomic/fallbacks/add_negative | |||
@@ -0,0 +1,16 @@ | |||
1 | cat <<EOF | ||
2 | /** | ||
3 | * ${atomic}_add_negative - add and test if negative | ||
4 | * @i: integer value to add | ||
5 | * @v: pointer of type ${atomic}_t | ||
6 | * | ||
7 | * Atomically adds @i to @v and returns true | ||
8 | * if the result is negative, or false when the | ||
9 | * result is greater than or equal to zero. | ||
10 | */ | ||
11 | static inline bool | ||
12 | ${atomic}_add_negative(${int} i, ${atomic}_t *v) | ||
13 | { | ||
14 | return ${atomic}_add_return(i, v) < 0; | ||
15 | } | ||
16 | EOF | ||
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless new file mode 100755 index 000000000000..792533885fbf --- /dev/null +++ b/scripts/atomic/fallbacks/add_unless | |||
@@ -0,0 +1,16 @@ | |||
1 | cat << EOF | ||
2 | /** | ||
3 | * ${atomic}_add_unless - add unless the number is already a given value | ||
4 | * @v: pointer of type ${atomic}_t | ||
5 | * @a: the amount to add to v... | ||
6 | * @u: ...unless v is equal to u. | ||
7 | * | ||
8 | * Atomically adds @a to @v, if @v was not already @u. | ||
9 | * Returns true if the addition was done. | ||
10 | */ | ||
11 | static inline bool | ||
12 | ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u) | ||
13 | { | ||
14 | return ${atomic}_fetch_add_unless(v, a, u) != u; | ||
15 | } | ||
16 | EOF | ||
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot new file mode 100755 index 000000000000..9f3a3216b5e3 --- /dev/null +++ b/scripts/atomic/fallbacks/andnot | |||
@@ -0,0 +1,7 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v) | ||
4 | { | ||
5 | ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v); | ||
6 | } | ||
7 | EOF | ||
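Because ${retstmt} is only emitted for variants that return a value, the one template covers both the void and the fetch_ forms; roughly:

	static inline void
	atomic_andnot(int i, atomic_t *v)
	{
		atomic_and(~i, v);
	}

	static inline int
	atomic_fetch_andnot(int i, atomic_t *v)
	{
		return atomic_fetch_and(~i, v);
	}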
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec new file mode 100755 index 000000000000..10bbc82be31d --- /dev/null +++ b/scripts/atomic/fallbacks/dec | |||
@@ -0,0 +1,7 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v) | ||
4 | { | ||
5 | ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v); | ||
6 | } | ||
7 | EOF | ||
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test new file mode 100755 index 000000000000..0ce7103b3df2 --- /dev/null +++ b/scripts/atomic/fallbacks/dec_and_test | |||
@@ -0,0 +1,15 @@ | |||
1 | cat <<EOF | ||
2 | /** | ||
3 | * ${atomic}_dec_and_test - decrement and test | ||
4 | * @v: pointer of type ${atomic}_t | ||
5 | * | ||
6 | * Atomically decrements @v by 1 and | ||
7 | * returns true if the result is 0, or false for all other | ||
8 | * cases. | ||
9 | */ | ||
10 | static inline bool | ||
11 | ${atomic}_dec_and_test(${atomic}_t *v) | ||
12 | { | ||
13 | return ${atomic}_dec_return(v) == 0; | ||
14 | } | ||
15 | EOF | ||
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive new file mode 100755 index 000000000000..c52eacec43c8 --- /dev/null +++ b/scripts/atomic/fallbacks/dec_if_positive | |||
@@ -0,0 +1,15 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_dec_if_positive(${atomic}_t *v) | ||
4 | { | ||
5 | ${int} dec, c = ${atomic}_read(v); | ||
6 | |||
7 | do { | ||
8 | dec = c - 1; | ||
9 | if (unlikely(dec < 0)) | ||
10 | break; | ||
11 | } while (!${atomic}_try_cmpxchg(v, &c, dec)); | ||
12 | |||
13 | return dec; | ||
14 | } | ||
15 | EOF | ||
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive new file mode 100755 index 000000000000..8a2578f14268 --- /dev/null +++ b/scripts/atomic/fallbacks/dec_unless_positive | |||
@@ -0,0 +1,14 @@ | |||
1 | cat <<EOF | ||
2 | static inline bool | ||
3 | ${atomic}_dec_unless_positive(${atomic}_t *v) | ||
4 | { | ||
5 | ${int} c = ${atomic}_read(v); | ||
6 | |||
7 | do { | ||
8 | if (unlikely(c > 0)) | ||
9 | return false; | ||
10 | } while (!${atomic}_try_cmpxchg(v, &c, c - 1)); | ||
11 | |||
12 | return true; | ||
13 | } | ||
14 | EOF | ||
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence new file mode 100755 index 000000000000..82f68fa6931a --- /dev/null +++ b/scripts/atomic/fallbacks/fence | |||
@@ -0,0 +1,11 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}${name}${sfx}(${params}) | ||
4 | { | ||
5 | ${ret} ret; | ||
6 | __atomic_pre_full_fence(); | ||
7 | ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args}); | ||
8 | __atomic_post_full_fence(); | ||
9 | return ret; | ||
10 | } | ||
11 | EOF | ||
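A sketch of the fully-ordered fallback this yields when only the _relaxed form of an op exists, e.g. for atomic_fetch_add():

	static inline int
	atomic_fetch_add(int i, atomic_t *v)
	{
		int ret;
		__atomic_pre_full_fence();
		ret = atomic_fetch_add_relaxed(i, v);
		__atomic_post_full_fence();
		return ret;
	}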
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless new file mode 100755 index 000000000000..d2c091db7eae --- /dev/null +++ b/scripts/atomic/fallbacks/fetch_add_unless | |||
@@ -0,0 +1,23 @@ | |||
1 | cat << EOF | ||
2 | /** | ||
3 | * ${atomic}_fetch_add_unless - add unless the number is already a given value | ||
4 | * @v: pointer of type ${atomic}_t | ||
5 | * @a: the amount to add to v... | ||
6 | * @u: ...unless v is equal to u. | ||
7 | * | ||
8 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
9 | * Returns the original value of @v. | ||
10 | */ | ||
11 | static inline ${int} | ||
12 | ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u) | ||
13 | { | ||
14 | ${int} c = ${atomic}_read(v); | ||
15 | |||
16 | do { | ||
17 | if (unlikely(c == u)) | ||
18 | break; | ||
19 | } while (!${atomic}_try_cmpxchg(v, &c, c + a)); | ||
20 | |||
21 | return c; | ||
22 | } | ||
23 | EOF | ||
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc new file mode 100755 index 000000000000..f866b3ad2353 --- /dev/null +++ b/scripts/atomic/fallbacks/inc | |||
@@ -0,0 +1,7 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v) | ||
4 | { | ||
5 | ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v); | ||
6 | } | ||
7 | EOF | ||
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test new file mode 100755 index 000000000000..4e2068869f7e --- /dev/null +++ b/scripts/atomic/fallbacks/inc_and_test | |||
@@ -0,0 +1,15 @@ | |||
1 | cat <<EOF | ||
2 | /** | ||
3 | * ${atomic}_inc_and_test - increment and test | ||
4 | * @v: pointer of type ${atomic}_t | ||
5 | * | ||
6 | * Atomically increments @v by 1 | ||
7 | * and returns true if the result is zero, or false for all | ||
8 | * other cases. | ||
9 | */ | ||
10 | static inline bool | ||
11 | ${atomic}_inc_and_test(${atomic}_t *v) | ||
12 | { | ||
13 | return ${atomic}_inc_return(v) == 0; | ||
14 | } | ||
15 | EOF | ||
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero new file mode 100755 index 000000000000..a7c45c8d107c --- /dev/null +++ b/scripts/atomic/fallbacks/inc_not_zero | |||
@@ -0,0 +1,14 @@ | |||
1 | cat <<EOF | ||
2 | /** | ||
3 | * ${atomic}_inc_not_zero - increment unless the number is zero | ||
4 | * @v: pointer of type ${atomic}_t | ||
5 | * | ||
6 | * Atomically increments @v by 1, if @v is non-zero. | ||
7 | * Returns true if the increment was done. | ||
8 | */ | ||
9 | static inline bool | ||
10 | ${atomic}_inc_not_zero(${atomic}_t *v) | ||
11 | { | ||
12 | return ${atomic}_add_unless(v, 1, 0); | ||
13 | } | ||
14 | EOF | ||
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative new file mode 100755 index 000000000000..0c266e71dbd4 --- /dev/null +++ b/scripts/atomic/fallbacks/inc_unless_negative | |||
@@ -0,0 +1,14 @@ | |||
1 | cat <<EOF | ||
2 | static inline bool | ||
3 | ${atomic}_inc_unless_negative(${atomic}_t *v) | ||
4 | { | ||
5 | ${int} c = ${atomic}_read(v); | ||
6 | |||
7 | do { | ||
8 | if (unlikely(c < 0)) | ||
9 | return false; | ||
10 | } while (!${atomic}_try_cmpxchg(v, &c, c + 1)); | ||
11 | |||
12 | return true; | ||
13 | } | ||
14 | EOF | ||
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire new file mode 100755 index 000000000000..75863b5203f7 --- /dev/null +++ b/scripts/atomic/fallbacks/read_acquire | |||
@@ -0,0 +1,7 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_read_acquire(const ${atomic}_t *v) | ||
4 | { | ||
5 | return smp_load_acquire(&(v)->counter); | ||
6 | } | ||
7 | EOF | ||
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release new file mode 100755 index 000000000000..3f628a3802d9 --- /dev/null +++ b/scripts/atomic/fallbacks/release | |||
@@ -0,0 +1,8 @@ | |||
1 | cat <<EOF | ||
2 | static inline ${ret} | ||
3 | ${atomic}_${pfx}${name}${sfx}_release(${params}) | ||
4 | { | ||
5 | __atomic_release_fence(); | ||
6 | ${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args}); | ||
7 | } | ||
8 | EOF | ||
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release new file mode 100755 index 000000000000..45bb5e0cfc08 --- /dev/null +++ b/scripts/atomic/fallbacks/set_release | |||
@@ -0,0 +1,7 @@ | |||
1 | cat <<EOF | ||
2 | static inline void | ||
3 | ${atomic}_set_release(${atomic}_t *v, ${int} i) | ||
4 | { | ||
5 | smp_store_release(&(v)->counter, i); | ||
6 | } | ||
7 | EOF | ||
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test new file mode 100755 index 000000000000..289ef17a2d7a --- /dev/null +++ b/scripts/atomic/fallbacks/sub_and_test | |||
@@ -0,0 +1,16 @@ | |||
1 | cat <<EOF | ||
2 | /** | ||
3 | * ${atomic}_sub_and_test - subtract value from variable and test result | ||
4 | * @i: integer value to subtract | ||
5 | * @v: pointer of type ${atomic}_t | ||
6 | * | ||
7 | * Atomically subtracts @i from @v and returns | ||
8 | * true if the result is zero, or false for all | ||
9 | * other cases. | ||
10 | */ | ||
11 | static inline bool | ||
12 | ${atomic}_sub_and_test(${int} i, ${atomic}_t *v) | ||
13 | { | ||
14 | return ${atomic}_sub_return(i, v) == 0; | ||
15 | } | ||
16 | EOF | ||
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg new file mode 100755 index 000000000000..4ed85e2f5378 --- /dev/null +++ b/scripts/atomic/fallbacks/try_cmpxchg | |||
@@ -0,0 +1,11 @@ | |||
1 | cat <<EOF | ||
2 | static inline bool | ||
3 | ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new) | ||
4 | { | ||
5 | ${int} r, o = *old; | ||
6 | r = ${atomic}_cmpxchg${order}(v, o, new); | ||
7 | if (unlikely(r != o)) | ||
8 | *old = r; | ||
9 | return likely(r == o); | ||
10 | } | ||
11 | EOF | ||
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh new file mode 100755 index 000000000000..1bd7c1707633 --- /dev/null +++ b/scripts/atomic/gen-atomic-fallback.sh | |||
@@ -0,0 +1,181 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | ATOMICDIR=$(dirname $0) | ||
5 | |||
6 | . ${ATOMICDIR}/atomic-tbl.sh | ||
7 | |||
8 | #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...) | ||
9 | gen_template_fallback() | ||
10 | { | ||
11 | local template="$1"; shift | ||
12 | local meta="$1"; shift | ||
13 | local pfx="$1"; shift | ||
14 | local name="$1"; shift | ||
15 | local sfx="$1"; shift | ||
16 | local order="$1"; shift | ||
17 | local atomic="$1"; shift | ||
18 | local int="$1"; shift | ||
19 | |||
20 | local atomicname="${atomic}_${pfx}${name}${sfx}${order}" | ||
21 | |||
22 | local ret="$(gen_ret_type "${meta}" "${int}")" | ||
23 | local retstmt="$(gen_ret_stmt "${meta}")" | ||
24 | local params="$(gen_params "${int}" "${atomic}" "$@")" | ||
25 | local args="$(gen_args "$@")" | ||
26 | |||
27 | if [ ! -z "${template}" ]; then | ||
28 | printf "#ifndef ${atomicname}\n" | ||
29 | . ${template} | ||
30 | printf "#define ${atomicname} ${atomicname}\n" | ||
31 | printf "#endif\n\n" | ||
32 | fi | ||
33 | } | ||
34 | |||
35 | #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...) | ||
36 | gen_proto_fallback() | ||
37 | { | ||
38 | local meta="$1"; shift | ||
39 | local pfx="$1"; shift | ||
40 | local name="$1"; shift | ||
41 | local sfx="$1"; shift | ||
42 | local order="$1"; shift | ||
43 | |||
44 | local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" | ||
45 | gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" | ||
46 | } | ||
47 | |||
48 | #gen_basic_fallbacks(basename) | ||
49 | gen_basic_fallbacks() | ||
50 | { | ||
51 | local basename="$1"; shift | ||
52 | cat << EOF | ||
53 | #define ${basename}_acquire ${basename} | ||
54 | #define ${basename}_release ${basename} | ||
55 | #define ${basename}_relaxed ${basename} | ||
56 | EOF | ||
57 | } | ||
58 | |||
59 | #gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...) | ||
60 | gen_proto_order_variants() | ||
61 | { | ||
62 | local meta="$1"; shift | ||
63 | local pfx="$1"; shift | ||
64 | local name="$1"; shift | ||
65 | local sfx="$1"; shift | ||
66 | local atomic="$1" | ||
67 | |||
68 | local basename="${atomic}_${pfx}${name}${sfx}" | ||
69 | |||
70 | local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" | ||
71 | |||
72 | # If we don't have relaxed atomics, then we don't bother with ordering fallbacks; | ||
73 | # read_acquire and set_release still need to be templated, though. | ||
74 | if ! meta_has_relaxed "${meta}"; then | ||
75 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" | ||
76 | |||
77 | if meta_has_acquire "${meta}"; then | ||
78 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" | ||
79 | fi | ||
80 | |||
81 | if meta_has_release "${meta}"; then | ||
82 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" | ||
83 | fi | ||
84 | |||
85 | return | ||
86 | fi | ||
87 | |||
88 | printf "#ifndef ${basename}_relaxed\n" | ||
89 | |||
90 | if [ ! -z "${template}" ]; then | ||
91 | printf "#ifdef ${basename}\n" | ||
92 | fi | ||
93 | |||
94 | gen_basic_fallbacks "${basename}" | ||
95 | |||
96 | if [ ! -z "${template}" ]; then | ||
97 | printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n" | ||
98 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" | ||
99 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" | ||
100 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" | ||
101 | gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@" | ||
102 | fi | ||
103 | |||
104 | printf "#else /* ${basename}_relaxed */\n\n" | ||
105 | |||
106 | gen_template_fallback "${ATOMICDIR}/fallbacks/acquire" "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" | ||
107 | gen_template_fallback "${ATOMICDIR}/fallbacks/release" "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" | ||
108 | gen_template_fallback "${ATOMICDIR}/fallbacks/fence" "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" | ||
109 | |||
110 | printf "#endif /* ${basename}_relaxed */\n\n" | ||
111 | } | ||
112 | |||
113 | gen_xchg_fallbacks() | ||
114 | { | ||
115 | local xchg="$1"; shift | ||
116 | cat <<EOF | ||
117 | #ifndef ${xchg}_relaxed | ||
118 | #define ${xchg}_relaxed ${xchg} | ||
119 | #define ${xchg}_acquire ${xchg} | ||
120 | #define ${xchg}_release ${xchg} | ||
121 | #else /* ${xchg}_relaxed */ | ||
122 | |||
123 | #ifndef ${xchg}_acquire | ||
124 | #define ${xchg}_acquire(...) \\ | ||
125 | __atomic_op_acquire(${xchg}, __VA_ARGS__) | ||
126 | #endif | ||
127 | |||
128 | #ifndef ${xchg}_release | ||
129 | #define ${xchg}_release(...) \\ | ||
130 | __atomic_op_release(${xchg}, __VA_ARGS__) | ||
131 | #endif | ||
132 | |||
133 | #ifndef ${xchg} | ||
134 | #define ${xchg}(...) \\ | ||
135 | __atomic_op_fence(${xchg}, __VA_ARGS__) | ||
136 | #endif | ||
137 | |||
138 | #endif /* ${xchg}_relaxed */ | ||
139 | |||
140 | EOF | ||
141 | } | ||
142 | |||
143 | cat << EOF | ||
144 | // SPDX-License-Identifier: GPL-2.0 | ||
145 | |||
146 | // Generated by $0 | ||
147 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
148 | |||
149 | #ifndef _LINUX_ATOMIC_FALLBACK_H | ||
150 | #define _LINUX_ATOMIC_FALLBACK_H | ||
151 | |||
152 | EOF | ||
153 | |||
154 | for xchg in "xchg" "cmpxchg" "cmpxchg64"; do | ||
155 | gen_xchg_fallbacks "${xchg}" | ||
156 | done | ||
157 | |||
158 | grep '^[a-z]' "$1" | while read name meta args; do | ||
159 | gen_proto "${meta}" "${name}" "atomic" "int" ${args} | ||
160 | done | ||
161 | |||
162 | cat <<EOF | ||
163 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
164 | #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
165 | |||
166 | #ifdef CONFIG_GENERIC_ATOMIC64 | ||
167 | #include <asm-generic/atomic64.h> | ||
168 | #endif | ||
169 | |||
170 | EOF | ||
171 | |||
172 | grep '^[a-z]' "$1" | while read name meta args; do | ||
173 | gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} | ||
174 | done | ||
175 | |||
176 | cat <<EOF | ||
177 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
178 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
179 | |||
180 | #endif /* _LINUX_ATOMIC_FALLBACK_H */ | ||
181 | EOF | ||
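check-atomics.sh only diffs the generated output against the tree; to actually refresh the header after editing the table or a template, redirect the script's output by hand from the top of the kernel tree, e.g.:

	./scripts/atomic/gen-atomic-fallback.sh scripts/atomic/atomics.tbl > include/linux/atomic-fallback.h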
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh new file mode 100755 index 000000000000..e09812372b17 --- /dev/null +++ b/scripts/atomic/gen-atomic-instrumented.sh | |||
@@ -0,0 +1,182 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | ATOMICDIR=$(dirname $0) | ||
5 | |||
6 | . ${ATOMICDIR}/atomic-tbl.sh | ||
7 | |||
8 | #gen_param_check(arg) | ||
9 | gen_param_check() | ||
10 | { | ||
11 | local arg="$1"; shift | ||
12 | local type="${arg%%:*}" | ||
13 | local name="$(gen_param_name "${arg}")" | ||
14 | local rw="write" | ||
15 | |||
16 | case "${type#c}" in | ||
17 | i) return;; | ||
18 | esac | ||
19 | |||
20 | # We don't write to constant parameters | ||
21 | [ ${type#c} != ${type} ] && rw="read" | ||
22 | |||
23 | printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n" | ||
24 | } | ||
25 | |||
26 | #gen_params_checks(arg...) | ||
27 | gen_params_checks() | ||
28 | { | ||
29 | while [ "$#" -gt 0 ]; do | ||
30 | gen_param_check "$1" | ||
31 | shift; | ||
32 | done | ||
33 | } | ||
34 | |||
35 | # gen_guard(meta, atomic, pfx, name, sfx, order) | ||
36 | gen_guard() | ||
37 | { | ||
38 | local meta="$1"; shift | ||
39 | local atomic="$1"; shift | ||
40 | local pfx="$1"; shift | ||
41 | local name="$1"; shift | ||
42 | local sfx="$1"; shift | ||
43 | local order="$1"; shift | ||
44 | |||
45 | local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}" | ||
46 | |||
47 | local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" | ||
48 | |||
49 | # We definitely need a preprocessor symbol for this atomic if it is an | ||
50 | # ordering variant, or if there's a generic fallback. | ||
51 | if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then | ||
52 | printf "defined(${atomicname})" | ||
53 | return | ||
54 | fi | ||
55 | |||
56 | # If this is a base variant, but a relaxed variant *may* exist, then we | ||
57 | # only have a preprocessor symbol if the relaxed variant isn't defined. | ||
58 | if meta_has_relaxed "${meta}"; then | ||
59 | printf "!defined(${atomicname}_relaxed) || defined(${atomicname})" | ||
60 | fi | ||
61 | } | ||
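A sketch of the guards this emits for atomic_add_return, which has no specific fallback template: the _acquire variant is wrapped only when the arch defines it, while the base variant is wrapped unless the arch provided nothing but the _relaxed form:

	#if defined(arch_atomic_add_return_acquire)
	/* ... wrapper for atomic_add_return_acquire() ... */
	#endif

	#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
	/* ... wrapper for atomic_add_return() ... */
	#endif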
62 | |||
63 | #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...) | ||
64 | gen_proto_order_variant() | ||
65 | { | ||
66 | local meta="$1"; shift | ||
67 | local pfx="$1"; shift | ||
68 | local name="$1"; shift | ||
69 | local sfx="$1"; shift | ||
70 | local order="$1"; shift | ||
71 | local atomic="$1"; shift | ||
72 | local int="$1"; shift | ||
73 | |||
74 | local atomicname="${atomic}_${pfx}${name}${sfx}${order}" | ||
75 | |||
76 | local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")" | ||
77 | |||
78 | local ret="$(gen_ret_type "${meta}" "${int}")" | ||
79 | local params="$(gen_params "${int}" "${atomic}" "$@")" | ||
80 | local checks="$(gen_params_checks "$@")" | ||
81 | local args="$(gen_args "$@")" | ||
82 | local retstmt="$(gen_ret_stmt "${meta}")" | ||
83 | |||
84 | [ ! -z "${guard}" ] && printf "#if ${guard}\n" | ||
85 | |||
86 | cat <<EOF | ||
87 | static inline ${ret} | ||
88 | ${atomicname}(${params}) | ||
89 | { | ||
90 | ${checks} | ||
91 | ${retstmt}arch_${atomicname}(${args}); | ||
92 | } | ||
93 | #define ${atomicname} ${atomicname} | ||
94 | EOF | ||
95 | |||
96 | [ ! -z "${guard}" ] && printf "#endif\n" | ||
97 | |||
98 | printf "\n" | ||
99 | } | ||
100 | |||
101 | gen_xchg() | ||
102 | { | ||
103 | local xchg="$1"; shift | ||
104 | local mult="$1"; shift | ||
105 | |||
106 | cat <<EOF | ||
107 | #define ${xchg}(ptr, ...) \\ | ||
108 | ({ \\ | ||
109 | typeof(ptr) __ai_ptr = (ptr); \\ | ||
110 | kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\ | ||
111 | arch_${xchg}(__ai_ptr, __VA_ARGS__); \\ | ||
112 | }) | ||
113 | EOF | ||
114 | } | ||
115 | |||
116 | gen_optional_xchg() | ||
117 | { | ||
118 | local name="$1"; shift | ||
119 | local sfx="$1"; shift | ||
120 | local guard="defined(arch_${name}${sfx})" | ||
121 | |||
122 | [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})" | ||
123 | |||
124 | printf "#if ${guard}\n" | ||
125 | gen_xchg "${name}${sfx}" "" | ||
126 | printf "#endif\n\n" | ||
127 | } | ||
128 | |||
129 | cat << EOF | ||
130 | // SPDX-License-Identifier: GPL-2.0 | ||
131 | |||
132 | // Generated by $0 | ||
133 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
134 | |||
135 | /* | ||
136 | * This file provides wrappers with KASAN instrumentation for atomic operations. | ||
137 | * To use this functionality, an arch's atomic.h file needs to define all | ||
138 | * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include | ||
139 | * this file at the end. This file provides atomic_read() that forwards to | ||
140 | * arch_atomic_read() for the actual atomic operation. | ||
141 | * Note: if an arch atomic operation is implemented by means of other atomic | ||
142 | * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use | ||
143 | * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid | ||
144 | * double instrumentation. | ||
145 | */ | ||
146 | #ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H | ||
147 | #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H | ||
148 | |||
149 | #include <linux/build_bug.h> | ||
150 | #include <linux/kasan-checks.h> | ||
151 | |||
152 | EOF | ||
153 | |||
154 | grep '^[a-z]' "$1" | while read name meta args; do | ||
155 | gen_proto "${meta}" "${name}" "atomic" "int" ${args} | ||
156 | done | ||
157 | |||
158 | grep '^[a-z]' "$1" | while read name meta args; do | ||
159 | gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} | ||
160 | done | ||
161 | |||
162 | for xchg in "xchg" "cmpxchg" "cmpxchg64"; do | ||
163 | for order in "" "_acquire" "_release" "_relaxed"; do | ||
164 | gen_optional_xchg "${xchg}" "${order}" | ||
165 | done | ||
166 | done | ||
167 | |||
168 | for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do | ||
169 | gen_xchg "${xchg}" "" | ||
170 | printf "\n" | ||
171 | done | ||
172 | |||
173 | gen_xchg "cmpxchg_double" "2 * " | ||
174 | |||
175 | printf "\n\n" | ||
176 | |||
177 | gen_xchg "cmpxchg_double_local" "2 * " | ||
178 | |||
179 | cat <<EOF | ||
180 | |||
181 | #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ | ||
182 | EOF | ||
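A sketch of one generated wrapper, for the plain atomic_add() row of the table: the non-constant pointer argument gets a KASAN write check, and the call is forwarded to the arch_ implementation:

	static inline void
	atomic_add(int i, atomic_t *v)
	{
		kasan_check_write(v, sizeof(*v));
		arch_atomic_add(i, v);
	}
	#define atomic_add atomic_add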
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh new file mode 100755 index 000000000000..c240a7231b2e --- /dev/null +++ b/scripts/atomic/gen-atomic-long.sh | |||
@@ -0,0 +1,101 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | ATOMICDIR=$(dirname $0) | ||
5 | |||
6 | . ${ATOMICDIR}/atomic-tbl.sh | ||
7 | |||
8 | #gen_cast(arg, int, atomic) | ||
9 | gen_cast() | ||
10 | { | ||
11 | local arg="$1"; shift | ||
12 | local int="$1"; shift | ||
13 | local atomic="$1"; shift | ||
14 | |||
15 | [ "${arg%%:*}" = "p" ] || return | ||
16 | |||
17 | printf "($(gen_param_type "${arg}" "${int}" "${atomic}"))" | ||
18 | } | ||
19 | |||
20 | #gen_args_cast(int, atomic, arg...) | ||
21 | gen_args_cast() | ||
22 | { | ||
23 | local int="$1"; shift | ||
24 | local atomic="$1"; shift | ||
25 | |||
26 | while [ "$#" -gt 0 ]; do | ||
27 | local cast="$(gen_cast "$1" "${int}" "${atomic}")" | ||
28 | local arg="$(gen_param_name "$1")" | ||
29 | printf "${cast}${arg}" | ||
30 | [ "$#" -gt 1 ] && printf ", " | ||
31 | shift; | ||
32 | done | ||
33 | } | ||
34 | |||
35 | #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...) | ||
36 | gen_proto_order_variant() | ||
37 | { | ||
38 | local meta="$1"; shift | ||
39 | local name="$1$2$3$4"; shift; shift; shift; shift | ||
40 | local atomic="$1"; shift | ||
41 | local int="$1"; shift | ||
42 | |||
43 | local ret="$(gen_ret_type "${meta}" "long")" | ||
44 | local params="$(gen_params "long" "atomic_long" "$@")" | ||
45 | local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")" | ||
46 | local retstmt="$(gen_ret_stmt "${meta}")" | ||
47 | |||
48 | cat <<EOF | ||
49 | static inline ${ret} | ||
50 | atomic_long_${name}(${params}) | ||
51 | { | ||
52 | ${retstmt}${atomic}_${name}(${argscast}); | ||
53 | } | ||
54 | |||
55 | EOF | ||
56 | } | ||
57 | |||
58 | cat << EOF | ||
59 | // SPDX-License-Identifier: GPL-2.0 | ||
60 | |||
61 | // Generated by $0 | ||
62 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
63 | |||
64 | #ifndef _ASM_GENERIC_ATOMIC_LONG_H | ||
65 | #define _ASM_GENERIC_ATOMIC_LONG_H | ||
66 | |||
67 | #include <asm/types.h> | ||
68 | |||
69 | #ifdef CONFIG_64BIT | ||
70 | typedef atomic64_t atomic_long_t; | ||
71 | #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) | ||
72 | #define atomic_long_cond_read_acquire atomic64_cond_read_acquire | ||
73 | #define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed | ||
74 | #else | ||
75 | typedef atomic_t atomic_long_t; | ||
76 | #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) | ||
77 | #define atomic_long_cond_read_acquire atomic_cond_read_acquire | ||
78 | #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed | ||
79 | #endif | ||
80 | |||
81 | #ifdef CONFIG_64BIT | ||
82 | |||
83 | EOF | ||
84 | |||
85 | grep '^[a-z]' "$1" | while read name meta args; do | ||
86 | gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} | ||
87 | done | ||
88 | |||
89 | cat <<EOF | ||
90 | #else /* CONFIG_64BIT */ | ||
91 | |||
92 | EOF | ||
93 | |||
94 | grep '^[a-z]' "$1" | while read name meta args; do | ||
95 | gen_proto "${meta}" "${name}" "atomic" "int" ${args} | ||
96 | done | ||
97 | |||
98 | cat <<EOF | ||
99 | #endif /* CONFIG_64BIT */ | ||
100 | #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ | ||
101 | EOF | ||
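A sketch of one wrapper generated in the CONFIG_64BIT branch, for try_cmpxchg, showing gen_cast() converting the long * argument to the s64 * that the underlying atomic64 op expects:

	static inline bool
	atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
	{
		return atomic64_try_cmpxchg(v, (s64 *)old, new);
	}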