author    Steve Capper <steve.capper@linaro.org>      2015-03-22 10:51:51 -0400
committer Catalin Marinas <catalin.marinas@arm.com>   2015-03-24 14:02:55 -0400
commit    f3eab7184ddcd4867cf42e3274ba24a66e1e093d (patch)
tree      e501fee107f0d97b77cde072d14b0eeb80cd85af /arch/arm64/include
parent    e53f21bce4d35a93b23d8fa1a840860f6c74f59e (diff)
arm64: percpu: Make this_cpu accessors pre-empt safe
this_cpu operations were implemented for arm64 in:

  5284e1b arm64: xchg: Implement cmpxchg_double
  f97fc81 arm64: percpu: Implement this_cpu operations

Unfortunately, it is possible for pre-emption to take place between address
generation and data access. This can lead to cases where data is being
manipulated by this_cpu for a different CPU than the one it was called on,
which effectively breaks the spec.

This patch disables pre-emption for the this_cpu operations, guaranteeing
that address generation and data manipulation take place without a
pre-emption in between.

Fixes: 5284e1b4bc8a ("arm64: xchg: Implement cmpxchg_double")
Fixes: f97fc810798c ("arm64: percpu: Implement this_cpu operations")
Reported-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Steve Capper <steve.capper@linaro.org>
[catalin.marinas@arm.com: remove space after type cast]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
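To make the race concrete, here is a minimal sketch of the bug the patch closes. It is illustrative only and not part of the patch: demo_counter and the two functions are hypothetical, while DEFINE_PER_CPU(), raw_cpu_ptr() and preempt_disable()/preempt_enable() are the existing kernel APIs the patch relies on.

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_counter);	/* hypothetical per-cpu variable */

/* Pre-patch behaviour, roughly: address generation and the update are two
 * separate steps, so the task may be pre-empted and migrated in between. */
static void racy_increment(void)
{
	int *p = raw_cpu_ptr(&demo_counter);	/* address of this CPU's copy */
	/* <-- pre-emption here can move the task to another CPU */
	*p += 1;	/* now modifies the original CPU's copy from the new CPU */
}

/* What the patch enforces: address generation and the access form one
 * non-preemptible unit. */
static void safe_increment(void)
{
	preempt_disable();
	*raw_cpu_ptr(&demo_counter) += 1;
	preempt_enable();
}

The patch moves this preempt_disable()/preempt_enable() bracketing into the this_cpu accessor macros themselves, so every caller gets it automatically.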
Diffstat (limited to 'arch/arm64/include')
 -rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 32
 -rw-r--r--  arch/arm64/include/asm/percpu.h  | 44
 2 files changed, 57 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)			\
+({								\
+	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
+	preempt_disable();					\
+	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
+	preempt_enable();					\
+	__ret;							\
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)	\
+({								\
+	int __ret;						\
+	preempt_disable();					\
+	__ret = cmpxchg_double_local(raw_cpu_ptr(&(ptr1)),	\
+				     raw_cpu_ptr(&(ptr2)),	\
+				     o1, o2, n1, n2);		\
+	preempt_enable();					\
+	__ret;							\
+})
 
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 	return ret;
 }
 
+#define _percpu_read(pcp)						\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
+					      sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})
+
+#define _percpu_write(pcp, val)						\
+do {									\
+	preempt_disable();						\
+	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
+				sizeof(pcp));				\
+	preempt_enable();						\
+} while(0)								\
+
+#define _pcp_protect(operation, pcp, val)				\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),		\
+					  (val), sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})
+
 #define _percpu_add(pcp, val) \
-	__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-	__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-	__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))	\
-	(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+	_pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-	(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)