author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
commit     ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree       883eb497642d98635817f9cf954ac98e043fb573 /arch
parent     4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent     d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

   - Extend atomic primitives with coherent logic op primitives
     (atomic_{or,and,xor}()) and deprecate the old partial APIs
     (atomic_{set,clear}_mask())

     The old ops were incoherent with incompatible signatures across
     architectures and with incomplete support. Now every architecture
     supports the primitives consistently (by Peter Zijlstra)

   - Generic support for 'relaxed atomics':

       - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
         {add,sub}_return()
       - atomic_read_acquire()
       - atomic_set_release()

     This came out of porting qrwlock code to arm64 (by Will Deacon)

   - Clean up the fragile static_key APIs that were causing repeat bugs,
     by introducing a new one:

       DEFINE_STATIC_KEY_TRUE(name);
       DEFINE_STATIC_KEY_FALSE(name);

     which define a key of different types with an initial true/false
     value.

     Then allow:

       static_branch_likely()
       static_branch_unlikely()

     to take a key of either type and emit the right instruction for
     the case. To be able to know the 'type' of the static key we
     encode it in the jump entry (by Peter Zijlstra)

   - Static key self-tests (by Jason Baron)

   - qrwlock optimizations (by Waiman Long)

   - small futex enhancements (by Davidlohr Bueso)

   - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
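To make the new static-key interface concrete, here is a minimal sketch of how a caller is expected to use it, based only on the API names listed in the changelog above (DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely(), static_key_enable()); the key name and the two functions are hypothetical:

  /* Minimal sketch (hypothetical key and functions), assuming the new API. */
  #include <linux/jump_label.h>
  #include <linux/printk.h>

  /* Key starts out false, so the branch below is patched to a NOP. */
  static DEFINE_STATIC_KEY_FALSE(example_feature_key);

  void example_hot_path(void)
  {
          /* Straight-line code (a NOP) until the key is enabled. */
          if (static_branch_unlikely(&example_feature_key))
                  pr_info("example: slow path taken\n");
  }

  void example_enable_feature(void)
  {
          /* Runtime flip: patches the NOP into a jump at each call site. */
          static_key_enable(&example_feature_key.key);
  }

The deprecated mask operations are converted along the same lines throughout the diff below: atomic_set_mask(m, v) becomes atomic_or(m, v) and atomic_clear_mask(m, v) becomes atomic_and(~m, v), as the blackfin smp.c hunk illustrates.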
Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/alpha/include/asm/atomic.h | 42
-rw-r--r--  arch/arc/include/asm/atomic.h | 8
-rw-r--r--  arch/arm/include/asm/atomic.h | 51
-rw-r--r--  arch/arm/include/asm/barrier.h | 4
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 47
-rw-r--r--  arch/arm/include/asm/jump_label.h | 25
-rw-r--r--  arch/arm/kernel/jump_label.c | 2
-rw-r--r--  arch/arm64/include/asm/atomic.h | 14
-rw-r--r--  arch/arm64/include/asm/barrier.h | 4
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 18
-rw-r--r--  arch/arm64/kernel/jump_label.c | 2
-rw-r--r--  arch/avr32/include/asm/atomic.h | 12
-rw-r--r--  arch/blackfin/include/asm/atomic.h | 16
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c | 7
-rw-r--r--  arch/blackfin/mach-bf561/atomic.S | 30
-rw-r--r--  arch/blackfin/mach-common/smp.c | 2
-rw-r--r--  arch/frv/include/asm/atomic.h | 107
-rw-r--r--  arch/frv/include/asm/atomic_defs.h | 172
-rw-r--r--  arch/frv/include/asm/bitops.h | 99
-rw-r--r--  arch/frv/kernel/dma.c | 6
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 5
-rw-r--r--  arch/frv/lib/Makefile | 2
-rw-r--r--  arch/frv/lib/atomic-lib.c | 7
-rw-r--r--  arch/frv/lib/atomic-ops.S | 110
-rw-r--r--  arch/frv/lib/atomic64-ops.S | 94
-rw-r--r--  arch/h8300/include/asm/atomic.h | 137
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 4
-rw-r--r--  arch/ia64/include/asm/atomic.h | 24
-rw-r--r--  arch/ia64/include/asm/barrier.h | 4
-rw-r--r--  arch/m32r/include/asm/atomic.h | 45
-rw-r--r--  arch/m32r/kernel/smp.c | 4
-rw-r--r--  arch/m68k/include/asm/atomic.h | 14
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h | 38
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h | 23
-rw-r--r--  arch/metag/include/asm/barrier.h | 4
-rw-r--r--  arch/mips/include/asm/atomic.h | 7
-rw-r--r--  arch/mips/include/asm/barrier.h | 4
-rw-r--r--  arch/mips/include/asm/jump_label.h | 19
-rw-r--r--  arch/mips/kernel/jump_label.c | 2
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 71
-rw-r--r--  arch/mn10300/mm/tlb-smp.c | 2
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 1
-rw-r--r--  arch/parisc/include/asm/atomic.h | 7
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 7
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 4
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 19
-rw-r--r--  arch/powerpc/kernel/jump_label.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 19
-rw-r--r--  arch/s390/include/asm/atomic.h | 41
-rw-r--r--  arch/s390/include/asm/barrier.h | 4
-rw-r--r--  arch/s390/include/asm/jump_label.h | 19
-rw-r--r--  arch/s390/kernel/jump_label.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/s390/kvm/interrupt.c | 30
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 32
-rw-r--r--  arch/s390/lib/uaccess.c | 12
-rw-r--r--  arch/sh/include/asm/atomic-grb.h | 43
-rw-r--r--  arch/sh/include/asm/atomic-irq.h | 21
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h | 31
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 4
-rw-r--r--  arch/sparc/include/asm/atomic_64.h | 4
-rw-r--r--  arch/sparc/include/asm/barrier_64.h | 4
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 35
-rw-r--r--  arch/sparc/kernel/jump_label.c | 2
-rw-r--r--  arch/sparc/lib/atomic32.c | 22
-rw-r--r--  arch/sparc/lib/atomic_64.S | 6
-rw-r--r--  arch/sparc/lib/ksyms.c | 3
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 28
-rw-r--r--  arch/tile/include/asm/atomic_64.h | 40
-rw-r--r--  arch/tile/lib/atomic_32.c | 23
-rw-r--r--  arch/tile/lib/atomic_asm_32.S | 4
-rw-r--r--  arch/x86/include/asm/atomic.h | 25
-rw-r--r--  arch/x86/include/asm/atomic64_32.h | 14
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 15
-rw-r--r--  arch/x86/include/asm/barrier.h | 8
-rw-r--r--  arch/x86/include/asm/jump_label.h | 23
-rw-r--r--  arch/x86/include/asm/qrwlock.h | 10
-rw-r--r--  arch/x86/kernel/jump_label.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 22
-rw-r--r--  arch/xtensa/configs/iss_defconfig | 1
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 73
83 files changed, 899 insertions, 1062 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a8ea7110de8..a71cdbe2a04d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -71,6 +71,12 @@ config JUMP_LABEL
71 ( On 32-bit x86, the necessary options added to the compiler 71 ( On 32-bit x86, the necessary options added to the compiler
72 flags may increase the size of the kernel slightly. ) 72 flags may increase the size of the kernel slightly. )
73 73
74config STATIC_KEYS_SELFTEST
75 bool "Static key selftest"
76 depends on JUMP_LABEL
77 help
78 Boot time self-test of the branch patching code.
79
74config OPTPROBES 80config OPTPROBES
75 def_bool y 81 def_bool y
76 depends on KPROBES && HAVE_OPTPROBES 82 depends on KPROBES && HAVE_OPTPROBES
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafbedd7c..e8c956098424 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
29 * branch back to restart the operation. 29 * branch back to restart the operation.
30 */ 30 */
31 31
32#define ATOMIC_OP(op) \ 32#define ATOMIC_OP(op, asm_op) \
33static __inline__ void atomic_##op(int i, atomic_t * v) \ 33static __inline__ void atomic_##op(int i, atomic_t * v) \
34{ \ 34{ \
35 unsigned long temp; \ 35 unsigned long temp; \
36 __asm__ __volatile__( \ 36 __asm__ __volatile__( \
37 "1: ldl_l %0,%1\n" \ 37 "1: ldl_l %0,%1\n" \
38 " " #op "l %0,%2,%0\n" \ 38 " " #asm_op " %0,%2,%0\n" \
39 " stl_c %0,%1\n" \ 39 " stl_c %0,%1\n" \
40 " beq %0,2f\n" \ 40 " beq %0,2f\n" \
41 ".subsection 2\n" \ 41 ".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
45 :"Ir" (i), "m" (v->counter)); \ 45 :"Ir" (i), "m" (v->counter)); \
46} \ 46} \
47 47
48#define ATOMIC_OP_RETURN(op) \ 48#define ATOMIC_OP_RETURN(op, asm_op) \
49static inline int atomic_##op##_return(int i, atomic_t *v) \ 49static inline int atomic_##op##_return(int i, atomic_t *v) \
50{ \ 50{ \
51 long temp, result; \ 51 long temp, result; \
52 smp_mb(); \ 52 smp_mb(); \
53 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
54 "1: ldl_l %0,%1\n" \ 54 "1: ldl_l %0,%1\n" \
55 " " #op "l %0,%3,%2\n" \ 55 " " #asm_op " %0,%3,%2\n" \
56 " " #op "l %0,%3,%0\n" \ 56 " " #asm_op " %0,%3,%0\n" \
57 " stl_c %0,%1\n" \ 57 " stl_c %0,%1\n" \
58 " beq %0,2f\n" \ 58 " beq %0,2f\n" \
59 ".subsection 2\n" \ 59 ".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
65 return result; \ 65 return result; \
66} 66}
67 67
68#define ATOMIC64_OP(op) \ 68#define ATOMIC64_OP(op, asm_op) \
69static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 69static __inline__ void atomic64_##op(long i, atomic64_t * v) \
70{ \ 70{ \
71 unsigned long temp; \ 71 unsigned long temp; \
72 __asm__ __volatile__( \ 72 __asm__ __volatile__( \
73 "1: ldq_l %0,%1\n" \ 73 "1: ldq_l %0,%1\n" \
74 " " #op "q %0,%2,%0\n" \ 74 " " #asm_op " %0,%2,%0\n" \
75 " stq_c %0,%1\n" \ 75 " stq_c %0,%1\n" \
76 " beq %0,2f\n" \ 76 " beq %0,2f\n" \
77 ".subsection 2\n" \ 77 ".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
81 :"Ir" (i), "m" (v->counter)); \ 81 :"Ir" (i), "m" (v->counter)); \
82} \ 82} \
83 83
84#define ATOMIC64_OP_RETURN(op) \ 84#define ATOMIC64_OP_RETURN(op, asm_op) \
85static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 85static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
86{ \ 86{ \
87 long temp, result; \ 87 long temp, result; \
88 smp_mb(); \ 88 smp_mb(); \
89 __asm__ __volatile__( \ 89 __asm__ __volatile__( \
90 "1: ldq_l %0,%1\n" \ 90 "1: ldq_l %0,%1\n" \
91 " " #op "q %0,%3,%2\n" \ 91 " " #asm_op " %0,%3,%2\n" \
92 " " #op "q %0,%3,%0\n" \ 92 " " #asm_op " %0,%3,%0\n" \
93 " stq_c %0,%1\n" \ 93 " stq_c %0,%1\n" \
94 " beq %0,2f\n" \ 94 " beq %0,2f\n" \
95 ".subsection 2\n" \ 95 ".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
101 return result; \ 101 return result; \
102} 102}
103 103
104#define ATOMIC_OPS(opg) \ 104#define ATOMIC_OPS(op) \
105 ATOMIC_OP(opg) \ 105 ATOMIC_OP(op, op##l) \
106 ATOMIC_OP_RETURN(opg) \ 106 ATOMIC_OP_RETURN(op, op##l) \
107 ATOMIC64_OP(opg) \ 107 ATOMIC64_OP(op, op##q) \
108 ATOMIC64_OP_RETURN(opg) 108 ATOMIC64_OP_RETURN(op, op##q)
109 109
110ATOMIC_OPS(add) 110ATOMIC_OPS(add)
111ATOMIC_OPS(sub) 111ATOMIC_OPS(sub)
112 112
113#define atomic_andnot atomic_andnot
114#define atomic64_andnot atomic64_andnot
115
116ATOMIC_OP(and, and)
117ATOMIC_OP(andnot, bic)
118ATOMIC_OP(or, bis)
119ATOMIC_OP(xor, xor)
120ATOMIC64_OP(and, and)
121ATOMIC64_OP(andnot, bic)
122ATOMIC64_OP(or, bis)
123ATOMIC64_OP(xor, xor)
124
113#undef ATOMIC_OPS 125#undef ATOMIC_OPS
114#undef ATOMIC64_OP_RETURN 126#undef ATOMIC64_OP_RETURN
115#undef ATOMIC64_OP 127#undef ATOMIC64_OP
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 87d18ae53115..c3ecda023e3a 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -172,9 +172,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
172 172
173ATOMIC_OPS(add, +=, add) 173ATOMIC_OPS(add, +=, add)
174ATOMIC_OPS(sub, -=, sub) 174ATOMIC_OPS(sub, -=, sub)
175ATOMIC_OP(and, &=, and)
176 175
177#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v)) 176#define atomic_andnot atomic_andnot
177
178ATOMIC_OP(and, &=, and)
179ATOMIC_OP(andnot, &= ~, bic)
180ATOMIC_OP(or, |=, or)
181ATOMIC_OP(xor, ^=, xor)
178 182
179#undef ATOMIC_OPS 183#undef ATOMIC_OPS
180#undef ATOMIC_OP_RETURN 184#undef ATOMIC_OP_RETURN
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c11970b7b..fe3ef397f5a4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
57} \ 57} \
58 58
59#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 59#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
60static inline int atomic_##op##_return(int i, atomic_t *v) \ 60static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
61{ \ 61{ \
62 unsigned long tmp; \ 62 unsigned long tmp; \
63 int result; \ 63 int result; \
64 \ 64 \
65 smp_mb(); \
66 prefetchw(&v->counter); \ 65 prefetchw(&v->counter); \
67 \ 66 \
68 __asm__ __volatile__("@ atomic_" #op "_return\n" \ 67 __asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
75 : "r" (&v->counter), "Ir" (i) \ 74 : "r" (&v->counter), "Ir" (i) \
76 : "cc"); \ 75 : "cc"); \
77 \ 76 \
78 smp_mb(); \
79 \
80 return result; \ 77 return result; \
81} 78}
82 79
83static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 80#define atomic_add_return_relaxed atomic_add_return_relaxed
81#define atomic_sub_return_relaxed atomic_sub_return_relaxed
82
83static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
84{ 84{
85 int oldval; 85 int oldval;
86 unsigned long res; 86 unsigned long res;
87 87
88 smp_mb();
89 prefetchw(&ptr->counter); 88 prefetchw(&ptr->counter);
90 89
91 do { 90 do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
99 : "cc"); 98 : "cc");
100 } while (res); 99 } while (res);
101 100
102 smp_mb();
103
104 return oldval; 101 return oldval;
105} 102}
103#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
106 104
107static inline int __atomic_add_unless(atomic_t *v, int a, int u) 105static inline int __atomic_add_unless(atomic_t *v, int a, int u)
108{ 106{
@@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
194ATOMIC_OPS(add, +=, add) 192ATOMIC_OPS(add, +=, add)
195ATOMIC_OPS(sub, -=, sub) 193ATOMIC_OPS(sub, -=, sub)
196 194
195#define atomic_andnot atomic_andnot
196
197ATOMIC_OP(and, &=, and)
198ATOMIC_OP(andnot, &= ~, bic)
199ATOMIC_OP(or, |=, orr)
200ATOMIC_OP(xor, ^=, eor)
201
197#undef ATOMIC_OPS 202#undef ATOMIC_OPS
198#undef ATOMIC_OP_RETURN 203#undef ATOMIC_OP_RETURN
199#undef ATOMIC_OP 204#undef ATOMIC_OP
@@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
290} \ 295} \
291 296
292#define ATOMIC64_OP_RETURN(op, op1, op2) \ 297#define ATOMIC64_OP_RETURN(op, op1, op2) \
293static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ 298static inline long long \
299atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
294{ \ 300{ \
295 long long result; \ 301 long long result; \
296 unsigned long tmp; \ 302 unsigned long tmp; \
297 \ 303 \
298 smp_mb(); \
299 prefetchw(&v->counter); \ 304 prefetchw(&v->counter); \
300 \ 305 \
301 __asm__ __volatile__("@ atomic64_" #op "_return\n" \ 306 __asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
309 : "r" (&v->counter), "r" (i) \ 314 : "r" (&v->counter), "r" (i) \
310 : "cc"); \ 315 : "cc"); \
311 \ 316 \
312 smp_mb(); \
313 \
314 return result; \ 317 return result; \
315} 318}
316 319
@@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
321ATOMIC64_OPS(add, adds, adc) 324ATOMIC64_OPS(add, adds, adc)
322ATOMIC64_OPS(sub, subs, sbc) 325ATOMIC64_OPS(sub, subs, sbc)
323 326
327#define atomic64_add_return_relaxed atomic64_add_return_relaxed
328#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
329
330#define atomic64_andnot atomic64_andnot
331
332ATOMIC64_OP(and, and, and)
333ATOMIC64_OP(andnot, bic, bic)
334ATOMIC64_OP(or, orr, orr)
335ATOMIC64_OP(xor, eor, eor)
336
324#undef ATOMIC64_OPS 337#undef ATOMIC64_OPS
325#undef ATOMIC64_OP_RETURN 338#undef ATOMIC64_OP_RETURN
326#undef ATOMIC64_OP 339#undef ATOMIC64_OP
327 340
328static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, 341static inline long long
329 long long new) 342atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
330{ 343{
331 long long oldval; 344 long long oldval;
332 unsigned long res; 345 unsigned long res;
333 346
334 smp_mb();
335 prefetchw(&ptr->counter); 347 prefetchw(&ptr->counter);
336 348
337 do { 349 do {
@@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
346 : "cc"); 358 : "cc");
347 } while (res); 359 } while (res);
348 360
349 smp_mb();
350
351 return oldval; 361 return oldval;
352} 362}
363#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
353 364
354static inline long long atomic64_xchg(atomic64_t *ptr, long long new) 365static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
355{ 366{
356 long long result; 367 long long result;
357 unsigned long tmp; 368 unsigned long tmp;
358 369
359 smp_mb();
360 prefetchw(&ptr->counter); 370 prefetchw(&ptr->counter);
361 371
362 __asm__ __volatile__("@ atomic64_xchg\n" 372 __asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
368 : "r" (&ptr->counter), "r" (new) 378 : "r" (&ptr->counter), "r" (new)
369 : "cc"); 379 : "cc");
370 380
371 smp_mb();
372
373 return result; 381 return result;
374} 382}
383#define atomic64_xchg_relaxed atomic64_xchg_relaxed
375 384
376static inline long long atomic64_dec_if_positive(atomic64_t *v) 385static inline long long atomic64_dec_if_positive(atomic64_t *v)
377{ 386{
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e1c732..70393574e0fa 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,12 +67,12 @@
67do { \ 67do { \
68 compiletime_assert_atomic_type(*p); \ 68 compiletime_assert_atomic_type(*p); \
69 smp_mb(); \ 69 smp_mb(); \
70 ACCESS_ONCE(*p) = (v); \ 70 WRITE_ONCE(*p, v); \
71} while (0) 71} while (0)
72 72
73#define smp_load_acquire(p) \ 73#define smp_load_acquire(p) \
74({ \ 74({ \
75 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 75 typeof(*p) ___p1 = READ_ONCE(*p); \
76 compiletime_assert_atomic_type(*p); \ 76 compiletime_assert_atomic_type(*p); \
77 smp_mb(); \ 77 smp_mb(); \
78 ___p1; \ 78 ___p1; \
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05d3207..916a2744d5c6 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
35 unsigned int tmp; 35 unsigned int tmp;
36#endif 36#endif
37 37
38 smp_mb();
39 prefetchw((const void *)ptr); 38 prefetchw((const void *)ptr);
40 39
41 switch (size) { 40 switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
98 __bad_xchg(ptr, size), ret = 0; 97 __bad_xchg(ptr, size), ret = 0;
99 break; 98 break;
100 } 99 }
101 smp_mb();
102 100
103 return ret; 101 return ret;
104} 102}
105 103
106#define xchg(ptr, x) ({ \ 104#define xchg_relaxed(ptr, x) ({ \
107 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ 105 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
108 sizeof(*(ptr))); \ 106 sizeof(*(ptr))); \
109}) 107})
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
117#error "SMP is not supported on this platform" 115#error "SMP is not supported on this platform"
118#endif 116#endif
119 117
118#define xchg xchg_relaxed
119
120/* 120/*
121 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make 121 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
122 * them available. 122 * them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
194 return oldval; 194 return oldval;
195} 195}
196 196
197static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, 197#define cmpxchg_relaxed(ptr,o,n) ({ \
198 unsigned long new, int size) 198 (__typeof__(*(ptr)))__cmpxchg((ptr), \
199{ 199 (unsigned long)(o), \
200 unsigned long ret; 200 (unsigned long)(n), \
201 201 sizeof(*(ptr))); \
202 smp_mb();
203 ret = __cmpxchg(ptr, old, new, size);
204 smp_mb();
205
206 return ret;
207}
208
209#define cmpxchg(ptr,o,n) ({ \
210 (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
211 (unsigned long)(o), \
212 (unsigned long)(n), \
213 sizeof(*(ptr))); \
214}) 202})
215 203
216static inline unsigned long __cmpxchg_local(volatile void *ptr, 204static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
273 261
274#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) 262#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
275 263
276static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
277 unsigned long long old,
278 unsigned long long new)
279{
280 unsigned long long ret;
281
282 smp_mb();
283 ret = __cmpxchg64(ptr, old, new);
284 smp_mb();
285
286 return ret;
287}
288
289#define cmpxchg64(ptr, o, n) ({ \
290 (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
291 (unsigned long long)(o), \
292 (unsigned long long)(n)); \
293})
294
295#endif /* __LINUX_ARM_ARCH__ >= 6 */ 264#endif /* __LINUX_ARM_ARCH__ >= 6 */
296 265
297#endif /* __ASM_ARM_CMPXCHG_H */ 266#endif /* __ASM_ARM_CMPXCHG_H */
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc5c108..34f7b6980d21 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <asm/unified.h>
7 8
8#define JUMP_LABEL_NOP_SIZE 4 9#define JUMP_LABEL_NOP_SIZE 4
9 10
10#ifdef CONFIG_THUMB2_KERNEL 11static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
11#define JUMP_LABEL_NOP "nop.w" 12{
12#else 13 asm_volatile_goto("1:\n\t"
13#define JUMP_LABEL_NOP "nop" 14 WASM(nop) "\n\t"
14#endif 15 ".pushsection __jump_table, \"aw\"\n\t"
16 ".word 1b, %l[l_yes], %c0\n\t"
17 ".popsection\n\t"
18 : : "i" (&((char *)key)[branch]) : : l_yes);
19
20 return false;
21l_yes:
22 return true;
23}
15 24
16static __always_inline bool arch_static_branch(struct static_key *key) 25static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
17{ 26{
18 asm_volatile_goto("1:\n\t" 27 asm_volatile_goto("1:\n\t"
19 JUMP_LABEL_NOP "\n\t" 28 WASM(b) " %l[l_yes]\n\t"
20 ".pushsection __jump_table, \"aw\"\n\t" 29 ".pushsection __jump_table, \"aw\"\n\t"
21 ".word 1b, %l[l_yes], %c0\n\t" 30 ".word 1b, %l[l_yes], %c0\n\t"
22 ".popsection\n\t" 31 ".popsection\n\t"
23 : : "i" (key) : : l_yes); 32 : : "i" (&((char *)key)[branch]) : : l_yes);
24 33
25 return false; 34 return false;
26l_yes: 35l_yes:
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index e39cbf488cfe..845a5dd9c42b 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -12,7 +12,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
12 void *addr = (void *)entry->code; 12 void *addr = (void *)entry->code;
13 unsigned int insn; 13 unsigned int insn;
14 14
15 if (type == JUMP_LABEL_ENABLE) 15 if (type == JUMP_LABEL_JMP)
16 insn = arm_gen_branch(entry->code, entry->target); 16 insn = arm_gen_branch(entry->code, entry->target);
17 else 17 else
18 insn = arm_gen_nop(); 18 insn = arm_gen_nop();
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051ded40..866a71fca9a3 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
85ATOMIC_OPS(add, add) 85ATOMIC_OPS(add, add)
86ATOMIC_OPS(sub, sub) 86ATOMIC_OPS(sub, sub)
87 87
88#define atomic_andnot atomic_andnot
89
90ATOMIC_OP(and, and)
91ATOMIC_OP(andnot, bic)
92ATOMIC_OP(or, orr)
93ATOMIC_OP(xor, eor)
94
88#undef ATOMIC_OPS 95#undef ATOMIC_OPS
89#undef ATOMIC_OP_RETURN 96#undef ATOMIC_OP_RETURN
90#undef ATOMIC_OP 97#undef ATOMIC_OP
@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \
183ATOMIC64_OPS(add, add) 190ATOMIC64_OPS(add, add)
184ATOMIC64_OPS(sub, sub) 191ATOMIC64_OPS(sub, sub)
185 192
193#define atomic64_andnot atomic64_andnot
194
195ATOMIC64_OP(and, and)
196ATOMIC64_OP(andnot, bic)
197ATOMIC64_OP(or, orr)
198ATOMIC64_OP(xor, eor)
199
186#undef ATOMIC64_OPS 200#undef ATOMIC64_OPS
187#undef ATOMIC64_OP_RETURN 201#undef ATOMIC64_OP_RETURN
188#undef ATOMIC64_OP 202#undef ATOMIC64_OP
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4275cb..ef93b20bc964 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,12 +44,12 @@
44do { \ 44do { \
45 compiletime_assert_atomic_type(*p); \ 45 compiletime_assert_atomic_type(*p); \
46 barrier(); \ 46 barrier(); \
47 ACCESS_ONCE(*p) = (v); \ 47 WRITE_ONCE(*p, v); \
48} while (0) 48} while (0)
49 49
50#define smp_load_acquire(p) \ 50#define smp_load_acquire(p) \
51({ \ 51({ \
52 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 52 typeof(*p) ___p1 = READ_ONCE(*p); \
53 compiletime_assert_atomic_type(*p); \ 53 compiletime_assert_atomic_type(*p); \
54 barrier(); \ 54 barrier(); \
55 ___p1; \ 55 ___p1; \
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165c2f76..1b5e0e843c3a 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
26 26
27#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE 27#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
28 28
29static __always_inline bool arch_static_branch(struct static_key *key) 29static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
30{ 30{
31 asm goto("1: nop\n\t" 31 asm goto("1: nop\n\t"
32 ".pushsection __jump_table, \"aw\"\n\t" 32 ".pushsection __jump_table, \"aw\"\n\t"
33 ".align 3\n\t" 33 ".align 3\n\t"
34 ".quad 1b, %l[l_yes], %c0\n\t" 34 ".quad 1b, %l[l_yes], %c0\n\t"
35 ".popsection\n\t" 35 ".popsection\n\t"
36 : : "i"(key) : : l_yes); 36 : : "i"(&((char *)key)[branch]) : : l_yes);
37
38 return false;
39l_yes:
40 return true;
41}
42
43static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
44{
45 asm goto("1: b %l[l_yes]\n\t"
46 ".pushsection __jump_table, \"aw\"\n\t"
47 ".align 3\n\t"
48 ".quad 1b, %l[l_yes], %c0\n\t"
49 ".popsection\n\t"
50 : : "i"(&((char *)key)[branch]) : : l_yes);
37 51
38 return false; 52 return false;
39l_yes: 53l_yes:
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7a46db..c2dd1ad3e648 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
28 void *addr = (void *)entry->code; 28 void *addr = (void *)entry->code;
29 u32 insn; 29 u32 insn;
30 30
31 if (type == JUMP_LABEL_ENABLE) { 31 if (type == JUMP_LABEL_JMP) {
32 insn = aarch64_insn_gen_branch_imm(entry->code, 32 insn = aarch64_insn_gen_branch_imm(entry->code,
33 entry->target, 33 entry->target,
34 AARCH64_INSN_BRANCH_NOLINK); 34 AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 2d07ce1c5327..97c9bdf83409 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
44ATOMIC_OP_RETURN(sub, sub, rKs21) 44ATOMIC_OP_RETURN(sub, sub, rKs21)
45ATOMIC_OP_RETURN(add, add, r) 45ATOMIC_OP_RETURN(add, add, r)
46 46
47#define ATOMIC_OP(op, asm_op) \
48ATOMIC_OP_RETURN(op, asm_op, r) \
49static inline void atomic_##op(int i, atomic_t *v) \
50{ \
51 (void)__atomic_##op##_return(i, v); \
52}
53
54ATOMIC_OP(and, and)
55ATOMIC_OP(or, or)
56ATOMIC_OP(xor, eor)
57
58#undef ATOMIC_OP
47#undef ATOMIC_OP_RETURN 59#undef ATOMIC_OP_RETURN
48 60
49/* 61/*
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..1c1c42330c99 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); 18asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
19asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); 19asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
20asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value); 20
21asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value); 21asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
22asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
22asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value); 23asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
23asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); 24asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
24 25
25#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) 26#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
26 27
27#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i) 28#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
28#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i)) 29#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
29 30
30#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m) 31#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
31#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m) 32#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
33#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
32 34
33#endif 35#endif
34 36
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
83EXPORT_SYMBOL(insl_16); 83EXPORT_SYMBOL(insl_16);
84 84
85#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
86EXPORT_SYMBOL(__raw_atomic_update_asm); 86EXPORT_SYMBOL(__raw_atomic_add_asm);
87EXPORT_SYMBOL(__raw_atomic_clear_asm); 87EXPORT_SYMBOL(__raw_atomic_and_asm);
88EXPORT_SYMBOL(__raw_atomic_set_asm); 88EXPORT_SYMBOL(__raw_atomic_or_asm);
89EXPORT_SYMBOL(__raw_atomic_xor_asm); 89EXPORT_SYMBOL(__raw_atomic_xor_asm);
90EXPORT_SYMBOL(__raw_atomic_test_asm); 90EXPORT_SYMBOL(__raw_atomic_test_asm);
91
91EXPORT_SYMBOL(__raw_xchg_1_asm); 92EXPORT_SYMBOL(__raw_xchg_1_asm);
92EXPORT_SYMBOL(__raw_xchg_2_asm); 93EXPORT_SYMBOL(__raw_xchg_2_asm);
93EXPORT_SYMBOL(__raw_xchg_4_asm); 94EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
587 * r0 = ptr 587 * r0 = ptr
588 * r1 = value 588 * r1 = value
589 * 589 *
590 * Add a signed value to a 32bit word and return the new value atomically. 590 * ADD a signed value to a 32bit word and return the new value atomically.
591 * Clobbers: r3:0, p1:0 591 * Clobbers: r3:0, p1:0
592 */ 592 */
593ENTRY(___raw_atomic_update_asm) 593ENTRY(___raw_atomic_add_asm)
594 p1 = r0; 594 p1 = r0;
595 r3 = r1; 595 r3 = r1;
596 [--sp] = rets; 596 [--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
603 r0 = r3; 603 r0 = r3;
604 rets = [sp++]; 604 rets = [sp++];
605 rts; 605 rts;
606ENDPROC(___raw_atomic_update_asm) 606ENDPROC(___raw_atomic_add_asm)
607 607
608/* 608/*
609 * r0 = ptr 609 * r0 = ptr
610 * r1 = mask 610 * r1 = mask
611 * 611 *
612 * Clear the mask bits from a 32bit word and return the old 32bit value 612 * AND the mask bits from a 32bit word and return the old 32bit value
613 * atomically. 613 * atomically.
614 * Clobbers: r3:0, p1:0 614 * Clobbers: r3:0, p1:0
615 */ 615 */
616ENTRY(___raw_atomic_clear_asm) 616ENTRY(___raw_atomic_and_asm)
617 p1 = r0; 617 p1 = r0;
618 r3 = ~r1; 618 r3 = r1;
619 [--sp] = rets; 619 [--sp] = rets;
620 call _get_core_lock; 620 call _get_core_lock;
621 r2 = [p1]; 621 r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
627 r0 = r3; 627 r0 = r3;
628 rets = [sp++]; 628 rets = [sp++];
629 rts; 629 rts;
630ENDPROC(___raw_atomic_clear_asm) 630ENDPROC(___raw_atomic_and_asm)
631 631
632/* 632/*
633 * r0 = ptr 633 * r0 = ptr
634 * r1 = mask 634 * r1 = mask
635 * 635 *
636 * Set the mask bits into a 32bit word and return the old 32bit value 636 * OR the mask bits into a 32bit word and return the old 32bit value
637 * atomically. 637 * atomically.
638 * Clobbers: r3:0, p1:0 638 * Clobbers: r3:0, p1:0
639 */ 639 */
640ENTRY(___raw_atomic_set_asm) 640ENTRY(___raw_atomic_or_asm)
641 p1 = r0; 641 p1 = r0;
642 r3 = r1; 642 r3 = r1;
643 [--sp] = rets; 643 [--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
651 r0 = r3; 651 r0 = r3;
652 rets = [sp++]; 652 rets = [sp++];
653 rts; 653 rts;
654ENDPROC(___raw_atomic_set_asm) 654ENDPROC(___raw_atomic_or_asm)
655 655
656/* 656/*
657 * r0 = ptr 657 * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
787 r2 = r1; 787 r2 = r1;
788 r1 = 1; 788 r1 = 1;
789 r1 <<= r2; 789 r1 <<= r2;
790 jump ___raw_atomic_set_asm 790 jump ___raw_atomic_or_asm
791ENDPROC(___raw_bit_set_asm) 791ENDPROC(___raw_bit_set_asm)
792 792
793/* 793/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
798 * Clobbers: r3:0, p1:0 798 * Clobbers: r3:0, p1:0
799 */ 799 */
800ENTRY(___raw_bit_clear_asm) 800ENTRY(___raw_bit_clear_asm)
801 r2 = r1; 801 r2 = 1;
802 r1 = 1; 802 r2 <<= r1;
803 r1 <<= r2; 803 r1 = ~r2;
804 jump ___raw_atomic_clear_asm 804 jump ___raw_atomic_and_asm
805ENDPROC(___raw_bit_clear_asm) 805ENDPROC(___raw_bit_clear_asm)
806 806
807/* 807/*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
195 local_irq_save(flags); 195 local_irq_save(flags);
196 for_each_cpu(cpu, cpumask) { 196 for_each_cpu(cpu, cpumask) {
197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
198 atomic_set_mask((1 << msg), &bfin_ipi_data->bits); 198 atomic_or((1 << msg), &bfin_ipi_data->bits);
199 atomic_inc(&bfin_ipi_data->count); 199 atomic_inc(&bfin_ipi_data->count);
200 } 200 }
201 local_irq_restore(flags); 201 local_irq_restore(flags);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a61d65..0da689def4cc 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
15#define _ASM_ATOMIC_H 15#define _ASM_ATOMIC_H
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/spr-regs.h>
19#include <asm/cmpxchg.h> 18#include <asm/cmpxchg.h>
20#include <asm/barrier.h> 19#include <asm/barrier.h>
21 20
@@ -23,6 +22,8 @@
23#error not SMP safe 22#error not SMP safe
24#endif 23#endif
25 24
25#include <asm/atomic_defs.h>
26
26/* 27/*
27 * Atomic operations that C can't guarantee us. Useful for 28 * Atomic operations that C can't guarantee us. Useful for
28 * resource counting etc.. 29 * resource counting etc..
@@ -34,56 +35,26 @@
34#define atomic_read(v) ACCESS_ONCE((v)->counter) 35#define atomic_read(v) ACCESS_ONCE((v)->counter)
35#define atomic_set(v, i) (((v)->counter) = (i)) 36#define atomic_set(v, i) (((v)->counter) = (i))
36 37
37#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 38static inline int atomic_inc_return(atomic_t *v)
38static inline int atomic_add_return(int i, atomic_t *v)
39{ 39{
40 unsigned long val; 40 return __atomic_add_return(1, &v->counter);
41}
41 42
42 asm("0: \n" 43static inline int atomic_dec_return(atomic_t *v)
43 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 44{
44 " ckeq icc3,cc7 \n" 45 return __atomic_sub_return(1, &v->counter);
45 " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ 46}
46 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
47 " add%I2 %1,%2,%1 \n"
48 " cst.p %1,%M0 ,cc3,#1 \n"
49 " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
50 " beq icc3,#0,0b \n"
51 : "+U"(v->counter), "=&r"(val)
52 : "NPr"(i)
53 : "memory", "cc7", "cc3", "icc3"
54 );
55 47
56 return val; 48static inline int atomic_add_return(int i, atomic_t *v)
49{
50 return __atomic_add_return(i, &v->counter);
57} 51}
58 52
59static inline int atomic_sub_return(int i, atomic_t *v) 53static inline int atomic_sub_return(int i, atomic_t *v)
60{ 54{
61 unsigned long val; 55 return __atomic_sub_return(i, &v->counter);
62
63 asm("0: \n"
64 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
65 " ckeq icc3,cc7 \n"
66 " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
67 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
68 " sub%I2 %1,%2,%1 \n"
69 " cst.p %1,%M0 ,cc3,#1 \n"
70 " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
71 " beq icc3,#0,0b \n"
72 : "+U"(v->counter), "=&r"(val)
73 : "NPr"(i)
74 : "memory", "cc7", "cc3", "icc3"
75 );
76
77 return val;
78} 56}
79 57
80#else
81
82extern int atomic_add_return(int i, atomic_t *v);
83extern int atomic_sub_return(int i, atomic_t *v);
84
85#endif
86
87static inline int atomic_add_negative(int i, atomic_t *v) 58static inline int atomic_add_negative(int i, atomic_t *v)
88{ 59{
89 return atomic_add_return(i, v) < 0; 60 return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
101 72
102static inline void atomic_inc(atomic_t *v) 73static inline void atomic_inc(atomic_t *v)
103{ 74{
104 atomic_add_return(1, v); 75 atomic_inc_return(v);
105} 76}
106 77
107static inline void atomic_dec(atomic_t *v) 78static inline void atomic_dec(atomic_t *v)
108{ 79{
109 atomic_sub_return(1, v); 80 atomic_dec_return(v);
110} 81}
111 82
112#define atomic_dec_return(v) atomic_sub_return(1, (v))
113#define atomic_inc_return(v) atomic_add_return(1, (v))
114
115#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 83#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
116#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 84#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
117#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) 85#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
120 * 64-bit atomic ops 88 * 64-bit atomic ops
121 */ 89 */
122typedef struct { 90typedef struct {
123 volatile long long counter; 91 long long counter;
124} atomic64_t; 92} atomic64_t;
125 93
126#define ATOMIC64_INIT(i) { (i) } 94#define ATOMIC64_INIT(i) { (i) }
127 95
128static inline long long atomic64_read(atomic64_t *v) 96static inline long long atomic64_read(const atomic64_t *v)
129{ 97{
130 long long counter; 98 long long counter;
131 99
132 asm("ldd%I1 %M1,%0" 100 asm("ldd%I1 %M1,%0"
133 : "=e"(counter) 101 : "=e"(counter)
134 : "m"(v->counter)); 102 : "m"(v->counter));
103
135 return counter; 104 return counter;
136} 105}
137 106
@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
142 : "e"(i)); 111 : "e"(i));
143} 112}
144 113
145extern long long atomic64_inc_return(atomic64_t *v); 114static inline long long atomic64_inc_return(atomic64_t *v)
146extern long long atomic64_dec_return(atomic64_t *v); 115{
147extern long long atomic64_add_return(long long i, atomic64_t *v); 116 return __atomic64_add_return(1, &v->counter);
148extern long long atomic64_sub_return(long long i, atomic64_t *v); 117}
118
119static inline long long atomic64_dec_return(atomic64_t *v)
120{
121 return __atomic64_sub_return(1, &v->counter);
122}
123
124static inline long long atomic64_add_return(long long i, atomic64_t *v)
125{
126 return __atomic64_add_return(i, &v->counter);
127}
128
129static inline long long atomic64_sub_return(long long i, atomic64_t *v)
130{
131 return __atomic64_sub_return(i, &v->counter);
132}
149 133
150static inline long long atomic64_add_negative(long long i, atomic64_t *v) 134static inline long long atomic64_add_negative(long long i, atomic64_t *v)
151{ 135{
@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
176#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 160#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
177#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 161#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
178 162
163
179#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 164#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
180#define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 165#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
181#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) 166#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
196 return c; 181 return c;
197} 182}
198 183
184#define ATOMIC_OP(op) \
185static inline void atomic_##op(int i, atomic_t *v) \
186{ \
187 (void)__atomic32_fetch_##op(i, &v->counter); \
188} \
189 \
190static inline void atomic64_##op(long long i, atomic64_t *v) \
191{ \
192 (void)__atomic64_fetch_##op(i, &v->counter); \
193}
194
195ATOMIC_OP(or)
196ATOMIC_OP(and)
197ATOMIC_OP(xor)
198
199#undef ATOMIC_OP
199 200
200#endif /* _ASM_ATOMIC_H */ 201#endif /* _ASM_ATOMIC_H */
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 000000000000..36e126d2f801
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
1
2#include <asm/spr-regs.h>
3
4#ifdef __ATOMIC_LIB__
5
6#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
7
8#define ATOMIC_QUALS
9#define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x)
10
11#else /* !OUTOFLINE && LIB */
12
13#define ATOMIC_OP_RETURN(op)
14#define ATOMIC_FETCH_OP(op)
15
16#endif /* OUTOFLINE */
17
18#else /* !__ATOMIC_LIB__ */
19
20#define ATOMIC_EXPORT(x)
21
22#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
23
24#define ATOMIC_OP_RETURN(op) \
25extern int __atomic_##op##_return(int i, int *v); \
26extern long long __atomic64_##op##_return(long long i, long long *v);
27
28#define ATOMIC_FETCH_OP(op) \
29extern int __atomic32_fetch_##op(int i, int *v); \
30extern long long __atomic64_fetch_##op(long long i, long long *v);
31
32#else /* !OUTOFLINE && !LIB */
33
34#define ATOMIC_QUALS static inline
35
36#endif /* OUTOFLINE */
37#endif /* __ATOMIC_LIB__ */
38
39
40/*
41 * Note on the 64 bit inline asm variants...
42 *
43 * CSTD is a conditional instruction and needs a constrained memory reference.
44 * Normally 'U' provides the correct constraints for conditional instructions
45 * and this is used for the 32 bit version, however 'U' does not appear to work
46 * for 64 bit values (gcc-4.9)
47 *
48 * The exact constraint is that conditional instructions cannot deal with an
49 * immediate displacement in the memory reference, so what we do is we read the
50 * address through a volatile cast into a local variable in order to insure we
51 * _have_ to compute the correct address without displacement. This allows us
52 * to use the regular 'm' for the memory address.
53 *
54 * Furthermore, the %Ln operand, which prints the low word register (r+1),
55 * really only works for registers, this means we cannot allow immediate values
56 * for the 64 bit versions -- like we do for the 32 bit ones.
57 *
58 */
59
60#ifndef ATOMIC_OP_RETURN
61#define ATOMIC_OP_RETURN(op) \
62ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \
63{ \
64 int val; \
65 \
66 asm volatile( \
67 "0: \n" \
68 " orcc gr0,gr0,gr0,icc3 \n" \
69 " ckeq icc3,cc7 \n" \
70 " ld.p %M0,%1 \n" \
71 " orcr cc7,cc7,cc3 \n" \
72 " "#op"%I2 %1,%2,%1 \n" \
73 " cst.p %1,%M0 ,cc3,#1 \n" \
74 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
75 " beq icc3,#0,0b \n" \
76 : "+U"(*v), "=&r"(val) \
77 : "NPr"(i) \
78 : "memory", "cc7", "cc3", "icc3" \
79 ); \
80 \
81 return val; \
82} \
83ATOMIC_EXPORT(__atomic_##op##_return); \
84 \
85ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \
86{ \
87 long long *__v = READ_ONCE(v); \
88 long long val; \
89 \
90 asm volatile( \
91 "0: \n" \
92 " orcc gr0,gr0,gr0,icc3 \n" \
93 " ckeq icc3,cc7 \n" \
94 " ldd.p %M0,%1 \n" \
95 " orcr cc7,cc7,cc3 \n" \
96 " "#op"cc %L1,%L2,%L1,icc0 \n" \
97 " "#op"x %1,%2,%1,icc0 \n" \
98 " cstd.p %1,%M0 ,cc3,#1 \n" \
99 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
100 " beq icc3,#0,0b \n" \
101 : "+m"(*__v), "=&e"(val) \
102 : "e"(i) \
103 : "memory", "cc7", "cc3", "icc0", "icc3" \
104 ); \
105 \
106 return val; \
107} \
108ATOMIC_EXPORT(__atomic64_##op##_return);
109#endif
110
111#ifndef ATOMIC_FETCH_OP
112#define ATOMIC_FETCH_OP(op) \
113ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \
114{ \
115 int old, tmp; \
116 \
117 asm volatile( \
118 "0: \n" \
119 " orcc gr0,gr0,gr0,icc3 \n" \
120 " ckeq icc3,cc7 \n" \
121 " ld.p %M0,%1 \n" \
122 " orcr cc7,cc7,cc3 \n" \
123 " "#op"%I3 %1,%3,%2 \n" \
124 " cst.p %2,%M0 ,cc3,#1 \n" \
125 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
126 " beq icc3,#0,0b \n" \
127 : "+U"(*v), "=&r"(old), "=r"(tmp) \
128 : "NPr"(i) \
129 : "memory", "cc7", "cc3", "icc3" \
130 ); \
131 \
132 return old; \
133} \
134ATOMIC_EXPORT(__atomic32_fetch_##op); \
135 \
136ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \
137{ \
138 long long *__v = READ_ONCE(v); \
139 long long old, tmp; \
140 \
141 asm volatile( \
142 "0: \n" \
143 " orcc gr0,gr0,gr0,icc3 \n" \
144 " ckeq icc3,cc7 \n" \
145 " ldd.p %M0,%1 \n" \
146 " orcr cc7,cc7,cc3 \n" \
147 " "#op" %L1,%L3,%L2 \n" \
148 " "#op" %1,%3,%2 \n" \
149 " cstd.p %2,%M0 ,cc3,#1 \n" \
150 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
151 " beq icc3,#0,0b \n" \
152 : "+m"(*__v), "=&e"(old), "=e"(tmp) \
153 : "e"(i) \
154 : "memory", "cc7", "cc3", "icc3" \
155 ); \
156 \
157 return old; \
158} \
159ATOMIC_EXPORT(__atomic64_fetch_##op);
160#endif
161
162ATOMIC_FETCH_OP(or)
163ATOMIC_FETCH_OP(and)
164ATOMIC_FETCH_OP(xor)
165
166ATOMIC_OP_RETURN(add)
167ATOMIC_OP_RETURN(sub)
168
169#undef ATOMIC_FETCH_OP
170#undef ATOMIC_OP_RETURN
171#undef ATOMIC_QUALS
172#undef ATOMIC_EXPORT
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220ef131..0df8e95e3715 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
25 25
26#include <asm-generic/bitops/ffz.h> 26#include <asm-generic/bitops/ffz.h>
27 27
28#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 28#include <asm/atomic.h>
29static inline
30unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
31{
32 unsigned long old, tmp;
33
34 asm volatile(
35 "0: \n"
36 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
37 " ckeq icc3,cc7 \n"
38 " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
39 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
40 " and%I3 %1,%3,%2 \n"
41 " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
42 " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
43 " beq icc3,#0,0b \n"
44 : "+U"(*v), "=&r"(old), "=r"(tmp)
45 : "NPr"(~mask)
46 : "memory", "cc7", "cc3", "icc3"
47 );
48
49 return old;
50}
51
52static inline
53unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
54{
55 unsigned long old, tmp;
56
57 asm volatile(
58 "0: \n"
59 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
60 " ckeq icc3,cc7 \n"
61 " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
62 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
63 " or%I3 %1,%3,%2 \n"
64 " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
65 " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
66 " beq icc3,#0,0b \n"
67 : "+U"(*v), "=&r"(old), "=r"(tmp)
68 : "NPr"(mask)
69 : "memory", "cc7", "cc3", "icc3"
70 );
71
72 return old;
73}
74
75static inline
76unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
77{
78 unsigned long old, tmp;
79
80 asm volatile(
81 "0: \n"
82 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
83 " ckeq icc3,cc7 \n"
84 " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
85 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
86 " xor%I3 %1,%3,%2 \n"
87 " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
88 " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
89 " beq icc3,#0,0b \n"
90 : "+U"(*v), "=&r"(old), "=r"(tmp)
91 : "NPr"(mask)
92 : "memory", "cc7", "cc3", "icc3"
93 );
94
95 return old;
96}
97
98#else
99
100extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
101extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
102extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
103
104#endif
105
106#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
107#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
108 29
109static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) 30static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
110{ 31{
111 volatile unsigned long *ptr = addr; 32 unsigned int *ptr = (void *)addr;
112 unsigned long mask = 1UL << (nr & 31); 33 unsigned int mask = 1UL << (nr & 31);
113 ptr += nr >> 5; 34 ptr += nr >> 5;
114 return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; 35 return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
115} 36}
116 37
117static inline int test_and_set_bit(unsigned long nr, volatile void *addr) 38static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
118{ 39{
119 volatile unsigned long *ptr = addr; 40 unsigned int *ptr = (void *)addr;
120 unsigned long mask = 1UL << (nr & 31); 41 unsigned int mask = 1UL << (nr & 31);
121 ptr += nr >> 5; 42 ptr += nr >> 5;
122 return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; 43 return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
123} 44}
124 45
125static inline int test_and_change_bit(unsigned long nr, volatile void *addr) 46static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
126{ 47{
127 volatile unsigned long *ptr = addr; 48 unsigned int *ptr = (void *)addr;
128 unsigned long mask = 1UL << (nr & 31); 49 unsigned int mask = 1UL << (nr & 31);
129 ptr += nr >> 5; 50 ptr += nr >> 5;
130 return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; 51 return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
131} 52}
132 53
133static inline void clear_bit(unsigned long nr, volatile void *addr) 54static inline void clear_bit(unsigned long nr, volatile void *addr)
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e17e57..370dc9fa0b11 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
109 109
110static DEFINE_RWLOCK(frv_dma_channels_lock); 110static DEFINE_RWLOCK(frv_dma_channels_lock);
111 111
112unsigned long frv_dma_inprogress; 112unsigned int frv_dma_inprogress;
113 113
114#define frv_clear_dma_inprogress(channel) \ 114#define frv_clear_dma_inprogress(channel) \
115 atomic_clear_mask(1 << (channel), &frv_dma_inprogress); 115 (void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
116 116
117#define frv_set_dma_inprogress(channel) \ 117#define frv_set_dma_inprogress(channel) \
118 atomic_set_mask(1 << (channel), &frv_dma_inprogress); 118 (void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
119 119
120/*****************************************************************************/ 120/*****************************************************************************/
121/* 121/*
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d96dcd..cdb4ce9960eb 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns);
58EXPORT_SYMBOL(__insl_ns); 58EXPORT_SYMBOL(__insl_ns);
59 59
60#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 60#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
61EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
62EXPORT_SYMBOL(atomic_test_and_OR_mask);
63EXPORT_SYMBOL(atomic_test_and_XOR_mask);
64EXPORT_SYMBOL(atomic_add_return);
65EXPORT_SYMBOL(atomic_sub_return);
66EXPORT_SYMBOL(__xchg_32); 61EXPORT_SYMBOL(__xchg_32);
67EXPORT_SYMBOL(__cmpxchg_32); 62EXPORT_SYMBOL(__cmpxchg_32);
68#endif 63#endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1e6b16..970e8b4f1a02 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
5lib-y := \ 5lib-y := \
6 __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ 6 __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
7 checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ 7 checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
8 outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o 8 outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 000000000000..4d1b887c248b
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
1
2#include <linux/export.h>
3#include <asm/atomic.h>
4
5#define __ATOMIC_LIB__
6
7#include <asm/atomic_defs.h>
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab5dd0e..b7439a960b5b 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
19 19
20############################################################################### 20###############################################################################
21# 21#
22# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
23#
24###############################################################################
25 .globl atomic_test_and_ANDNOT_mask
26 .type atomic_test_and_ANDNOT_mask,@function
27atomic_test_and_ANDNOT_mask:
28 not.p gr8,gr10
290:
30 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
31 ckeq icc3,cc7
32 ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
33 orcr cc7,cc7,cc3 /* set CC3 to true */
34 and gr8,gr10,gr11
35 cst.p gr11,@(gr9,gr0) ,cc3,#1
36 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
37 beq icc3,#0,0b
38 bralr
39
40 .size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
41
42###############################################################################
43#
44# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
45#
46###############################################################################
47 .globl atomic_test_and_OR_mask
48 .type atomic_test_and_OR_mask,@function
49atomic_test_and_OR_mask:
50 or.p gr8,gr8,gr10
510:
52 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
53 ckeq icc3,cc7
54 ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
55 orcr cc7,cc7,cc3 /* set CC3 to true */
56 or gr8,gr10,gr11
57 cst.p gr11,@(gr9,gr0) ,cc3,#1
58 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
59 beq icc3,#0,0b
60 bralr
61
62 .size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
63
64###############################################################################
65#
66# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
67#
68###############################################################################
69 .globl atomic_test_and_XOR_mask
70 .type atomic_test_and_XOR_mask,@function
71atomic_test_and_XOR_mask:
72 or.p gr8,gr8,gr10
730:
74 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
75 ckeq icc3,cc7
76 ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
77 orcr cc7,cc7,cc3 /* set CC3 to true */
78 xor gr8,gr10,gr11
79 cst.p gr11,@(gr9,gr0) ,cc3,#1
80 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
81 beq icc3,#0,0b
82 bralr
83
84 .size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
85
86###############################################################################
87#
88# int atomic_add_return(int i, atomic_t *v)
89#
90###############################################################################
91 .globl atomic_add_return
92 .type atomic_add_return,@function
93atomic_add_return:
94 or.p gr8,gr8,gr10
950:
96 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
97 ckeq icc3,cc7
98 ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
99 orcr cc7,cc7,cc3 /* set CC3 to true */
100 add gr8,gr10,gr8
101 cst.p gr8,@(gr9,gr0) ,cc3,#1
102 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
103 beq icc3,#0,0b
104 bralr
105
106 .size atomic_add_return, .-atomic_add_return
107
108###############################################################################
109#
110# int atomic_sub_return(int i, atomic_t *v)
111#
112###############################################################################
113 .globl atomic_sub_return
114 .type atomic_sub_return,@function
115atomic_sub_return:
116 or.p gr8,gr8,gr10
1170:
118 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
119 ckeq icc3,cc7
120 ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
121 orcr cc7,cc7,cc3 /* set CC3 to true */
122 sub gr8,gr10,gr8
123 cst.p gr8,@(gr9,gr0) ,cc3,#1
124 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
125 beq icc3,#0,0b
126 bralr
127
128 .size atomic_sub_return, .-atomic_sub_return
129
130###############################################################################
131#
132# uint32_t __xchg_32(uint32_t i, uint32_t *v) 22# uint32_t __xchg_32(uint32_t i, uint32_t *v)
133# 23#
134############################################################################### 24###############################################################################
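For readers unfamiliar with FR-V, every routine deleted above follows the same pattern: load the word with LD.P, compute the new value, conditionally store it with CST.P, and retry if the store did not happen (ICC3.Z stays set). In C terms the removed atomic_add_return was roughly equivalent to the following sketch (illustration only, not part of the patch):

/* Sketch of the retry loop implemented by the deleted assembly. */
static inline int frv_atomic_add_return_sketch(int i, int *v)
{
        int old, new;

        do {
                old = READ_ONCE(*v);            /* ld.p                      */
                new = old + i;                  /* add                       */
        } while (cmpxchg(v, old, new) != old);  /* cst.p; loop again if the
                                                 * conditional store failed  */
        return new;
}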
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194eeac127..c4c472308a33 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
20 20
21############################################################################### 21###############################################################################
22# 22#
23# long long atomic64_inc_return(atomic64_t *v)
24#
25###############################################################################
26 .globl atomic64_inc_return
27 .type atomic64_inc_return,@function
28atomic64_inc_return:
29 or.p gr8,gr8,gr10
300:
31 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
32 ckeq icc3,cc7
33 ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
34 orcr cc7,cc7,cc3 /* set CC3 to true */
35 addicc gr9,#1,gr9,icc0
36 addxi gr8,#0,gr8,icc0
37 cstd.p gr8,@(gr10,gr0) ,cc3,#1
38 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
39 beq icc3,#0,0b
40 bralr
41
42 .size atomic64_inc_return, .-atomic64_inc_return
43
44###############################################################################
45#
46# long long atomic64_dec_return(atomic64_t *v)
47#
48###############################################################################
49 .globl atomic64_dec_return
50 .type atomic64_dec_return,@function
51atomic64_dec_return:
52 or.p gr8,gr8,gr10
530:
54 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
55 ckeq icc3,cc7
56 ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
57 orcr cc7,cc7,cc3 /* set CC3 to true */
58 subicc gr9,#1,gr9,icc0
59 subxi gr8,#0,gr8,icc0
60 cstd.p gr8,@(gr10,gr0) ,cc3,#1
61 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
62 beq icc3,#0,0b
63 bralr
64
65 .size atomic64_dec_return, .-atomic64_dec_return
66
67###############################################################################
68#
69# long long atomic64_add_return(long long i, atomic64_t *v)
70#
71###############################################################################
72 .globl atomic64_add_return
73 .type atomic64_add_return,@function
74atomic64_add_return:
75 or.p gr8,gr8,gr4
76 or gr9,gr9,gr5
770:
78 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
79 ckeq icc3,cc7
80 ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
81 orcr cc7,cc7,cc3 /* set CC3 to true */
82 addcc gr9,gr5,gr9,icc0
83 addx gr8,gr4,gr8,icc0
84 cstd.p gr8,@(gr10,gr0) ,cc3,#1
85 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
86 beq icc3,#0,0b
87 bralr
88
89 .size atomic64_add_return, .-atomic64_add_return
90
91###############################################################################
92#
93# long long atomic64_sub_return(long long i, atomic64_t *v)
94#
95###############################################################################
96 .globl atomic64_sub_return
97 .type atomic64_sub_return,@function
98atomic64_sub_return:
99 or.p gr8,gr8,gr4
100 or gr9,gr9,gr5
1010:
102 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
103 ckeq icc3,cc7
104 ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
105 orcr cc7,cc7,cc3 /* set CC3 to true */
106 subcc gr9,gr5,gr9,icc0
107 subx gr8,gr4,gr8,icc0
108 cstd.p gr8,@(gr10,gr0) ,cc3,#1
109 corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
110 beq icc3,#0,0b
111 bralr
112
113 .size atomic64_sub_return, .-atomic64_sub_return
114
115###############################################################################
116#
117# uint64_t __xchg_64(uint64_t i, uint64_t *v) 23# uint64_t __xchg_64(uint64_t i, uint64_t *v)
118# 24#
119############################################################################### 25###############################################################################
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8546cc..702ee539f87d 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,52 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18 18
19static inline int atomic_add_return(int i, atomic_t *v) 19#define ATOMIC_OP_RETURN(op, c_op) \
20{ 20static inline int atomic_##op##_return(int i, atomic_t *v) \
21 h8300flags flags; 21{ \
22 int ret; 22 h8300flags flags; \
23 23 int ret; \
24 flags = arch_local_irq_save(); 24 \
25 ret = v->counter += i; 25 flags = arch_local_irq_save(); \
26 arch_local_irq_restore(flags); 26 ret = v->counter c_op i; \
27 return ret; 27 arch_local_irq_restore(flags); \
28 return ret; \
28} 29}
29 30
30#define atomic_add(i, v) atomic_add_return(i, v) 31#define ATOMIC_OP(op, c_op) \
31#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 32static inline void atomic_##op(int i, atomic_t *v) \
32 33{ \
33static inline int atomic_sub_return(int i, atomic_t *v) 34 h8300flags flags; \
34{ 35 \
35 h8300flags flags; 36 flags = arch_local_irq_save(); \
36 int ret; 37 v->counter c_op i; \
37 38 arch_local_irq_restore(flags); \
38 flags = arch_local_irq_save();
39 ret = v->counter -= i;
40 arch_local_irq_restore(flags);
41 return ret;
42} 39}
43 40
44#define atomic_sub(i, v) atomic_sub_return(i, v) 41ATOMIC_OP_RETURN(add, +=)
45#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 42ATOMIC_OP_RETURN(sub, -=)
46 43
47static inline int atomic_inc_return(atomic_t *v) 44ATOMIC_OP(and, &=)
48{ 45ATOMIC_OP(or, |=)
49 h8300flags flags; 46ATOMIC_OP(xor, ^=)
50 int ret;
51 47
52 flags = arch_local_irq_save(); 48#undef ATOMIC_OP_RETURN
53 v->counter++; 49#undef ATOMIC_OP
54 ret = v->counter;
55 arch_local_irq_restore(flags);
56 return ret;
57}
58
59#define atomic_inc(v) atomic_inc_return(v)
60 50
61/* 51#define atomic_add(i, v) (void)atomic_add_return(i, v)
62 * atomic_inc_and_test - increment and test 52#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
63 * @v: pointer of type atomic_t
64 *
65 * Atomically increments @v by 1
66 * and returns true if the result is zero, or false for all
67 * other cases.
68 */
69#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
70 53
71static inline int atomic_dec_return(atomic_t *v) 54#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
72{ 55#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
73 h8300flags flags;
74 int ret;
75 56
76 flags = arch_local_irq_save(); 57#define atomic_inc_return(v) atomic_add_return(1, v)
77 --v->counter; 58#define atomic_dec_return(v) atomic_sub_return(1, v)
78 ret = v->counter;
79 arch_local_irq_restore(flags);
80 return ret;
81}
82 59
83#define atomic_dec(v) atomic_dec_return(v) 60#define atomic_inc(v) (void)atomic_inc_return(v)
61#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
84 62
85static inline int atomic_dec_and_test(atomic_t *v) 63#define atomic_dec(v) (void)atomic_dec_return(v)
86{ 64#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
87 h8300flags flags;
88 int ret;
89
90 flags = arch_local_irq_save();
91 --v->counter;
92 ret = v->counter;
93 arch_local_irq_restore(flags);
94 return ret == 0;
95}
96 65
97static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 66static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
98{ 67{
@@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
120 return ret; 89 return ret;
121} 90}
122 91
123static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
124{
125 unsigned char ccr;
126 unsigned long tmp;
127
128 __asm__ __volatile__("stc ccr,%w3\n\t"
129 "orc #0x80,ccr\n\t"
130 "mov.l %0,%1\n\t"
131 "and.l %2,%1\n\t"
132 "mov.l %1,%0\n\t"
133 "ldc %w3,ccr"
134 : "=m"(*v), "=r"(tmp)
135 : "g"(~(mask)), "r"(ccr));
136}
137
138static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
139{
140 unsigned char ccr;
141 unsigned long tmp;
142
143 __asm__ __volatile__("stc ccr,%w3\n\t"
144 "orc #0x80,ccr\n\t"
145 "mov.l %0,%1\n\t"
146 "or.l %2,%1\n\t"
147 "mov.l %1,%0\n\t"
148 "ldc %w3,ccr"
149 : "=m"(*v), "=r"(tmp)
150 : "g"(~(mask)), "r"(ccr));
151}
152
153/* Atomic operations are already serializing */
154#define smp_mb__before_atomic_dec() barrier()
155#define smp_mb__after_atomic_dec() barrier()
156#define smp_mb__before_atomic_inc() barrier()
157#define smp_mb__after_atomic_inc() barrier()
158
159#endif /* __ARCH_H8300_ATOMIC __ */ 92#endif /* __ARCH_H8300_ATOMIC __ */
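The h8300 conversion is a pure refactor: the ATOMIC_OP_RETURN()/ATOMIC_OP() templates regenerate the open-coded functions they replace, now also covering the new and/or/xor ops. For example, ATOMIC_OP_RETURN(add, +=) above expands to exactly the function that was removed:

static inline int atomic_add_return(int i, atomic_t *v)
{
        h8300flags flags;
        int ret;

        flags = arch_local_irq_save();
        ret = v->counter += i;
        arch_local_irq_restore(flags);
        return ret;
}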
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 93d07025f183..811d61f6422d 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
132ATOMIC_OPS(add) 132ATOMIC_OPS(add)
133ATOMIC_OPS(sub) 133ATOMIC_OPS(sub)
134 134
135ATOMIC_OP(and)
136ATOMIC_OP(or)
137ATOMIC_OP(xor)
138
135#undef ATOMIC_OPS 139#undef ATOMIC_OPS
136#undef ATOMIC_OP_RETURN 140#undef ATOMIC_OP_RETURN
137#undef ATOMIC_OP 141#undef ATOMIC_OP
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf03501fe5c..be4beeb77d57 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \
45ATOMIC_OP(add, +) 45ATOMIC_OP(add, +)
46ATOMIC_OP(sub, -) 46ATOMIC_OP(sub, -)
47 47
48#undef ATOMIC_OP
49
50#define atomic_add_return(i,v) \ 48#define atomic_add_return(i,v) \
51({ \ 49({ \
52 int __ia64_aar_i = (i); \ 50 int __ia64_aar_i = (i); \
@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -)
71 : ia64_atomic_sub(__ia64_asr_i, v); \ 69 : ia64_atomic_sub(__ia64_asr_i, v); \
72}) 70})
73 71
72ATOMIC_OP(and, &)
73ATOMIC_OP(or, |)
74ATOMIC_OP(xor, ^)
75
76#define atomic_and(i,v) (void)ia64_atomic_and(i,v)
77#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
78#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v)
79
80#undef ATOMIC_OP
81
74#define ATOMIC64_OP(op, c_op) \ 82#define ATOMIC64_OP(op, c_op) \
75static __inline__ long \ 83static __inline__ long \
76ia64_atomic64_##op (__s64 i, atomic64_t *v) \ 84ia64_atomic64_##op (__s64 i, atomic64_t *v) \
@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
89ATOMIC64_OP(add, +) 97ATOMIC64_OP(add, +)
90ATOMIC64_OP(sub, -) 98ATOMIC64_OP(sub, -)
91 99
92#undef ATOMIC64_OP
93
94#define atomic64_add_return(i,v) \ 100#define atomic64_add_return(i,v) \
95({ \ 101({ \
96 long __ia64_aar_i = (i); \ 102 long __ia64_aar_i = (i); \
@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -)
115 : ia64_atomic64_sub(__ia64_asr_i, v); \ 121 : ia64_atomic64_sub(__ia64_asr_i, v); \
116}) 122})
117 123
124ATOMIC64_OP(and, &)
125ATOMIC64_OP(or, |)
126ATOMIC64_OP(xor, ^)
127
128#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v)
129#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v)
130#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v)
131
132#undef ATOMIC64_OP
133
118#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 134#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
119#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 135#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
120 136
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba435e43b..df896a1c41d3 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
66do { \ 66do { \
67 compiletime_assert_atomic_type(*p); \ 67 compiletime_assert_atomic_type(*p); \
68 barrier(); \ 68 barrier(); \
69 ACCESS_ONCE(*p) = (v); \ 69 WRITE_ONCE(*p, v); \
70} while (0) 70} while (0)
71 71
72#define smp_load_acquire(p) \ 72#define smp_load_acquire(p) \
73({ \ 73({ \
74 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 74 typeof(*p) ___p1 = READ_ONCE(*p); \
75 compiletime_assert_atomic_type(*p); \ 75 compiletime_assert_atomic_type(*p); \
76 barrier(); \ 76 barrier(); \
77 ___p1; \ 77 ___p1; \
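The ACCESS_ONCE() -> WRITE_ONCE()/READ_ONCE() change here (and in the other barrier.h hunks below) leaves the acquire/release semantics untouched; it only switches to the newer accessors, which unlike ACCESS_ONCE() also behave correctly on non-scalar types. As a reminder, the pairing these macros provide looks like this (hypothetical producer/consumer, not from this patch):

static struct { int payload; } msg;
static int ready;

static void producer(void)
{
        msg.payload = 42;
        smp_store_release(&ready, 1);   /* msg.payload is published first */
}

static int consumer(void)
{
        if (!smp_load_acquire(&ready))  /* pairs with the release above   */
                return 0;
        return msg.payload;             /* guaranteed to observe 42       */
}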
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74adba08..025e2a170493 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
94ATOMIC_OPS(add) 94ATOMIC_OPS(add)
95ATOMIC_OPS(sub) 95ATOMIC_OPS(sub)
96 96
97ATOMIC_OP(and)
98ATOMIC_OP(or)
99ATOMIC_OP(xor)
100
97#undef ATOMIC_OPS 101#undef ATOMIC_OPS
98#undef ATOMIC_OP_RETURN 102#undef ATOMIC_OP_RETURN
99#undef ATOMIC_OP 103#undef ATOMIC_OP
@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
239 return c; 243 return c;
240} 244}
241 245
242
243static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
244{
245 unsigned long flags;
246 unsigned long tmp;
247
248 local_irq_save(flags);
249 __asm__ __volatile__ (
250 "# atomic_clear_mask \n\t"
251 DCACHE_CLEAR("%0", "r5", "%1")
252 M32R_LOCK" %0, @%1; \n\t"
253 "and %0, %2; \n\t"
254 M32R_UNLOCK" %0, @%1; \n\t"
255 : "=&r" (tmp)
256 : "r" (addr), "r" (~mask)
257 : "memory"
258 __ATOMIC_CLOBBER
259 );
260 local_irq_restore(flags);
261}
262
263static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
264{
265 unsigned long flags;
266 unsigned long tmp;
267
268 local_irq_save(flags);
269 __asm__ __volatile__ (
270 "# atomic_set_mask \n\t"
271 DCACHE_CLEAR("%0", "r5", "%1")
272 M32R_LOCK" %0, @%1; \n\t"
273 "or %0, %2; \n\t"
274 M32R_UNLOCK" %0, @%1; \n\t"
275 : "=&r" (tmp)
276 : "r" (addr), "r" (mask)
277 : "memory"
278 __ATOMIC_CLOBBER
279 );
280 local_irq_restore(flags);
281}
282
283#endif /* _ASM_M32R_ATOMIC_H */ 246#endif /* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc74ef9a..62d6961e7f2b 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
156 cpumask_clear_cpu(smp_processor_id(), &cpumask); 156 cpumask_clear_cpu(smp_processor_id(), &cpumask);
157 spin_lock(&flushcache_lock); 157 spin_lock(&flushcache_lock);
158 mask=cpumask_bits(&cpumask); 158 mask=cpumask_bits(&cpumask);
159 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 159 atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
160 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); 160 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
161 _flush_cache_copyback_all(); 161 _flush_cache_copyback_all();
162 while (flushcache_cpumask) 162 while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
407 flush_vma = vma; 407 flush_vma = vma;
408 flush_va = va; 408 flush_va = va;
409 mask=cpumask_bits(&cpumask); 409 mask=cpumask_bits(&cpumask);
410 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); 410 atomic_or(*mask, (atomic_t *)&flush_cpumask);
411 411
412 /* 412 /*
413 * We have to send the IPI only to 413 * We have to send the IPI only to
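The m32r call sites above show the mechanical conversion rule used throughout this series: atomic_set_mask(mask, v) becomes atomic_or(mask, v) and atomic_clear_mask(mask, v) becomes atomic_andnot(mask, v), with a cast to atomic_t * where the old API accepted a plain word. A sketch of the equivalence (helper names are hypothetical):

static void set_bits_sketch(unsigned int mask, atomic_t *v)
{
        /* was: atomic_set_mask(mask, v); */
        atomic_or(mask, v);             /* v->counter |= mask, atomically  */
}

static void clear_bits_sketch(unsigned int mask, atomic_t *v)
{
        /* was: atomic_clear_mask(mask, v); */
        atomic_andnot(mask, v);         /* v->counter &= ~mask, atomically */
}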
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e85f047fb072..039fac120cc0 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
77ATOMIC_OPS(add, +=, add) 77ATOMIC_OPS(add, +=, add)
78ATOMIC_OPS(sub, -=, sub) 78ATOMIC_OPS(sub, -=, sub)
79 79
80ATOMIC_OP(and, &=, and)
81ATOMIC_OP(or, |=, or)
82ATOMIC_OP(xor, ^=, eor)
83
80#undef ATOMIC_OPS 84#undef ATOMIC_OPS
81#undef ATOMIC_OP_RETURN 85#undef ATOMIC_OP_RETURN
82#undef ATOMIC_OP 86#undef ATOMIC_OP
@@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
170 return c != 0; 174 return c != 0;
171} 175}
172 176
173static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
174{
175 __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
176}
177
178static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
179{
180 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
181}
182
183static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 177static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
184{ 178{
185 int c, old; 179 int c, old;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 948d8688643c..21c4c268b86c 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
74ATOMIC_OPS(add) 74ATOMIC_OPS(add)
75ATOMIC_OPS(sub) 75ATOMIC_OPS(sub)
76 76
77ATOMIC_OP(and)
78ATOMIC_OP(or)
79ATOMIC_OP(xor)
80
77#undef ATOMIC_OPS 81#undef ATOMIC_OPS
78#undef ATOMIC_OP_RETURN 82#undef ATOMIC_OP_RETURN
79#undef ATOMIC_OP 83#undef ATOMIC_OP
80 84
81static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
82{
83 int temp;
84
85 asm volatile (
86 "1: LNKGETD %0, [%1]\n"
87 " AND %0, %0, %2\n"
88 " LNKSETD [%1] %0\n"
89 " DEFR %0, TXSTAT\n"
90 " ANDT %0, %0, #HI(0x3f000000)\n"
91 " CMPT %0, #HI(0x02000000)\n"
92 " BNZ 1b\n"
93 : "=&d" (temp)
94 : "da" (&v->counter), "bd" (~mask)
95 : "cc");
96}
97
98static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
99{
100 int temp;
101
102 asm volatile (
103 "1: LNKGETD %0, [%1]\n"
104 " OR %0, %0, %2\n"
105 " LNKSETD [%1], %0\n"
106 " DEFR %0, TXSTAT\n"
107 " ANDT %0, %0, #HI(0x3f000000)\n"
108 " CMPT %0, #HI(0x02000000)\n"
109 " BNZ 1b\n"
110 : "=&d" (temp)
111 : "da" (&v->counter), "bd" (mask)
112 : "cc");
113}
114
115static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 85static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
116{ 86{
117 int result, temp; 87 int result, temp;
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f5d5898c1020..f8efe380fe8b 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
68 68
69ATOMIC_OPS(add, +=) 69ATOMIC_OPS(add, +=)
70ATOMIC_OPS(sub, -=) 70ATOMIC_OPS(sub, -=)
71ATOMIC_OP(and, &=)
72ATOMIC_OP(or, |=)
73ATOMIC_OP(xor, ^=)
71 74
72#undef ATOMIC_OPS 75#undef ATOMIC_OPS
73#undef ATOMIC_OP_RETURN 76#undef ATOMIC_OP_RETURN
74#undef ATOMIC_OP 77#undef ATOMIC_OP
75 78
76static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
77{
78 unsigned long flags;
79
80 __global_lock1(flags);
81 fence();
82 v->counter &= ~mask;
83 __global_unlock1(flags);
84}
85
86static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
87{
88 unsigned long flags;
89
90 __global_lock1(flags);
91 fence();
92 v->counter |= mask;
93 __global_unlock1(flags);
94}
95
96static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 79static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
97{ 80{
98 int ret; 81 int ret;
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5a696e507930..172b7e5efc53 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,12 +90,12 @@ static inline void fence(void)
90do { \ 90do { \
91 compiletime_assert_atomic_type(*p); \ 91 compiletime_assert_atomic_type(*p); \
92 smp_mb(); \ 92 smp_mb(); \
93 ACCESS_ONCE(*p) = (v); \ 93 WRITE_ONCE(*p, v); \
94} while (0) 94} while (0)
95 95
96#define smp_load_acquire(p) \ 96#define smp_load_acquire(p) \
97({ \ 97({ \
98 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 98 typeof(*p) ___p1 = READ_ONCE(*p); \
99 compiletime_assert_atomic_type(*p); \ 99 compiletime_assert_atomic_type(*p); \
100 smp_mb(); \ 100 smp_mb(); \
101 ___p1; \ 101 ___p1; \
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 26d436336f2e..4c42fd9af777 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
137ATOMIC_OPS(add, +=, addu) 137ATOMIC_OPS(add, +=, addu)
138ATOMIC_OPS(sub, -=, subu) 138ATOMIC_OPS(sub, -=, subu)
139 139
140ATOMIC_OP(and, &=, and)
141ATOMIC_OP(or, |=, or)
142ATOMIC_OP(xor, ^=, xor)
143
140#undef ATOMIC_OPS 144#undef ATOMIC_OPS
141#undef ATOMIC_OP_RETURN 145#undef ATOMIC_OP_RETURN
142#undef ATOMIC_OP 146#undef ATOMIC_OP
@@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
416 420
417ATOMIC64_OPS(add, +=, daddu) 421ATOMIC64_OPS(add, +=, daddu)
418ATOMIC64_OPS(sub, -=, dsubu) 422ATOMIC64_OPS(sub, -=, dsubu)
423ATOMIC64_OP(and, &=, and)
424ATOMIC64_OP(or, |=, or)
425ATOMIC64_OP(xor, ^=, xor)
419 426
420#undef ATOMIC64_OPS 427#undef ATOMIC64_OPS
421#undef ATOMIC64_OP_RETURN 428#undef ATOMIC64_OP_RETURN
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 7ecba84656d4..752e0b86c171 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,12 +133,12 @@
133do { \ 133do { \
134 compiletime_assert_atomic_type(*p); \ 134 compiletime_assert_atomic_type(*p); \
135 smp_mb(); \ 135 smp_mb(); \
136 ACCESS_ONCE(*p) = (v); \ 136 WRITE_ONCE(*p, v); \
137} while (0) 137} while (0)
138 138
139#define smp_load_acquire(p) \ 139#define smp_load_acquire(p) \
140({ \ 140({ \
141 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 141 typeof(*p) ___p1 = READ_ONCE(*p); \
142 compiletime_assert_atomic_type(*p); \ 142 compiletime_assert_atomic_type(*p); \
143 smp_mb(); \ 143 smp_mb(); \
144 ___p1; \ 144 ___p1; \
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 608aa57799c8..e77672539e8e 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -26,14 +26,29 @@
26#define NOP_INSN "nop" 26#define NOP_INSN "nop"
27#endif 27#endif
28 28
29static __always_inline bool arch_static_branch(struct static_key *key) 29static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
30{ 30{
31 asm_volatile_goto("1:\t" NOP_INSN "\n\t" 31 asm_volatile_goto("1:\t" NOP_INSN "\n\t"
32 "nop\n\t" 32 "nop\n\t"
33 ".pushsection __jump_table, \"aw\"\n\t" 33 ".pushsection __jump_table, \"aw\"\n\t"
34 WORD_INSN " 1b, %l[l_yes], %0\n\t" 34 WORD_INSN " 1b, %l[l_yes], %0\n\t"
35 ".popsection\n\t" 35 ".popsection\n\t"
36 : : "i" (key) : : l_yes); 36 : : "i" (&((char *)key)[branch]) : : l_yes);
37
38 return false;
39l_yes:
40 return true;
41}
42
43static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
44{
45 asm_volatile_goto("1:\tj %l[l_yes]\n\t"
46 "nop\n\t"
47 ".pushsection __jump_table, \"aw\"\n\t"
48 WORD_INSN " 1b, %l[l_yes], %0\n\t"
49 ".popsection\n\t"
50 : : "i" (&((char *)key)[branch]) : : l_yes);
51
37 return false; 52 return false;
38l_yes: 53l_yes:
39 return true; 54 return true;
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index dda800e9e731..3e586daa3a32 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@ void arch_jump_label_transform(struct jump_entry *e,
51 /* Target must have the right alignment and ISA must be preserved. */ 51 /* Target must have the right alignment and ISA must be preserved. */
52 BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); 52 BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
53 53
54 if (type == JUMP_LABEL_ENABLE) { 54 if (type == JUMP_LABEL_JMP) {
55 insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; 55 insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
56 insn.j_format.target = e->target >> J_RANGE_SHIFT; 56 insn.j_format.target = e->target >> J_RANGE_SHIFT;
57 } else { 57 } else {
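The JUMP_LABEL_ENABLE -> JUMP_LABEL_JMP rename reflects that the type argument now names the instruction to patch in rather than an enable/disable state; the generic enum is roughly:

enum jump_label_type {
        JUMP_LABEL_NOP = 0,     /* patch site becomes a no-op              */
        JUMP_LABEL_JMP,         /* patch site becomes a jump to the target */
};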
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e83e70..375e59140c9c 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
89ATOMIC_OPS(add) 89ATOMIC_OPS(add)
90ATOMIC_OPS(sub) 90ATOMIC_OPS(sub)
91 91
92ATOMIC_OP(and)
93ATOMIC_OP(or)
94ATOMIC_OP(xor)
95
92#undef ATOMIC_OPS 96#undef ATOMIC_OPS
93#undef ATOMIC_OP_RETURN 97#undef ATOMIC_OP_RETURN
94#undef ATOMIC_OP 98#undef ATOMIC_OP
@@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v)
127#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 131#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
128#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 132#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
129 133
130/**
131 * atomic_clear_mask - Atomically clear bits in memory
132 * @mask: Mask of the bits to be cleared
133 * @v: pointer to word in memory
134 *
135 * Atomically clears the bits set in mask from the memory word specified.
136 */
137static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
138{
139#ifdef CONFIG_SMP
140 int status;
141
142 asm volatile(
143 "1: mov %3,(_AAR,%2) \n"
144 " mov (_ADR,%2),%0 \n"
145 " and %4,%0 \n"
146 " mov %0,(_ADR,%2) \n"
147 " mov (_ADR,%2),%0 \n" /* flush */
148 " mov (_ASR,%2),%0 \n"
149 " or %0,%0 \n"
150 " bne 1b \n"
151 : "=&r"(status), "=m"(*addr)
152 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
153 : "memory", "cc");
154#else
155 unsigned long flags;
156
157 mask = ~mask;
158 flags = arch_local_cli_save();
159 *addr &= mask;
160 arch_local_irq_restore(flags);
161#endif
162}
163
164/**
165 * atomic_set_mask - Atomically set bits in memory
166 * @mask: Mask of the bits to be set
167 * @v: pointer to word in memory
168 *
169 * Atomically sets the bits set in mask from the memory word specified.
170 */
171static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
172{
173#ifdef CONFIG_SMP
174 int status;
175
176 asm volatile(
177 "1: mov %3,(_AAR,%2) \n"
178 " mov (_ADR,%2),%0 \n"
179 " or %4,%0 \n"
180 " mov %0,(_ADR,%2) \n"
181 " mov (_ADR,%2),%0 \n" /* flush */
182 " mov (_ASR,%2),%0 \n"
183 " or %0,%0 \n"
184 " bne 1b \n"
185 : "=&r"(status), "=m"(*addr)
186 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
187 : "memory", "cc");
188#else
189 unsigned long flags;
190
191 flags = arch_local_cli_save();
192 *addr |= mask;
193 arch_local_irq_restore(flags);
194#endif
195}
196
197#endif /* __KERNEL__ */ 134#endif /* __KERNEL__ */
198#endif /* CONFIG_SMP */ 135#endif /* CONFIG_SMP */
199#endif /* _ASM_ATOMIC_H */ 136#endif /* _ASM_ATOMIC_H */
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef722bfa..9a39ea9031d4 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
119 flush_mm = mm; 119 flush_mm = mm;
120 flush_va = va; 120 flush_va = va;
121#if NR_CPUS <= BITS_PER_LONG 121#if NR_CPUS <= BITS_PER_LONG
122 atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]); 122 atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
123#else 123#else
124#error Not supported. 124#error Not supported.
125#endif 125#endif
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 269c23d23fcb..1a8f6f95689e 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -242,7 +242,6 @@ CONFIG_LOCKUP_DETECTOR=y
242CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y 242CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
243CONFIG_PANIC_ON_OOPS=y 243CONFIG_PANIC_ON_OOPS=y
244CONFIG_DEBUG_RT_MUTEXES=y 244CONFIG_DEBUG_RT_MUTEXES=y
245CONFIG_RT_MUTEX_TESTER=y
246CONFIG_PROVE_RCU_DELAY=y 245CONFIG_PROVE_RCU_DELAY=y
247CONFIG_DEBUG_BLOCK_EXT_DEVT=y 246CONFIG_DEBUG_BLOCK_EXT_DEVT=y
248CONFIG_LATENCYTOP=y 247CONFIG_LATENCYTOP=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 33b148f825ba..0ffb08ff5125 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -295,7 +295,6 @@ CONFIG_DEBUG_SHIRQ=y
295CONFIG_DETECT_HUNG_TASK=y 295CONFIG_DETECT_HUNG_TASK=y
296CONFIG_TIMER_STATS=y 296CONFIG_TIMER_STATS=y
297CONFIG_DEBUG_RT_MUTEXES=y 297CONFIG_DEBUG_RT_MUTEXES=y
298CONFIG_RT_MUTEX_TESTER=y
299CONFIG_DEBUG_SPINLOCK=y 298CONFIG_DEBUG_SPINLOCK=y
300CONFIG_DEBUG_MUTEXES=y 299CONFIG_DEBUG_MUTEXES=y
301CONFIG_RCU_CPU_STALL_INFO=y 300CONFIG_RCU_CPU_STALL_INFO=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 226f8ca993f6..2536965d00ea 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
126ATOMIC_OPS(add, +=) 126ATOMIC_OPS(add, +=)
127ATOMIC_OPS(sub, -=) 127ATOMIC_OPS(sub, -=)
128 128
129ATOMIC_OP(and, &=)
130ATOMIC_OP(or, |=)
131ATOMIC_OP(xor, ^=)
132
129#undef ATOMIC_OPS 133#undef ATOMIC_OPS
130#undef ATOMIC_OP_RETURN 134#undef ATOMIC_OP_RETURN
131#undef ATOMIC_OP 135#undef ATOMIC_OP
@@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
185 189
186ATOMIC64_OPS(add, +=) 190ATOMIC64_OPS(add, +=)
187ATOMIC64_OPS(sub, -=) 191ATOMIC64_OPS(sub, -=)
192ATOMIC64_OP(and, &=)
193ATOMIC64_OP(or, |=)
194ATOMIC64_OP(xor, ^=)
188 195
189#undef ATOMIC64_OPS 196#undef ATOMIC64_OPS
190#undef ATOMIC64_OP_RETURN 197#undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 512d2782b043..55f106ed12bf 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
67ATOMIC_OPS(add, add) 67ATOMIC_OPS(add, add)
68ATOMIC_OPS(sub, subf) 68ATOMIC_OPS(sub, subf)
69 69
70ATOMIC_OP(and, and)
71ATOMIC_OP(or, or)
72ATOMIC_OP(xor, xor)
73
70#undef ATOMIC_OPS 74#undef ATOMIC_OPS
71#undef ATOMIC_OP_RETURN 75#undef ATOMIC_OP_RETURN
72#undef ATOMIC_OP 76#undef ATOMIC_OP
@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
304 308
305ATOMIC64_OPS(add, add) 309ATOMIC64_OPS(add, add)
306ATOMIC64_OPS(sub, subf) 310ATOMIC64_OPS(sub, subf)
311ATOMIC64_OP(and, and)
312ATOMIC64_OP(or, or)
313ATOMIC64_OP(xor, xor)
307 314
308#undef ATOMIC64_OPS 315#undef ATOMIC64_OPS
309#undef ATOMIC64_OP_RETURN 316#undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 51ccc7232042..0eca6efc0631 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,12 +76,12 @@
76do { \ 76do { \
77 compiletime_assert_atomic_type(*p); \ 77 compiletime_assert_atomic_type(*p); \
78 smp_lwsync(); \ 78 smp_lwsync(); \
79 ACCESS_ONCE(*p) = (v); \ 79 WRITE_ONCE(*p, v); \
80} while (0) 80} while (0)
81 81
82#define smp_load_acquire(p) \ 82#define smp_load_acquire(p) \
83({ \ 83({ \
84 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 84 typeof(*p) ___p1 = READ_ONCE(*p); \
85 compiletime_assert_atomic_type(*p); \ 85 compiletime_assert_atomic_type(*p); \
86 smp_lwsync(); \ 86 smp_lwsync(); \
87 ___p1; \ 87 ___p1; \
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index efbf9a322a23..47e155f15433 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
18#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) 18#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
19#define JUMP_LABEL_NOP_SIZE 4 19#define JUMP_LABEL_NOP_SIZE 4
20 20
21static __always_inline bool arch_static_branch(struct static_key *key) 21static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
22{ 22{
23 asm_volatile_goto("1:\n\t" 23 asm_volatile_goto("1:\n\t"
24 "nop\n\t" 24 "nop\n\t"
25 ".pushsection __jump_table, \"aw\"\n\t" 25 ".pushsection __jump_table, \"aw\"\n\t"
26 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" 26 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
27 ".popsection \n\t" 27 ".popsection \n\t"
28 : : "i" (key) : : l_yes); 28 : : "i" (&((char *)key)[branch]) : : l_yes);
29
30 return false;
31l_yes:
32 return true;
33}
34
35static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
36{
37 asm_volatile_goto("1:\n\t"
38 "b %l[l_yes]\n\t"
39 ".pushsection __jump_table, \"aw\"\n\t"
40 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
41 ".popsection \n\t"
42 : : "i" (&((char *)key)[branch]) : : l_yes);
43
29 return false; 44 return false;
30l_yes: 45l_yes:
31 return true; 46 return true;
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index a1ed8a8c7cb4..6472472093d0 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -17,7 +17,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
17{ 17{
18 u32 *addr = (u32 *)(unsigned long)entry->code; 18 u32 *addr = (u32 *)(unsigned long)entry->code;
19 19
20 if (type == JUMP_LABEL_ENABLE) 20 if (type == JUMP_LABEL_JMP)
21 patch_branch(addr, entry->target, 0); 21 patch_branch(addr, entry->target, 0);
22 else 22 else
23 patch_instruction(addr, PPC_INST_NOP); 23 patch_instruction(addr, PPC_INST_NOP);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7c6bb4b17b49..ed3ab509faca 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -596,25 +596,6 @@ _GLOBAL(copy_page)
596 b 2b 596 b 2b
597 597
598/* 598/*
599 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
600 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
601 */
602_GLOBAL(atomic_clear_mask)
60310: lwarx r5,0,r4
604 andc r5,r5,r3
605 PPC405_ERR77(0,r4)
606 stwcx. r5,0,r4
607 bne- 10b
608 blr
609_GLOBAL(atomic_set_mask)
61010: lwarx r5,0,r4
611 or r5,r5,r3
612 PPC405_ERR77(0,r4)
613 stwcx. r5,0,r4
614 bne- 10b
615 blr
616
617/*
618 * Extended precision shifts. 599 * Extended precision shifts.
619 * 600 *
620 * Updated to be valid for shift counts from 0 to 63 inclusive. 601 * Updated to be valid for shift counts from 0 to 63 inclusive.
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe3802e377..117fa5c921c1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
27#define __ATOMIC_OR "lao" 27#define __ATOMIC_OR "lao"
28#define __ATOMIC_AND "lan" 28#define __ATOMIC_AND "lan"
29#define __ATOMIC_ADD "laa" 29#define __ATOMIC_ADD "laa"
30#define __ATOMIC_XOR "lax"
30#define __ATOMIC_BARRIER "bcr 14,0\n" 31#define __ATOMIC_BARRIER "bcr 14,0\n"
31 32
32#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ 33#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -49,6 +50,7 @@
49#define __ATOMIC_OR "or" 50#define __ATOMIC_OR "or"
50#define __ATOMIC_AND "nr" 51#define __ATOMIC_AND "nr"
51#define __ATOMIC_ADD "ar" 52#define __ATOMIC_ADD "ar"
53#define __ATOMIC_XOR "xr"
52#define __ATOMIC_BARRIER "\n" 54#define __ATOMIC_BARRIER "\n"
53 55
54#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ 56#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v)
118#define atomic_dec_return(_v) atomic_sub_return(1, _v) 120#define atomic_dec_return(_v) atomic_sub_return(1, _v)
119#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) 121#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
120 122
121static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 123#define ATOMIC_OP(op, OP) \
122{ 124static inline void atomic_##op(int i, atomic_t *v) \
123 __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER); 125{ \
126 __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
124} 127}
125 128
126static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 129ATOMIC_OP(and, AND)
127{ 130ATOMIC_OP(or, OR)
128 __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER); 131ATOMIC_OP(xor, XOR)
129} 132
133#undef ATOMIC_OP
130 134
131#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 135#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
132 136
@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
167#define __ATOMIC64_OR "laog" 171#define __ATOMIC64_OR "laog"
168#define __ATOMIC64_AND "lang" 172#define __ATOMIC64_AND "lang"
169#define __ATOMIC64_ADD "laag" 173#define __ATOMIC64_ADD "laag"
174#define __ATOMIC64_XOR "laxg"
170#define __ATOMIC64_BARRIER "bcr 14,0\n" 175#define __ATOMIC64_BARRIER "bcr 14,0\n"
171 176
172#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ 177#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
189#define __ATOMIC64_OR "ogr" 194#define __ATOMIC64_OR "ogr"
190#define __ATOMIC64_AND "ngr" 195#define __ATOMIC64_AND "ngr"
191#define __ATOMIC64_ADD "agr" 196#define __ATOMIC64_ADD "agr"
197#define __ATOMIC64_XOR "xgr"
192#define __ATOMIC64_BARRIER "\n" 198#define __ATOMIC64_BARRIER "\n"
193 199
194#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ 200#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
247 __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); 253 __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
248} 254}
249 255
250static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
251{
252 __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
253}
254
255static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
256{
257 __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
258}
259
260#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 256#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
261 257
262static inline long long atomic64_cmpxchg(atomic64_t *v, 258static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
270 return old; 266 return old;
271} 267}
272 268
269#define ATOMIC64_OP(op, OP) \
270static inline void atomic64_##op(long i, atomic64_t *v) \
271{ \
272 __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
273}
274
275ATOMIC64_OP(and, AND)
276ATOMIC64_OP(or, OR)
277ATOMIC64_OP(xor, XOR)
278
279#undef ATOMIC64_OP
273#undef __ATOMIC64_LOOP 280#undef __ATOMIC64_LOOP
274 281
275static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) 282static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
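On s390 the new logic ops reuse the existing __ATOMIC_LOOP()/__ATOMIC64_LOOP() machinery, so they compile to the interlocked-access instructions (lao/lan/lax, or laog/lang/laxg for 64 bit) when the facility is available, and to compare-and-swap loops otherwise. For example, ATOMIC_OP(or, OR) above expands to roughly:

static inline void atomic_or(int i, atomic_t *v)
{
        /* __ATOMIC_OR is "lao" (load-and-or) with the interlocked-access
         * facility, plain "or" inside a cs loop otherwise. */
        __ATOMIC_LOOP(v, i, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}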
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615a11eb..d48fe0162331 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@
42do { \ 42do { \
43 compiletime_assert_atomic_type(*p); \ 43 compiletime_assert_atomic_type(*p); \
44 barrier(); \ 44 barrier(); \
45 ACCESS_ONCE(*p) = (v); \ 45 WRITE_ONCE(*p, v); \
46} while (0) 46} while (0)
47 47
48#define smp_load_acquire(p) \ 48#define smp_load_acquire(p) \
49({ \ 49({ \
50 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 50 typeof(*p) ___p1 = READ_ONCE(*p); \
51 compiletime_assert_atomic_type(*p); \ 51 compiletime_assert_atomic_type(*p); \
52 barrier(); \ 52 barrier(); \
53 ___p1; \ 53 ___p1; \
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7957ee..7f9fd5e3f1bf 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
12 * We use a brcl 0,2 instruction for jump labels at compile time so it 12 * We use a brcl 0,2 instruction for jump labels at compile time so it
13 * can be easily distinguished from a hotpatch generated instruction. 13 * can be easily distinguished from a hotpatch generated instruction.
14 */ 14 */
15static __always_inline bool arch_static_branch(struct static_key *key) 15static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
16{ 16{
17 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" 17 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
18 ".pushsection __jump_table, \"aw\"\n" 18 ".pushsection __jump_table, \"aw\"\n"
19 ".balign 8\n" 19 ".balign 8\n"
20 ".quad 0b, %l[label], %0\n" 20 ".quad 0b, %l[label], %0\n"
21 ".popsection\n" 21 ".popsection\n"
22 : : "X" (key) : : label); 22 : : "X" (&((char *)key)[branch]) : : label);
23
24 return false;
25label:
26 return true;
27}
28
29static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
30{
31 asm_volatile_goto("0: brcl 15, %l[label]\n"
32 ".pushsection __jump_table, \"aw\"\n"
33 ".balign 8\n"
34 ".quad 0b, %l[label], %0\n"
35 ".popsection\n"
36 : : "X" (&((char *)key)[branch]) : : label);
37
23 return false; 38 return false;
24label: 39label:
25 return true; 40 return true;
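s390, like the other architectures in this series, now provides both helpers: arch_static_branch() emits a brcl 0 no-op and falls through, while arch_static_branch_jump() emits an unconditional brcl 15 to the label. The generic static_branch_likely()/static_branch_unlikely() wrappers then choose between them based on the declared key type, roughly as follows (sketch of the generic header; details may differ from the merged code):

#define static_branch_likely(x)                                              \
({                                                                           \
        bool branch;                                                         \
        if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))\
                branch = !arch_static_branch(&(x)->key, true);               \
        else if (__builtin_types_compatible_p(typeof(*x),                    \
                                              struct static_key_false))      \
                branch = !arch_static_branch_jump(&(x)->key, true);          \
        else                                                                 \
                branch = ____wrong_branch_error();                           \
        likely(branch);                                                      \
})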
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index c9dac2139f59..083b05f5f5ab 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -61,7 +61,7 @@ static void __jump_label_transform(struct jump_entry *entry,
61{ 61{
62 struct insn old, new; 62 struct insn old, new;
63 63
64 if (type == JUMP_LABEL_ENABLE) { 64 if (type == JUMP_LABEL_JMP) {
65 jump_label_make_nop(entry, &old); 65 jump_label_make_nop(entry, &old);
66 jump_label_make_branch(entry, &new); 66 jump_label_make_branch(entry, &new);
67 } else { 67 } else {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 52524b9083c3..017c3a9bfc28 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -378,7 +378,7 @@ static void disable_sync_clock(void *dummy)
378 * increase the "sequence" counter to avoid the race of an 378 * increase the "sequence" counter to avoid the race of an
379 * etr event and the complete recovery against get_sync_clock. 379 * etr event and the complete recovery against get_sync_clock.
380 */ 380 */
381 atomic_clear_mask(0x80000000, sw_ptr); 381 atomic_andnot(0x80000000, sw_ptr);
382 atomic_inc(sw_ptr); 382 atomic_inc(sw_ptr);
383} 383}
384 384
@@ -389,7 +389,7 @@ static void disable_sync_clock(void *dummy)
389static void enable_sync_clock(void) 389static void enable_sync_clock(void)
390{ 390{
391 atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); 391 atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
392 atomic_set_mask(0x80000000, sw_ptr); 392 atomic_or(0x80000000, sw_ptr);
393} 393}
394 394
395/* 395/*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50dcf76..5c2c169395c3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
173 173
174static void __set_cpu_idle(struct kvm_vcpu *vcpu) 174static void __set_cpu_idle(struct kvm_vcpu *vcpu)
175{ 175{
176 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 176 atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
177 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 177 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
178} 178}
179 179
180static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 180static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
181{ 181{
182 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 182 atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
183 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 183 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
184} 184}
185 185
186static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 186static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
187{ 187{
188 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 188 atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
189 &vcpu->arch.sie_block->cpuflags); 189 &vcpu->arch.sie_block->cpuflags);
190 vcpu->arch.sie_block->lctl = 0x0000; 190 vcpu->arch.sie_block->lctl = 0x0000;
191 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 191 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
192 192
@@ -199,7 +199,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
199 199
200static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 200static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
201{ 201{
202 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); 202 atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
203} 203}
204 204
205static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) 205static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
928 spin_unlock(&li->lock); 928 spin_unlock(&li->lock);
929 929
930 /* clear pending external calls set by sigp interpretation facility */ 930 /* clear pending external calls set by sigp interpretation facility */
931 atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); 931 atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
932 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; 932 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
933} 933}
934 934
@@ -1026,7 +1026,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1026 1026
1027 li->irq.ext = irq->u.ext; 1027 li->irq.ext = irq->u.ext;
1028 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1028 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1029 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1029 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1030 return 0; 1030 return 0;
1031} 1031}
1032 1032
@@ -1041,7 +1041,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
1041 /* another external call is pending */ 1041 /* another external call is pending */
1042 return -EBUSY; 1042 return -EBUSY;
1043 } 1043 }
1044 atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 1044 atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
@@ -1067,7 +1067,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1067 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1067 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1068 return -EBUSY; 1068 return -EBUSY;
1069 *extcall = irq->u.extcall; 1069 *extcall = irq->u.extcall;
1070 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1070 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1071 return 0; 1071 return 0;
1072} 1072}
1073 1073
@@ -1139,7 +1139,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1139 1139
1140 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1140 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1141 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1141 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1142 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1142 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1143 return 0; 1143 return 0;
1144} 1144}
1145 1145
@@ -1183,7 +1183,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
1183 0, 0); 1183 0, 0);
1184 1184
1185 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1185 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1186 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1186 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1187 return 0; 1187 return 0;
1188} 1188}
1189 1189
@@ -1196,7 +1196,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1196 0, 0); 1196 0, 0);
1197 1197
1198 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1198 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1199 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1199 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1200 return 0; 1200 return 0;
1201} 1201}
1202 1202
@@ -1375,13 +1375,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
1375 spin_lock(&li->lock); 1375 spin_lock(&li->lock);
1376 switch (type) { 1376 switch (type) {
1377 case KVM_S390_MCHK: 1377 case KVM_S390_MCHK:
1378 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 1378 atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
1379 break; 1379 break;
1380 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1380 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1381 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); 1381 atomic_or(CPUSTAT_IO_INT, li->cpuflags);
1382 break; 1382 break;
1383 default: 1383 default:
1384 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1384 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1385 break; 1385 break;
1386 } 1386 }
1387 spin_unlock(&li->lock); 1387 spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98df53c01343..c91eb941b444 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1333,12 +1333,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1333 save_access_regs(vcpu->arch.host_acrs); 1333 save_access_regs(vcpu->arch.host_acrs);
1334 restore_access_regs(vcpu->run->s.regs.acrs); 1334 restore_access_regs(vcpu->run->s.regs.acrs);
1335 gmap_enable(vcpu->arch.gmap); 1335 gmap_enable(vcpu->arch.gmap);
1336 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1336 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1337} 1337}
1338 1338
1339void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 1339void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1340{ 1340{
1341 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1341 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1342 gmap_disable(vcpu->arch.gmap); 1342 gmap_disable(vcpu->arch.gmap);
1343 1343
1344 save_fpu_regs(); 1344 save_fpu_regs();
@@ -1443,9 +1443,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1443 CPUSTAT_STOPPED); 1443 CPUSTAT_STOPPED);
1444 1444
1445 if (test_kvm_facility(vcpu->kvm, 78)) 1445 if (test_kvm_facility(vcpu->kvm, 78))
1446 atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); 1446 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1447 else if (test_kvm_facility(vcpu->kvm, 8)) 1447 else if (test_kvm_facility(vcpu->kvm, 8))
1448 atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); 1448 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1449 1449
1450 kvm_s390_vcpu_setup_model(vcpu); 1450 kvm_s390_vcpu_setup_model(vcpu);
1451 1451
@@ -1557,24 +1557,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1557 1557
1558void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) 1558void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1559{ 1559{
1560 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1560 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1561 exit_sie(vcpu); 1561 exit_sie(vcpu);
1562} 1562}
1563 1563
1564void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) 1564void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1565{ 1565{
1566 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1566 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1567} 1567}
1568 1568
1569static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) 1569static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1570{ 1570{
1571 atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1571 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1572 exit_sie(vcpu); 1572 exit_sie(vcpu);
1573} 1573}
1574 1574
1575static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) 1575static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1576{ 1576{
1577	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1577	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);

1578} 1578}
1579 1579
1580/* 1580/*
@@ -1583,7 +1583,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1583 * return immediately. */ 1583 * return immediately. */
1584void exit_sie(struct kvm_vcpu *vcpu) 1584void exit_sie(struct kvm_vcpu *vcpu)
1585{ 1585{
1586 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 1586 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1587 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 1587 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1588 cpu_relax(); 1588 cpu_relax();
1589} 1589}
@@ -1807,19 +1807,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1807 if (dbg->control & KVM_GUESTDBG_ENABLE) { 1807 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1808 vcpu->guest_debug = dbg->control; 1808 vcpu->guest_debug = dbg->control;
1809 /* enforce guest PER */ 1809 /* enforce guest PER */
1810 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1810 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1811 1811
1812 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 1812 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1813 rc = kvm_s390_import_bp_data(vcpu, dbg); 1813 rc = kvm_s390_import_bp_data(vcpu, dbg);
1814 } else { 1814 } else {
1815 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1815 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1816 vcpu->arch.guestdbg.last_bp = 0; 1816 vcpu->arch.guestdbg.last_bp = 0;
1817 } 1817 }
1818 1818
1819 if (rc) { 1819 if (rc) {
1820 vcpu->guest_debug = 0; 1820 vcpu->guest_debug = 0;
1821 kvm_s390_clear_bp_data(vcpu); 1821 kvm_s390_clear_bp_data(vcpu);
1822 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1822 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1823 } 1823 }
1824 1824
1825 return rc; 1825 return rc;
@@ -1894,7 +1894,7 @@ retry:
1894 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 1894 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1895 if (!ibs_enabled(vcpu)) { 1895 if (!ibs_enabled(vcpu)) {
1896 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 1896 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1897 atomic_set_mask(CPUSTAT_IBS, 1897 atomic_or(CPUSTAT_IBS,
1898 &vcpu->arch.sie_block->cpuflags); 1898 &vcpu->arch.sie_block->cpuflags);
1899 } 1899 }
1900 goto retry; 1900 goto retry;
@@ -1903,7 +1903,7 @@ retry:
1903 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 1903 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1904 if (ibs_enabled(vcpu)) { 1904 if (ibs_enabled(vcpu)) {
1905 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 1905 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1906 atomic_clear_mask(CPUSTAT_IBS, 1906 atomic_andnot(CPUSTAT_IBS,
1907 &vcpu->arch.sie_block->cpuflags); 1907 &vcpu->arch.sie_block->cpuflags);
1908 } 1908 }
1909 goto retry; 1909 goto retry;
@@ -2419,7 +2419,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2419 __disable_ibs_on_all_vcpus(vcpu->kvm); 2419 __disable_ibs_on_all_vcpus(vcpu->kvm);
2420 } 2420 }
2421 2421
2422 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2422 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2423 /* 2423 /*
2424 * Another VCPU might have used IBS while we were offline. 2424 * Another VCPU might have used IBS while we were offline.
2425 * Let's play safe and flush the VCPU at startup. 2425 * Let's play safe and flush the VCPU at startup.
@@ -2445,7 +2445,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2445 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 2445 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
2446 kvm_s390_clear_stop_irq(vcpu); 2446 kvm_s390_clear_stop_irq(vcpu);
2447 2447
2448 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2448 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2449 __disable_ibs_on_vcpu(vcpu); 2449 __disable_ibs_on_vcpu(vcpu);
2450 2450
2451 for (i = 0; i < online_vcpus; i++) { 2451 for (i = 0; i < online_vcpus; i++) {
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0d002a746bec..ae4de559e3a0 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/facility.h> 16#include <asm/facility.h>
17 17
18static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; 18static DEFINE_STATIC_KEY_FALSE(have_mvcos);
19 19
20static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, 20static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
21 unsigned long size) 21 unsigned long size)
@@ -104,7 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
104 104
105unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) 105unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
106{ 106{
107 if (static_key_false(&have_mvcos)) 107 if (static_branch_likely(&have_mvcos))
108 return copy_from_user_mvcos(to, from, n); 108 return copy_from_user_mvcos(to, from, n);
109 return copy_from_user_mvcp(to, from, n); 109 return copy_from_user_mvcp(to, from, n);
110} 110}
@@ -177,7 +177,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
177 177
178unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 178unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
179{ 179{
180 if (static_key_false(&have_mvcos)) 180 if (static_branch_likely(&have_mvcos))
181 return copy_to_user_mvcos(to, from, n); 181 return copy_to_user_mvcos(to, from, n);
182 return copy_to_user_mvcs(to, from, n); 182 return copy_to_user_mvcs(to, from, n);
183} 183}
@@ -240,7 +240,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
240 240
241unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) 241unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
242{ 242{
243 if (static_key_false(&have_mvcos)) 243 if (static_branch_likely(&have_mvcos))
244 return copy_in_user_mvcos(to, from, n); 244 return copy_in_user_mvcos(to, from, n);
245 return copy_in_user_mvc(to, from, n); 245 return copy_in_user_mvc(to, from, n);
246} 246}
@@ -312,7 +312,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
312 312
313unsigned long __clear_user(void __user *to, unsigned long size) 313unsigned long __clear_user(void __user *to, unsigned long size)
314{ 314{
315 if (static_key_false(&have_mvcos)) 315 if (static_branch_likely(&have_mvcos))
316 return clear_user_mvcos(to, size); 316 return clear_user_mvcos(to, size);
317 return clear_user_xc(to, size); 317 return clear_user_xc(to, size);
318} 318}
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
373static int __init uaccess_init(void) 373static int __init uaccess_init(void)
374{ 374{
375 if (test_facility(27)) 375 if (test_facility(27))
376 static_key_slow_inc(&have_mvcos); 376 static_branch_enable(&have_mvcos);
377 return 0; 377 return 0;
378} 378}
379early_initcall(uaccess_init); 379early_initcall(uaccess_init);
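The uaccess conversion above is the canonical pattern for the new static-key API in this pull: declare a typed key with a default value, flip it once at init time, and test it with a branch-layout hint. A minimal sketch under assumed names (cpu_has_fast_path(), copy_fast() and copy_slow() are placeholders):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(have_fast_path);

static int __init fast_path_init(void)
{
	if (cpu_has_fast_path())			/* hypothetical probe */
		static_branch_enable(&have_fast_path);
	return 0;
}
early_initcall(fast_path_init);

unsigned long do_copy(void *to, const void *from, unsigned long n)
{
	if (static_branch_likely(&have_fast_path))	/* hot path falls through */
		return copy_fast(to, from, n);		/* hypothetical */
	return copy_slow(to, from, n);			/* hypothetical */
}

Unlike the old static_key_false()/static_key_true() pair, the branch hint is chosen independently of the key's initial value, which is what lets a default-off key such as have_mvcos guard the expected-hot MVCOS path.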
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 97a5fda83450..b94df40e5f2d 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
48ATOMIC_OPS(add) 48ATOMIC_OPS(add)
49ATOMIC_OPS(sub) 49ATOMIC_OPS(sub)
50 50
51ATOMIC_OP(and)
52ATOMIC_OP(or)
53ATOMIC_OP(xor)
54
51#undef ATOMIC_OPS 55#undef ATOMIC_OPS
52#undef ATOMIC_OP_RETURN 56#undef ATOMIC_OP_RETURN
53#undef ATOMIC_OP 57#undef ATOMIC_OP
54 58
55static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
56{
57 int tmp;
58 unsigned int _mask = ~mask;
59
60 __asm__ __volatile__ (
61 " .align 2 \n\t"
62 " mova 1f, r0 \n\t" /* r0 = end point */
63 " mov r15, r1 \n\t" /* r1 = saved sp */
64 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
65 " mov.l @%1, %0 \n\t" /* load old value */
66 " and %2, %0 \n\t" /* add */
67 " mov.l %0, @%1 \n\t" /* store new value */
68 "1: mov r1, r15 \n\t" /* LOGOUT */
69 : "=&r" (tmp),
70 "+r" (v)
71 : "r" (_mask)
72 : "memory" , "r0", "r1");
73}
74
75static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
76{
77 int tmp;
78
79 __asm__ __volatile__ (
80 " .align 2 \n\t"
81 " mova 1f, r0 \n\t" /* r0 = end point */
82 " mov r15, r1 \n\t" /* r1 = saved sp */
83 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
84 " mov.l @%1, %0 \n\t" /* load old value */
85 " or %2, %0 \n\t" /* or */
86 " mov.l %0, @%1 \n\t" /* store new value */
87 "1: mov r1, r15 \n\t" /* LOGOUT */
88 : "=&r" (tmp),
89 "+r" (v)
90 : "r" (mask)
91 : "memory" , "r0", "r1");
92}
93
94#endif /* __ASM_SH_ATOMIC_GRB_H */ 59#endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 61d107523f06..23fcdad5773e 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
37 37
38ATOMIC_OPS(add, +=) 38ATOMIC_OPS(add, +=)
39ATOMIC_OPS(sub, -=) 39ATOMIC_OPS(sub, -=)
40ATOMIC_OP(and, &=)
41ATOMIC_OP(or, |=)
42ATOMIC_OP(xor, ^=)
40 43
41#undef ATOMIC_OPS 44#undef ATOMIC_OPS
42#undef ATOMIC_OP_RETURN 45#undef ATOMIC_OP_RETURN
43#undef ATOMIC_OP 46#undef ATOMIC_OP
44 47
45static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
46{
47 unsigned long flags;
48
49 raw_local_irq_save(flags);
50 v->counter &= ~mask;
51 raw_local_irq_restore(flags);
52}
53
54static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
55{
56 unsigned long flags;
57
58 raw_local_irq_save(flags);
59 v->counter |= mask;
60 raw_local_irq_restore(flags);
61}
62
63#endif /* __ASM_SH_ATOMIC_IRQ_H */ 48#endif /* __ASM_SH_ATOMIC_IRQ_H */
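The sh variants drop their hand-written mask routines and instead instantiate the existing ATOMIC_OP() template for and/or/xor. For the IRQ-disable flavour above, ATOMIC_OP(and, &=) expands to roughly the following (sketch; the template itself is defined earlier in the header):

static inline void atomic_and(int i, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* irq-off section makes the RMW atomic on these parts */
	v->counter &= i;
	raw_local_irq_restore(flags);
}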
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 8575dccb9ef7..33d34b16d4d6 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
52 52
53ATOMIC_OPS(add) 53ATOMIC_OPS(add)
54ATOMIC_OPS(sub) 54ATOMIC_OPS(sub)
55ATOMIC_OP(and)
56ATOMIC_OP(or)
57ATOMIC_OP(xor)
55 58
56#undef ATOMIC_OPS 59#undef ATOMIC_OPS
57#undef ATOMIC_OP_RETURN 60#undef ATOMIC_OP_RETURN
58#undef ATOMIC_OP 61#undef ATOMIC_OP
59 62
60static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
61{
62 unsigned long tmp;
63
64 __asm__ __volatile__ (
65"1: movli.l @%2, %0 ! atomic_clear_mask \n"
66" and %1, %0 \n"
67" movco.l %0, @%2 \n"
68" bf 1b \n"
69 : "=&z" (tmp)
70 : "r" (~mask), "r" (&v->counter)
71 : "t");
72}
73
74static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
75{
76 unsigned long tmp;
77
78 __asm__ __volatile__ (
79"1: movli.l @%2, %0 ! atomic_set_mask \n"
80" or %1, %0 \n"
81" movco.l %0, @%2 \n"
82" bf 1b \n"
83 : "=&z" (tmp)
84 : "r" (mask), "r" (&v->counter)
85 : "t");
86}
87
88#endif /* __ASM_SH_ATOMIC_LLSC_H */ 63#endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e7a439..7dcbebbcaec6 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
17#include <asm/barrier.h> 17#include <asm/barrier.h>
18#include <asm-generic/atomic64.h> 18#include <asm-generic/atomic64.h>
19 19
20
21#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
22 21
23int atomic_add_return(int, atomic_t *); 22int atomic_add_return(int, atomic_t *);
23void atomic_and(int, atomic_t *);
24void atomic_or(int, atomic_t *);
25void atomic_xor(int, atomic_t *);
24int atomic_cmpxchg(atomic_t *, int, int); 26int atomic_cmpxchg(atomic_t *, int, int);
25int atomic_xchg(atomic_t *, int); 27int atomic_xchg(atomic_t *, int);
26int __atomic_add_unless(atomic_t *, int, int); 28int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 4082749913ce..917084ace49d 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *);
33ATOMIC_OPS(add) 33ATOMIC_OPS(add)
34ATOMIC_OPS(sub) 34ATOMIC_OPS(sub)
35 35
36ATOMIC_OP(and)
37ATOMIC_OP(or)
38ATOMIC_OP(xor)
39
36#undef ATOMIC_OPS 40#undef ATOMIC_OPS
37#undef ATOMIC_OP_RETURN 41#undef ATOMIC_OP_RETURN
38#undef ATOMIC_OP 42#undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e33e12..14a928601657 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
60do { \ 60do { \
61 compiletime_assert_atomic_type(*p); \ 61 compiletime_assert_atomic_type(*p); \
62 barrier(); \ 62 barrier(); \
63 ACCESS_ONCE(*p) = (v); \ 63 WRITE_ONCE(*p, v); \
64} while (0) 64} while (0)
65 65
66#define smp_load_acquire(p) \ 66#define smp_load_acquire(p) \
67({ \ 67({ \
68 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 68 typeof(*p) ___p1 = READ_ONCE(*p); \
69 compiletime_assert_atomic_type(*p); \ 69 compiletime_assert_atomic_type(*p); \
70 barrier(); \ 70 barrier(); \
71 ___p1; \ 71 ___p1; \
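ACCESS_ONCE() is being retired in favour of READ_ONCE()/WRITE_ONCE(), which also cope with non-scalar types; the barrier semantics of these macros are unchanged. As a reminder of what smp_store_release()/smp_load_acquire() pair up to provide (generic sketch, not taken from the patch):

struct msg {
	int payload;
	int ready;
};

static void producer(struct msg *m, int val)
{
	m->payload = val;
	/* release: payload is visible before ready can be observed as 1 */
	smp_store_release(&m->ready, 1);
}

static int consumer(struct msg *m)
{
	/* acquire: if ready reads as 1, the payload read is ordered after it */
	if (smp_load_acquire(&m->ready))
		return m->payload;
	return -1;			/* not ready yet */
}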
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a2b11b..62d0354d1727 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
7 7
8#define JUMP_LABEL_NOP_SIZE 4 8#define JUMP_LABEL_NOP_SIZE 4
9 9
10static __always_inline bool arch_static_branch(struct static_key *key) 10static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
11{ 11{
12 asm_volatile_goto("1:\n\t" 12 asm_volatile_goto("1:\n\t"
13 "nop\n\t" 13 "nop\n\t"
14 "nop\n\t" 14 "nop\n\t"
15 ".pushsection __jump_table, \"aw\"\n\t" 15 ".pushsection __jump_table, \"aw\"\n\t"
16 ".align 4\n\t" 16 ".align 4\n\t"
17 ".word 1b, %l[l_yes], %c0\n\t" 17 ".word 1b, %l[l_yes], %c0\n\t"
18 ".popsection \n\t" 18 ".popsection \n\t"
19 : : "i" (key) : : l_yes); 19 : : "i" (&((char *)key)[branch]) : : l_yes);
20
21 return false;
22l_yes:
23 return true;
24}
25
26static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
27{
28 asm_volatile_goto("1:\n\t"
29 "b %l[l_yes]\n\t"
30 "nop\n\t"
31 ".pushsection __jump_table, \"aw\"\n\t"
32 ".align 4\n\t"
33 ".word 1b, %l[l_yes], %c0\n\t"
34 ".popsection \n\t"
35 : : "i" (&((char *)key)[branch]) : : l_yes);
36
20 return false; 37 return false;
21l_yes: 38l_yes:
22 return true; 39 return true;
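The interesting bit in the new sparc stubs is `&((char *)key)[branch]`: the jump-table entry now records the key address plus 0 or 1, i.e. the branch type is smuggled into bit 0 of the otherwise aligned key pointer. The core code can split the two apart along these lines (sketch; the real helpers live in kernel/jump_label.c):

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static inline bool jump_entry_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;	/* NOP site or JMP site? */
}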
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c11e82a..59bbeff55024 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
16 u32 val; 16 u32 val;
17 u32 *insn = (u32 *) (unsigned long) entry->code; 17 u32 *insn = (u32 *) (unsigned long) entry->code;
18 18
19 if (type == JUMP_LABEL_ENABLE) { 19 if (type == JUMP_LABEL_JMP) {
20 s32 off = (s32)entry->target - (s32)entry->code; 20 s32 off = (s32)entry->target - (s32)entry->code;
21 21
22#ifdef CONFIG_SPARC64 22#ifdef CONFIG_SPARC64
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65ab200c..b9d63c0a7aab 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);
27 27
28#endif /* SMP */ 28#endif /* SMP */
29 29
30#define ATOMIC_OP(op, cop) \ 30#define ATOMIC_OP_RETURN(op, c_op) \
31int atomic_##op##_return(int i, atomic_t *v) \ 31int atomic_##op##_return(int i, atomic_t *v) \
32{ \ 32{ \
33 int ret; \ 33 int ret; \
34 unsigned long flags; \ 34 unsigned long flags; \
35 spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 35 spin_lock_irqsave(ATOMIC_HASH(v), flags); \
36 \ 36 \
37 ret = (v->counter cop i); \ 37 ret = (v->counter c_op i); \
38 \ 38 \
39 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 39 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
40 return ret; \ 40 return ret; \
41} \ 41} \
42EXPORT_SYMBOL(atomic_##op##_return); 42EXPORT_SYMBOL(atomic_##op##_return);
43 43
44ATOMIC_OP(add, +=) 44#define ATOMIC_OP(op, c_op) \
45void atomic_##op(int i, atomic_t *v) \
46{ \
47 unsigned long flags; \
48 spin_lock_irqsave(ATOMIC_HASH(v), flags); \
49 \
50 v->counter c_op i; \
51 \
52 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
53} \
54EXPORT_SYMBOL(atomic_##op);
55
56ATOMIC_OP_RETURN(add, +=)
57ATOMIC_OP(and, &=)
58ATOMIC_OP(or, |=)
59ATOMIC_OP(xor, ^=)
45 60
61#undef ATOMIC_OP_RETURN
46#undef ATOMIC_OP 62#undef ATOMIC_OP
47 63
48int atomic_xchg(atomic_t *v, int new) 64int atomic_xchg(atomic_t *v, int new)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac43907d1..d6b0363f345b 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);
47 47
48ATOMIC_OPS(add) 48ATOMIC_OPS(add)
49ATOMIC_OPS(sub) 49ATOMIC_OPS(sub)
50ATOMIC_OP(and)
51ATOMIC_OP(or)
52ATOMIC_OP(xor)
50 53
51#undef ATOMIC_OPS 54#undef ATOMIC_OPS
52#undef ATOMIC_OP_RETURN 55#undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);
84 87
85ATOMIC64_OPS(add) 88ATOMIC64_OPS(add)
86ATOMIC64_OPS(sub) 89ATOMIC64_OPS(sub)
90ATOMIC64_OP(and)
91ATOMIC64_OP(or)
92ATOMIC64_OP(xor)
87 93
88#undef ATOMIC64_OPS 94#undef ATOMIC64_OPS
89#undef ATOMIC64_OP_RETURN 95#undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce12f20b..8eb454cfe05c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
111 111
112ATOMIC_OPS(add) 112ATOMIC_OPS(add)
113ATOMIC_OPS(sub) 113ATOMIC_OPS(sub)
114ATOMIC_OP(and)
115ATOMIC_OP(or)
116ATOMIC_OP(xor)
114 117
115#undef ATOMIC_OPS 118#undef ATOMIC_OPS
116#undef ATOMIC_OP_RETURN 119#undef ATOMIC_OP_RETURN
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9fff..d320ce253d86 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
34 _atomic_xchg_add(&v->counter, i); 34 _atomic_xchg_add(&v->counter, i);
35} 35}
36 36
37#define ATOMIC_OP(op) \
38unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
39static inline void atomic_##op(int i, atomic_t *v) \
40{ \
41 _atomic_##op((unsigned long *)&v->counter, i); \
42}
43
44ATOMIC_OP(and)
45ATOMIC_OP(or)
46ATOMIC_OP(xor)
47
48#undef ATOMIC_OP
49
37/** 50/**
38 * atomic_add_return - add integer and return 51 * atomic_add_return - add integer and return
39 * @v: pointer of type atomic_t 52 * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
113 _atomic64_xchg_add(&v->counter, i); 126 _atomic64_xchg_add(&v->counter, i);
114} 127}
115 128
129#define ATOMIC64_OP(op) \
130long long _atomic64_##op(long long *v, long long n); \
131static inline void atomic64_##op(long long i, atomic64_t *v) \
132{ \
133 _atomic64_##op(&v->counter, i); \
134}
135
136ATOMIC64_OP(and)
137ATOMIC64_OP(or)
138ATOMIC64_OP(xor)
139
116/** 140/**
117 * atomic64_add_return - add integer and return 141 * atomic64_add_return - add integer and return
118 * @v: pointer of type atomic64_t 142 * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
225extern struct __get_user __atomic_xchg_add_unless(volatile int *p, 249extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
226 int *lock, int o, int n); 250 int *lock, int o, int n);
227extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); 251extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
252extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
228extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); 253extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
229extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); 254extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
230extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, 255extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
234 long long n); 259 long long n);
235extern long long __atomic64_xchg_add_unless(volatile long long *p, 260extern long long __atomic64_xchg_add_unless(volatile long long *p,
236 int *lock, long long o, long long n); 261 int *lock, long long o, long long n);
262extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
263extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
264extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
237 265
238/* Return failure from the atomic wrappers. */ 266/* Return failure from the atomic wrappers. */
239struct __get_user __atomic_bad_address(int __user *addr); 267struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970cef82..096a56d6ead4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
58 return oldval; 58 return oldval;
59} 59}
60 60
61static inline void atomic_and(int i, atomic_t *v)
62{
63 __insn_fetchand4((void *)&v->counter, i);
64}
65
66static inline void atomic_or(int i, atomic_t *v)
67{
68 __insn_fetchor4((void *)&v->counter, i);
69}
70
71static inline void atomic_xor(int i, atomic_t *v)
72{
73 int guess, oldval = v->counter;
74 do {
75 guess = oldval;
76 __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
77 oldval = __insn_cmpexch4(&v->counter, guess ^ i);
78 } while (guess != oldval);
79}
80
61/* Now the true 64-bit operations. */ 81/* Now the true 64-bit operations. */
62 82
63#define ATOMIC64_INIT(i) { (i) } 83#define ATOMIC64_INIT(i) { (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
91 return oldval != u; 111 return oldval != u;
92} 112}
93 113
114static inline void atomic64_and(long i, atomic64_t *v)
115{
116 __insn_fetchand((void *)&v->counter, i);
117}
118
119static inline void atomic64_or(long i, atomic64_t *v)
120{
121 __insn_fetchor((void *)&v->counter, i);
122}
123
124static inline void atomic64_xor(long i, atomic64_t *v)
125{
126 long guess, oldval = v->counter;
127 do {
128 guess = oldval;
129 __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
130 oldval = __insn_cmpexch(&v->counter, guess ^ i);
131 } while (guess != oldval);
132}
133
94#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) 134#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
95#define atomic64_sub(i, v) atomic64_add(-(i), (v)) 135#define atomic64_sub(i, v) atomic64_add(-(i), (v))
96#define atomic64_inc_return(v) atomic64_add_return(1, (v)) 136#define atomic64_inc_return(v) atomic64_add_return(1, (v))
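TILE-Gx has fetch-and/fetch-or instructions (used directly above) but no matching fetch-xor, so atomic_xor()/atomic64_xor() fall back to a compare-exchange retry loop. The same pattern written against the portable atomic API looks like this (illustrative sketch):

static inline void atomic_xor_cmpxchg(int i, atomic_t *v)
{
	int old, guess = atomic_read(v);

	do {
		old = guess;
		/* atomic_cmpxchg() returns the value actually found in *v */
		guess = atomic_cmpxchg(v, old, old ^ i);
	} while (guess != old);
}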
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211fd9e7..298df1e9912a 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
94} 94}
95EXPORT_SYMBOL(_atomic_or); 95EXPORT_SYMBOL(_atomic_or);
96 96
97unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
98{
99 return __atomic_and((int *)p, __atomic_setup(p), mask).val;
100}
101EXPORT_SYMBOL(_atomic_and);
102
97unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) 103unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
98{ 104{
99 return __atomic_andn((int *)p, __atomic_setup(p), mask).val; 105 return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
136} 142}
137EXPORT_SYMBOL(_atomic64_cmpxchg); 143EXPORT_SYMBOL(_atomic64_cmpxchg);
138 144
145long long _atomic64_and(long long *v, long long n)
146{
147 return __atomic64_and(v, __atomic_setup(v), n);
148}
149EXPORT_SYMBOL(_atomic64_and);
150
151long long _atomic64_or(long long *v, long long n)
152{
153 return __atomic64_or(v, __atomic_setup(v), n);
154}
155EXPORT_SYMBOL(_atomic64_or);
156
157long long _atomic64_xor(long long *v, long long n)
158{
159 return __atomic64_xor(v, __atomic_setup(v), n);
160}
161EXPORT_SYMBOL(_atomic64_xor);
139 162
140/* 163/*
141 * If any of the atomic or futex routines hit a bad address (not in 164 * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda3132cd61..f611265633d6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
178atomic_op _xchg_add_unless, 32, \ 178atomic_op _xchg_add_unless, 32, \
179 "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" 179 "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
180atomic_op _or, 32, "or r24, r22, r2" 180atomic_op _or, 32, "or r24, r22, r2"
181atomic_op _and, 32, "and r24, r22, r2"
181atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" 182atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
182atomic_op _xor, 32, "xor r24, r22, r2" 183atomic_op _xor, 32, "xor r24, r22, r2"
183 184
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
191 { bbns r26, 3f; add r24, r22, r4 }; \ 192 { bbns r26, 3f; add r24, r22, r4 }; \
192 { bbns r27, 3f; add r25, r23, r5 }; \ 193 { bbns r27, 3f; add r25, r23, r5 }; \
193 slt_u r26, r24, r22; add r25, r25, r26" 194 slt_u r26, r24, r22; add r25, r25, r26"
195atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
196atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
197atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
194 198
195 jrp lr /* happy backtracer */ 199 jrp lr /* happy backtracer */
196 200
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index e9168955c42f..fb52aa644aab 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new)
182 return xchg(&v->counter, new); 182 return xchg(&v->counter, new);
183} 183}
184 184
185#define ATOMIC_OP(op) \
186static inline void atomic_##op(int i, atomic_t *v) \
187{ \
188 asm volatile(LOCK_PREFIX #op"l %1,%0" \
189 : "+m" (v->counter) \
190 : "ir" (i) \
191 : "memory"); \
192}
193
194ATOMIC_OP(and)
195ATOMIC_OP(or)
196ATOMIC_OP(xor)
197
198#undef ATOMIC_OP
199
185/** 200/**
186 * __atomic_add_unless - add unless the number is already a given value 201 * __atomic_add_unless - add unless the number is already a given value
187 * @v: pointer of type atomic_t 202 * @v: pointer of type atomic_t
@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v)
219 return *v; 234 return *v;
220} 235}
221 236
222/* These are x86-specific, used by some header files */
223#define atomic_clear_mask(mask, addr) \
224 asm volatile(LOCK_PREFIX "andl %0,%1" \
225 : : "r" (~(mask)), "m" (*(addr)) : "memory")
226
227#define atomic_set_mask(mask, addr) \
228 asm volatile(LOCK_PREFIX "orl %0,%1" \
229 : : "r" ((unsigned)(mask)), "m" (*(addr)) \
230 : "memory")
231
232#ifdef CONFIG_X86_32 237#ifdef CONFIG_X86_32
233# include <asm/atomic64_32.h> 238# include <asm/atomic64_32.h>
234#else 239#else
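On x86 the generated ops are single locked instructions; ATOMIC_OP(or) above expands to roughly:

static inline void atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

Former atomic_clear_mask(mask, v) callers become atomic_andnot(mask, v); architectures that do not provide atomic_andnot() themselves get it from the generic header as atomic_and(~i, v).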
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de75c90c..a11c30b77fb5 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
313#undef alternative_atomic64 313#undef alternative_atomic64
314#undef __alternative_atomic64 314#undef __alternative_atomic64
315 315
316#define ATOMIC64_OP(op, c_op) \
317static inline void atomic64_##op(long long i, atomic64_t *v) \
318{ \
319 long long old, c = 0; \
320 while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
321 c = old; \
322}
323
324ATOMIC64_OP(and, &)
325ATOMIC64_OP(or, |)
326ATOMIC64_OP(xor, ^)
327
328#undef ATOMIC64_OP
329
316#endif /* _ASM_X86_ATOMIC64_32_H */ 330#endif /* _ASM_X86_ATOMIC64_32_H */
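32-bit x86 has no 64-bit locked logic instructions, so the new 64-bit ops are cmpxchg8b retry loops; ATOMIC64_OP(and, &) above expands to roughly:

static inline void atomic64_and(long long i, atomic64_t *v)
{
	long long old, c = 0;

	/* the first iteration mostly just fetches the current value;
	 * the loop exits once the cmpxchg succeeds with our guess c */
	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}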
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index b965f9e03f2a..50e33eff58de 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
220 return dec; 220 return dec;
221} 221}
222 222
223#define ATOMIC64_OP(op) \
224static inline void atomic64_##op(long i, atomic64_t *v) \
225{ \
226 asm volatile(LOCK_PREFIX #op"q %1,%0" \
227 : "+m" (v->counter) \
228 : "er" (i) \
229 : "memory"); \
230}
231
232ATOMIC64_OP(and)
233ATOMIC64_OP(or)
234ATOMIC64_OP(xor)
235
236#undef ATOMIC64_OP
237
223#endif /* _ASM_X86_ATOMIC64_64_H */ 238#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 818cb8788225..0681d2532527 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,12 +57,12 @@
57do { \ 57do { \
58 compiletime_assert_atomic_type(*p); \ 58 compiletime_assert_atomic_type(*p); \
59 smp_mb(); \ 59 smp_mb(); \
60 ACCESS_ONCE(*p) = (v); \ 60 WRITE_ONCE(*p, v); \
61} while (0) 61} while (0)
62 62
63#define smp_load_acquire(p) \ 63#define smp_load_acquire(p) \
64({ \ 64({ \
65 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 65 typeof(*p) ___p1 = READ_ONCE(*p); \
66 compiletime_assert_atomic_type(*p); \ 66 compiletime_assert_atomic_type(*p); \
67 smp_mb(); \ 67 smp_mb(); \
68 ___p1; \ 68 ___p1; \
@@ -74,12 +74,12 @@ do { \
74do { \ 74do { \
75 compiletime_assert_atomic_type(*p); \ 75 compiletime_assert_atomic_type(*p); \
76 barrier(); \ 76 barrier(); \
77 ACCESS_ONCE(*p) = (v); \ 77 WRITE_ONCE(*p, v); \
78} while (0) 78} while (0)
79 79
80#define smp_load_acquire(p) \ 80#define smp_load_acquire(p) \
81({ \ 81({ \
82 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 82 typeof(*p) ___p1 = READ_ONCE(*p); \
83 compiletime_assert_atomic_type(*p); \ 83 compiletime_assert_atomic_type(*p); \
84 barrier(); \ 84 barrier(); \
85 ___p1; \ 85 ___p1; \
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a4c1cf7e93f8..5daeca3d0f9e 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -16,15 +16,32 @@
16# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC 16# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
17#endif 17#endif
18 18
19static __always_inline bool arch_static_branch(struct static_key *key) 19static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
20{ 20{
21 asm_volatile_goto("1:" 21 asm_volatile_goto("1:"
22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" 22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
23 ".pushsection __jump_table, \"aw\" \n\t" 23 ".pushsection __jump_table, \"aw\" \n\t"
24 _ASM_ALIGN "\n\t" 24 _ASM_ALIGN "\n\t"
25 _ASM_PTR "1b, %l[l_yes], %c0 \n\t" 25 _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
26 ".popsection \n\t" 26 ".popsection \n\t"
27 : : "i" (key) : : l_yes); 27 : : "i" (key), "i" (branch) : : l_yes);
28
29 return false;
30l_yes:
31 return true;
32}
33
34static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
35{
36 asm_volatile_goto("1:"
37 ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
38 "2:\n\t"
39 ".pushsection __jump_table, \"aw\" \n\t"
40 _ASM_ALIGN "\n\t"
41 _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
42 ".popsection \n\t"
43 : : "i" (key), "i" (branch) : : l_yes);
44
28 return false; 45 return false;
29l_yes: 46l_yes:
30 return true; 47 return true;
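As on sparc, the branch type lands in bit 0 of the key field of the jump-table record, here via the assembler expression `%c0 + %c1`. Each asm goto above emits one such record; its layout on x86 is roughly the following (sketch of struct jump_entry for this kernel generation):

typedef u64 jump_label_t;		/* u32 on 32-bit builds */

struct jump_entry {
	jump_label_t code;		/* address of the 5-byte NOP/JMP site (label 1:) */
	jump_label_t target;		/* address of the l_yes label */
	jump_label_t key;		/* &key, with the branch flag in bit 0 */
};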
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241e228b..c537cbb038a7 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -2,16 +2,6 @@
2#define _ASM_X86_QRWLOCK_H 2#define _ASM_X86_QRWLOCK_H
3 3
4#include <asm-generic/qrwlock_types.h> 4#include <asm-generic/qrwlock_types.h>
5
6#ifndef CONFIG_X86_PPRO_FENCE
7#define queue_write_unlock queue_write_unlock
8static inline void queue_write_unlock(struct qrwlock *lock)
9{
10 barrier();
11 ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
12}
13#endif
14
15#include <asm-generic/qrwlock.h> 5#include <asm-generic/qrwlock.h>
16 6
17#endif /* _ASM_X86_QRWLOCK_H */ 7#endif /* _ASM_X86_QRWLOCK_H */
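The x86 override of queue_write_unlock() can go away because the generic qrwlock code in this series performs the unlock with smp_store_release(), roughly:

static inline void queue_write_unlock(struct qrwlock *lock)
{
	/* drop the writer byte; release-orders the critical section */
	smp_store_release((u8 *)&lock->cnts, 0);
}

On x86 (outside the CONFIG_X86_PPRO_FENCE case) smp_store_release() is just barrier() plus WRITE_ONCE(), so the generated code matches what the removed arch-specific version produced.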
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 26d5a55a2736..e565e0e4d216 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -45,7 +45,7 @@ static void __jump_label_transform(struct jump_entry *entry,
45 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP }; 45 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
46 const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5]; 46 const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
47 47
48 if (type == JUMP_LABEL_ENABLE) { 48 if (type == JUMP_LABEL_JMP) {
49 if (init) { 49 if (init) {
50 /* 50 /*
51 * Jump label is enabled for the first time. 51 * Jump label is enabled for the first time.
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 79055cf2c497..c8d52cb4cb6e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,7 +38,7 @@ static int __read_mostly tsc_unstable;
38 erroneous rdtsc usage on !cpu_has_tsc processors */ 38 erroneous rdtsc usage on !cpu_has_tsc processors */
39static int __read_mostly tsc_disabled = -1; 39static int __read_mostly tsc_disabled = -1;
40 40
41static struct static_key __use_tsc = STATIC_KEY_INIT; 41static DEFINE_STATIC_KEY_FALSE(__use_tsc);
42 42
43int tsc_clocksource_reliable; 43int tsc_clocksource_reliable;
44 44
@@ -274,7 +274,12 @@ done:
274 */ 274 */
275u64 native_sched_clock(void) 275u64 native_sched_clock(void)
276{ 276{
277 u64 tsc_now; 277 if (static_branch_likely(&__use_tsc)) {
278 u64 tsc_now = rdtsc();
279
280 /* return the value in ns */
281 return cycles_2_ns(tsc_now);
282 }
278 283
279 /* 284 /*
280 * Fall back to jiffies if there's no TSC available: 285 * Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,9 @@ u64 native_sched_clock(void)
284 * very important for it to be as fast as the platform 289 * very important for it to be as fast as the platform
285 * can achieve it. ) 290 * can achieve it. )
286 */ 291 */
287 if (!static_key_false(&__use_tsc)) {
288 /* No locking but a rare wrong value is not a big deal: */
289 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
290 }
291
292 /* read the Time Stamp Counter: */
293 tsc_now = rdtsc();
294 292
295 /* return the value in ns */ 293 /* No locking but a rare wrong value is not a big deal: */
296 return cycles_2_ns(tsc_now); 294 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
297} 295}
298 296
299/* 297/*
@@ -1212,7 +1210,7 @@ void __init tsc_init(void)
1212 /* now allow native_sched_clock() to use rdtsc */ 1210 /* now allow native_sched_clock() to use rdtsc */
1213 1211
1214 tsc_disabled = 0; 1212 tsc_disabled = 0;
1215 static_key_slow_inc(&__use_tsc); 1213 static_branch_enable(&__use_tsc);
1216 1214
1217 if (!no_sched_irq_time) 1215 if (!no_sched_irq_time)
1218 enable_sched_clock_irqtime(); 1216 enable_sched_clock_irqtime();
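The tsc.c rewrite is the same static-key conversion with the branch layout flipped to favour the common case: with static_key_false(&__use_tsc) the enabled-key TSC case was reached through a patched jump, whereas static_branch_likely() makes the rdtsc path the straight-line code and leaves the jiffies fallback reachable only while the key is still false. Condensed sketch of the resulting shape (the function name is made up):

u64 sched_clock_sketch(void)
{
	if (static_branch_likely(&__use_tsc))	/* NOP + fall-through once enabled */
		return cycles_2_ns(rdtsc());

	/* no locking; an occasional stale value is acceptable here */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}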
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index e4d193e7a300..f3dfe0d921c2 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -616,7 +616,6 @@ CONFIG_SCHED_DEBUG=y
616# CONFIG_SLUB_DEBUG_ON is not set 616# CONFIG_SLUB_DEBUG_ON is not set
617# CONFIG_SLUB_STATS is not set 617# CONFIG_SLUB_STATS is not set
618# CONFIG_DEBUG_RT_MUTEXES is not set 618# CONFIG_DEBUG_RT_MUTEXES is not set
619# CONFIG_RT_MUTEX_TESTER is not set
620# CONFIG_DEBUG_SPINLOCK is not set 619# CONFIG_DEBUG_SPINLOCK is not set
621# CONFIG_DEBUG_MUTEXES is not set 620# CONFIG_DEBUG_MUTEXES is not set
622# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 621# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index ebcd1f6fc8cb..93795d047303 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
145ATOMIC_OPS(add) 145ATOMIC_OPS(add)
146ATOMIC_OPS(sub) 146ATOMIC_OPS(sub)
147 147
148ATOMIC_OP(and)
149ATOMIC_OP(or)
150ATOMIC_OP(xor)
151
148#undef ATOMIC_OPS 152#undef ATOMIC_OPS
149#undef ATOMIC_OP_RETURN 153#undef ATOMIC_OP_RETURN
150#undef ATOMIC_OP 154#undef ATOMIC_OP
@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
250 return c; 254 return c;
251} 255}
252 256
253
254static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
255{
256#if XCHAL_HAVE_S32C1I
257 unsigned long tmp;
258 int result;
259
260 __asm__ __volatile__(
261 "1: l32i %1, %3, 0\n"
262 " wsr %1, scompare1\n"
263 " and %0, %1, %2\n"
264 " s32c1i %0, %3, 0\n"
265 " bne %0, %1, 1b\n"
266 : "=&a" (result), "=&a" (tmp)
267 : "a" (~mask), "a" (v)
268 : "memory"
269 );
270#else
271 unsigned int all_f = -1;
272 unsigned int vval;
273
274 __asm__ __volatile__(
275 " rsil a15,"__stringify(TOPLEVEL)"\n"
276 " l32i %0, %2, 0\n"
277 " xor %1, %4, %3\n"
278 " and %0, %0, %4\n"
279 " s32i %0, %2, 0\n"
280 " wsr a15, ps\n"
281 " rsync\n"
282 : "=&a" (vval), "=a" (mask)
283 : "a" (v), "a" (all_f), "1" (mask)
284 : "a15", "memory"
285 );
286#endif
287}
288
289static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
290{
291#if XCHAL_HAVE_S32C1I
292 unsigned long tmp;
293 int result;
294
295 __asm__ __volatile__(
296 "1: l32i %1, %3, 0\n"
297 " wsr %1, scompare1\n"
298 " or %0, %1, %2\n"
299 " s32c1i %0, %3, 0\n"
300 " bne %0, %1, 1b\n"
301 : "=&a" (result), "=&a" (tmp)
302 : "a" (mask), "a" (v)
303 : "memory"
304 );
305#else
306 unsigned int vval;
307
308 __asm__ __volatile__(
309 " rsil a15,"__stringify(TOPLEVEL)"\n"
310 " l32i %0, %2, 0\n"
311 " or %0, %0, %1\n"
312 " s32i %0, %2, 0\n"
313 " wsr a15, ps\n"
314 " rsync\n"
315 : "=&a" (vval)
316 : "a" (mask), "a" (v)
317 : "a15", "memory"
318 );
319#endif
320}
321
322#endif /* __KERNEL__ */ 257#endif /* __KERNEL__ */
323 258
324#endif /* _XTENSA_ATOMIC_H */ 259#endif /* _XTENSA_ATOMIC_H */