author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
commit	c86ad14d305d2429c3da19462440bac50c183def (patch)
tree	bd794cd72476661faf82c440063c217bb978ce44 /arch/avr32/include
parent	a2303849a6b4b7ba59667091e00d6bb194071d9a (diff)
parent	f06628638cf6e75f179742b6c1b35076965b9fdd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
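The fetch variants differ from the existing *_return helpers in which value they hand back: atomic_fetch_add() returns the counter as it was before the addition, while atomic_add_return() returns the result after it. A minimal sketch of that contract, using the GCC __atomic builtins as a stand-in for the arch-specific assembly (my_atomic_t and the my_* names are made up for illustration, not kernel code):

  typedef struct { int counter; } my_atomic_t;

  /* returns the value v->counter held before the addition */
  static inline int my_atomic_fetch_add(int i, my_atomic_t *v)
  {
          return __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);
  }

  /* returns the value v->counter holds after the addition */
  static inline int my_atomic_add_return(int i, my_atomic_t *v)
  {
          return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
  }

So on a counter holding 5, atomic_fetch_add(1, v) returns 5 and leaves 6 behind, whereas atomic_add_return(1, v) returns 6.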
Diffstat (limited to 'arch/avr32/include')
-rw-r--r--	arch/avr32/include/asm/atomic.h	54
1 file changed, 49 insertions, 5 deletions
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index d74fd8ce980a..3d5ce38a6f0b 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -41,21 +41,49 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
 	return result;							\
 }
 
+#define ATOMIC_FETCH_OP(op, asm_op, asm_con)				\
+static inline int __atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int result, val;						\
+									\
+	asm volatile(							\
+		"/* atomic_fetch_" #op " */\n"				\
+		"1:	ssrf	5\n"					\
+		"	ld.w	%0, %3\n"				\
+		"	mov	%1, %0\n"				\
+		"	" #asm_op "	%1, %4\n"			\
+		"	stcond	%2, %1\n"				\
+		"	brne	1b"					\
+		: "=&r" (result), "=&r" (val), "=o" (v->counter)	\
+		: "m" (v->counter), #asm_con (i)			\
+		: "cc");						\
+									\
+	return result;							\
+}
+
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
+ATOMIC_FETCH_OP (sub, sub, rKs21)
+ATOMIC_FETCH_OP (add, add, r)
 
-#define ATOMIC_OP(op, asm_op)						\
+#define ATOMIC_OPS(op, asm_op)						\
 ATOMIC_OP_RETURN(op, asm_op, r)						\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
 	(void)__atomic_##op##_return(i, v);				\
+}									\
+ATOMIC_FETCH_OP(op, asm_op, r)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	return __atomic_fetch_##op(i, v);				\
 }
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, eor)
 
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
 /*
@@ -87,6 +115,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	return __atomic_add_return(i, v);
 }
 
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+	if (IS_21BIT_CONST(i))
+		return __atomic_fetch_sub(-i, v);
+
+	return __atomic_fetch_add(i, v);
+}
+
 /*
  * atomic_sub_return - subtract the atomic variable
  * @i: integer value to subtract
@@ -102,6 +138,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return __atomic_add_return(-i, v);
 }
 
+static inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+	if (IS_21BIT_CONST(i))
+		return __atomic_fetch_sub(i, v);
+
+	return __atomic_fetch_add(-i, v);
+}
+
 /*
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
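The atomic_fetch_add()/atomic_fetch_sub() wrappers mirror the negation trick the existing atomic_add_return()/atomic_sub_return() helpers already use: the sub-based fetch op is instantiated with the rKs21 constraint, so it can encode a signed 21-bit immediate directly in the instruction, while the add-based one is instantiated with plain "r" and needs the operand in a register. When the operand is a compile-time constant in that range, the wrapper negates it and goes through the sub path to get the immediate form. IS_21BIT_CONST() is defined earlier in this header and is not part of this hunk; a plausible sketch of such a test (assumed, exact bounds not quoted from the file) is:

  /* Assumed sketch -- the real definition lives earlier in atomic.h and is
   * not shown in this diff.  It accepts only compile-time constants that
   * fit the signed 21-bit (Ks21) immediate field of the sub instruction. */
  #define IS_21BIT_CONST(i) \
          (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))

With that in place, atomic_fetch_add(4, v) on a constant operand compiles down to __atomic_fetch_sub(-4, v), so the ATOMIC_FETCH_OP(sub, sub, rKs21) expansion can encode the constant in the sub instruction instead of first loading it into a register.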