about summary refs log tree commit diff stats
path: root/arch/avr32
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 09:48:00 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 09:48:00 -0400
commitdbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/avr32
parentd6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
Diffstat (limited to 'arch/avr32')
-rw-r--r--arch/avr32/include/asm/atomic.h125
1 file changed, 63 insertions, 62 deletions
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 0780f3f2415b..2d07ce1c5327 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,33 +19,46 @@
19 19
20#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
21 21
22#define atomic_read(v) (*(volatile int *)&(v)->counter) 22#define atomic_read(v) ACCESS_ONCE((v)->counter)
23#define atomic_set(v, i) (((v)->counter) = i) 23#define atomic_set(v, i) (((v)->counter) = i)
24 24
25#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
26static inline int __atomic_##op##_return(int i, atomic_t *v) \
27{ \
28 int result; \
29 \
30 asm volatile( \
31 "/* atomic_" #op "_return */\n" \
32 "1: ssrf 5\n" \
33 " ld.w %0, %2\n" \
34 " " #asm_op " %0, %3\n" \
35 " stcond %1, %0\n" \
36 " brne 1b" \
37 : "=&r" (result), "=o" (v->counter) \
38 : "m" (v->counter), #asm_con (i) \
39 : "cc"); \
40 \
41 return result; \
42}
43
44ATOMIC_OP_RETURN(sub, sub, rKs21)
45ATOMIC_OP_RETURN(add, add, r)
46
47#undef ATOMIC_OP_RETURN
48
25/* 49/*
26 * atomic_sub_return - subtract the atomic variable 50 * Probably found the reason why we want to use sub with the signed 21-bit
27 * @i: integer value to subtract 51 * limit, it uses one less register than the add instruction that can add up to
28 * @v: pointer of type atomic_t 52 * 32-bit values.
29 * 53 *
30 * Atomically subtracts @i from @v. Returns the resulting value. 54 * Both instructions are 32-bit, to use a 16-bit instruction the immediate is
55 * very small; 4 bit.
56 *
57 * sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate.
58 * add 32-bit, type II, adds two register values together.
31 */ 59 */
32static inline int atomic_sub_return(int i, atomic_t *v) 60#define IS_21BIT_CONST(i) \
33{ 61 (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
34 int result;
35
36 asm volatile(
37 "/* atomic_sub_return */\n"
38 "1: ssrf 5\n"
39 " ld.w %0, %2\n"
40 " sub %0, %3\n"
41 " stcond %1, %0\n"
42 " brne 1b"
43 : "=&r"(result), "=o"(v->counter)
44 : "m"(v->counter), "rKs21"(i)
45 : "cc");
46
47 return result;
48}
49 62
50/* 63/*
51 * atomic_add_return - add integer to atomic variable 64 * atomic_add_return - add integer to atomic variable
@@ -56,51 +69,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
56 */ 69 */
57static inline int atomic_add_return(int i, atomic_t *v) 70static inline int atomic_add_return(int i, atomic_t *v)
58{ 71{
59 int result; 72 if (IS_21BIT_CONST(i))
60 73 return __atomic_sub_return(-i, v);
61 if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
62 result = atomic_sub_return(-i, v);
63 else
64 asm volatile(
65 "/* atomic_add_return */\n"
66 "1: ssrf 5\n"
67 " ld.w %0, %1\n"
68 " add %0, %3\n"
69 " stcond %2, %0\n"
70 " brne 1b"
71 : "=&r"(result), "=o"(v->counter)
72 : "m"(v->counter), "r"(i)
73 : "cc", "memory");
74 74
75 return result; 75 return __atomic_add_return(i, v);
76} 76}
77 77
78/* 78/*
79 * atomic_sub_unless - sub unless the number is a given value 79 * atomic_sub_return - subtract the atomic variable
80 * @i: integer value to subtract
80 * @v: pointer of type atomic_t 81 * @v: pointer of type atomic_t
81 * @a: the amount to subtract from v...
82 * @u: ...unless v is equal to u.
83 * 82 *
84 * Atomically subtract @a from @v, so long as it was not @u. 83 * Atomically subtracts @i from @v. Returns the resulting value.
85 * Returns the old value of @v. 84 */
86*/ 85static inline int atomic_sub_return(int i, atomic_t *v)
87static inline void atomic_sub_unless(atomic_t *v, int a, int u)
88{ 86{
89 int tmp; 87 if (IS_21BIT_CONST(i))
88 return __atomic_sub_return(i, v);
90 89
91 asm volatile( 90 return __atomic_add_return(-i, v);
92 "/* atomic_sub_unless */\n"
93 "1: ssrf 5\n"
94 " ld.w %0, %2\n"
95 " cp.w %0, %4\n"
96 " breq 1f\n"
97 " sub %0, %3\n"
98 " stcond %1, %0\n"
99 " brne 1b\n"
100 "1:"
101 : "=&r"(tmp), "=o"(v->counter)
102 : "m"(v->counter), "rKs21"(a), "rKs21"(u)
103 : "cc", "memory");
104} 91}
105 92
106/* 93/*
@@ -116,9 +103,21 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
116{ 103{
117 int tmp, old = atomic_read(v); 104 int tmp, old = atomic_read(v);
118 105
119 if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576)) 106 if (IS_21BIT_CONST(a)) {
120 atomic_sub_unless(v, -a, u); 107 asm volatile(
121 else { 108 "/* __atomic_sub_unless */\n"
109 "1: ssrf 5\n"
110 " ld.w %0, %2\n"
111 " cp.w %0, %4\n"
112 " breq 1f\n"
113 " sub %0, %3\n"
114 " stcond %1, %0\n"
115 " brne 1b\n"
116 "1:"
117 : "=&r"(tmp), "=o"(v->counter)
118 : "m"(v->counter), "rKs21"(-a), "rKs21"(u)
119 : "cc", "memory");
120 } else {
122 asm volatile( 121 asm volatile(
123 "/* __atomic_add_unless */\n" 122 "/* __atomic_add_unless */\n"
124 "1: ssrf 5\n" 123 "1: ssrf 5\n"
@@ -137,6 +136,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
137 return old; 136 return old;
138} 137}
139 138
139#undef IS_21BIT_CONST
140
140/* 141/*
141 * atomic_sub_if_positive - conditionally subtract integer from atomic variable 142 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
142 * @i: integer value to subtract 143 * @i: integer value to subtract