author:    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:48:00 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:48:00 -0400
commit:    dbb885fecc1b1b35e93416bedd24d21bd20f60ed
tree:      9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/mn10300
parent:    d6dd50e07c5bec00db2005969b1a01f8ca3d25ef
parent:    2291059c852706c6f5ffb400366042b7625066cd
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
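The last bullet is the most far-reaching change: once an architecture supplies a working cmpxchg(), every other read-modify-write operation can be generated from it as a compare-and-swap retry loop. Below is a minimal stand-alone sketch of that pattern, not the kernel's actual asm-generic code; the cmpxchg() wrapper over GCC's __sync_val_compare_and_swap() and the c_op parameter are stand-ins used here for illustration.

    /* Sketch: deriving the atomic_*_return() ops from cmpxchg() alone. */
    typedef struct { int counter; } atomic_t;

    /* Stand-in cmpxchg(): returns the value that was previously at *ptr. */
    #define cmpxchg(ptr, o, n)  __sync_val_compare_and_swap((ptr), (o), (n))

    #define ATOMIC_OP_RETURN(op, c_op)                                     \
    static inline int atomic_##op##_return(int i, atomic_t *v)            \
    {                                                                      \
            int c, old;                                                    \
                                                                           \
            c = v->counter;                                                \
            /* Retry until no other CPU touched v->counter in between. */ \
            while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)        \
                    c = old;                                               \
            return c c_op i;                                               \
    }

    ATOMIC_OP_RETURN(add, +)    /* generates atomic_add_return() */
    ATOMIC_OP_RETURN(sub, -)    /* generates atomic_sub_return() */

With the *_return() variants in place, atomic_add(), atomic_inc() and the rest become one-line wrappers, which is how the rewrite keeps the per-architecture requirement down to that single primitive.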
Diffstat (limited to 'arch/mn10300')
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 125
1 file changed, 42 insertions(+), 83 deletions(-)
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index cadeb1e2cdfc..5be655e83e70 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -33,7 +33,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
  */
 #define atomic_read(v)	(ACCESS_ONCE((v)->counter))
 
@@ -43,102 +42,62 @@
  * @i: required value
  *
  * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
  */
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int retval;
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%4,(_AAR,%3)	\n"
-		"	mov	(_ADR,%3),%1	\n"
-		"	add	%5,%1		\n"
-		"	mov	%1,(_ADR,%3)	\n"
-		"	mov	(_ADR,%3),%0	\n"	/* flush */
-		"	mov	(_ASR,%3),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-		: "memory", "cc");
-
-#else
-	unsigned long flags;
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int retval, status;						\
+									\
+	asm volatile(							\
+		"1:	mov	%4,(_AAR,%3)	\n"			\
+		"	mov	(_ADR,%3),%1	\n"			\
+		"	" #op "	%5,%1		\n"			\
+		"	mov	%1,(_ADR,%3)	\n"			\
+		"	mov	(_ADR,%3),%0	\n"	/* flush */	\
+		"	mov	(_ASR,%3),%0	\n"			\
+		"	or	%0,%0		\n"			\
+		"	bne	1b		\n"			\
+		: "=&r"(status), "=&r"(retval), "=m"(v->counter)	\
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)	\
+		: "memory", "cc");					\
+}
 
-	flags = arch_local_cli_save();
-	retval = v->counter;
-	retval += i;
-	v->counter = retval;
-	arch_local_irq_restore(flags);
-#endif
-	return retval;
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int retval, status;						\
+									\
+	asm volatile(							\
+		"1:	mov	%4,(_AAR,%3)	\n"			\
+		"	mov	(_ADR,%3),%1	\n"			\
+		"	" #op "	%5,%1		\n"			\
+		"	mov	%1,(_ADR,%3)	\n"			\
+		"	mov	(_ADR,%3),%0	\n"	/* flush */	\
+		"	mov	(_ASR,%3),%0	\n"			\
+		"	or	%0,%0		\n"			\
+		"	bne	1b		\n"			\
+		: "=&r"(status), "=&r"(retval), "=m"(v->counter)	\
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)	\
+		: "memory", "cc");					\
+	return retval;							\
 }
 
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int retval;
-#ifdef CONFIG_SMP
-	int status;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-	asm volatile(
-		"1:	mov	%4,(_AAR,%3)	\n"
-		"	mov	(_ADR,%3),%1	\n"
-		"	sub	%5,%1		\n"
-		"	mov	%1,(_ADR,%3)	\n"
-		"	mov	(_ADR,%3),%0	\n"	/* flush */
-		"	mov	(_ASR,%3),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-		: "memory", "cc");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#else
-	unsigned long flags;
-	flags = arch_local_cli_save();
-	retval = v->counter;
-	retval -= i;
-	v->counter = retval;
-	arch_local_irq_restore(flags);
-#endif
-	return retval;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
 }
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	atomic_sub_return(i, v);
-}
-
 static inline void atomic_inc(atomic_t *v)
 {
 	atomic_add_return(1, v);
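To make the fold concrete, here is roughly what ATOMIC_OPS(add) above hand-expands to (the void atomic_add() half; atomic_add_return() differs only in also returning retval). The per-instruction comments are added here and reflect one reading of the mn10300 atomic-operation unit (its _AAR address, _ADR data and _ASR status registers); only the /* flush */ comment is in the source.

    static inline void atomic_add(int i, atomic_t *v)
    {
            int retval, status;

            asm volatile(
                    "1:     mov     %4,(_AAR,%3)    \n" /* point the unit at &v->counter */
                    "       mov     (_ADR,%3),%1    \n" /* fetch the current value */
                    "       add     %5,%1           \n" /* #op stringified to "add" */
                    "       mov     %1,(_ADR,%3)    \n" /* store the new value */
                    "       mov     (_ADR,%3),%0    \n" /* flush */
                    "       mov     (_ASR,%3),%0    \n" /* status: was the sequence disturbed? */
                    "       or      %0,%0           \n" /* set condition codes from status */
                    "       bne     1b              \n" /* nonzero status: start over at 1: */
                    : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                    : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                    : "memory", "cc");
    }

One asm template now serves both add and sub (and any future op), which is where the net 41-line reduction in the diffstat comes from; the macro versions also use the hardware sequence unconditionally, where the old code fell back to disabling interrupts when CONFIG_SMP was off.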