path: root/include
author     Paul Mundt <lethal@linux-sh.org>    2006-12-07 06:33:38 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2006-12-11 18:42:08 -0500
commit     ec723fbe7e19f5a66cea183bca7ca20675631a7a (patch)
tree       2a716c86a4ba9924459c9e6436a31b1acb62d449 /include
parent     a45e724ba07c02bcf3da96ddc4efefbfe10957f5 (diff)
sh: Split out atomic ops logically.
We have a few different ways to do the atomic operations, so split them out
into different headers rather than bloating atomic.h. Kernelspace gUSA will
take this up to a third implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
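For orientation, a minimal, hypothetical caller of the interface that both new
headers provide might look like the sketch below. The example_count/example_user
names and the use of ATOMIC_INIT() are illustrative and not part of this patch;
atomic_add(), atomic_set_mask(), atomic_clear_mask() and atomic_sub_return() are
the operations defined in the diff.

    static atomic_t example_count = ATOMIC_INIT(0);    /* hypothetical counter */

    static int example_user(void)
    {
            atomic_add(4, &example_count);              /* counter = 0x04 */
            atomic_set_mask(0x10, &example_count);      /* counter = 0x14 */
            atomic_clear_mask(0x04, &example_count);    /* counter = 0x10 */

            return atomic_sub_return(8, &example_count);/* 0x10 - 8 = 8   */
    }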
Diffstat (limited to 'include')
-rw-r--r--   include/asm-sh/atomic-irq.h     71
-rw-r--r--   include/asm-sh/atomic-llsc.h   107
-rw-r--r--   include/asm-sh/atomic.h        153
3 files changed, 180 insertions(+), 151 deletions(-)
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 000000000000..74f7943cff6f
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * Interrupt-disabling fallback for CPUs without ll/sc: atomicity is
+ * guaranteed by masking local interrupts around each plain C
+ * read-modify-write sequence.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v += i;
+        local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v -= i;
+        local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp += i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp -= i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v &= ~mask;
+        local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v |= mask;
+        local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
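The interrupt-masking variant keeps every operation in the same shape: save and
disable local interrupts, perform the plain C read-modify-write, restore. As a
purely hypothetical illustration (not part of this patch), one more operation
added to this header would follow the same pattern:

    /* Hypothetical sketch only: an xor-mask op in the interrupt-masking style. */
    static inline void atomic_xor_mask(unsigned int mask, atomic_t *v)
    {
            unsigned long flags;

            local_irq_save(flags);
            *(long *)v ^= mask;
            local_irq_restore(flags);
    }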
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 000000000000..4b00b78e3f4f
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_add    \n"
+"       add     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_sub    \n"
+"       sub     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_add_return     \n"
+"       add     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+"       synco                                   \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
+"       sub     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+"       synco                                   \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
+"       and     %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (~mask), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
+"       or      %1, %0                          \n"
+"       movco.l %0, @%2                         \n"
+"       bf      1b                              \n"
+        : "=&z" (tmp)
+        : "r" (mask), "r" (&v->counter)
+        : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
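The movli.l/movco.l pair is SH-4A's load-locked/store-conditional: movli.l loads
the word and opens a link on its address, movco.l stores only if the link is
still intact and records success in the T bit, and "bf 1b" retries the whole
sequence on failure. A C-level sketch of the loop's semantics follows; ll() and
sc() are hypothetical stand-ins for the two instructions, not real kernel
helpers.

    static inline int llsc_add_return_sketch(int i, atomic_t *v)
    {
            int tmp;

            do {
                    tmp = ll(&v->counter);  /* movli.l: load, open link       */
                    tmp += i;               /* add                            */
            } while (!sc(&v->counter, tmp));/* movco.l + bf 1b: retry if the
                                               store-conditional failed       */

            return tmp;     /* the "=&z" constraint pins tmp to r0, so the
                               result of the final iteration is the retval    */
    }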
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3cbddf..e12570b9339d 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>

-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
 #ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add    \n"
-"       add     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
+#include <asm/atomic-llsc.h>
 #else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v += i;
-        local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub    \n"
-"       sub     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v -= i;
-        local_irq_restore(flags);
+#include <asm/atomic-irq.h>
 #endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add_return     \n"
-"       add     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-"       synco                                   \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp += i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}

 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
-"       sub     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-"       synco                                   \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp -= i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}
-
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))

@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
-"       and     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (~mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v &= ~mask;
-        local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
-"       or      %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v |= mask;
-        local_irq_restore(flags);
-#endif
-}
-
 /* Atomic operations are already serializing on SH */
 #define smp_mb__before_atomic_dec()     barrier()
 #define smp_mb__after_atomic_dec()      barrier()
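With the selection in atomic.h reduced to a pair of includes, the kernelspace
gUSA implementation mentioned in the commit message can later slot in as simply
another header behind the config check. A purely illustrative sketch follows;
the config symbol and header name for the third variant are hypothetical and
not part of this patch.

    #if defined(CONFIG_CPU_SH4A)
    #include <asm/atomic-llsc.h>
    #elif defined(CONFIG_SH_GUSA)           /* hypothetical gUSA option  */
    #include <asm/atomic-gusa.h>            /* hypothetical third header */
    #else
    #include <asm/atomic-irq.h>
    #endif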