author	Becky Bruce <bgill@freescale.com>	2005-09-22 15:20:04 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-09-25 08:38:46 -0400
commit	feaf7cf153335fe7100b65ed6f4585c3574fe69a (patch)
tree	c57198f01b5f12ffe8ce90f4e1399505c1f84a02 /include/asm-powerpc
parent	2bfadee32f1501faa3184d574f6a769f17236c87 (diff)
[PATCH] powerpc: merge atomic.h, memory.h
powerpc: Merge atomic.h and memory.h into include/asm-powerpc.

Merged atomic.h into include/asm-powerpc. Moved the asm-style HMT_
defines from memory.h into ppc_asm.h, where HMT_ defines already
existed; moved the C-style HMT_ defines to processor.h. Renamed
memory.h to synch.h to better reflect its contents.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
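For context, the C-style HMT_ variants mentioned above land outside the diffstat shown below. A minimal sketch of what they look like, assuming the kernel's conventional lower-case helper names (illustrative only, not part of this diff):

	/* C-callable thread-priority helpers (sketch; names assumed) */
	#define HMT_very_low()		asm volatile("or 31,31,31	# very low priority")
	#define HMT_low()		asm volatile("or 1,1,1		# low priority")
	#define HMT_medium_low()	asm volatile("or 6,6,6		# medium low priority")
	#define HMT_medium()		asm volatile("or 2,2,2		# medium priority")
	#define HMT_medium_high()	asm volatile("or 5,5,5		# medium high priority")
	#define HMT_high()		asm volatile("or 3,3,3		# high priority")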
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--	include/asm-powerpc/atomic.h	209
-rw-r--r--	include/asm-powerpc/ppc_asm.h	  3
-rw-r--r--	include/asm-powerpc/synch.h	 51
3 files changed, 263 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
new file mode 100644
index 000000000000..ed4b345ed75d
--- /dev/null
+++ b/include/asm-powerpc/atomic.h
@@ -0,0 +1,209 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#ifdef __KERNEL__
+#include <asm/synch.h>
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v,i)		(((v)->counter) = (i))
+
+/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
+ * The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb)	"dcbt " #ra "," #rb ";"
+#else
+#define PPC405_ERR77(ra,rb)
+#endif
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_add\n\
+	add	%0,%2,%0\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2		# atomic_add_return\n\
+	add	%0,%1,%0\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_sub\n\
+	subf	%0,%2,%0\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
+	subf	%0,%1,%0\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_inc\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_dec\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
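
As a usage note (not part of the commit): a minimal sketch of how this API is typically consumed for reference counting. The struct and helper names below are invented for illustration, and kfree() is assumed from <linux/slab.h>:

	/* Hypothetical refcounting sketch built on the new atomic.h. */
	#include <linux/slab.h>
	#include <asm/atomic.h>

	struct foo {
		atomic_t refcount;
		/* payload */
	};

	static void foo_get(struct foo *f)
	{
		atomic_inc(&f->refcount);	/* lwarx/stwcx. retry loop, no barrier */
	}

	static void foo_put(struct foo *f)
	{
		/* atomic_dec_return() issues ISYNC_ON_SMP after the stwcx.,
		 * so the CPU that sees zero is ordered before the kfree(). */
		if (atomic_dec_and_test(&f->refcount))
			kfree(f);
	}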
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 553035cda00e..4efa71878fa9 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
 #define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 
 /* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority
 #define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
 #define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
 #define HMT_HIGH	or	3,3,3
 
 /* handle instructions that older assemblers may not know */
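
A hedged illustration of how these asm-side macros get used (the spin loop below is invented, not from this commit): a busy-wait drops hardware-thread priority while polling and restores it once the condition is met:

	# hypothetical poll loop: r3 holds the address of a flag word
	1:	HMT_LOW			# give the sibling thread priority
		lwz	r5,0(r3)	# poll the flag
		cmpwi	0,r5,0
		beq	1b
		HMT_MEDIUM		# flag set: back to normal priority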
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 000000000000..4660c0394a77
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define LWSYNC	lwsync
+#else
+# define LWSYNC	sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP	"eieio\n"
+#define ISYNC_ON_SMP	"\n\tisync"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp()	eieio()
+#define isync_on_smp()	isync()
+#else
+#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
+#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
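
To show how the string macros above are meant to be spliced into inline asm, here is a sketch of a test-and-set-style primitive in the same idiom as atomic.h (the function is invented for illustration and is not part of this commit; real bitops would also need the 405 erratum workaround):

	/* Hypothetical test-and-set word built with the barrier macros. */
	#include <asm/synch.h>

	static __inline__ unsigned int my_test_and_set(volatile unsigned int *p)
	{
		unsigned int old, t;

		__asm__ __volatile__(
		EIEIO_ON_SMP			/* order earlier stores (SMP only) */
	"1:	lwarx	%0,0,%3\n"
	"	ori	%1,%0,1\n"
	"	stwcx.	%1,0,%3\n"
	"	bne-	1b"
		ISYNC_ON_SMP			/* fence later loads (SMP only) */
		: "=&r" (old), "=&r" (t), "=m" (*p)
		: "r" (p), "m" (*p)
		: "cc", "memory");

		return old & 1;
	}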