Diffstat (limited to 'include/asm-sh/system.h')
-rw-r--r--  include/asm-sh/system.h | 202 ++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 147 insertions(+), 55 deletions(-)
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index b752e5cbb830..6c1f8fde5ac4 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -6,13 +6,14 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <asm/types.h>
 
 /*
  * switch_to() should switch tasks to task nr n, first
  */
 
 #define switch_to(prev, next, last) do { \
-	task_t *__last; \
+	struct task_struct *__last; \
 	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
 	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
 	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
@@ -66,13 +67,20 @@ static inline void sched_cacheflush(void)
 {
 }
 
-#define nop() __asm__ __volatile__ ("nop")
-
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#ifdef CONFIG_CPU_SH4A
+#define __icbi() \
+{ \
+	unsigned long __addr; \
+	__addr = 0xa8000000; \
+	__asm__ __volatile__( \
+		"icbi %0\n\t" \
+		: /* no output */ \
+		: "m" (__m(__addr))); \
+}
+#endif
 
-static __inline__ unsigned long tas(volatile int *m)
-{ /* #define tas(ptr) (xchg((ptr),1)) */
+static inline unsigned long tas(volatile int *m)
+{
 	unsigned long retval;
 
 	__asm__ __volatile__ ("tas.b @%1\n\t"
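
The tas() helper in the hunk above is a classic test-and-set primitive: SH's
tas.b instruction atomically tests the byte at the given address for zero and
then sets its MSB. As a purely illustrative sketch, not part of this patch,
a busy-wait lock could be built on it, assuming tas() returns nonzero exactly
when the byte was previously zero (i.e. the lock was free):

	/* Hypothetical illustration only; real kernel locks live in
	 * asm/spinlock.h. tas() returns nonzero when the old byte was
	 * zero, i.e. when this caller just claimed the lock. */
	static volatile int demo_lock;

	static void demo_acquire(void)
	{
		while (!tas(&demo_lock))
			; /* spin until the holder stores zero again */
	}

	static void demo_release(void)
	{
		demo_lock = 0;
	}
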
@@ -81,12 +89,33 @@ static __inline__ unsigned long tas(volatile int *m)
 	return retval;
 }
 
-extern void __xchg_called_with_bad_pointer(void);
-
-#define mb()	__asm__ __volatile__ ("": : :"memory")
-#define rmb()	mb()
-#define wmb()	__asm__ __volatile__ ("": : :"memory")
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
+#ifdef CONFIG_CPU_SH4A
+#define mb()		__asm__ __volatile__ ("synco": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define ctrl_barrier()	__icbi()
+#define read_barrier_depends()	do { } while(0)
+#else
+#define mb()		__asm__ __volatile__ ("": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("": : :"memory")
+#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #define read_barrier_depends()	do { } while(0)
+#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
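
To make the comment above concrete: the intended pattern is a control
register write followed immediately by ctrl_barrier(), which expands to
eight nops on legacy parts and to a single icbi on sh4a. A minimal sketch,
where DEMO_CCR is an illustrative placeholder address and not part of this
patch:

	/* Hypothetical usage sketch of ctrl_barrier(). DEMO_CCR stands
	 * in for a real control register such as CCR or MMUCR. */
	#define DEMO_CCR ((volatile unsigned long *)0xff00001c)

	static inline void demo_write_ccr(unsigned long val)
	{
		*DEMO_CCR = val;
		ctrl_barrier();	/* let the write take effect before
				 * running anything that depends on it */
	}
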
@@ -101,10 +130,10 @@ extern void __xchg_called_with_bad_pointer(void);
 #endif
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 /* Interrupt Control */
-static __inline__ void local_irq_enable(void)
+#ifdef CONFIG_CPU_HAS_SR_RB
+static inline void local_irq_enable(void)
 {
 	unsigned long __dummy0, __dummy1;
 
@@ -117,8 +146,22 @@ static __inline__ void local_irq_enable(void)
 		: "1" (~0x000000f0)
 		: "memory");
 }
+#else
+static inline void local_irq_enable(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%1, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x000000f0)
+		: "memory");
+}
+#endif
 
-static __inline__ void local_irq_disable(void)
+static inline void local_irq_disable(void)
 {
 	unsigned long __dummy;
 	__asm__ __volatile__("stc	sr, %0\n\t"
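
Both local_irq_enable() variants clear the same field: SR bits 4-7 form
IMASK, the CPU's interrupt mask level, which is why the constant
~0x000000f0 appears in each. A tiny illustrative helper (not in the patch)
that decodes the field from a saved SR image:

	/* Hypothetical helper: the interrupt mask level, from 0 (all
	 * interrupts accepted) through 15 (all maskable interrupts
	 * blocked), lives in SR bits 4-7. */
	static inline unsigned int demo_sr_imask(unsigned long sr)
	{
		return (sr & 0x000000f0) >> 4;
	}
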
@@ -129,6 +172,31 @@ static __inline__ void local_irq_disable(void)
 		: "memory");
 }
 
+static inline void set_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ ("stc	sr, %0\n\t"
+			      "or	%2, %0\n\t"
+			      "and	%3, %0\n\t"
+			      "ldc	%0, sr"
+			      : "=&r" (__dummy0), "=r" (__dummy1)
+			      : "r" (0x10000000), "r" (0xffffff0f)
+			      : "memory");
+}
+
+static inline void clear_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ ("stc	sr, %0\n\t"
+			      "and	%2, %0\n\t"
+			      "ldc	%0, sr"
+			      : "=&r" (__dummy0), "=r" (__dummy1)
+			      : "1" (~0x10000000)
+			      : "memory");
+}
+
 #define local_save_flags(x) \
 	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
 
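
The 0x10000000 mask in the new helpers is SR.BL (bit 28); while it is set,
exceptions and interrupts are blocked outright, so any window bracketed by
these calls must stay short. A hedged usage sketch, not part of the patch:

	/* Hypothetical: briefly run with SR.BL set so no exception or
	 * interrupt can intervene. A further exception while BL=1 is
	 * fatal on SH, so keep the window minimal. */
	static void demo_blocked_section(void)
	{
		set_bl_bit();
		/* ... short, exception-free critical work ... */
		clear_bl_bit();
	}
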
@@ -139,7 +207,7 @@ static __inline__ void local_irq_disable(void)
 	(flags != 0); \
 })
 
-static __inline__ unsigned long local_irq_save(void)
+static inline unsigned long local_irq_save(void)
 {
 	unsigned long flags, __dummy;
 
@@ -155,35 +223,9 @@ static __inline__ unsigned long local_irq_save(void)
 	return flags;
 }
 
-#ifdef DEBUG_CLI_STI
-static __inline__ void local_irq_restore(unsigned long x)
-{
-	if ((x & 0x000000f0) != 0x000000f0)
-		local_irq_enable();
-	else {
-		unsigned long flags;
-		local_save_flags(flags);
-
-		if (flags == 0) {
-			extern void dump_stack(void);
-			printk(KERN_ERR "BUG!\n");
-			dump_stack();
-			local_irq_disable();
-		}
-	}
-}
-#else
-#define local_irq_restore(x) do { \
-	if ((x & 0x000000f0) != 0x000000f0) \
-		local_irq_enable(); \
-} while (0)
-#endif
-
-#define really_restore_flags(x) do { \
+#define local_irq_restore(x) do { \
 	if ((x & 0x000000f0) != 0x000000f0) \
 		local_irq_enable(); \
-	else \
-		local_irq_disable(); \
 } while (0)
 
 /*
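
The simplified local_irq_restore() only re-enables when the saved IMASK
field was not fully raised, which preserves the usual nestable save/restore
pairing. For illustration (this helper is hypothetical, not from the patch):

	/* Safe whether or not the caller already had interrupts off:
	 * restore re-enables only if the saved flags say they were
	 * enabled on entry. */
	static void demo_critical(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* ... work that must not race with interrupts ... */
		local_irq_restore(flags);
	}
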
@@ -211,8 +253,8 @@ do { \
 #define back_to_P1() \
 do { \
 	unsigned long __dummy; \
+	ctrl_barrier(); \
 	__asm__ __volatile__( \
-		"nop;nop;nop;nop;nop;nop;nop\n\t" \
 		"mov.l 1f, %0\n\t" \
 		"jmp @%0\n\t" \
 		" nop\n\t" \
@@ -225,7 +267,7 @@ do { \
 /* For spinlocks etc */
 #define local_irq_save(x)	x = local_irq_save()
 
-static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -236,7 +278,7 @@ static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 	return retval;
 }
 
-static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -247,20 +289,70 @@ static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
 	return retval;
 }
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size) \
+({ \
+	unsigned long __xchg__res; \
+	volatile void *__xchg_ptr = (ptr); \
+	switch (size) { \
+	case 4: \
+		__xchg__res = xchg_u32(__xchg_ptr, x); \
+		break; \
+	case 1: \
+		__xchg__res = xchg_u8(__xchg_ptr, x); \
+		break; \
+	default: \
+		__xchg_called_with_bad_pointer(); \
+		__xchg__res = x; \
+		break; \
+	} \
+ \
+	__xchg__res; \
+})
+
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
+	unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);	/* implies memory barrier */
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
+		unsigned long new, int size)
 {
 	switch (size) {
 	case 4:
-		return xchg_u32(ptr, x);
-		break;
-	case 1:
-		return xchg_u8(ptr, x);
-		break;
+		return __cmpxchg_u32(ptr, old, new);
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
+	__cmpxchg_called_with_bad_pointer();
+	return old;
 }
 
+#define cmpxchg(ptr,o,n) \
+({ \
+	__typeof__(*(ptr)) _o_ = (o); \
+	__typeof__(*(ptr)) _n_ = (n); \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+				       (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
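
The cmpxchg() introduced above is a uniprocessor implementation: it masks
interrupts around the read-compare-store rather than using an atomic
instruction, but callers use the generic calling convention. An illustrative
retry loop (hypothetical, not part of the patch):

	/* Hypothetical: add to a counter with the cmpxchg() above,
	 * retrying until no other context changed the word between the
	 * read and the compare-and-swap. */
	static int demo_counter;

	static void demo_add(int delta)
	{
		int old;

		do {
			old = demo_counter;
		} while (cmpxchg(&demo_counter, old, old + delta) != old);
	}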