path: root/include/asm-sh/system.h
Diffstat (limited to 'include/asm-sh/system.h')
-rw-r--r--  include/asm-sh/system.h | 199
1 file changed, 146 insertions(+), 53 deletions(-)
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index ad35ad4958f4..6c1f8fde5ac4 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <asm/types.h>
 
 /*
  * switch_to() should switch tasks to task nr n, first
@@ -66,13 +67,20 @@ static inline void sched_cacheflush(void)
 {
 }
 
-#define nop()	__asm__ __volatile__ ("nop")
-
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#ifdef CONFIG_CPU_SH4A
+#define __icbi()			\
+{					\
+	unsigned long __addr;		\
+	__addr = 0xa8000000;		\
+	__asm__ __volatile__(		\
+		"icbi %0\n\t"		\
+		: /* no output */	\
+		: "m" (__m(__addr)));	\
+}
+#endif
 
-static __inline__ unsigned long tas(volatile int *m)
-{ /* #define tas(ptr) (xchg((ptr),1)) */
+static inline unsigned long tas(volatile int *m)
+{
 	unsigned long retval;
 
 	__asm__ __volatile__ ("tas.b @%1\n\t"
@@ -81,12 +89,33 @@ static __inline__ unsigned long tas(volatile int *m)
 	return retval;
 }
 
-extern void __xchg_called_with_bad_pointer(void);
-
-#define mb()	__asm__ __volatile__ ("": : :"memory")
-#define rmb()	mb()
-#define wmb()	__asm__ __volatile__ ("": : :"memory")
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
+#ifdef CONFIG_CPU_SH4A
+#define mb()		__asm__ __volatile__ ("synco": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define ctrl_barrier()	__icbi()
+#define read_barrier_depends()	do { } while(0)
+#else
+#define mb()		__asm__ __volatile__ ("": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("": : :"memory")
+#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #define read_barrier_depends()	do { } while(0)
+#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
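
For readers unfamiliar with the SH barrier convention, here is a small illustrative sketch (not part of the patch) of how ctrl_barrier() is meant to be used after writing a control register; the register name and address below are hypothetical placeholders, not real definitions from this header:

/* Hypothetical example only: EXAMPLE_CTRL_REG is a stand-in address. */
#define EXAMPLE_CTRL_REG	0xff00001cUL

static inline void example_write_ctrl_reg(unsigned long value)
{
	*(volatile unsigned long *)EXAMPLE_CTRL_REG = value;
	ctrl_barrier();	/* 8 nops on legacy parts, icbi on sh4a */
}
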
@@ -103,7 +132,8 @@ extern void __xchg_called_with_bad_pointer(void);
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
 /* Interrupt Control */
-static __inline__ void local_irq_enable(void)
+#ifdef CONFIG_CPU_HAS_SR_RB
+static inline void local_irq_enable(void)
 {
 	unsigned long __dummy0, __dummy1;
 
@@ -116,8 +146,22 @@ static __inline__ void local_irq_enable(void)
 		: "1" (~0x000000f0)
 		: "memory");
 }
+#else
+static inline void local_irq_enable(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%1, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x000000f0)
+		: "memory");
+}
+#endif
 
-static __inline__ void local_irq_disable(void)
+static inline void local_irq_disable(void)
 {
 	unsigned long __dummy;
 	__asm__ __volatile__("stc sr, %0\n\t"
@@ -128,6 +172,31 @@ static __inline__ void local_irq_disable(void)
 		: "memory");
 }
 
+static inline void set_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ ("stc	sr, %0\n\t"
+			      "or	%2, %0\n\t"
+			      "and	%3, %0\n\t"
+			      "ldc	%0, sr"
+			      : "=&r" (__dummy0), "=r" (__dummy1)
+			      : "r" (0x10000000), "r" (0xffffff0f)
+			      : "memory");
+}
+
+static inline void clear_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ ("stc	sr, %0\n\t"
+			      "and	%2, %0\n\t"
+			      "ldc	%0, sr"
+			      : "=&r" (__dummy0), "=r" (__dummy1)
+			      : "1" (~0x10000000)
+			      : "memory");
+}
+
 #define local_save_flags(x) \
 	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
 
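
As a brief illustrative note (not part of the patch): set_bl_bit() sets SR.BL, which blocks exception and interrupt delivery until clear_bl_bit() clears it again. A hypothetical caller would bracket a sensitive sequence like so:

static inline void example_blocked_sequence(void)
{
	set_bl_bit();		/* SR.BL = 1: block interrupts/exceptions */
	/* ... code that must not be disturbed ... */
	clear_bl_bit();		/* SR.BL = 0: allow delivery again */
}
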
@@ -138,7 +207,7 @@ static __inline__ void local_irq_disable(void)
 	(flags != 0);	\
 })
 
-static __inline__ unsigned long local_irq_save(void)
+static inline unsigned long local_irq_save(void)
 {
 	unsigned long flags, __dummy;
 
@@ -154,35 +223,9 @@ static __inline__ unsigned long local_irq_save(void)
 	return flags;
 }
 
-#ifdef DEBUG_CLI_STI
-static __inline__ void local_irq_restore(unsigned long x)
-{
-	if ((x & 0x000000f0) != 0x000000f0)
-		local_irq_enable();
-	else {
-		unsigned long flags;
-		local_save_flags(flags);
-
-		if (flags == 0) {
-			extern void dump_stack(void);
-			printk(KERN_ERR "BUG!\n");
-			dump_stack();
-			local_irq_disable();
-		}
-	}
-}
-#else
-#define local_irq_restore(x) do { \
-	if ((x & 0x000000f0) != 0x000000f0) \
-		local_irq_enable(); \
-} while (0)
-#endif
-
-#define really_restore_flags(x) do { \
+#define local_irq_restore(x) do { \
 	if ((x & 0x000000f0) != 0x000000f0) \
 		local_irq_enable(); \
-	else \
-		local_irq_disable(); \
 } while (0)
 
 /*
@@ -210,8 +253,8 @@ do { \
 #define back_to_P1()					\
 do {							\
 	unsigned long __dummy;				\
+	ctrl_barrier();					\
 	__asm__ __volatile__(				\
-		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
 		"mov.l	1f, %0\n\t"			\
 		"jmp	@%0\n\t"			\
 		" nop\n\t"				\
@@ -224,7 +267,7 @@ do { \
 /* For spinlocks etc */
 #define local_irq_save(x)	x = local_irq_save()
 
-static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -235,7 +278,7 @@ static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 	return retval;
 }
 
-static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -246,20 +289,70 @@ static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned lon
 	return retval;
 }
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size)				\
+({							\
+	unsigned long __xchg__res;			\
+	volatile void *__xchg_ptr = (ptr);		\
+	switch (size) {					\
+	case 4:						\
+		__xchg__res = xchg_u32(__xchg_ptr, x);	\
+		break;					\
+	case 1:						\
+		__xchg__res = xchg_u8(__xchg_ptr, x);	\
+		break;					\
+	default:					\
+		__xchg_called_with_bad_pointer();	\
+		__xchg__res = x;			\
+		break;					\
+	}						\
+							\
+	__xchg__res;					\
+})
+
+#define xchg(ptr,x)	\
+	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
+	unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);	/* implies memory barrier  */
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
+		unsigned long new, int size)
 {
 	switch (size) {
 	case 4:
-		return xchg_u32(ptr, x);
-		break;
-	case 1:
-		return xchg_u8(ptr, x);
-		break;
+		return __cmpxchg_u32(ptr, old, new);
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
+	__cmpxchg_called_with_bad_pointer();
+	return old;
 }
 
+#define cmpxchg(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+   })
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
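
Illustrative usage sketch (not part of the patch): the cmpxchg() introduced above can be used to build simple lock-free updates, for example a hypothetical atomic increment of an int:

static inline int example_atomic_inc(volatile int *v)
{
	int old, new;

	do {
		old = *v;		/* sample the current value */
		new = old + 1;
	} while (cmpxchg(v, old, new) != old);	/* retry if it changed underneath us */

	return new;
}
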