diff options
Diffstat (limited to 'include/asm-xtensa/system.h')
-rw-r--r-- | include/asm-xtensa/system.h | 252 |
1 files changed, 252 insertions, 0 deletions
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h new file mode 100644 index 000000000000..690fe325e671 --- /dev/null +++ b/include/asm-xtensa/system.h | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * include/asm-xtensa/system.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_SYSTEM_H | ||
12 | #define _XTENSA_SYSTEM_H | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/stringify.h> | ||
16 | |||
17 | #include <asm/processor.h> | ||
18 | |||
19 | /* interrupt control */ | ||
20 | |||
/* interrupt control
 *
 * Written as do { ... } while (0) with NO trailing semicolon: the
 * original macros ended in "while(0);", which breaks unbraced
 * if/else bodies (the extra ';' terminates the if statement).
 */

/* Read the PS special register into x (current interrupt state). */
#define local_save_flags(x) do { \
	__asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); \
	} while (0)

/* Write x back to PS and rsync so the new level takes effect. */
#define local_irq_restore(x) do { \
	__asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \
			      :: "a" (x) : "memory"); } while (0)

/* Raise interrupt level to LOCKLEVEL, saving the old PS in x. */
#define local_irq_save(x) do { \
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \
			      : "=a" (x) :: "memory"); } while (0)
29 | |||
30 | static inline void local_irq_disable(void) | ||
31 | { | ||
32 | unsigned long flags; | ||
33 | __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) | ||
34 | : "=a" (flags) :: "memory"); | ||
35 | } | ||
/*
 * Enable interrupts by lowering the interrupt level to 0.  The old
 * PS value returned by RSIL is discarded.
 */
static inline void local_irq_enable(void)
{
	unsigned long discarded;

	__asm__ __volatile__ ("rsil %0, 0" : "=a" (discarded) :: "memory");
}
42 | |||
/*
 * Return non-zero when interrupts are disabled, i.e. when the
 * interrupt-level field (low 4 bits of PS) is non-zero.
 */
static inline int irqs_disabled(void)
{
	unsigned long ps;

	local_save_flags(ps);
	return ps & 0xf;
}
49 | |||
/* Read / write the CPENABLE special register.  The stray ';' after
 * "while(0)" in the originals made these unsafe in unbraced if/else
 * bodies; fixed here. */
#define RSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while (0)
#define WSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync"	  \
			     :: "a" (x)); } while (0)
56 | |||
#define clear_cpenable() __clear_cpenable()

/*
 * Clear CPENABLE, disabling all coprocessors.  No-op on cores built
 * without coprocessor support.
 *
 * Changed GNU89 "extern __inline__" to "static inline": under C99
 * inline semantics "extern inline" emits an external definition in
 * every translation unit that includes this header (duplicate-symbol
 * link errors), and "static inline" matches the other inline
 * functions in this file.
 */
static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
	unsigned long i = 0;
	WSR_CPENABLE(i);
#endif
}
66 | |||
/*
 * Set bit i in CPENABLE (read-modify-write), enabling coprocessor i.
 * No-op on cores built without coprocessor support.
 *
 * "extern __inline__" changed to "static inline" (see
 * __clear_cpenable in the kernel history: GNU89 extern inline emits
 * external definitions under C99 semantics and is inconsistent with
 * the rest of this file).
 */
static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;

	RSR_CPENABLE(cp);
	cp |= 1 << i;
	WSR_CPENABLE(cp);
#endif
}
76 | |||
/*
 * Clear bit i in CPENABLE (read-modify-write), disabling
 * coprocessor i.  No-op on cores built without coprocessor support.
 *
 * "extern __inline__" changed to "static inline" for correct C99
 * inline semantics and consistency with this file's other helpers.
 */
static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;

	RSR_CPENABLE(cp);
	cp &= ~(1 << i);
	WSR_CPENABLE(cp);
#endif
}
86 | |||
/* Data-dependency barriers are no-ops here. */
#define smp_read_barrier_depends()	do { } while(0)
#define read_barrier_depends()		do { } while(0)

/* Full/read/write memory barriers all reduce to a compiler barrier
 * in this configuration. */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

#ifdef CONFIG_SMP
/* SMP is not supported by this port yet; fail the build loudly. */
#error smp_* not defined
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

/* Assign, then issue the corresponding barrier. */
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
104 | |||
#if !defined (__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif	/* __ASSEMBLY__ */

#define prepare_to_switch()	do { } while(0)

/* Coprocessors are disabled across a context switch (clear_cpenable)
 * before handing control to the assembly switch routine. */
#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)
121 | |||
122 | /* | ||
123 | * cmpxchg | ||
124 | */ | ||
125 | |||
126 | extern __inline__ unsigned long | ||
127 | __cmpxchg_u32(volatile int *p, int old, int new) | ||
128 | { | ||
129 | __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" | ||
130 | "l32i %0, %1, 0 \n\t" | ||
131 | "bne %0, %2, 1f \n\t" | ||
132 | "s32i %3, %1, 0 \n\t" | ||
133 | "1: \n\t" | ||
134 | "wsr a15, "__stringify(PS)" \n\t" | ||
135 | "rsync \n\t" | ||
136 | : "=&a" (old) | ||
137 | : "a" (p), "a" (old), "r" (new) | ||
138 | : "a15", "memory"); | ||
139 | return old; | ||
140 | } | ||
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

/* Dispatch cmpxchg() on operand size; only 32-bit is supported. */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);

	/* Any other size fails at link time via the undefined call. */
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
	({ __typeof__(*(ptr)) _o_ = (o);				\
	   __typeof__(*(ptr)) _n_ = (n);				\
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				(unsigned long)_n_, sizeof (*(ptr)));	\
	})
162 | |||
163 | |||
164 | |||
165 | |||
166 | /* | ||
167 | * xchg_u32 | ||
168 | * | ||
169 | * Note that a15 is used here because the register allocation | ||
170 | * done by the compiler is not guaranteed and a window overflow | ||
171 | * may not occur between the rsil and wsr instructions. By using | ||
172 | * a15 in the rsil, the machine is guaranteed to be in a state | ||
173 | * where no register reference will cause an overflow. | ||
174 | */ | ||
175 | |||
176 | extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val) | ||
177 | { | ||
178 | unsigned long tmp; | ||
179 | __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" | ||
180 | "l32i %0, %1, 0 \n\t" | ||
181 | "s32i %2, %1, 0 \n\t" | ||
182 | "wsr a15, "__stringify(PS)" \n\t" | ||
183 | "rsync \n\t" | ||
184 | : "=&a" (tmp) | ||
185 | : "a" (m), "a" (val) | ||
186 | : "a15", "memory"); | ||
187 | return tmp; | ||
188 | } | ||
189 | |||
/* Test-and-set in terms of xchg(). */
#define tas(ptr) (xchg((ptr),1))

#if ( __XCC__ == 1 )

/* xt-xcc processes __inline__ differently than xt-gcc and decides to
 * insert an out-of-line copy of function __xchg.  This presents the
 * unresolved symbol at link time of __xchg_called_with_bad_pointer,
 * even though such a function would never be called at run-time.
 * xt-gcc always inlines __xchg, and optimizes away the undefined
 * bad_pointer function.
 */

#define xchg(ptr,x) xchg_u32(ptr,x)

#else /* assume xt-gcc */

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

/* Deliberately has no definition: an invalid xchg() size turns into
 * a link-time error through this symbol. */
extern void __xchg_called_with_bad_pointer(void);

/* Dispatch xchg() on operand size; only 32-bit is supported. */
static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	if (size == 4)
		return xchg_u32(ptr, x);

	__xchg_called_with_bad_pointer();
	return x;
}

#endif
228 | |||
229 | extern void set_except_vector(int n, void *addr); | ||
230 | |||
231 | static inline void spill_registers(void) | ||
232 | { | ||
233 | unsigned int a0, ps; | ||
234 | |||
235 | __asm__ __volatile__ ( | ||
236 | "movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t" | ||
237 | "mov a12, a0\n\t" | ||
238 | "rsr a13," __stringify(SAR) "\n\t" | ||
239 | "xsr a14," __stringify(PS) "\n\t" | ||
240 | "movi a0, _spill_registers\n\t" | ||
241 | "rsync\n\t" | ||
242 | "callx0 a0\n\t" | ||
243 | "mov a0, a12\n\t" | ||
244 | "wsr a13," __stringify(SAR) "\n\t" | ||
245 | "wsr a14," __stringify(PS) "\n\t" | ||
246 | :: "a" (&a0), "a" (&ps) | ||
247 | : "a2", "a3", "a12", "a13", "a14", "a15", "memory"); | ||
248 | } | ||
249 | |||
250 | #define arch_align_stack(x) (x) | ||
251 | |||
252 | #endif /* _XTENSA_SYSTEM_H */ | ||