Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--  include/asm-mips/system.h  71
1 file changed, 47 insertions, 24 deletions
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 6663efd49b27..330c4e497af3 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -17,6 +17,7 @@
 
 #include <asm/addrspace.h>
 #include <asm/cpu-features.h>
+#include <asm/dsp.h>
 #include <asm/ptrace.h>
 #include <asm/war.h>
 #include <asm/interrupt.h>
@@ -70,7 +71,7 @@
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b".  Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  */
 
 #define read_barrier_depends()	do { } while(0)
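The hunk above is only a typo fix, but the comment it touches is the crux of read_barrier_depends(): it is a no-op on MIPS and only ever orders data-dependent reads, so independent reads need a real rmb(). A minimal sketch of the case the comment describes; the writer side and its smp_wmb() are assumed, and the variable names follow the comment:

	/* writer, assumed: */
	a = 2;
	smp_wmb();
	b = 3;

	/* reader: there is no data dependency between the two loads,
	 * so read_barrier_depends() would not help here; rmb() is
	 * what makes y == 3 guarantee x == 2.
	 */
	y = b;
	rmb();
	x = a;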
@@ -154,15 +155,15 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
 struct task_struct;
 
 #define switch_to(prev,next,last) \
 do { \
-	(last) = resume(prev, next, next->thread_info); \
+	if (cpu_has_dsp) \
+		__save_dsp(prev); \
+	(last) = resume(prev, next, next->thread_info); \
+	if (cpu_has_dsp) \
+		__restore_dsp(current); \
 } while(0)
 
-#define ROT_IN_PIECES \
-"	.set	noreorder	\n" \
-"	.set	reorder		\n"
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
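The new switch_to() saves the DSP-ASE accumulator state (via the newly included <asm/dsp.h>) before resume() and restores it afterwards. A hedged reading of why the restore targets current rather than next: execution only returns from resume() once this task is scheduled back in, by which point the macro's next names the task from the earlier invocation, while current is whichever task is actually running now. Sketched expansion on a DSP-capable CPU:

	if (cpu_has_dsp)
		__save_dsp(prev);	/* outgoing task's DSP accumulators */
	(last) = resume(prev, next, next->thread_info);
	/* we resume here only when this task runs again; "next" is
	 * stale by then, so restore the state of whoever runs now */
	if (cpu_has_dsp)
		__restore_dsp(current);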
@@ -171,14 +172,17 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %3			# xchg_u32	\n"
+		"	.set	mips0					\n"
 		"	move	%2, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-		ROT_IN_PIECES
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
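The .set mips3 / .set mips0 bracketing being added exists because ll/sc (and the 64-bit lld/scd below) are not MIPS I instructions: when the kernel is assembled for a 32-bit ISA the assembler has to be raised to mips3 around them, and .set mips0 drops back to the build ISA for ordinary instructions such as the move. A minimal standalone sketch of the same bracketing; the helper name is hypothetical and not part of this patch:

	static inline void atomic_inc_sketch(volatile int *v)
	{
		int tmp;

		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	ll	%0, %2			\n"	/* load-linked */
		"	addiu	%0, 1			\n"
		"	sc	%0, %1			\n"	/* store-conditional */
		"	beqz	%0, 1b			\n"	/* retry if it failed */
		"	.set	mips0			\n"
		: "=&r" (tmp), "=m" (*v)
		: "R" (*v));
	}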
@@ -186,13 +190,17 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %3			# xchg_u32	\n"
+		"	.set	mips0					\n"
 		"	move	%2, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
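The two __xchg_u32 branches differ only in beqzl versus beqz: the branch-likely form is used under R10000_LLSC_WAR for early R10000 silicon whose errata require it. Callers see none of this; they go through the xchg() wrapper defined elsewhere in this header. A hypothetical caller, with names invented for illustration:

	static volatile int lock_word;

	static inline int try_take(void)
	{
		/* atomically store 1 and fetch the previous value */
		return xchg(&lock_word, 1) == 0;	/* nonzero if we got it */
	}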
@@ -217,14 +225,15 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %3			# xchg_u64	\n"
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-		ROT_IN_PIECES
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
@@ -232,6 +241,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		unsigned long dummy;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %3			# xchg_u64	\n"
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
@@ -239,6 +249,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
+		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
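Every variant ends the ll/sc sequence with a sync under CONFIG_SMP, so accesses issued after a successful exchange cannot be reordered before it on SMP. A hedged sketch of a caller relying on that ordering; the names and the xchg() wrapper usage are assumptions for illustration:

	extern volatile int owner;

	static int claim(int my_id)
	{
		/*
		 * The trailing sync orders the exchange before any later
		 * loads/stores, so once the word is ours it is safe to
		 * touch what the previous owner published.
		 */
		return xchg(&owner, my_id) == 0;
	}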
@@ -286,34 +297,41 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
 		"	bne	%0, %z3, 2f				\n"
+		"	.set	mips0					\n"
 		"	move	$1, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		ROT_IN_PIECES
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
 		"2:							\n"
-		"	.set	at					\n"
+		"	.set	pop					\n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
 		"	bne	%0, %z3, 2f				\n"
+		"	.set	mips0					\n"
 		"	move	$1, %z4					\n"
+		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
 		"2:							\n"
-		"	.set	pop					\n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
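These sequences use $1, the assembler temporary, by hand, hence .set noat; swapping the closing .set at for a .set push / .set pop pair restores whatever assembler state was in effect instead of unconditionally re-enabling at, which composes cleanly with the new mips3/mips0 toggling. At the call site this all hides behind the cmpxchg() macro defined further down in this diff; a hypothetical lock-free update built on it:

	static int counter;

	static void inc(void)
	{
		int old;

		do {
			old = counter;
			/* retry if another CPU changed it in between */
		} while (cmpxchg(&counter, old, old + 1) != old);
	}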
@@ -338,24 +356,27 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 
 	if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
 		"	bne	%0, %z3, 2f				\n"
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		ROT_IN_PIECES
#ifdef CONFIG_SMP
 		"	sync						\n"
#endif
 		"2:							\n"
-		"	.set	at					\n"
+		"	.set	pop					\n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
+		"	.set	push					\n"
 		"	.set	noat					\n"
+		"	.set	mips3					\n"
 		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
 		"	bne	%0, %z3, 2f				\n"
 		"	move	$1, %z4					\n"
@@ -365,7 +386,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		"	sync						\n"
#endif
 		"2:							\n"
-		"	.set	at					\n"
+		"	.set	pop					\n"
 		: "=&r" (retval), "=m" (*m)
 		: "R" (*m), "Jr" (old), "Jr" (new)
 		: "memory");
@@ -406,18 +427,20 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
 
 #define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
 
+extern void set_handler (unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
+extern void *set_vi_handler (int n, void *addr);
+extern void *set_vi_srs_handler (int n, void *addr, int regset);
 extern void *set_except_vector(int n, void *addr);
 extern void per_cpu_trap_init(void);
 
-extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
-	const char *func, unsigned long line);
-extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
-	const char *func, unsigned long line);
+extern NORET_TYPE void die(const char *, struct pt_regs *);
 
-#define die(msg, regs) \
-	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
-#define die_if_kernel(msg, regs) \
-	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+	if (unlikely(!user_mode(regs)))
+		die(str, regs);
+}
 
 extern int stop_a_enabled;
 
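The old __FILE__/__FUNCTION__/__LINE__ macro pair gives way to a plain die() plus an inline die_if_kernel() that only oopses when the trap came from kernel mode and simply returns for user mode. A hypothetical trap handler using it; the handler name and the signalling path are invented for illustration:

	asmlinkage void do_bogus_trap(struct pt_regs *regs)
	{
		die_if_kernel("bogus trap in kernel mode", regs);

		/* user mode: die_if_kernel() returned, signal the task */
		force_sig(SIGILL, current);
	}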