Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--  include/asm-mips/system.h  295
1 file changed, 14 insertions, 281 deletions
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 46bdb3f566f9..90e4b403f531 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -17,6 +17,7 @@
 
 #include <asm/addrspace.h>
 #include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 #include <asm/cpu-features.h>
 #include <asm/dsp.h>
 #include <asm/war.h>
@@ -46,10 +47,12 @@ struct task_struct;
 
 #define __mips_mt_fpaff_switch_to(prev) \
 do { \
+	struct thread_info *__prev_ti = task_thread_info(prev); \
+	\
 	if (cpu_has_fpu && \
-	    (prev->thread.mflags & MF_FPUBOUND) && \
+	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
 	    (!(KSTK_STATUS(prev) & ST0_CU1))) { \
-		prev->thread.mflags &= ~MF_FPUBOUND; \
+		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
 		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
 	} \
 	next->thread.emulated_fp = 0; \
@@ -59,7 +62,7 @@ do { \
 #define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
 #endif
 
-#define switch_to(prev,next,last) \
+#define switch_to(prev, next, last) \
 do { \
 	__mips_mt_fpaff_switch_to(prev); \
 	if (cpu_has_dsp) \
@@ -71,16 +74,6 @@ do { \
 		write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
@@ -127,7 +120,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -175,7 +168,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -200,273 +193,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 	return x;
 }
 
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
-	unsigned long new)
-{
-	__u32 retval;
-
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	.set	mips0				\n"
-		"	move	$1, %z4				\n"
-		"	.set	mips3				\n"
-		"	sc	$1, %1				\n"
-		"	beqzl	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else if (cpu_has_llsc) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	.set	mips0				\n"
-		"	move	$1, %z4				\n"
-		"	.set	mips3				\n"
-		"	sc	$1, %1				\n"
-		"	beqz	$1, 3f				\n"
-		"2:						\n"
-		"	.subsection 2				\n"
-		"3:	b	1b				\n"
-		"	.previous				\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		if (retval == old)
-			*m = new;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	smp_mb();
-
-	return retval;
-}
-
-static inline unsigned long __cmpxchg_u32_local(volatile int * m,
-	unsigned long old, unsigned long new)
-{
-	__u32 retval;
-
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	.set	mips0				\n"
-		"	move	$1, %z4				\n"
-		"	.set	mips3				\n"
-		"	sc	$1, %1				\n"
-		"	beqzl	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else if (cpu_has_llsc) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	.set	mips0				\n"
-		"	move	$1, %z4				\n"
-		"	.set	mips3				\n"
-		"	sc	$1, %1				\n"
-		"	beqz	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		retval = *m;
-		if (retval == old)
-			*m = new;
-		local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	return retval;
-}
-
-#ifdef CONFIG_64BIT
-static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
-	unsigned long new)
-{
-	__u64 retval;
-
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	move	$1, %z4				\n"
-		"	scd	$1, %1				\n"
-		"	beqzl	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else if (cpu_has_llsc) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	move	$1, %z4				\n"
-		"	scd	$1, %1				\n"
-		"	beqz	$1, 3f				\n"
-		"2:						\n"
-		"	.subsection 2				\n"
-		"3:	b	1b				\n"
-		"	.previous				\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		if (retval == old)
-			*m = new;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	smp_mb();
-
-	return retval;
-}
-
-static inline unsigned long __cmpxchg_u64_local(volatile int * m,
-	unsigned long old, unsigned long new)
-{
-	__u64 retval;
-
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	move	$1, %z4				\n"
-		"	scd	$1, %1				\n"
-		"	beqzl	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else if (cpu_has_llsc) {
-		__asm__ __volatile__(
-		"	.set	push				\n"
-		"	.set	noat				\n"
-		"	.set	mips3				\n"
-		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
-		"	bne	%0, %z3, 2f			\n"
-		"	move	$1, %z4				\n"
-		"	scd	$1, %1				\n"
-		"	beqz	$1, 1b				\n"
-		"2:						\n"
-		"	.set	pop				\n"
-		: "=&r" (retval), "=R" (*m)
-		: "R" (*m), "Jr" (old), "Jr" (new)
-		: "memory");
-	} else {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		retval = *m;
-		if (retval == old)
-			*m = new;
-		local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	return retval;
-}
-
-#else
-extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
-	volatile int * m, unsigned long old, unsigned long new);
-#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
-extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
-	volatile int * m, unsigned long old, unsigned long new);
-#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
-#endif
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
-	unsigned long new, int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32(ptr, old, new);
-	case 8:
-		return __cmpxchg_u64(ptr, old, new);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void * ptr,
-	unsigned long old, unsigned long new, int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32_local(ptr, old, new);
-	case 8:
-		return __cmpxchg_u64_local(ptr, old, new);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr,old,new) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), \
-		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,old,new) \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
-		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
+#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-extern void set_handler (unsigned long offset, void *addr, unsigned long len);
-extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
 
 typedef void (*vi_handler_t)(void);
-extern void *set_vi_handler (int n, vi_handler_t addr);
+extern void *set_vi_handler(int n, vi_handler_t addr);
 
 extern void *set_except_vector(int n, void *addr);
 extern unsigned long ebase;
@@ -480,6 +213,6 @@ extern int stop_a_enabled;
  */
 #define __ARCH_WANT_UNLOCKED_CTXSW
 
-#define arch_align_stack(x) (x)
+extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif /* _ASM_SYSTEM_H */
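
For illustration only (not part of this commit): a minimal sketch of how callers typically use the cmpxchg() interface that this header now picks up from <asm/cmpxchg.h>. The helper name below is hypothetical; the assumption is only the standard semantics that cmpxchg(ptr, old, new) returns the value previously stored at *ptr.

/* Hypothetical example: lock-free add built on cmpxchg().
 * Retry until the compare-and-swap observes the value we read. */
static inline int counter_add(volatile int *counter, int delta)
{
	int old;

	do {
		old = *counter;		/* snapshot current value */
	} while (cmpxchg(counter, old, old + delta) != old);

	return old + delta;		/* value we installed */
}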