Diffstat (limited to 'arch/mips/include/asm/system.h')
-rw-r--r--  arch/mips/include/asm/system.h | 235
1 file changed, 0 insertions(+), 235 deletions(-)
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
deleted file mode 100644
index 6018c80ce37a..000000000000
--- a/arch/mips/include/asm/system.h
+++ /dev/null
@@ -1,235 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/watch.h>
#include <asm/war.h>


/*
 * switch_to(prev, next, last) switches from task 'prev' to task 'next',
 * storing in 'last' the task we actually switched away from; the
 * low-level register and stack switch itself is done by resume().
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down.  If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
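/*
 * Note: the macro below also clears next->thread.emulated_fp, and so
 * relies on 'next' being in scope at its expansion site in switch_to().
 */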

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define __clear_software_ll_bit()					\
do {									\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)
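
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * on cores without hardware ll/sc the kernel emulates the pair, and
 * clearing ll_bit on a context switch breaks any link established by an
 * emulated ll, so a subsequent emulated sc fails and the caller retries.
 * Simplified emulator logic might look like:
 */
static inline int __example_emulated_sc(unsigned int *addr, unsigned int val)
{
	if (!ll_bit)		/* link broken by a context switch */
		return 0;	/* sc reports failure, caller retries */
	*addr = val;
	return 1;
}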

#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	__clear_software_ll_bit();					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)
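
/*
 * Note: cpu_has_userlocal refers to the MIPS UserLocal register, which
 * user space can read cheaply via the rdhwr instruction; reloading it
 * here keeps the TLS pointer in sync with the incoming task.
 */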

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	ll	%0, %3		# xchg_u32	\n"
			"	.set	mips0				\n"
			"	move	%2, %z4				\n"
			"	.set	mips3				\n"
			"	sc	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}
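
/*
 * Note on the two ll/sc paths above: the R10000_LLSC_WAR variant keeps the
 * retry branch inside the asm using the branch-likely beqzl, as required by
 * the workaround for early R10000 ll/sc errata; the plain variant retries
 * from C and so avoids the deprecated branch-likely instruction.
 */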

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	lld	%0, %3		# xchg_u64	\n"
			"	move	%2, %z4				\n"
			"	scd	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
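
/*
 * On 32-bit kernels there is no 64-bit xchg; the undefined extern above
 * turns any use of __xchg_u64 into a deliberate link-time error rather
 * than silently truncating a 64-bit exchange.
 */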

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}

	return x;
}
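
/*
 * For sizes other than 4 or 8, __xchg() falls through and returns x
 * unchanged; the public xchg() macro below rejects such sizes at compile
 * time, so that path is never reached from well-formed callers.
 */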

#define xchg(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
									\
	((__typeof__(*(ptr)))						\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
})
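
/*
 * Usage sketch (illustrative only, names are hypothetical): xchg() returns
 * the previous value of the word, so a lock-free "take ownership" test can
 * be written as below.  The BUILD_BUG_ON above permits only operand sizes
 * whose bits fall within 0xc, i.e. 4- and 8-byte objects.
 */
static inline int __example_try_claim(volatile unsigned int *flag)
{
	/* Atomically set the flag; we own it iff it was previously clear. */
	return xchg(flag, 1U) == 0;
}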

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_SYSTEM_H */