Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--  include/asm-mips/system.h | 34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 4097fac5ac3c..261f71d16a07 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -155,6 +155,37 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
 struct task_struct;
 
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management.  We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define switch_to(prev,next,last) \
+do { \
+	if (cpu_has_fpu && \
+	    (prev->thread.mflags & MF_FPUBOUND) && \
+	    (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+		prev->thread.mflags &= ~MF_FPUBOUND; \
+		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+	} \
+	if (cpu_has_dsp) \
+		__save_dsp(prev); \
+	next->thread.emulated_fp = 0; \
+	(last) = resume(prev, next, next->thread_info); \
+	if (cpu_has_dsp) \
+		__restore_dsp(current); \
+} while(0)
+
+#else
 #define switch_to(prev,next,last) \
 do { \
 	if (cpu_has_dsp) \
@@ -163,6 +194,7 @@ do { \
 	if (cpu_has_dsp) \
 		__restore_dsp(current); \
 } while(0)
+#endif
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
@@ -440,8 +472,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
 extern void set_handler (unsigned long offset, void *addr, unsigned long len);
 extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
 extern void *set_vi_handler (int n, void *addr);
-extern void *set_vi_srs_handler (int n, void *addr, int regset);
 extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
 extern void per_cpu_trap_init(void);
 
 extern NORET_TYPE void die(const char *, struct pt_regs *);
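The core of the first hunk is the affinity check the new switch_to() performs on the
outgoing task: if the task had been bound to an FPU-capable "CPU" (MF_FPUBOUND) but did
not touch the FPU during its last slice (CU1 clear in the saved Status register), the
restriction is dropped and the user's original affinity mask is restored. The following
standalone sketch only models that decision for illustration; the struct fields, bit
values and the fpaff_relax_affinity() helper are simplified stand-ins, not the kernel's
actual types (which use cpumask_t, KSTK_STATUS() and the MIPS ST0_CU1/MF_FPUBOUND
definitions).

/*
 * Simplified, userspace-compilable model of the CONFIG_MIPS_MT_FPAFF
 * check added to switch_to() in this patch.  All names and bit values
 * below are illustrative stand-ins, not the real kernel definitions.
 */
#include <stdio.h>

#define MF_FPUBOUND 0x01u        /* stand-in: task pinned to an FPU-capable CPU */
#define ST0_CU1     0x20000000u  /* CP0 Status bit 29: coprocessor 1 (FPU) usable */

struct model_task {
	unsigned int mflags;            /* thread flags (MF_FPUBOUND, ...) */
	unsigned int kstk_status;       /* saved CP0 Status for the task */
	unsigned int cpus_allowed;      /* current affinity mask */
	unsigned int user_cpus_allowed; /* affinity the user originally requested */
};

/* Mirrors the check switch_to() performs on the task being switched out. */
static void fpaff_relax_affinity(struct model_task *prev, int cpu_has_fpu)
{
	if (cpu_has_fpu &&
	    (prev->mflags & MF_FPUBOUND) &&
	    !(prev->kstk_status & ST0_CU1)) {
		/* No FP use in the last slice: lift the FPU-only restriction. */
		prev->mflags &= ~MF_FPUBOUND;
		prev->cpus_allowed = prev->user_cpus_allowed;
	}
}

int main(void)
{
	struct model_task t = {
		.mflags = MF_FPUBOUND,
		.kstk_status = 0,         /* CU1 clear: FPU not used last slice */
		.cpus_allowed = 0x2,      /* currently pinned to the FPU-capable CPU */
		.user_cpus_allowed = 0xf, /* user originally allowed all four CPUs */
	};

	fpaff_relax_affinity(&t, 1);
	printf("cpus_allowed after switch: 0x%x\n", t.cpus_allowed); /* prints 0xf */
	return 0;
}

Note that, as the comment in the patch says, no set_cpus_allowed() call is needed at
this point: the CPU is already switching to a different thread, so prompt migration of
the outgoing task is unnecessary.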