-rw-r--r--	arch/mips/kernel/mips-mt.c	40
-rw-r--r--	arch/mips/kernel/traps.c	59
-rw-r--r--	include/asm-mips/system.h	12
3 files changed, 56 insertions, 55 deletions
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index ba01800b6018..b1b994dd41db 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -109,7 +109,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	read_unlock(&tasklist_lock);
 
 	/* Compute new global allowed CPU set if necessary */
-	if( (p->thread.mflags & MF_FPUBOUND)
+	if ((p->thread.mflags & MF_FPUBOUND)
 	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
 		retval = set_cpus_allowed(p, effective_mask);
@@ -195,27 +195,31 @@ void mips_mt_regdump(unsigned long mvpctl)
 	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	printk("-- per-VPE State --\n");
-	for(i = 0; i < nvpe; i++) {
-		for(tc = 0; tc < ntc; tc++) {
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
 			settc(tc);
-			if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
 				printk("  VPE %d\n", i);
-				printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-				printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
-				printk("   VPE%d.Status : %08lx\n",
-					i, read_vpe_c0_status());
-				printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-				printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
-				printk("   VPE%d.Config7 : %08lx\n",
-					i, read_vpe_c0_config7());
-				break; /* Next VPE */
+				printk("   VPEControl : %08lx\n",
+				       read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n",
+				       read_vpe_c0_vpeconf0());
+				printk("   VPE%d.Status : %08lx\n",
+				       i, read_vpe_c0_status());
+				printk("   VPE%d.EPC : %08lx\n",
+				       i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n",
+				       i, read_vpe_c0_cause());
+				printk("   VPE%d.Config7 : %08lx\n",
+				       i, read_vpe_c0_config7());
+				break; /* Next VPE */
+			}
 		}
-	}
 	}
 	printk("-- per-TC State --\n");
-	for(tc = 0; tc < ntc; tc++) {
+	for (tc = 0; tc < ntc; tc++) {
 		settc(tc);
-		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
 			/* Are we dumping ourself?  */
 			haltval = 0; /* Then we're not halted, and mustn't be */
 			tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -384,7 +388,7 @@ void mips_mt_set_cpuoptions(void)
 		mt_fpemul_threshold = fpaff_threshold;
 	} else {
 		mt_fpemul_threshold =
-			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
 	}
 	printk("FPU Affinity set after %ld emulations\n",
 		mt_fpemul_threshold);
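
[Note on the last hunk: loops_per_jiffy / (500000 / HZ) is the kernel's
BogoMIPS calculation, so the default emulation threshold scales with the
measured CPU speed and is normalized by HZ. A minimal userspace sketch of
the same arithmetic follows; FPUSEFACTOR's real value is defined in
mips-mt.c and not shown in this diff, so the numbers below are
illustrative only.]

#include <stdio.h>

#define FPUSEFACTOR	333	/* assumed for illustration */
#define HZ		100	/* assumed kernel config */

int main(void)
{
	unsigned long loops_per_jiffy = 1998848;	/* ~400 BogoMIPS CPU */
	/* loops_per_jiffy / (500000 / HZ) is exactly the BogoMIPS value */
	unsigned long bogomips = loops_per_jiffy / (500000 / HZ);
	unsigned long threshold = (FPUSEFACTOR * bogomips) / HZ;

	printf("FPU Affinity set after %lu emulations\n", threshold);
	return 0;
}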
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7e9cb5b1b4a7..c598e890a880 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -752,6 +752,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	force_sig(SIGILL, current);
 }
 
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			cpus_and(tmask, current->thread.user_cpus_allowed,
+				 mt_fpu_cpumask);
+			set_cpus_allowed(current, tmask);
+			current->thread.mflags |= MF_FPUBOUND;
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
@@ -785,36 +812,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 						       &current->thread.fpu, 0);
 			if (sig)
 				force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-			else {
-			/*
-			 * MIPS MT processors may have fewer FPU contexts
-			 * than CPU threads. If we've emulated more than
-			 * some threshold number of instructions, force
-			 * migration to a "CPU" that has FP support.
-			 */
-			if(mt_fpemul_threshold > 0
-			&& ((current->thread.emulated_fp++
-				> mt_fpemul_threshold))) {
-			  /*
-			   * If there's no FPU present, or if the
-			   * application has already restricted
-			   * the allowed set to exclude any CPUs
-			   * with FPUs, we'll skip the procedure.
-			   */
-			  if (cpus_intersects(current->cpus_allowed,
-						mt_fpu_cpumask)) {
-			    cpumask_t tmask;
-
-			    cpus_and(tmask,
-					current->thread.user_cpus_allowed,
-					mt_fpu_cpumask);
-			    set_cpus_allowed(current, tmask);
-			    current->thread.mflags |= MF_FPUBOUND;
-			  }
-			}
-			}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+			else
+				mt_ase_fp_affinity();
 		}
 
 		return;
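
[Note: the two traps.c hunks are a pure code motion — the FPU-affinity
logic leaves do_cpu() and becomes mt_ase_fp_affinity(), with the #ifdef
now confined to the helper's body so the call site stays unconditional.
The toy model below mirrors the control flow with plain bitmasks; every
name in it is invented for illustration and none of it is kernel API.]

#include <stdio.h>

#define MT_FPU_CPUMASK	0x3UL	/* assumed: only CPUs 0-1 have FPUs */

struct toy_task {
	unsigned long cpus_allowed;	/* stands in for cpumask_t */
	unsigned long emulated_fp;
	int fpu_bound;			/* stands in for MF_FPUBOUND */
};

static void toy_fp_affinity(struct toy_task *t, unsigned long threshold)
{
	if (threshold > 0 && t->emulated_fp++ > threshold) {
		/* Mirrors the cpus_intersects() guard: do nothing if no
		 * allowed CPU has an FPU at all. */
		if (t->cpus_allowed & MT_FPU_CPUMASK) {
			t->cpus_allowed &= MT_FPU_CPUMASK;
			t->fpu_bound = 1;
		}
	}
}

int main(void)
{
	struct toy_task t = { 0xfUL, 0, 0 };	/* allowed on CPUs 0-3 */
	int i;

	for (i = 0; i < 5; i++)		/* emulate 5 FP instructions */
		toy_fp_affinity(&t, 2);
	printf("allowed=%#lx bound=%d\n", t.cpus_allowed, t.fpu_bound);
	return 0;
}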
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index bb0b289dbc9e..9b3a8dd2c3db 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -44,7 +44,7 @@ struct task_struct;
  * different thread.
  */
 
-#define switch_to(prev,next,last)					\
+#define __mips_mt_fpaff_switch_to(prev)					\
 do {									\
 	if (cpu_has_fpu &&						\
 	    (prev->thread.mflags & MF_FPUBOUND) &&			\
@@ -52,24 +52,22 @@ do {									\
 		prev->thread.mflags &= ~MF_FPUBOUND;			\
 		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
 	}								\
-	if (cpu_has_dsp)						\
-		__save_dsp(prev);					\
 	next->thread.emulated_fp = 0;					\
-	(last) = resume(prev, next, task_thread_info(next));		\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 } while(0)
 
 #else
+#define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0)
+#endif
+
 #define switch_to(prev,next,last)					\
 do {									\
+	__mips_mt_fpaff_switch_to(prev);				\
 	if (cpu_has_dsp)						\
 		__save_dsp(prev);					\
 	(last) = resume(prev, next, task_thread_info(next));		\
 	if (cpu_has_dsp)						\
 		__restore_dsp(current);					\
 } while(0)
-#endif
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
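
[Note: this hunk de-duplicates switch_to() — previously the whole macro
was written twice, once per #ifdef branch, repeating the DSP save/restore
and the resume() call. Only the FPAFF prologue differs, so it is split
into __mips_mt_fpaff_switch_to() with a no-op fallback that still
evaluates its argument. The pattern in miniature, with invented names:]

#ifdef CONFIG_FEATURE
#define feature_prologue(p)					\
do {								\
	/* feature-specific fixups on p go here */		\
} while (0)
#else
/* The fallback still evaluates p, so the expansion cannot trigger an
   unused-variable warning and argument side effects stay consistent. */
#define feature_prologue(p) do { (p); } while (0)
#endif

#define switch_over(p, n)					\
do {								\
	feature_prologue(p);					\
	/* common work shared by both configurations */		\
} while (0)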