author     Paul Burton <paul.burton@imgtec.com>    2014-07-30 03:53:20 -0400
committer  Ralf Baechle <ralf@linux-mips.org>      2014-08-01 18:06:44 -0400
commit     c9017757c532d48bf43d6e7d3b7282443ad4207b (patch)
tree       169346d3830d35c20c8cce14cb9c91cb3bfc3b7a
parent     33c771ba5c5d067f85a5a6c4b11047219b5b8f4e (diff)
MIPS: init upper 64b of vector registers when MSA is first used
When a task first makes use of MSA we need to ensure that the upper 64b
of the vector registers are set to some value such that no information
can be leaked to it from the previous task to use MSA context on the
CPU. The architecture formerly specified that these bits would be
cleared to 0 when a scalar FP instruction wrote to the aliased FP
registers, which would have implicitly handled this as the kernel
restored scalar FP context. However, more recent versions of the
specification now state that the value of the bits in such cases is
unpredictable. Initialise them explicitly to be sure, and set all the
bits to 1 rather than 0 for consistency with the least significant 64b.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/7497/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/include/asm/asmmacro.h   20
-rw-r--r--  arch/mips/include/asm/msa.h         1
-rw-r--r--  arch/mips/kernel/r4k_switch.S       5
-rw-r--r--  arch/mips/kernel/traps.c           39
4 files changed, 56 insertions(+), 9 deletions(-)
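The leak described in the commit message exists because each MSA vector register is 128b wide while only its least significant 64b alias a scalar FP register, so restoring scalar FP context says nothing about the upper half. A minimal userspace C sketch of that situation (the struct layout and the leftover value are purely illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* One MSA vector register, modelled as two 64b halves: "lo" aliases the
 * scalar FP register the kernel already saves/restores, "hi" does not. */
struct msa_vreg {
        uint64_t lo;
        uint64_t hi;
};

int main(void)
{
        /* Pretend the previous task left data behind in the upper half. */
        struct msa_vreg w0 = { .lo = 0, .hi = 0x0123456789abcdefULL };

        /* What the patch arranges on first MSA use: overwrite the upper
         * 64b with all ones so nothing from the previous owner remains. */
        w0.hi = ~(uint64_t)0;

        printf("w0.hi after init: %016llx\n", (unsigned long long)w0.hi);
        return 0;
}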
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 4986bf5ffd29..cd9a98bc8f60 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -426,4 +426,24 @@
 	ld_d	31, THREAD_FPR31, \thread
 	.endm
 
+	.macro	msa_init_upper wd
+#ifdef CONFIG_64BIT
+	insert_d \wd, 1
+#else
+	insert_w \wd, 2
+	insert_w \wd, 3
+#endif
+	.if	31-\wd
+	msa_init_upper (\wd+1)
+	.endif
+	.endm
+
+	.macro	msa_init_all_upper
+	.set	push
+	.set	noat
+	not	$1, zero
+	msa_init_upper 0
+	.set	pop
+	.endm
+
 #endif /* _ASM_ASMMACRO_H */
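The .if 31-\wd / msa_init_upper (\wd+1) recursion above unrolls at assembly time into one init sequence per vector register w0..w31; msa_init_all_upper first loads $at with all ones (not $1, zero), then each register gets either one insert.d into doubleword element 1 on 64-bit kernels or two insert.w writes into word elements 2 and 3 on 32-bit kernels. A rough C model of the net effect, assuming a flat array in place of the real register file:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define NUM_VREGS 32

/* Each MSA register modelled as four 32b words or two 64b doublewords. */
union vreg {
        uint32_t w[4];
        uint64_t d[2];
};

static void msa_init_all_upper(union vreg regs[NUM_VREGS], int is_64bit)
{
        const uint64_t ones = ~(uint64_t)0;             /* "not $1, zero" */

        for (int wd = 0; wd < NUM_VREGS; wd++) {
                if (is_64bit) {
                        regs[wd].d[1] = ones;           /* insert.d into element 1 */
                } else {
                        regs[wd].w[2] = (uint32_t)ones; /* insert.w into element 2 */
                        regs[wd].w[3] = (uint32_t)ones; /* insert.w into element 3 */
                }
        }
}

int main(void)
{
        union vreg regs[NUM_VREGS];
        memset(regs, 0, sizeof(regs));
        msa_init_all_upper(regs, 1);
        printf("w5 upper doubleword: %016llx\n",
               (unsigned long long)regs[5].d[1]);
        return 0;
}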
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index e80e85c1334f..fe25a17bc783 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -16,6 +16,7 @@
 
 extern void _save_msa(struct task_struct *);
 extern void _restore_msa(struct task_struct *);
+extern void _init_msa_upper(void);
 
 static inline void enable_msa(void)
 {
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 1a1aef04312d..4c4ec1812420 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -144,6 +144,11 @@ LEAF(_restore_msa)
 	jr	ra
 	END(_restore_msa)
 
+LEAF(_init_msa_upper)
+	msa_init_all_upper
+	jr	ra
+	END(_init_msa_upper)
+
 #endif
 
 /*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 649c151fe1db..1ed84577d3e3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1089,13 +1089,15 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 
 static int enable_restore_fp_context(int msa)
 {
-	int err, was_fpu_owner;
+	int err, was_fpu_owner, prior_msa;
 
 	if (!used_math()) {
 		/* First time FP context user. */
 		err = init_fpu();
-		if (msa && !err)
+		if (msa && !err) {
 			enable_msa();
+			_init_msa_upper();
+		}
 		if (!err)
 			set_used_math();
 		return err;
@@ -1147,18 +1149,37 @@ static int enable_restore_fp_context(int msa)
 	/*
 	 * If this is the first time that the task is using MSA and it has
 	 * previously used scalar FP in this time slice then we already have
-	 * FP context which we shouldn't clobber.
+	 * FP context which we shouldn't clobber. We do however need to clear
+	 * the upper 64b of each vector register so that this task has no
+	 * opportunity to see data left behind by another.
 	 */
-	if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+	if (!prior_msa && was_fpu_owner) {
+		_init_msa_upper();
 		return 0;
+	}
 
-	/* We need to restore the vector context. */
-	restore_msa(current);
+	if (!prior_msa) {
+		/*
+		 * Restore the least significant 64b of each vector register
+		 * from the existing scalar FP context.
+		 */
+		_restore_fp(current);
 
-	/* Restore the scalar FP control & status register */
-	if (!was_fpu_owner)
-		asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+		/*
+		 * The task has not formerly used MSA, so clear the upper 64b
+		 * of each vector register such that it cannot see data left
+		 * behind by another task.
+		 */
+		_init_msa_upper();
+	} else {
+		/* We need to restore the vector context. */
+		restore_msa(current);
+
+		/* Restore the scalar FP control & status register */
+		if (!was_fpu_owner)
+			asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+	}
 	return 0;
 }
 
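The rewritten MSA branch of enable_restore_fp_context() now distinguishes three cases: the task still owns the FPU but has never had live MSA context (only the stale upper 64b need initialising), the task has saved scalar FP context but no MSA context (restore the lower 64b, then initialise the upper 64b), and the task already has full MSA context (restore the whole vectors, plus FCSR if the FPU was lost). A condensed sketch of that decision tree, with hypothetical stubs standing in for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers touched by the patch. */
static void init_msa_upper(void) { puts("_init_msa_upper(): upper 64b := ~0"); }
static void restore_fp(void)     { puts("_restore_fp(): restore lower 64b"); }
static void do_restore_msa(void) { puts("restore_msa(): restore full 128b vectors"); }
static void restore_fcsr(void)   { puts("ctc1: restore FP control & status register"); }

/* Mirrors the control flow added to the MSA path; the two flags stand in
 * for was_fpu_owner and test_and_set_thread_flag(TIF_MSA_CTX_LIVE). */
static void msa_first_use_path(bool was_fpu_owner, bool prior_msa)
{
        if (!prior_msa && was_fpu_owner) {
                /* Live FP registers already hold the lower 64b; only the
                 * upper halves are stale. */
                init_msa_upper();
                return;
        }

        if (!prior_msa) {
                /* No MSA context yet: pull in the scalar FP halves, then
                 * clear the upper halves so nothing leaks between tasks. */
                restore_fp();
                init_msa_upper();
        } else {
                /* Full vector context exists: restore all of it. */
                do_restore_msa();
                if (!was_fpu_owner)
                        restore_fcsr();
        }
}

int main(void)
{
        msa_first_use_path(false, false);  /* first MSA use, FPU not owned */
        return 0;
}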