author     James Hogan <james.hogan@imgtec.com>   2015-07-31 11:29:38 -0400
committer  Ralf Baechle <ralf@linux-mips.org>     2015-08-03 04:29:11 -0400
commit     3aff47c062b944a5e1f9af56a37a23f5295628fc (patch)
tree       3e06f91bc72e2c9b99927b0435d44344dfdd1780
parent     247bfb65d731350093f5d1a0a8b3d65e49c17baa (diff)
MIPS: Flush RPS on kernel entry with EVA
When EVA is enabled, flush the Return Prediction Stack (RPS) present on
some MIPS cores on entry to the kernel from user mode.

This is important specifically for interAptiv with EVA enabled,
otherwise kernel mode RPS mispredicts may trigger speculative fetches
of user return addresses, which may be sensitive in the kernel address
space due to EVA's overlapping user/kernel address spaces.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: <stable@vger.kernel.org> # 3.15.x-
Patchwork: https://patchwork.linux-mips.org/patch/10812/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/include/asm/stackframe.h  25
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
 		.set	noreorder
 		bltz	k0, 8f
 		 move	k1, sp
+#ifdef CONFIG_EVA
+		/*
+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
+		 *
+		 * The RPS isn't automatically flushed when exceptions are
+		 * taken, which can result in kernel mode speculative accesses
+		 * to user addresses if the RPS mispredicts. That's harmless
+		 * when user and kernel share the same address space, but with
+		 * EVA the same user segments may be unmapped to kernel mode,
+		 * even containing sensitive MMIO regions or invalid memory.
+		 *
+		 * This can happen when the kernel sets the return address to
+		 * ret_from_* and jr's to the exception handler, which looks
+		 * more like a tail call than a function call. If nested calls
+		 * don't evict the last user address in the RPS, it will
+		 * mispredict the return and fetch from a user controlled
+		 * address into the icache.
+		 *
+		 * More recent EVA-capable cores with MAAR to restrict
+		 * speculative accesses aren't affected.
+		 */
+		MFC0	k0, CP0_ENTRYHI
+		MTC0	k0, CP0_ENTRYHI
+#endif
 		.set	reorder
 		/* Called from user mode, new stack. */
 		get_saved_sp
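
For readers who want to experiment with this flush outside the low-level
exception entry path, the same EntryHi write-back can be sketched with the
kernel's C register accessors from <asm/mipsregs.h>. This is only an
illustrative sketch, not part of the patch: the helper name is hypothetical,
and the real fix has to remain raw MFC0/MTC0 in stackframe.h because it runs
on exception entry before any C code, with only k0/k1 available.

	#include <asm/mipsregs.h>

	/* Hypothetical helper, for illustration only. */
	static inline void flush_rps_via_entryhi(void)
	{
		/* Read the current EntryHi value... */
		unsigned long entryhi = read_c0_entryhi();

		/*
		 * ...and write it straight back unchanged. On interAptiv
		 * the EntryHi write has the side effect of flushing the
		 * Return Prediction Stack. Any CP0 hazard handling is
		 * omitted from this sketch for brevity.
		 */
		write_c0_entryhi(entryhi);
	}

As the in-patch comment notes, rewriting EntryHi was chosen over toggling
Config7.RPS because it is faster and more portable across affected cores.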