authorJames Hogan <james.hogan@imgtec.com>2016-04-19 04:25:06 -0400
committerRalf Baechle <ralf@linux-mips.org>2016-05-13 09:30:25 -0400
commitf383219674b72f390911c60e1d8ae95e32622398 (patch)
tree9248889d186ec3e672b2060accb2ffecba8b1d50 /arch/mips
parent7b2cb64f91f25a7293b10054e20d1c0734ffab6f (diff)
MIPS: mm: Don't clobber $1 on XPA TLB refill
For XPA kernels build_update_entries() uses $1 (at) as a scratch register, but doesn't arrange for it to be preserved, so it will always be clobbered by the TLB refill exception. Although this register normally has a very short lifetime that doesn't cross memory accesses, TLB refills due to instruction fetches (either on a page boundary or after preemption) could clobber live data, and it's easy to reproduce the clobber with a little bit of assembler code.

Note that the use of a hardware page table walker will partly mask the problem, as the TLB refill handler will not always be invoked.

This is fixed by avoiding the use of the extra scratch register. The pte_high parts (going into the lower half of the EntryLo registers) are loaded and manipulated separately so as to keep the PTE pointer around for the other halves (instead of storing it in the scratch register), and the pte_low parts (going into the high half of the EntryLo registers) are masked with 0x00ffffff using an ext instruction (instead of loading 0x00ffffff into the scratch register and AND'ing).

[paul.burton@imgtec.com:
  - Rebase atop other TLB work.
  - Use ext instead of an sll, srl sequence.
  - Use cpu_has_xpa instead of #ifdefs.
  - Modify commit subject to include "mm".]

Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/13120/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
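As a sketch of the failure mode (an illustration, not code taken from the patch): the assembler routinely expands macro instructions through $1 (at), so a TLB refill taken on an instruction fetch in the middle of such an expansion corrupts live data. The symbol buf and the registers below are hypothetical, and the expansion shown assumes a plain o32 build where the load is not gp-relative.

	# "lw $t0, buf" is a macro instruction; the assembler expands it via $at:
	lui	$at, %hi(buf)		# $at = upper half of buf's address
	# If fetching the next instruction misses in the TLB (page boundary,
	# or resumption after preemption), the refill handler runs here; the
	# buggy XPA handler leaves its scratch value in $at.
	lw	$t0, %lo(buf)($at)	# now reads from a corrupted address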
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/mm/tlbex.c	24
1 file changed, 10 insertions, 14 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index db4adf9cc65c..81d42c33f882 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1014,26 +1014,22 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	if (config_enabled(CONFIG_XPA)) {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
-		const int scratch = 1; /* Our extra working register */
-
-		uasm_i_addu(p, scratch, 0, ptep);
 
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
 
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-
-		uasm_i_lw(p, tmp, 0, scratch);
-		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
-		uasm_i_lui(p, scratch, 0xff);
-		uasm_i_ori(p, scratch, scratch, 0xffff);
-		uasm_i_and(p, tmp, scratch, tmp);
-		uasm_i_and(p, ptep, scratch, ptep);
+		uasm_i_lw(p, tmp, 0, ptep);
+		uasm_i_ext(p, tmp, tmp, 0, 24);
 		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
-		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+
+		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
+
+		uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+		uasm_i_ext(p, tmp, tmp, 0, 24);
+		uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
 		return;
 	}
 
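For reference, a rough sketch of what the new uasm sequence emits into the refill handler. It assumes a 32-bit XPA kernel where sizeof(pte_t) is 8 (so the even/odd pte_high words sit at offsets 4 and 12 and the pte_low words at offsets 0 and 8), assumes tmp and ptep end up in $k0 and $k1 as is typical for the refill path, and uses an illustrative rotate amount in place of ilog2(_PAGE_GLOBAL).

	lw	$k0, 4($k1)	# even pte_high, feeds the low half of EntryLo0
	rotr	$k0, $k0, 6	# rotate the software global bit onto the EntryLo G bit
	mtc0	$k0, $2		# EntryLo0
	lw	$k0, 0($k1)	# even pte_low, reloaded through the still-live PTE pointer
	ext	$k0, $k0, 0, 24	# keep bits 23:0; replaces the old lui/ori/and with 0x00ffffff
	mthc0	$k0, $2		# high half of EntryLo0
	lw	$k0, 12($k1)	# odd pte_high
	rotr	$k0, $k0, 6
	mtc0	$k0, $3		# EntryLo1
	lw	$k0, 8($k1)	# odd pte_low
	ext	$k0, $k0, 0, 24
	mthc0	$k0, $3		# high half of EntryLo1

Note that $1 (at) never appears and the PTE pointer stays live across all four loads, which is what the fix relies on.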