author     ShihPo Hung <shihpo.hung@sifive.com>      2019-06-17 00:26:17 -0400
committer  Paul Walmsley <paul.walmsley@sifive.com>  2019-06-17 06:44:44 -0400
commit     bf587caae305ae3b4393077fb22c98478ee55755
tree       ab59e43380e7b8e32b79875eb26a5c27e5046957 /arch/riscv/mm
parent     c35f1b87fc595807ff15d2834d241f9771497205
riscv: mm: synchronize MMU after pte change
Because RISC-V compliant implementations can cache invalid entries
in the TLB, an SFENCE.VMA is necessary after changes to the page
table. This patch adds an SFENCE.VMA for the vmalloc_fault path.
Signed-off-by: ShihPo Hung <shihpo.hung@sifive.com>
[paul.walmsley@sifive.com: reversed tab->whitespace conversion,
wrapped comment lines]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: linux-riscv@lists.infradead.org
Cc: stable@vger.kernel.org
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/fault.c  13
1 file changed, 13 insertions, 0 deletions
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index cec8be9e2d6a..5b72e60c5a6b 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -29,6 +29,7 @@
 
 #include <asm/pgalloc.h>
 #include <asm/ptrace.h>
+#include <asm/tlbflush.h>
 
 /*
  * This routine handles page faults. It determines the address and the
@@ -278,6 +279,18 @@ vmalloc_fault:
 		pte_k = pte_offset_kernel(pmd_k, addr);
 		if (!pte_present(*pte_k))
 			goto no_context;
+
+		/*
+		 * The kernel assumes that TLBs don't cache invalid
+		 * entries, but in RISC-V, SFENCE.VMA specifies an
+		 * ordering constraint, not a cache flush; it is
+		 * necessary even after writing invalid entries.
+		 * Relying on flush_tlb_fix_spurious_fault would
+		 * suffice, but the extra traps reduce
+		 * performance. So, eagerly SFENCE.VMA.
+		 */
+		local_flush_tlb_page(addr);
+
 		return;
 	}
 }
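For reference, local_flush_tlb_page() here issues a single address-specific
SFENCE.VMA on the local hart. A minimal sketch of that helper, assuming the
arch/riscv/include/asm/tlbflush.h definition from around this kernel release
(shown for illustration; it is not part of this patch):

	/* Sketch of the helper this patch calls; assumed from the
	 * arch/riscv/include/asm/tlbflush.h of this era. */
	static inline void local_flush_tlb_page(unsigned long addr)
	{
		/* sfence.vma with an address operand orders prior
		 * page-table stores and invalidates any translation
		 * cached for addr on the executing hart only. */
		__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
	}

Flushing eagerly at this point trades one SFENCE.VMA on the vmalloc_fault
path for the repeated traps that deferring to flush_tlb_fix_spurious_fault
would otherwise incur.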