author		Gary Guo <gary@garyguo.net>	2019-03-26 20:41:29 -0400
committer	Palmer Dabbelt <palmer@sifive.com>	2019-05-16 23:42:12 -0400
commit		f6635f873a605576fa1983c605655a8721475c22
tree		e90d492641c5dbbc02307894264a20ed9ccf53b7
parent		58de77545e53b94cd6c816776197dade598632c5
riscv: move switch_mm to its own file
switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called within switch_mm and can be moved
along with it. The function is expected to become more complicated when
ASID support is added, so clean it up eagerly.

By moving them to a separate file we also remove some excessive
dependencies on tlbflush.h and cacheflush.h.
Signed-off-by: Gary Guo <gary@garyguo.net>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
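
For orientation before the diff: the two users are, as far as we can tell, activate_mm() (kept inline in the header) and the scheduler's context_switch(), which on riscv reaches switch_mm() through the generic switch_mm_irqs_off() fallback. A condensed, hypothetical sketch of the header's shape after the patch (simplified stand-ins, not the literal kernel code):

/* sketch only: simplified stand-ins for the kernel's types */
#include <stddef.h>

struct mm_struct;
struct task_struct;

/* mmu_context.h after the patch: a bare declaration, so the header no
 * longer needs <asm/tlbflush.h> or <asm/cacheflush.h> itself */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *task);

/* one of the two users stays inline and trivially forwards */
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, NULL);
}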
 arch/riscv/include/asm/mmu_context.h | 54 +-------------------------
 arch/riscv/mm/Makefile               |  1 +
 arch/riscv/mm/context.c              | 69 ++++++++++++++++++++++++++
 3 files changed, 72 insertions(+), 52 deletions(-)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 98c76c821367..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active. This is at least used by the icache flushing
-		 * routines in order to determine who should be flushed.
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 	struct mm_struct *next)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..0f1295d8731f 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,4 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task)
+{
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	/*
+	 * Mark the current MM context as inactive, and the next as
+	 * active. This is at least used by the icache flushing
+	 * routines in order to determine who should be flushed.
+	 */
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Use the old spbtr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	local_flush_tlb_all();
+
+	flush_icache_deferred(next);
+}
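
An aside on the csr_write() at the end of switch_mm(): the value written is the physical page number of the root page table combined with the paging-mode field. A standalone, hypothetical illustration of that composition, using illustrative constants rather than the kernel's asm/csr.h definitions, with the RV64/Sv39 satp layout (MODE[63:60], ASID[59:44], PPN[43:0]) from privileged spec 1.10:

#include <stdint.h>

#define SKETCH_SATP_MODE_SV39	(8ULL << 60)	/* MODE = 8 selects Sv39 */
#define SKETCH_PAGE_SHIFT	12

/* roughly what virt_to_pfn(next->pgd) | SATP_MODE boils down to; the
 * kernel first translates the pgd's kernel virtual address to a
 * physical one, we start from the physical address here */
static uint64_t make_satp(uint64_t pgd_phys)
{
	uint64_t ppn = pgd_phys >> SKETCH_PAGE_SHIFT;	/* root table PPN */

	/* ASID[59:44] stays 0 here; per the commit message, ASID
	 * support is expected to land later */
	return SKETCH_SATP_MODE_SV39 | ppn;
}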

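Finally, the smp_mb() in flush_icache_deferred() pairs with a barrier on the flush-initiating side, per its comment. Below is a rough sketch of that sending-side protocol as the big comment describes it: flush harts currently executing the mm eagerly, and only mark the rest stale so they flush lazily on their next switch_mm(). This is a hypothetical simplification, not the kernel's actual flush_icache_mm(); remote_fence_i() is a made-up stand-in for the SBI remote fence.i call.

#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>

/* hypothetical stand-in for the SBI remote fence.i / IPI plumbing */
static void remote_fence_i(const cpumask_t *harts)
{
	(void)harts;	/* would fence the instruction caches of @harts */
}

static void icache_flush_mm_sketch(struct mm_struct *mm)
{
	cpumask_t others;
	unsigned int cpu = get_cpu();	/* pin to this hart */

	/* Mark every hart's icache stale for this mm, then exempt
	 * ourselves: we flush locally right now. */
	cpumask_setall(&mm->context.icache_stale_mask);
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	local_flush_icache_all();

	/* Harts currently running this mm cannot defer; fence them now. */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others)) {
		remote_fence_i(&others);
	} else {
		/*
		 * No IPI was sent, so nothing ordered our stale-mask
		 * writes; this barrier pairs with the smp_mb() in
		 * flush_icache_deferred() above.
		 */
		smp_mb();
	}

	put_cpu();
}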