author		Vineet Gupta <vgupta@synopsys.com>	2013-10-27 05:19:02 -0400
committer	Vineet Gupta <vgupta@synopsys.com>	2013-11-06 00:11:45 -0500
commit		5ea72a90261552ed5fdca35239feb6cba498301e (patch)
tree		c83f798813ae867ec10c35be6d2f9c3bc99609b5
parent		63eca94ca206e342bad4a06a86d8e7eda3053a4e (diff)
ARC: [SMP] TLB flush
- Add mm_cpumask setting (aggregating only, unlike some other arches),
  used to restrict the TLB flush cross-calling (see the sketch below)

- cross-calling versions of TLB flush routines (thanks to Noam)
Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
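
To make the "aggregating only" point concrete, here is a small userspace sketch of the scenario described in the switch_mm() comment below. CPU membership is modeled as plain bitmasks and the flush IPI as a printout; this is an illustration of the policy, not kernel code:

/* Sketch: why ARC's mm_cpumask only aggregates (never clears) bits.
 * Models the commit-comment scenario: T1 runs on C1 then migrates to
 * C3; T2 (same mm) on C2 munmaps. The TLB-flush "IPI" goes only to
 * CPUs whose bit is set in the mm's cpumask. */
#include <stdio.h>

#define NCPU 4

int main(void)
{
	unsigned aggregating = 0, clearing = 0;

	/* T1 runs on C1: both policies set bit 1 */
	aggregating |= 1u << 1;
	clearing    |= 1u << 1;

	/* T1 migrates to C3: aggregating keeps bit 1,
	 * a clearing policy drops it for the switched-out CPU */
	aggregating |= 1u << 3;
	clearing     = 1u << 3;

	/* T2 (same mm) runs on C2 */
	aggregating |= 1u << 2;
	clearing    |= 1u << 2;

	/* T2 munmaps: the flush IPI is sent per set bit */
	for (int cpu = 0; cpu < NCPU; cpu++)
		printf("C%d: aggregating=%s clearing=%s\n", cpu,
		       aggregating & (1u << cpu) ? "flushed" : "-",
		       clearing    & (1u << cpu) ? "flushed" : "-");

	/* With the clearing policy, C1 keeps stale TLB entries: if T1
	 * re-migrates to C1 it can still hit the unmapped region. */
	return 0;
}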
-rw-r--r--	arch/arc/include/asm/mmu_context.h	| 17
-rw-r--r--	arch/arc/include/asm/tlbflush.h		| 11
-rw-r--r--	arch/arc/kernel/smp.c			|  1
-rw-r--r--	arch/arc/mm/tlb.c			| 73
4 files changed, 99 insertions, 3 deletions
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 45f06f566b02..1fd467ef658f 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -80,7 +80,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	/* move to new ASID and handle rollover */
 	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
 
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/*
 		 * Above check for rollover of 8 bit ASID in 32 bit container.
@@ -131,6 +131,21 @@ static inline void destroy_context(struct mm_struct *mm)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	const int cpu = smp_processor_id();
+
+	/*
+	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
+	 * the IPI to CPUs where a task once ran could cause stale TLB entry
+	 * re-use, specially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
+	 *      and if T1 were to re-migrate to C1, it could access the
+	 *      unmapped region via any existing stale TLB entries.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
 #ifndef CONFIG_SMP
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
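
On the first hunk above: the ASID rollover in get_new_mmu_context() is a strictly per-cpu event (each CPU has its own asid_cpu counter), so only the local TLB needs flushing; with the SMP cross-calling variants now in play, flush_tlb_all() would needlessly IPI every CPU. The rollover test itself can be seen in isolation with a small userspace sketch, assuming the 8-bit-ASID-in-32-bit-container layout the in-file comment describes (MM_CTXT_ASID_MASK taken as 0xff here):

/* Sketch of the rollover test from get_new_mmu_context(), assuming
 * an 8-bit ASID in the low bits of a 32-bit cycle counter. */
#include <stdio.h>

#define MM_CTXT_ASID_MASK 0xffU

int main(void)
{
	unsigned int asid_cpu = 0x100ffU;	/* arbitrary: ASID at 0xff */

	/* same shape as: if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) */
	if (!(++asid_cpu & MM_CTXT_ASID_MASK))
		printf("rollover: counter=0x%x, local TLB must be flushed\n",
		       asid_cpu);
	return 0;
}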
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c8..71c7b2e4b874 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
 
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()		local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 #endif
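
This hunk resolves the old XXX by selecting an implementation at build time: UP kernels keep aliasing the generic flush_tlb_*() hooks straight to the local routines, while SMP kernels see real declarations whose definitions (in tlb.c below) cross-call. The same dispatch shape, reduced to a standalone sketch with invented stub names (local_flush/crosscall_flush are not kernel symbols), compiles either way:

/* Build-time dispatch sketch mirroring the tlbflush.h change.
 * Compile with -DCONFIG_SMP to take the cross-calling path. */
#include <stdio.h>

static void local_flush(void) { puts("flush this CPU only"); }

#ifndef CONFIG_SMP
#define flush_all()	local_flush()
#else
static void crosscall_flush(void)
{
	puts("flush every online CPU via IPI");
}
#define flush_all()	crosscall_flush()
#endif

int main(void)
{
	flush_all();
	return 0;
}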
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 7f0ab1ecd640..41bc4c703f42 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
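
The one-liner here covers the window before a secondary CPU's first switch_mm(): the CPU comes online with init_mm as its active_mm, so recording its bit in mm_cpumask(init_mm) up front keeps the aggregating invariant (every CPU that may hold TLB entries for an mm appears in that mm's mask) from the moment the CPU starts running.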
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index db0f0f823980..e1acf0ce5647 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -363,6 +363,79 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+			 mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = uaddr
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
 /*
  * Routine to create a TLB entry
  */
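
All five cross-call wrappers share one shape: pack the arguments into an on-stack struct tlb_args, then let on_each_cpu_mask() run the matching ipi_*() thunk on exactly the CPUs recorded in mm_cpumask() (the final argument of 1 makes the caller wait until every CPU has flushed). flush_tlb_all() and flush_tlb_mm() skip the thunk and instead cast the local routine to smp_call_func_t; that works in practice because the callee simply ignores the spurious void * argument, though a cast-free spelling would use a thunk there too. A hypothetical variant (not part of this patch) for flush_tlb_all():

/* Hypothetical cast-free flush_tlb_all(): wrap the local flush in a
 * thunk with the smp_call_func_t prototype, as the ipi_*() helpers
 * above already do, instead of casting the function pointer. */
static inline void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}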