author    Vineet Gupta <vgupta@synopsys.com>  2013-10-27 05:19:02 -0400
committer Vineet Gupta <vgupta@synopsys.com>  2013-11-06 00:11:45 -0500
commit    5ea72a90261552ed5fdca35239feb6cba498301e (patch)
tree      c83f798813ae867ec10c35be6d2f9c3bc99609b5 /arch/arc/include/asm
parent    63eca94ca206e342bad4a06a86d8e7eda3053a4e (diff)
ARC: [SMP] TLB flush
- Add mm_cpumask setting (aggregating only, unlike some other arches)
  used to restrict the TLB flush cross-calling

- cross-calling versions of TLB flush routines (thanks to Noam)

Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/mmu_context.h  17
-rw-r--r--  arch/arc/include/asm/tlbflush.h     11
2 files changed, 25 insertions(+), 3 deletions(-)
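Note that this diff only covers the headers: the bodies of the cross-calling
routines land in arch/arc/mm/tlb.c, which is outside the diffstat above. As a
minimal sketch of the intent (an assumption on my part, with ipi_flush_tlb_mm
as a purely illustrative helper name), the SMP flush_tlb_mm() would cross-call
local_flush_tlb_mm() on exactly the CPUs accumulated in mm_cpumask -- which is
what makes the aggregating-only mask policy described in the commit message
matter:

/*
 * Sketch only -- not taken from this patch; the real implementation
 * lives in arch/arc/mm/tlb.c.
 */
#include <linux/smp.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>

static void ipi_flush_tlb_mm(void *arg)
{
	/* runs on each targeted CPU, in IPI context */
	local_flush_tlb_mm((struct mm_struct *)arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* IPI only the CPUs this mm has ever run on (aggregated mask) */
	on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
}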
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 45f06f566b02..1fd467ef658f 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -80,7 +80,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	/* move to new ASID and handle rollover */
 	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
 
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/*
 		 * Above checke for rollover of 8 bit ASID in 32 bit container.
@@ -131,6 +131,21 @@ static inline void destroy_context(struct mm_struct *mm)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	const int cpu = smp_processor_id();
+
+	/*
+	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist CPUs for sending TLB flush IPIs; not sending
+	 * the IPI to every CPU a task once ran on could cause stale TLB
+	 * entry re-use, especially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
+	 *      and if T1 were to re-migrate to C1, it could access the
+	 *      unmapped region via any existing stale TLB entries.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
 #ifndef CONFIG_SMP
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
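A side note on the first hunk above: once flush_tlb_all() becomes a
cross-calling routine under SMP (declared in tlbflush.h below), the ASID
rollover path must switch to local_flush_tlb_all(), since rollover is a
per-CPU event and only the local TLB needs flushing. A minimal sketch of
what the SMP flush_tlb_all() presumably looks like (again an assumption,
with ipi_flush_tlb_all a hypothetical helper name, not from this diff):

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	/* kernel mappings are global: cross-call every online CPU */
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}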
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c8..71c7b2e4b874 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
 
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()		local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 #endif
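The range-based flushes declared above take several arguments, but an IPI
callback receives a single void *. The usual pattern (as on ARM; assumed
here, since arch/arc/mm/tlb.c is outside this diff) is to bundle the
arguments into a struct on the caller's stack and wait for the cross-call
to complete before it goes out of scope:

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end,
	};

	/* wait=1: ta lives on our stack, so the IPIs must finish first */
	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}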