path: root/arch/arm/include/asm/mmu_context.h
author		Catalin Marinas <catalin.marinas@arm.com>	2011-11-28 08:53:28 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2012-04-17 10:29:32 -0400
commit		7fec1b57b8a925d83c194f995f83d9f8442fd48e (patch)
tree		320c333459779e1388f5aae50ae50edb4482e82c /arch/arm/include/asm/mmu_context.h
parent		3c5f7e7b4a0346de670b08f595bd15e7eec91f97 (diff)
ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs
Since the ASIDs must be unique to an mm across all the CPUs in a system, the __new_context() function needs to broadcast a context reset event to all the CPUs during ASID allocation if a roll-over occurred. Such IPIs cannot be issued with interrupts disabled and ARM had to define __ARCH_WANT_INTERRUPTS_ON_CTXSW.

This patch changes the check_context() function to check_and_switch_context() called from switch_mm(). In case of ASID-capable CPUs (ARMv6 onwards), if a new ASID is needed and the interrupts are disabled, it defers the __new_context() and cpu_switch_mm() calls to the post-lock switch hook where the interrupts are enabled. Setting the reserved TTBR0 was also moved to check_and_switch_context() from cpu_v7_switch_mm().

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
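As a standalone illustration (not part of the patch), the generation test that the new check_and_switch_context() relies on, (mm->context.id ^ cpu_last_asid) >> ASID_BITS, can be modelled in plain C. The ASID_BITS value and the bit packing below are assumptions made for this example only:

/*
 * Standalone example (not kernel code): how the XOR-and-shift test detects a
 * stale ASID.  ASID_BITS and the packing of the generation number above the
 * ASID are assumptions made for this illustration.
 */
#include <stdio.h>

#define ASID_BITS	8

/* Most recently issued ID: generation 3, ASID 0x42. */
static unsigned int cpu_last_asid = (3u << ASID_BITS) | 0x42;

static int needs_new_asid(unsigned int context_id)
{
	/* Non-zero iff the generation bits (above ASID_BITS) differ. */
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int current_gen = (3u << ASID_BITS) | 0x17; /* same generation  */
	unsigned int stale_gen   = (2u << ASID_BITS) | 0x17; /* older generation */

	printf("current generation -> new ASID needed? %d\n", !!needs_new_asid(current_gen)); /* 0 */
	printf("older generation   -> new ASID needed? %d\n", !!needs_new_asid(stale_gen));   /* 1 */
	return 0;
}

Because the generation number lives above ASID_BITS, the shifted XOR is zero exactly when both IDs were allocated in the same roll-over epoch, which is what lets the fast path below skip __new_context() entirely.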
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--	arch/arm/include/asm/mmu_context.h	72
1 file changed, 56 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..94e265cb5146 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -49,39 +49,80 @@ DECLARE_PER_CPU(struct mm_struct *, current_mm);
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
 
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
 
-static inline void check_context(struct mm_struct *mm)
+#else	/* !CONFIG_CPU_HAS_ASID */
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+	cpu_switch_mm(mm->pgd, mm);
 #endif
 }
 
 #define init_new_context(tsk,mm)	0
 
-#endif
+#define finish_arch_post_lock_switch()	do { } while (0)
+
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -123,8 +164,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
 		*crt_mm = next;
 #endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
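For context, a user-space model of the deferral pattern introduced above (a sketch only; the flag handling, the hook invocation, and the call ordering are simplified assumptions, not kernel code): when a roll-over is detected inside the interrupts-off context-switch path, only a thread flag is set, and the expensive work runs from the post-lock-switch hook once interrupts are enabled again.

/*
 * Sketch only: a user-space model of the defer-to-post-lock-switch pattern.
 * The names mirror the patch, but the bodies are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;          /* models irqs_disabled()        */
static bool tif_switch_mm;     /* models the TIF_SWITCH_MM flag */

static void switch_new_context(void)
{
	/* In the real code: __new_context() (may send IPIs) + cpu_switch_mm(). */
	printf("allocating a new ASID and switching page tables\n");
}

static void check_and_switch_context(bool need_new_asid)
{
	if (!need_new_asid)
		printf("fast path: current-generation ASID, switch pgd only\n");
	else if (irqs_off)
		tif_switch_mm = true;   /* cannot broadcast IPIs here, defer */
	else
		switch_new_context();   /* direct switch_mm()/activate_mm() call */
}

static void finish_arch_post_lock_switch(void)
{
	if (tif_switch_mm) {            /* models test_and_clear_thread_flag() */
		tif_switch_mm = false;
		switch_new_context();   /* interrupts are enabled again */
	}
}

int main(void)
{
	irqs_off = true;                /* inside the context_switch() critical region */
	check_and_switch_context(true); /* ASID roll-over: work is deferred */
	irqs_off = false;               /* scheduler re-enables interrupts */
	finish_arch_post_lock_switch(); /* deferred ASID allocation runs here */
	return 0;
}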