Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--	arch/arm/include/asm/mmu_context.h	82
1 file changed, 3 insertions(+), 79 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..a64f61cb23d1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -24,84 +24,8 @@ void __check_kvm_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * | process ID              |   ASID    |
- * +-------------------------+-----------+
- * | context ID                          |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
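
Note: the heart of the removed inline check_and_switch_context() is the generation test (mm->context.id ^ cpu_last_asid) >> ASID_BITS. With ASID_BITS = 8, context.id keeps the hardware ASID in its low 8 bits and a rollover generation in the remaining upper bits, so a nonzero result means the mm last received an ASID under an older generation and must take the slow allocation path. The snippet below is a standalone userspace sketch of just that packing and check, using invented context ID values; it is illustrative only and not part of this patch or of the kernel sources.

/*
 * Standalone illustration (userspace, not kernel code) of the ASID
 * generation check used by the deleted check_and_switch_context().
 * All context ID values below are invented for the example.
 */
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	(~0u << ASID_BITS)	/* generation bits; unsigned variant of the removed macro */

/* Last context ID handed out; bits above ASID_BITS are the generation. */
static unsigned int cpu_last_asid = (3u << ASID_BITS) | 0x2a;	/* generation 3, ASID 0x2a */

/* Nonzero when context_id was allocated under an older generation. */
static unsigned int asid_is_stale(unsigned int context_id)
{
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int current_gen = (3u << ASID_BITS) | 0x07;	/* same generation  */
	unsigned int old_gen     = (2u << ASID_BITS) | 0x07;	/* older generation */

	printf("current_gen: ASID 0x%02x, stale %u\n",
	       current_gen & ~ASID_MASK, asid_is_stale(current_gen));	/* 0x07, 0 */
	printf("old_gen:     ASID 0x%02x, stale %u\n",
	       old_gen & ~ASID_MASK, asid_is_stale(old_gen));		/* 0x07, 1 */
	return 0;
}

After this patch the decision no longer lives in the header at all: under CONFIG_CPU_HAS_ASID the header only declares the out-of-line check_and_switch_context() and provides a trivial init_new_context() that zeroes mm->context.id.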