Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
 -rw-r--r--  arch/arm/include/asm/mmu_context.h |  88
 1 file changed, 6 insertions, 82 deletions
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |             context ID              |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
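
For reference, the comment block deleted above describes how the old scheme split the 32-bit context ID between a process ID and the 8-bit hardware ASID. Below is a minimal illustrative sketch of that layout, reusing the ASID_BITS/ASID_MASK definitions removed by this patch; the asid_of()/procid_of() helpers are hypothetical and exist only to show how the two fields were separated, they are not part of the kernel or of this change.

/* Illustrative sketch of the pre-change context ID layout, not kernel code. */
#define ASID_BITS	8
#define ASID_MASK	((~0) << ASID_BITS)	/* bits [31:8]: process ID */

/* hypothetical helper: bits [7:0] tag cache/TLB entries (the hardware ASID) */
static inline unsigned int asid_of(unsigned int context_id)
{
	return context_id & ~ASID_MASK;
}

/* hypothetical helper: bits [31:8], the part visible to debuggers/trace logic */
static inline unsigned int procid_of(unsigned int context_id)
{
	return context_id & ASID_MASK;
}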
