| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-05-04 07:04:09 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-05-04 07:04:09 -0400 |
| commit | e423c0c30c76fc850a443102a2965ec397ba6e21 | |
| tree | 2a68e818302da75bd22cdf05c37381ee911ffd44 | |
| parent | 69964ea4c7b68c9399f7977aa5b9aa6539a6a98a | |
| parent | b9d4d42ad901cc848ac87f1cb8923fded3645568 | |

Merge branch 'intr-ctxsw' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux into devel-stable
| -rw-r--r-- | arch/arm/include/asm/mmu.h | 7 |
| -rw-r--r-- | arch/arm/include/asm/mmu_context.h | 104 |
| -rw-r--r-- | arch/arm/include/asm/thread_info.h | 1 |
| -rw-r--r-- | arch/arm/mm/context.c | 57 |
| -rw-r--r-- | arch/arm/mm/proc-v7-2level.S | 9 |

5 files changed, 110 insertions(+), 68 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,11 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
-{
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
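
The mmu_context.h changes above boil down to one deferral pattern: when switch_mm() runs with interrupts disabled, the expensive part of the mm switch (ASID allocation, which sends IPIs, or a VIVT cache flush) is not done inline; TIF_SWITCH_MM is set instead, and finish_arch_post_lock_switch() completes the switch once the scheduler has left its critical region. The sketch below is a minimal userspace model of that pattern, with made-up names (request_switch(), post_lock_switch()); it is illustrative only and is not kernel API.

```c
/*
 * Userspace model of the deferred-switch pattern: when the "critical
 * section" (standing in for the IRQs-off context switch path) cannot do
 * the heavy work, it only records a flag; a later hook (standing in for
 * finish_arch_post_lock_switch()) performs the real switch.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm { const char *name; };

static bool deferred_switch;      /* models TIF_SWITCH_MM */
static struct mm *pending_mm;     /* mm to switch to later */

static void do_switch(struct mm *mm)
{
	printf("switching to %s\n", mm->name);   /* models cpu_switch_mm() */
}

static void request_switch(struct mm *mm, bool in_critical_section)
{
	if (in_critical_section) {
		/* cannot do the heavy work now: defer it */
		pending_mm = mm;
		deferred_switch = true;
	} else {
		do_switch(mm);
	}
}

static void post_lock_switch(void)
{
	if (deferred_switch) {    /* models test_and_clear_thread_flag() */
		deferred_switch = false;
		do_switch(pending_mm);
	}
}

int main(void)
{
	struct mm next = { "next_mm" };

	request_switch(&next, true);  /* inside the critical region: deferred */
	post_lock_switch();           /* safe point: switch actually happens */
	return 0;
}
```

In the kernel the flag lives in thread_info and the hook is invoked by the generic scheduler after the runqueue lock is dropped, which is why the old mm remains in use (and must stay valid) until that point.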
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..9e13e33ec746 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d606..806cc4f63516 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,30 +18,39 @@
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2	@ read TTBR0\n"		\
-	"	mov	%1, %2, lsl #(48 - 32)	@ set ASID\n"		\
-	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"		\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1	@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0	@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -98,14 +105,7 @@ static void reset_context(void *info)
 {
 	unsigned int asid;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-	/*
-	 * Check if a current_mm was set on this CPU as it might still
-	 * be in the early booting stages and using the reserved ASID.
-	 */
-	if (!mm)
-		return;
+	struct mm_struct *mm = current->active_mm;
 
 	smp_rmb();
 	asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888c..42ac069c8012 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,15 +49,10 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
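
The context.c and proc-v7-2level.S hunks carry the other half of the scheme: before any switch, the generic code parks TTBR0 on swapper_pg_dir (global entries only, ASID 0) via cpu_set_reserved_ttbr0(), so cpu_v7_switch_mm no longer needs the intermediate reserved context ID, the second barrier pair, or the duplicated erratum workaround; it simply writes the new context ID and then the new TTBR0. Below is a minimal userspace model of that ordering; the names (set_reserved_ttbr0(), install_mm()) and types are hypothetical stand-ins for the real coprocessor writes, not kernel code.

```c
/*
 * Model of the two-step switch ordering (illustrative only):
 * 1) park the translation base on a table with only global entries and
 *    ASID 0 (models cpu_set_reserved_ttbr0()),
 * 2) install the new ASID first, then the new translation table
 *    (models the reordered cpu_v7_switch_mm).
 * Between the two steps, speculative walks can only see global mappings,
 * never a stale ASID paired with the new table.
 */
#include <stdio.h>

struct pgtable { const char *name; };

static struct pgtable swapper_pg_dir = { "swapper_pg_dir (global only)" };

static struct pgtable *ttbr0 = &swapper_pg_dir;  /* models TTBR0 */
static unsigned int context_id;                  /* models the context ID register */

static void set_reserved_ttbr0(void)
{
	ttbr0 = &swapper_pg_dir;     /* safe table: no ASID-tagged entries */
	context_id = 0;
	printf("TTBR0 -> %s, ASID 0\n", ttbr0->name);
}

static void install_mm(struct pgtable *pgd, unsigned int asid)
{
	context_id = asid;           /* new ASID first ... */
	ttbr0 = pgd;                 /* ... then the new translation table */
	printf("TTBR0 -> %s, ASID %u\n", ttbr0->name, asid);
}

int main(void)
{
	struct pgtable next_pgd = { "next->pgd" };

	set_reserved_ttbr0();        /* done by check_and_switch_context() */
	install_mm(&next_pgd, 42);   /* done by cpu_switch_mm()/cpu_v7_switch_mm */
	return 0;
}
```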