Diffstat (limited to 'arch/x86/mm/tlb.c')
 arch/x86/mm/tlb.c | 295 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 295 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
new file mode 100644
index 000000000000..a654d59e4483
--- /dev/null
+++ b/arch/x86/mm/tlb.c
@@ -0,0 +1,295 @@
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations by Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state, use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector, the target CPUs look into
 * the right array slot for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us into this right now.
 * In the future, when interrupts are split into per-CPU domains, this
 * could be fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
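
/*
 * Illustrative sketch, not part of this file: the pad[] member above is
 * meant to make each flush_state[] entry occupy exactly one internode
 * cache line, so neighbouring entries never false-share. Assuming the
 * named members fit inside pad[], a compile-time check of that property
 * could look like this:
 */
#if 0
static void __init check_flush_state_size(void)
{
	/* Union size collapses to the pad size when the struct fits. */
	BUILD_BUG_ON(sizeof(union smp_flush_state) !=
		     CONFIG_X86_INTERNODE_CACHE_BYTES);
}
#endif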

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */
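
/*
 * Illustrative sketch of steps 1b1)-1b3) above, mirroring what
 * switch_mm() does on a thread switch without an mm change. Not part
 * of this file; "cpu" and "next" are assumed locals, and the cpumask
 * helper used is the cpu_test_and_set() of this kernel era:
 */
#if 0
	percpu_write(cpu_tlbstate.state, TLBSTATE_OK);	/* 1b1 */
	if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {	/* 1b2 */
		/*
		 * 1b3: the bit was 0, so leave_mm() ran and ipi
		 * delivery was off; reloading cr3 flushes any stale
		 * tlb entries.
		 */
		load_cr3(next->pgd);
	}
#endif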

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop,
 * kept only for documentation purposes, but the usage is slightly
 * inconsistent. On x86_32, asmlinkage is regparm(0) but the interrupt
 * entry code calls in with the first parameter in %eax. Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * regs->orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG(), but until someone can quote me the line
	 * from the Intel manual that guarantees an IPI to multiple CPUs
	 * is retried _only_ on the erroring CPUs, it's staying as a
	 * return.
	 *
	 * BUG();
	 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
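
/*
 * Worked example for the decoding above (illustration only): an IPI
 * sent on vector INVALIDATE_TLB_VECTOR_START + 3 leaves
 * ~(INVALIDATE_TLB_VECTOR_START + 3) in regs->orig_ax, so
 * ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START recovers sender slot 3
 * of flush_state[].
 */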

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &flush_state[sender];

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpumask_andnot(to_cpumask(f->flush_cpumask),
		       cpumask, cpumask_of(smp_processor_id()));

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to the affected CPUs.
	 */
	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			    INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
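
/*
 * Worked example (illustration only): with NUM_INVALIDATE_TLB_VECTORS
 * == 8, CPUs 3 and 11 both hash to sender slot 3 and therefore
 * serialize on flush_state[3].tlbstate_lock; this is the hashing cost
 * the header comment mentions for systems with more than 8 CPUs.
 */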

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = get_cpu();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}
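
/*
 * Note: the flush_tlb_others() used by the helpers below resolves to
 * native_flush_tlb_others() on bare metal; under paravirt it may be
 * routed to a hypervisor-specific implementation instead.
 */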

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		spin_lock_init(&flush_state[i].tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}
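
/*
 * Hypothetical caller sketch (names assumed, not from this file):
 * flushing a single user page after updating its PTE.
 */
#if 0
	set_pte_at(vma->vm_mm, addr, ptep, pte);	/* install new mapping */
	flush_tlb_page(vma, addr);			/* drop the stale entry */
#endif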

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}