author     Alex Shi <alex.shi@intel.com>        2012-06-27 21:02:23 -0400
committer  H. Peter Anvin <hpa@zytor.com>       2012-06-27 22:29:13 -0400
commit     52aec3308db85f4e9f5c8b9f5dc4fbd0138c6fa4 (patch)
tree       75ad5a9e508fcbbc041d0fe4f1245c98e08af38c /arch/x86/mm
parent     611ae8e3f5204f7480b3b405993b3352cfa16662 (diff)
x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
There are currently 32 INVALIDATE_TLB_VECTOR entries in the kernel. That is
a big chunk of the vector space in the IDT, yet it is still not enough, since
modern x86 servers have far more CPUs than that. So TLB flushing still suffers
from heavy lock contention.

This patch replaces the dedicated vectors with the generic SMP call function.
That frees 32 vectors in the IDT and resolves the lock contention in TLB
flushing on large systems.

On an NHM-EX machine (4 sockets * 8 cores * HT = 64 CPUs), hackbench pthread
shows a 3% performance increase.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-9-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/tlb.c | 242
 1 file changed, 47 insertions(+), 195 deletions(-)
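For readers skimming the diff below: the heart of the change is that the flush
parameters are packed into an ordinary struct and handed to the kernel's generic
cross-CPU call infrastructure (smp_call_function_many(), delivered via
CALL_FUNCTION_VECTOR), so no dedicated IDT vectors, per-vector flush_state slots,
or spinlocks are needed. The following is only a minimal sketch of that pattern;
the demo_* names are illustrative and not part of the patch, which uses
flush_tlb_info and flush_tlb_func as shown in the diff.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical payload, mirroring the flush_tlb_info idea in the patch. */
struct demo_flush_info {
	unsigned long start;
	unsigned long end;
};

/* Runs on every target CPU, in interrupt context, via CALL_FUNCTION_VECTOR. */
static void demo_flush_func(void *info)
{
	struct demo_flush_info *f = info;

	/* ... flush the TLB range [f->start, f->end) on this CPU ... */
	(void)f;
}

/* Caller side: no private vector, no shared flush_state[] slot, no lock.
 * Caller must have preemption disabled, as the TLB flush paths do. */
static void demo_flush_others(const struct cpumask *mask,
			      unsigned long start, unsigned long end)
{
	struct demo_flush_info info = { .start = start, .end = end };

	/* wait == 1: return only after every target CPU has run the callback,
	 * so the on-stack info stays valid for the whole operation. */
	smp_call_function_many(mask, demo_flush_func, &info, 1);
}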
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 481737def84a..2b5f506a7655 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,34 +28,14 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
  *
  *	More scalable flush, from Andi Kleen
  *
- *	To avoid global state use 8 different call vectors.
- *	Each CPU uses a specific vector to trigger flushes on other
- *	CPUs. Depending on the received vector the target CPUs look into
- *	the right array slot for the flush data.
- *
- *	With more than 8 CPUs they are hashed to the 8 available
- *	vectors. The limited global vector space forces us to this right now.
- *	In future when interrupts are split into per CPU domains this could be
- *	fixed, at the cost of triggering multiple IPIs in some cases.
+ *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */

-union smp_flush_state {
-	struct {
-		struct mm_struct *flush_mm;
-		unsigned long flush_start;
-		unsigned long flush_end;
-		raw_spinlock_t tlbstate_lock;
-		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
-	};
-	char pad[INTERNODE_CACHE_BYTES];
-} ____cacheline_internodealigned_in_smp;
-
-/* State is put into the per CPU data section, but padded
-   to a full cache line because other CPUs can access it and we don't
-   want false sharing in the per cpu data segment. */
-static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
-
-static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
+struct flush_tlb_info {
+	struct mm_struct *flush_mm;
+	unsigned long flush_start;
+	unsigned long flush_end;
+};

 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -74,28 +54,25 @@ void leave_mm(int cpu)
 EXPORT_SYMBOL_GPL(leave_mm);

 /*
- *
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
  * 1) switch_mm() either 1a) or 1b)
  * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *	Stop ipi delivery for the old mm. This is not synchronized with
- *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
- *	for the wrong mm, and in the worst case we perform a superfluous
- *	tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *	was in lazy tlb mode.
- * 1a3) update cpu active_mm
+ * 1a1) set cpu_tlbstate to TLBSTATE_OK
+ *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
+ *	if cpu0 was in lazy tlb mode.
+ * 1a2) update cpu active_mm
  *	Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
  *	Now the other cpus will send tlb flush ipis.
  * 1a4) change cr3.
+ * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ *	Stop ipi delivery for the old mm. This is not synchronized with
+ *	the other cpus, but flush_tlb_func ignore flush ipis for the wrong
+ *	mm, and in the worst case we perform a superfluous tlb flush.
  * 1b) thread switch without mm change
- *	cpu active_mm is correct, cpu0 already handles
- *	flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
+ *	cpu active_mm is correct, cpu0 already handles flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
  * 1b2) test_and_set the cpu bit in cpu_vm_mask.
  *	Atomically set the bit [other cpus will start sending flush ipis],
  *	and test the bit.
@@ -108,186 +85,61 @@ EXPORT_SYMBOL_GPL(leave_mm);
  * runs in kernel space, the cpu could load tlb entries for user space
  * pages.
  *
- * The good news is that cpu mmu_state is local to each cpu, no
+ * The good news is that cpu_tlbstate is local to each cpu, no
  * write/read ordering problems.
  */

 /*
- * TLB flush IPI:
- *
+ * TLB flush funcation:
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
- *
- * Interrupts are disabled.
- */
-
-/*
- * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop
- * but still used for documentation purpose but the usage is slightly
- * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
- * entry calls in with the first parameter in %eax. Maybe define
- * intrlinkage?
  */
-#ifdef CONFIG_X86_64
-asmlinkage
-#endif
-void smp_invalidate_interrupt(struct pt_regs *regs)
+static void flush_tlb_func(void *info)
 {
-	unsigned int cpu;
-	unsigned int sender;
-	union smp_flush_state *f;
-
-	cpu = smp_processor_id();
-	/*
-	 * orig_rax contains the negated interrupt vector.
-	 * Use that to determine where the sender put the data.
-	 */
-	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &flush_state[sender];
-
-	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
-		goto out;
-	/*
-	 * This was a BUG() but until someone can quote me the
-	 * line from the intel manual that guarantees an IPI to
-	 * multiple CPUs is retried _only_ on the erroring CPUs
-	 * its staying as a return
-	 *
-	 * BUG();
-	 */
-
-	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
-		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-			if (f->flush_end == TLB_FLUSH_ALL
-					|| !cpu_has_invlpg)
-				local_flush_tlb();
-			else if (!f->flush_end)
-				__flush_tlb_single(f->flush_start);
-			else {
-				unsigned long addr;
-				addr = f->flush_start;
-				while (addr < f->flush_end) {
-					__flush_tlb_single(addr);
-					addr += PAGE_SIZE;
-				}
-			}
-		} else
-			leave_mm(cpu);
-	}
-out:
-	ack_APIC_irq();
-	smp_mb__before_clear_bit();
-	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
-	smp_mb__after_clear_bit();
-	inc_irq_stat(irq_tlb_count);
-}
+	struct flush_tlb_info *f = info;

-static void flush_tlb_others_ipi(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
-{
-	unsigned int sender;
-	union smp_flush_state *f;
-
-	/* Caller has disabled preemption */
-	sender = this_cpu_read(tlb_vector_offset);
-	f = &flush_state[sender];
-
-	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
-		raw_spin_lock(&f->tlbstate_lock);
-
-	f->flush_mm = mm;
-	f->flush_start = start;
-	f->flush_end = end;
-	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
-		/*
-		 * We have to send the IPI only to
-		 * CPUs affected.
-		 */
-		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
-				    INVALIDATE_TLB_VECTOR_START + sender);
-
-		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
-			cpu_relax();
-	}
+	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+		return;
+
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
+			local_flush_tlb();
+		else if (!f->flush_end)
+			__flush_tlb_single(f->flush_start);
+		else {
+			unsigned long addr;
+			addr = f->flush_start;
+			while (addr < f->flush_end) {
+				__flush_tlb_single(addr);
+				addr += PAGE_SIZE;
+			}
+		}
+	} else
+		leave_mm(smp_processor_id());

-	f->flush_mm = NULL;
-	f->flush_start = 0;
-	f->flush_end = 0;
-	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
-		raw_spin_unlock(&f->tlbstate_lock);
 }

 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     struct mm_struct *mm, unsigned long start,
 			     unsigned long end)
 {
+	struct flush_tlb_info info;
+	info.flush_mm = mm;
+	info.flush_start = start;
+	info.flush_end = end;
+
 	if (is_uv_system()) {
 		unsigned int cpu;

 		cpu = smp_processor_id();
 		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
 		if (cpumask)
-			flush_tlb_others_ipi(cpumask, mm, start, end);
+			smp_call_function_many(cpumask, flush_tlb_func,
+						&info, 1);
 		return;
 	}
-	flush_tlb_others_ipi(cpumask, mm, start, end);
-}
-
-static void __cpuinit calculate_tlb_offset(void)
-{
-	int cpu, node, nr_node_vecs, idx = 0;
-	/*
-	 * we are changing tlb_vector_offset for each CPU in runtime, but this
-	 * will not cause inconsistency, as the write is atomic under X86. we
-	 * might see more lock contentions in a short time, but after all CPU's
-	 * tlb_vector_offset are changed, everything should go normal
-	 *
-	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might
-	 * waste some vectors.
-	 **/
-	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
-		nr_node_vecs = 1;
-	else
-		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
-
-	for_each_online_node(node) {
-		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
-			nr_node_vecs;
-		int cpu_offset = 0;
-		for_each_cpu(cpu, cpumask_of_node(node)) {
-			per_cpu(tlb_vector_offset, cpu) = node_offset +
-				cpu_offset;
-			cpu_offset++;
-			cpu_offset = cpu_offset % nr_node_vecs;
-		}
-		idx++;
-	}
-}
-
-static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
-			unsigned long action, void *hcpu)
-{
-	switch (action & 0xf) {
-	case CPU_ONLINE:
-	case CPU_DEAD:
-		calculate_tlb_offset();
-	}
-	return NOTIFY_OK;
-}
-
-static int __cpuinit init_smp_flush(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
-		raw_spin_lock_init(&flush_state[i].tlbstate_lock);
-
-	calculate_tlb_offset();
-	hotcpu_notifier(tlb_cpuhp_notify, 0);
-	return 0;
+	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 }
-core_initcall(init_smp_flush);

 void flush_tlb_current_task(void)
 {