Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/tlb_32.c  67
-rw-r--r--  arch/x86/kernel/tlb_64.c  61
-rw-r--r--  arch/x86/kernel/tlb_uv.c  16
3 files changed, 70 insertions, 74 deletions
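
The diffs below convert the x86 TLB-shootdown paths from copying cpumask_t values onto the stack to working through const struct cpumask pointers and cpumask_var_t scratch masks, so no NR_CPUS-sized mask has to live on the kernel stack. As orientation, here is a minimal sketch of the cpumask_var_t idiom the converted code relies on; it is illustrative only and not part of this commit, and the names scratch_mask, example_init and example_flush_others are made up:

/*
 * Illustration only -- not from this commit.  With CONFIG_CPUMASK_OFFSTACK=n,
 * cpumask_var_t is a one-element struct cpumask array and alloc_cpumask_var()
 * is a no-op that returns true; with CONFIG_CPUMASK_OFFSTACK=y it is a pointer
 * that must be allocated, which is why the real code adds an early_initcall()
 * to set up flush_cpumask.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/smp.h>

static cpumask_var_t scratch_mask;		/* hypothetical scratch mask */

static int __init example_init(void)
{
	/* Only a real allocation when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&scratch_mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}
early_initcall(example_init);

static void example_flush_others(const struct cpumask *in)
{
	/* Caller is assumed to have preemption disabled. */
	/* All CPUs in 'in' except ourselves, without copying 'in'. */
	cpumask_andnot(scratch_mask, in, cpumask_of(smp_processor_id()));
	if (!cpumask_empty(scratch_mask)) {
		/* ... send the shootdown IPI to scratch_mask ... */
	}
}

Note that init_flush_cpumask() in tlb_32.c below ignores the return value of alloc_cpumask_var(); the sketch checks it only for clarity.
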
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index ce5054642247..ec53818f4e38 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
  * Optimizations Manfred Spraul <manfred@colorfullife.com>
  */
 
-static cpumask_t flush_cpumask;
+static cpumask_var_t flush_cpumask;
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
@@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	cpu = get_cpu();
 
-	if (!cpu_isset(cpu, flush_cpumask))
+	if (!cpumask_test_cpu(cpu, flush_cpumask))
 		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
@@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 	ack_APIC_irq();
 	smp_mb__before_clear_bit();
-	cpu_clear(cpu, flush_cpumask);
+	cpumask_clear_cpu(cpu, flush_cpumask);
 	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
 	inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va)
 {
-	cpumask_t cpumask = *cpumaskp;
-
 	/*
-	 * A couple of (to be removed) sanity checks:
-	 *
-	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(cpumask));
 	BUG_ON(!mm);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (unlikely(cpus_empty(cpumask)))
-		return;
-#endif
-
 	/*
 	 * i'm not happy about this global shared spinlock in the
 	 * MM hot path, but we'll see how contended it is.
@@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 */
 	spin_lock(&tlbstate_lock);
 
+	cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
+#ifdef CONFIG_HOTPLUG_CPU
+	/* If a CPU which we ran on has gone down, OK. */
+	cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
+	if (unlikely(cpumask_empty(flush_cpumask))) {
+		spin_unlock(&tlbstate_lock);
+		return;
+	}
+#endif
 	flush_mm = mm;
 	flush_va = va;
-	cpus_or(flush_cpumask, cpumask, flush_cpumask);
 
 	/*
 	 * Make the above memory operations globally visible before
@@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
+	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpumask_empty(flush_cpumask))
 		/* nothing. lockup detection does not belong here */
 		cpu_relax();
 
@@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 	else
 		leave_mm(smp_processor_id());
 	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		leave_mm(smp_processor_id());
 	}
 
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
-
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 	preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_page);
@@ -254,3 +239,9 @@ void reset_lazy_tlbstate(void)
 	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 }
 
+static int init_flush_cpumask(void)
+{
+	alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
+	return 0;
+}
+early_initcall(init_flush_cpumask);
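
Throughout flush_tlb_current_task(), flush_tlb_mm() and flush_tlb_page() above, the old "copy the mm's mask, clear the current CPU, test for empty" sequence is replaced by a single cpumask_any_but() test on mm->cpu_vm_mask. A minimal sketch of the equivalence, illustrative only and with made-up helper names:

/* Illustration only: "is any CPU other than 'cpu' set in the mask?" */
#include <linux/cpumask.h>
#include <linux/types.h>

/* Old idiom: copies NR_CPUS bits onto the stack just to test them. */
static bool other_cpus_in_mask_old(cpumask_t mask, int cpu)
{
	cpu_clear(cpu, mask);
	return !cpus_empty(mask);
}

/*
 * New idiom: no copy.  cpumask_any_but() returns the number of some set
 * CPU other than 'cpu', or a value >= nr_cpu_ids when no such CPU exists.
 */
static bool other_cpus_in_mask_new(const struct cpumask *mask, int cpu)
{
	return cpumask_any_but(mask, cpu) < nr_cpu_ids;
}
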
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index f8be6f1d2e48..38836aef51b4 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -43,10 +43,10 @@
 
 union smp_flush_state {
 	struct {
-		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
 		spinlock_t tlbstate_lock;
+		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
 	char pad[SMP_CACHE_BYTES];
 } ____cacheline_aligned;
@@ -131,7 +131,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
 	f = &per_cpu(flush_state, sender);
 
-	if (!cpu_isset(cpu, f->flush_cpumask))
+	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
 		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
@@ -153,19 +153,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 out:
 	ack_APIC_irq();
-	cpu_clear(cpu, f->flush_cpumask);
+	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
 	inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+				 struct mm_struct *mm, unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
-	cpumask_t cpumask = *cpumaskp;
-
-	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-		return;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -180,7 +176,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 
 	f->flush_mm = mm;
 	f->flush_va = va;
-	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+	cpumask_andnot(to_cpumask(f->flush_cpumask),
+		       cpumask, cpumask_of(smp_processor_id()));
 
 	/*
 	 * Make the above memory operations globally visible before
@@ -191,9 +188,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpus_empty(f->flush_cpumask))
+	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
 		cpu_relax();
 
 	f->flush_mm = NULL;
@@ -201,6 +198,24 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	spin_unlock(&f->tlbstate_lock);
 }
 
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va)
+{
+	if (is_uv_system()) {
+		cpumask_var_t after_uv_flush;
+
+		if (alloc_cpumask_var(&after_uv_flush, GFP_ATOMIC)) {
+			cpumask_andnot(after_uv_flush,
+				       cpumask, cpumask_of(smp_processor_id()));
+			if (!uv_flush_tlb_others(after_uv_flush, mm, va))
+				flush_tlb_others_ipi(after_uv_flush, mm, va);
+			free_cpumask_var(after_uv_flush);
+			return;
+		}
+	}
+	flush_tlb_others_ipi(cpumask, mm, va);
+}
+
 static int __cpuinit init_smp_flush(void)
 {
 	int i;
@@ -215,25 +230,18 @@ core_initcall(init_smp_flush);
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
-
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -241,8 +249,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 	else
 		leave_mm(smp_processor_id());
 	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -250,11 +258,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -263,8 +268,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		leave_mm(smp_processor_id());
 	}
 
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
 	preempt_enable();
 }
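
In tlb_64.c the per-sender flush state does not become a cpumask_var_t; instead the commit embeds a fixed NR_CPUS-sized bitmap in union smp_flush_state and accesses it through to_cpumask(), apparently so the per-cpu flush state keeps a statically sized footprint with nothing to allocate. A minimal sketch of that idiom, illustrative only, with a hypothetical struct and helper names:

/* Illustration only: using a raw bitmap through the struct cpumask API. */
#include <linux/cpumask.h>
#include <linux/types.h>

struct flush_state_example {			/* hypothetical */
	DECLARE_BITMAP(pending, NR_CPUS);	/* fixed-size storage */
};

static void example_mark_pending(struct flush_state_example *f, int cpu)
{
	/* to_cpumask() reinterprets the bitmap as a struct cpumask *. */
	cpumask_set_cpu(cpu, to_cpumask(f->pending));
}

static bool example_still_pending(struct flush_state_example *f)
{
	return !cpumask_empty(to_cpumask(f->pending));
}

The real union smp_flush_state above places DECLARE_BITMAP(flush_cpumask, NR_CPUS) after the spinlock, and every access in the diff goes through to_cpumask(f->flush_cpumask).
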
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e0..690dcf1a27d4 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * The cpumaskp mask contains the cpus the broadcast was sent to.
  *
  * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns 0 if some remote flushing remains to be done. The mask will have
+ * some bits still set.
  */
 int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-			   cpumask_t *cpumaskp)
+			   struct cpumask *cpumaskp)
 {
 	int completion_status = 0;
 	int right_shift;
@@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 	 * Success, so clear the remote cpu's from the mask so we don't
 	 * use the IPI method of shootdown on them.
 	 */
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		if (blade == this_blade)
 			continue;
-		cpu_clear(bit, *cpumaskp);
+		cpumask_clear_cpu(bit, cpumaskp);
 	}
-	if (!cpus_empty(*cpumaskp))
+	if (!cpumask_empty(cpumaskp))
 		return 0;
 	return 1;
 }
@@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 1 if all remote flushing was done.
  * Returns 0 if some remote flushing remains to be done.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
+int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
 			unsigned long va)
 {
 	int i;
@@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
 	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
 	i = 0;
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
 		if (blade == this_blade) {