Diffstat (limited to 'arch/x86_64/kernel/smp.c')
 arch/x86_64/kernel/smp.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4a6628b14d99..5a1c0a3bf872 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	cpu = smp_processor_id();
 	/*
-	 * orig_rax contains the interrupt vector - 256.
+	 * orig_rax contains the negated interrupt vector.
 	 * Use that to determine where the sender put the data.
 	 */
-	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
 	f = &per_cpu(flush_state, sender);
 
 	if (!cpu_isset(cpu, f->flush_cpumask))
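
Note: the sender-CPU decoding changes because the interrupt entry path now saves the negated vector in orig_rax instead of vector - 256 (that convention is implied by the new comment). A minimal arithmetic sketch, assuming orig_rax == ~vector; the constant value and the scaffolding below are illustrative, not taken from the patch:

#include <stdio.h>

/* Illustrative value; the real constant lives in the x86_64 headers. */
#define INVALIDATE_TLB_VECTOR_START 0xf0

int main(void)
{
	long vector = INVALIDATE_TLB_VECTOR_START + 3;	/* sender used slot 3 */
	long orig_rax = ~vector;			/* what the entry stub would save */

	/* Recover the sender index exactly as the new code does. */
	long sender = ~orig_rax - INVALIDATE_TLB_VECTOR_START;

	printf("sender = %ld\n", sender);		/* prints 3 */
	return 0;
}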
@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
 	flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 	preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_current_task);
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 
 	preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
 	preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void* info)
 {
@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	spin_unlock(&call_lock);
 	return 0;
 }
+EXPORT_SYMBOL(smp_call_function);
 
 void smp_stop_cpu(void)
 {
@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
 {
 	smp_stop_cpu();
 	for (;;)
-		asm("hlt");
+		halt();
 }
 
 void smp_send_stop(void)
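
Note: replacing the open-coded asm("hlt") with halt() uses the arch helper instead of a bare inline asm statement, keeping the instruction and its compiler annotations in one place. A rough sketch of what such a wrapper typically looks like; the actual definition lives in the x86_64 headers and is assumed here, not shown by this patch:

/* Sketch only: halt() assumed to execute hlt with a memory clobber so the
 * compiler does not cache memory accesses across the instruction. */
#define halt() __asm__ __volatile__("hlt": : :"memory")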
@@ -470,7 +474,7 @@ void smp_send_stop(void)
 		return;
 	/* Don't deadlock on the call lock in panic */
 	if (!spin_trylock(&call_lock)) {
-		/* ignore locking because we have paniced anyways */
+		/* ignore locking because we have panicked anyways */
 		nolock = 1;
 	}
 	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
 
 int safe_smp_processor_id(void)
 {
-	int apicid, i;
+	unsigned apicid, i;
 
 	if (disable_apic)
 		return 0;
 
 	apicid = hard_smp_processor_id();
-	if (x86_cpu_to_apicid[apicid] == apicid)
+	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
 		return apicid;
 
 	for (i = 0; i < NR_CPUS; ++i) {
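
Note: the added apicid < NR_CPUS check guards the x86_cpu_to_apicid[] lookup. On systems whose APIC IDs are not densely packed, hard_smp_processor_id() can return a value at or above NR_CPUS, and using it directly as an array index would read out of bounds; the fallback loop below the fast path still finds the CPU by scanning. A small, self-contained illustration of the guarded-lookup pattern; the array name and values here are made up:

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical mapping from logical CPU number to APIC ID. */
static unsigned char cpu_to_apicid[NR_CPUS] = { 0, 1, 6, 7 };

/* Return the logical CPU whose APIC ID is 'apicid', or -1 if unknown. */
static int lookup_cpu(unsigned apicid)
{
	unsigned i;

	/* Fast path mirrors the patch: only index with 'apicid' when it
	 * cannot overrun the array. */
	if (apicid < NR_CPUS && cpu_to_apicid[apicid] == apicid)
		return apicid;

	/* Slow path: scan the table. */
	for (i = 0; i < NR_CPUS; ++i)
		if (cpu_to_apicid[i] == apicid)
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", lookup_cpu(6));	/* prints 2 */
	printf("%d\n", lookup_cpu(9));	/* prints -1 */
	return 0;
}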