path: root/arch/x86/kernel/smp_32.c
author	Glauber Costa <gcosta@redhat.com>	2008-03-03 12:12:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:40:56 -0400
commit	f9e47a126be2eaabf04a1a5c71ca7b23a473d0d8 (patch)
tree	5a16cf4ac06bb4031de1be5070281ef5f7847bf0 /arch/x86/kernel/smp_32.c
parent	377d698426b8c685fb6d48fe89694fe4ce3aa1f8 (diff)
x86: create smp.c
This patch moves all the functions and data structures that are exactly the same in smp_{32,64}.c into the common smp.c.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
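For context: the identical implementations removed below, including the struct smp_ops instance at the bottom of this file, move into the shared smp.c, and generic code keeps reaching them through the smp_ops hooks. A minimal sketch of that indirection, assuming the wrapper shape used by the x86 headers of this era (not copied verbatim from the patch):

	struct smp_ops {
		void (*smp_send_stop)(void);
		void (*smp_send_reschedule)(int cpu);
		int (*smp_call_function_mask)(cpumask_t mask,
					      void (*func)(void *info),
					      void *info, int wait);
		/* boot and cpu-hotplug hooks elided */
	};
	extern struct smp_ops smp_ops;

	/* Callers never name the native_* functions directly: */
	static inline void smp_send_reschedule(int cpu)
	{
		smp_ops.smp_send_reschedule(cpu);
	}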
Diffstat (limited to 'arch/x86/kernel/smp_32.c')
-rw-r--r--	arch/x86/kernel/smp_32.c	223
1 file changed, 0 insertions(+), 223 deletions(-)
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 8be3e091dcd0..61e546e85733 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -466,217 +466,6 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 }
 
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	WARN_ON(cpu_is_offline(cpu));
-	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-}
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-}
-
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
-{
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
-}
-
-static void stop_this_cpu (void * dummy)
-{
-	local_irq_disable();
-	/*
-	 * Remove this CPU:
-	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
-	disable_local_APIC();
-	if (hlt_works(smp_processor_id()))
-		for(;;) halt();
-	for (;;);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-
-static void native_smp_send_stop(void)
-{
-	int nolock;
-	unsigned long flags;
-
-	if (reboot_force)
-		return;
-
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
-	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
- */
-void smp_reschedule_interrupt(struct pt_regs *regs)
-{
-	ack_APIC_irq();
-	__get_cpu_var(irq_stat).irq_resched_count++;
-}
-
-void smp_call_function_interrupt(struct pt_regs *regs)
-{
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
-	irq_enter();
-	(*func)(info);
-	__get_cpu_var(irq_stat).irq_call_count++;
-	irq_exit();
-
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
 static int convert_apicid_to_cpu(int apic_id)
 {
 	int i;
@@ -703,15 +492,3 @@ int safe_smp_processor_id(void)
 
 	return cpuid >= 0 ? cpuid : 0;
 }
-
-struct smp_ops smp_ops = {
-	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
-	.smp_prepare_cpus = native_smp_prepare_cpus,
-	.cpu_up = native_cpu_up,
-	.smp_cpus_done = native_smp_cpus_done,
-
-	.smp_send_stop = native_smp_send_stop,
-	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
-};
-EXPORT_SYMBOL_GPL(smp_ops);
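For reference, the call-function path removed above is normally driven from generic code roughly as follows. This is a hypothetical caller sketch, using the four-argument smp_call_function() signature of this kernel era (func, info, nonatomic, wait); the callback name is invented for illustration:

	/* Runs from smp_call_function_interrupt() on each target CPU,
	 * in IPI context: it must be fast and must not sleep. */
	static void do_remote_work(void *unused)
	{
	}

	static void run_on_other_cpus(void)
	{
		/* Interrupts must be enabled here, per the
		 * WARN_ON(irqs_disabled()) in the mask variant above. */
		preempt_disable();
		smp_call_function(do_remote_work, NULL, 0, 1);
		preempt_enable();
	}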