aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2007-05-15 04:41:48 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-15 11:54:00 -0400
commit297d9c035edd04327fedc0d1da27c2b112b66fcc (patch)
treec5a2f1258def2f54790b57afc0bea9dc49563773
parent838c41184fee5e151c09972f2ba90c16493af614 (diff)
i386: move common parts of smp into their own file
Several parts of kernel/smp.c and smpboot.c are generally useful for other subarchitectures and paravirt_ops implementations, so make them available for reuse.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/i386/kernel/Makefile1
-rw-r--r--arch/i386/kernel/smp.c65
-rw-r--r--arch/i386/kernel/smpboot.c22
-rw-r--r--arch/i386/kernel/smpcommon.c79
-rw-r--r--include/asm-i386/processor.h4
5 files changed, 90 insertions(+), 81 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 91cff8dc9e1a..06da59f6f837 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
19obj-$(CONFIG_MICROCODE) += microcode.o 19obj-$(CONFIG_MICROCODE) += microcode.o
20obj-$(CONFIG_APM) += apm.o 20obj-$(CONFIG_APM) += apm.o
21obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o 21obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
22obj-$(CONFIG_SMP) += smpcommon.o
22obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o 23obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
23obj-$(CONFIG_X86_MPPARSE) += mpparse.o 24obj-$(CONFIG_X86_MPPARSE) += mpparse.o
24obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o 25obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 706bda72dc60..c9a7c9835aba 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
467 * it goes straight through and wastes no time serializing 467 * it goes straight through and wastes no time serializing
468 * anything. Worst case is that we lose a reschedule ... 468 * anything. Worst case is that we lose a reschedule ...
469 */ 469 */
470void native_smp_send_reschedule(int cpu) 470static void native_smp_send_reschedule(int cpu)
471{ 471{
472 WARN_ON(cpu_is_offline(cpu)); 472 WARN_ON(cpu_is_offline(cpu));
473 send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); 473 send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*func) (void *info), void *info,
546 * You must not call this function with disabled interrupts or from a 546 * You must not call this function with disabled interrupts or from a
547 * hardware interrupt handler or from a bottom half handler. 547 * hardware interrupt handler or from a bottom half handler.
548 */ 548 */
549int native_smp_call_function_mask(cpumask_t mask, 549static int
550 void (*func)(void *), void *info, 550native_smp_call_function_mask(cpumask_t mask,
551 int wait) 551 void (*func)(void *), void *info,
552 int wait)
552{ 553{
553 struct call_data_struct data; 554 struct call_data_struct data;
554 cpumask_t allbutself; 555 cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
599 return 0; 600 return 0;
600} 601}
601 602
602/**
603 * smp_call_function(): Run a function on all other CPUs.
604 * @func: The function to run. This must be fast and non-blocking.
605 * @info: An arbitrary pointer to pass to the function.
606 * @nonatomic: Unused.
607 * @wait: If true, wait (atomically) until function has completed on other CPUs.
608 *
609 * Returns 0 on success, else a negative status code.
610 *
611 * If @wait is true, then returns once @func has returned; otherwise
612 * it returns just before the target cpu calls @func.
613 *
614 * You must not call this function with disabled interrupts or from a
615 * hardware interrupt handler or from a bottom half handler.
616 */
617int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
618 int wait)
619{
620 return smp_call_function_mask(cpu_online_map, func, info, wait);
621}
622EXPORT_SYMBOL(smp_call_function);
623
624/**
625 * smp_call_function_single - Run a function on another CPU
626 * @cpu: The target CPU. Cannot be the calling CPU.
627 * @func: The function to run. This must be fast and non-blocking.
628 * @info: An arbitrary pointer to pass to the function.
629 * @nonatomic: Unused.
630 * @wait: If true, wait until function has completed on other CPUs.
631 *
632 * Returns 0 on success, else a negative status code.
633 *
634 * If @wait is true, then returns once @func has returned; otherwise
635 * it returns just before the target cpu calls @func.
636 */
637int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
638 int nonatomic, int wait)
639{
640 /* prevent preemption and reschedule on another processor */
641 int ret;
642 int me = get_cpu();
643 if (cpu == me) {
644 WARN_ON(1);
645 put_cpu();
646 return -EBUSY;
647 }
648
649 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
650
651 put_cpu();
652 return ret;
653}
654EXPORT_SYMBOL(smp_call_function_single);
655
656static void stop_this_cpu (void * dummy) 603static void stop_this_cpu (void * dummy)
657{ 604{
658 local_irq_disable(); 605 local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
670 * this function calls the 'stop' function on all other CPUs in the system. 617 * this function calls the 'stop' function on all other CPUs in the system.
671 */ 618 */
672 619
673void native_smp_send_stop(void) 620static void native_smp_send_stop(void)
674{ 621{
675 /* Don't deadlock on the call lock in panic */ 622 /* Don't deadlock on the call lock in panic */
676 int nolock = !spin_trylock(&call_lock); 623 int nolock = !spin_trylock(&call_lock);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b92cc4e8b3bb..08f07a74a9d3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
98 98
99u8 apicid_2_node[MAX_APICID]; 99u8 apicid_2_node[MAX_APICID];
100 100
101DEFINE_PER_CPU(unsigned long, this_cpu_off);
102EXPORT_PER_CPU_SYMBOL(this_cpu_off);
103
104/* 101/*
105 * Trampoline 80x86 program as an array. 102 * Trampoline 80x86 program as an array.
106 */ 103 */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
763#define alloc_idle_task(cpu) fork_idle(cpu) 760#define alloc_idle_task(cpu) fork_idle(cpu)
764#endif 761#endif
765 762
766/* Initialize the CPU's GDT. This is either the boot CPU doing itself
767 (still using the master per-cpu area), or a CPU doing it for a
768 secondary which will soon come up. */
769static __cpuinit void init_gdt(int cpu)
770{
771 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
772
773 pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
774 (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
775 __per_cpu_offset[cpu], 0xFFFFF,
776 0x80 | DESCTYPE_S | 0x2, 0x8);
777
778 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
779 per_cpu(cpu_number, cpu) = cpu;
780}
781
782/* Defined in head.S */
783extern struct Xgt_desc_struct early_gdt_descr;
784
785static int __cpuinit do_boot_cpu(int apicid, int cpu) 763static int __cpuinit do_boot_cpu(int apicid, int cpu)
786/* 764/*
787 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 765 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
new file mode 100644
index 000000000000..1868ae18eb4d
--- /dev/null
+++ b/arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
1/*
2 * SMP stuff which is common to all sub-architectures.
3 */
4#include <linux/module.h>
5#include <asm/smp.h>
6
7DEFINE_PER_CPU(unsigned long, this_cpu_off);
8EXPORT_PER_CPU_SYMBOL(this_cpu_off);
9
10/* Initialize the CPU's GDT. This is either the boot CPU doing itself
11 (still using the master per-cpu area), or a CPU doing it for a
12 secondary which will soon come up. */
13__cpuinit void init_gdt(int cpu)
14{
15 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16
17 pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
18 (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
19 __per_cpu_offset[cpu], 0xFFFFF,
20 0x80 | DESCTYPE_S | 0x2, 0x8);
21
22 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
23 per_cpu(cpu_number, cpu) = cpu;
24}
25
26
27/**
28 * smp_call_function(): Run a function on all other CPUs.
29 * @func: The function to run. This must be fast and non-blocking.
30 * @info: An arbitrary pointer to pass to the function.
31 * @nonatomic: Unused.
32 * @wait: If true, wait (atomically) until function has completed on other CPUs.
33 *
34 * Returns 0 on success, else a negative status code.
35 *
36 * If @wait is true, then returns once @func has returned; otherwise
37 * it returns just before the target cpu calls @func.
38 *
39 * You must not call this function with disabled interrupts or from a
40 * hardware interrupt handler or from a bottom half handler.
41 */
42int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
43 int wait)
44{
45 return smp_call_function_mask(cpu_online_map, func, info, wait);
46}
47EXPORT_SYMBOL(smp_call_function);
48
49/**
50 * smp_call_function_single - Run a function on another CPU
51 * @cpu: The target CPU. Cannot be the calling CPU.
52 * @func: The function to run. This must be fast and non-blocking.
53 * @info: An arbitrary pointer to pass to the function.
54 * @nonatomic: Unused.
55 * @wait: If true, wait until function has completed on other CPUs.
56 *
57 * Returns 0 on success, else a negative status code.
58 *
59 * If @wait is true, then returns once @func has returned; otherwise
60 * it returns just before the target cpu calls @func.
61 */
62int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
63 int nonatomic, int wait)
64{
65 /* prevent preemption and reschedule on another processor */
66 int ret;
67 int me = get_cpu();
68 if (cpu == me) {
69 WARN_ON(1);
70 put_cpu();
71 return -EBUSY;
72 }
73
74 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
75
76 put_cpu();
77 return ret;
78}
79EXPORT_SYMBOL(smp_call_function_single);
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 70f3515c3db0..338668bfb0a2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_override;
749extern void enable_sep_cpu(void); 749extern void enable_sep_cpu(void);
750extern int sysenter_setup(void); 750extern int sysenter_setup(void);
751 751
752/* Defined in head.S */
753extern struct Xgt_desc_struct early_gdt_descr;
754
752extern void cpu_set_gdt(int); 755extern void cpu_set_gdt(int);
753extern void switch_to_new_gdt(void); 756extern void switch_to_new_gdt(void);
754extern void cpu_init(void); 757extern void cpu_init(void);
758extern void init_gdt(int cpu);
755 759
756extern int force_mwait; 760extern int force_mwait;
757 761