diff options
author | Glauber Costa <gcosta@redhat.com> | 2008-03-03 12:12:45 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:40:55 -0400 |
commit | e32640a2cd530e1259a06e34a72b0cdb73738ce2 (patch) | |
tree | 56cbc749b47c287c6bbb2a1f92ece7b81977f8f6 | |
parent | 3428f3d6caa3bc2adde050a2771a2821eb46f901 (diff) |
x86: create smpcommon.c
This patch creates smpcommon.c with functions that are
identical across architectures. The i386-only init_gdt
is ifdef'd.
Note that smpcommon.o appears twice in the Makefile:
this is because sub-architectures like Voyager that do
not use the normal smp_$(BITS) files also have to access them.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/kernel/Makefile | 4 | ||||
-rw-r--r-- | arch/x86/kernel/smp_64.c | 56 | ||||
-rw-r--r-- | arch/x86/kernel/smpcommon.c | 83 | ||||
-rw-r--r-- | arch/x86/kernel/smpcommon_32.c | 81 |
4 files changed, 85 insertions, 139 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4c68bfc6df1d..018d04d880db 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -47,8 +47,8 @@ obj-$(CONFIG_PCI) += early-quirks.o | |||
47 | apm-y := apm_32.o | 47 | apm-y := apm_32.o |
48 | obj-$(CONFIG_APM) += apm.o | 48 | obj-$(CONFIG_APM) += apm.o |
49 | obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o | 49 | obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o |
50 | obj-$(CONFIG_X86_32_SMP) += smpcommon_32.o | 50 | obj-$(CONFIG_X86_32_SMP) += smpcommon.o |
51 | obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o | 51 | obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o |
52 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | 52 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o |
53 | obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o | 53 | obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o |
54 | obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o | 54 | obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o |
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index b040224927ca..1d8b863fa357 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c | |||
@@ -401,62 +401,6 @@ int native_smp_call_function_mask(cpumask_t mask, | |||
401 | return ret; | 401 | return ret; |
402 | } | 402 | } |
403 | 403 | ||
404 | /* | ||
405 | * smp_call_function_single - Run a function on a specific CPU | ||
406 | * @func: The function to run. This must be fast and non-blocking. | ||
407 | * @info: An arbitrary pointer to pass to the function. | ||
408 | * @nonatomic: Currently unused. | ||
409 | * @wait: If true, wait until function has completed on other CPUs. | ||
410 | * | ||
411 | * Retrurns 0 on success, else a negative status code. | ||
412 | * | ||
413 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
414 | * or is or has executed. | ||
415 | */ | ||
416 | |||
417 | int smp_call_function_single (int cpu, void (*func) (void *info), void *info, | ||
418 | int nonatomic, int wait) | ||
419 | { | ||
420 | /* prevent preemption and reschedule on another processor */ | ||
421 | int ret, me = get_cpu(); | ||
422 | |||
423 | if (cpu == me) { | ||
424 | local_irq_disable(); | ||
425 | func(info); | ||
426 | local_irq_enable(); | ||
427 | put_cpu(); | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
432 | |||
433 | put_cpu(); | ||
434 | return ret; | ||
435 | } | ||
436 | EXPORT_SYMBOL(smp_call_function_single); | ||
437 | |||
438 | /* | ||
439 | * smp_call_function - run a function on all other CPUs. | ||
440 | * @func: The function to run. This must be fast and non-blocking. | ||
441 | * @info: An arbitrary pointer to pass to the function. | ||
442 | * @nonatomic: currently unused. | ||
443 | * @wait: If true, wait (atomically) until function has completed on other | ||
444 | * CPUs. | ||
445 | * | ||
446 | * Returns 0 on success, else a negative status code. Does not return until | ||
447 | * remote CPUs are nearly ready to execute func or are or have executed. | ||
448 | * | ||
449 | * You must not call this function with disabled interrupts or from a | ||
450 | * hardware interrupt handler or from a bottom half handler. | ||
451 | * Actually there are a few legal cases, like panic. | ||
452 | */ | ||
453 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | ||
454 | int wait) | ||
455 | { | ||
456 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
457 | } | ||
458 | EXPORT_SYMBOL(smp_call_function); | ||
459 | |||
460 | static void stop_this_cpu(void *dummy) | 404 | static void stop_this_cpu(void *dummy) |
461 | { | 405 | { |
462 | local_irq_disable(); | 406 | local_irq_disable(); |
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c new file mode 100644 index 000000000000..3449064d141a --- /dev/null +++ b/arch/x86/kernel/smpcommon.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * SMP stuff which is common to all sub-architectures. | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | #ifdef CONFIG_X86_32 | ||
8 | DEFINE_PER_CPU(unsigned long, this_cpu_off); | ||
9 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); | ||
10 | |||
11 | /* Initialize the CPU's GDT. This is either the boot CPU doing itself | ||
12 | (still using the master per-cpu area), or a CPU doing it for a | ||
13 | secondary which will soon come up. */ | ||
14 | __cpuinit void init_gdt(int cpu) | ||
15 | { | ||
16 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
17 | |||
18 | pack_descriptor(&gdt[GDT_ENTRY_PERCPU], | ||
19 | __per_cpu_offset[cpu], 0xFFFFF, | ||
20 | 0x2 | DESCTYPE_S, 0x8); | ||
21 | |||
22 | gdt[GDT_ENTRY_PERCPU].s = 1; | ||
23 | |||
24 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; | ||
25 | per_cpu(cpu_number, cpu) = cpu; | ||
26 | } | ||
27 | #endif | ||
28 | |||
29 | /** | ||
30 | * smp_call_function(): Run a function on all other CPUs. | ||
31 | * @func: The function to run. This must be fast and non-blocking. | ||
32 | * @info: An arbitrary pointer to pass to the function. | ||
33 | * @nonatomic: Unused. | ||
34 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
35 | * | ||
36 | * Returns 0 on success, else a negative status code. | ||
37 | * | ||
38 | * If @wait is true, then returns once @func has returned; otherwise | ||
39 | * it returns just before the target cpu calls @func. | ||
40 | * | ||
41 | * You must not call this function with disabled interrupts or from a | ||
42 | * hardware interrupt handler or from a bottom half handler. | ||
43 | */ | ||
44 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
45 | int wait) | ||
46 | { | ||
47 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
48 | } | ||
49 | EXPORT_SYMBOL(smp_call_function); | ||
50 | |||
51 | /** | ||
52 | * smp_call_function_single - Run a function on a specific CPU | ||
53 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
54 | * @func: The function to run. This must be fast and non-blocking. | ||
55 | * @info: An arbitrary pointer to pass to the function. | ||
56 | * @nonatomic: Unused. | ||
57 | * @wait: If true, wait until function has completed on other CPUs. | ||
58 | * | ||
59 | * Returns 0 on success, else a negative status code. | ||
60 | * | ||
61 | * If @wait is true, then returns once @func has returned; otherwise | ||
62 | * it returns just before the target cpu calls @func. | ||
63 | */ | ||
64 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
65 | int nonatomic, int wait) | ||
66 | { | ||
67 | /* prevent preemption and reschedule on another processor */ | ||
68 | int ret; | ||
69 | int me = get_cpu(); | ||
70 | if (cpu == me) { | ||
71 | local_irq_disable(); | ||
72 | func(info); | ||
73 | local_irq_enable(); | ||
74 | put_cpu(); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
79 | |||
80 | put_cpu(); | ||
81 | return ret; | ||
82 | } | ||
83 | EXPORT_SYMBOL(smp_call_function_single); | ||
diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c index 8bc38af29aef..8b137891791f 100644 --- a/arch/x86/kernel/smpcommon_32.c +++ b/arch/x86/kernel/smpcommon_32.c | |||
@@ -1,82 +1 @@ | |||
1 | /* | ||
2 | * SMP stuff which is common to all sub-architectures. | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | DEFINE_PER_CPU(unsigned long, this_cpu_off); | ||
8 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); | ||
9 | |||
10 | /* Initialize the CPU's GDT. This is either the boot CPU doing itself | ||
11 | (still using the master per-cpu area), or a CPU doing it for a | ||
12 | secondary which will soon come up. */ | ||
13 | __cpuinit void init_gdt(int cpu) | ||
14 | { | ||
15 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
16 | |||
17 | pack_descriptor(&gdt[GDT_ENTRY_PERCPU], | ||
18 | __per_cpu_offset[cpu], 0xFFFFF, | ||
19 | 0x2 | DESCTYPE_S, 0x8); | ||
20 | |||
21 | gdt[GDT_ENTRY_PERCPU].s = 1; | ||
22 | |||
23 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; | ||
24 | per_cpu(cpu_number, cpu) = cpu; | ||
25 | } | ||
26 | |||
27 | |||
28 | /** | ||
29 | * smp_call_function(): Run a function on all other CPUs. | ||
30 | * @func: The function to run. This must be fast and non-blocking. | ||
31 | * @info: An arbitrary pointer to pass to the function. | ||
32 | * @nonatomic: Unused. | ||
33 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
34 | * | ||
35 | * Returns 0 on success, else a negative status code. | ||
36 | * | ||
37 | * If @wait is true, then returns once @func has returned; otherwise | ||
38 | * it returns just before the target cpu calls @func. | ||
39 | * | ||
40 | * You must not call this function with disabled interrupts or from a | ||
41 | * hardware interrupt handler or from a bottom half handler. | ||
42 | */ | ||
43 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
44 | int wait) | ||
45 | { | ||
46 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
47 | } | ||
48 | EXPORT_SYMBOL(smp_call_function); | ||
49 | |||
50 | /** | ||
51 | * smp_call_function_single - Run a function on a specific CPU | ||
52 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
53 | * @func: The function to run. This must be fast and non-blocking. | ||
54 | * @info: An arbitrary pointer to pass to the function. | ||
55 | * @nonatomic: Unused. | ||
56 | * @wait: If true, wait until function has completed on other CPUs. | ||
57 | * | ||
58 | * Returns 0 on success, else a negative status code. | ||
59 | * | ||
60 | * If @wait is true, then returns once @func has returned; otherwise | ||
61 | * it returns just before the target cpu calls @func. | ||
62 | */ | ||
63 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
64 | int nonatomic, int wait) | ||
65 | { | ||
66 | /* prevent preemption and reschedule on another processor */ | ||
67 | int ret; | ||
68 | int me = get_cpu(); | ||
69 | if (cpu == me) { | ||
70 | local_irq_disable(); | ||
71 | func(info); | ||
72 | local_irq_enable(); | ||
73 | put_cpu(); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
78 | |||
79 | put_cpu(); | ||
80 | return ret; | ||
81 | } | ||
82 | EXPORT_SYMBOL(smp_call_function_single); | ||