Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/Makefile            |   1
-rw-r--r--  arch/i386/kernel/smp.c               |  65
-rw-r--r--  arch/i386/kernel/smpboot.c           |  22
-rw-r--r--  arch/i386/kernel/smpcommon.c         |  79
-rw-r--r--  arch/i386/kernel/traps.c             |   5
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c | 106

6 files changed, 127 insertions(+), 151 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 91cff8dc9e1a..06da59f6f837 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_MICROCODE)		+= microcode.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_X86_SMP)		+= smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP)		+= smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 706bda72dc60..c9a7c9835aba 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+			      void (*func)(void *), void *info,
+			      int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
 	return 0;
 }
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		WARN_ON(1);
-		put_cpu();
-		return -EBUSY;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 static void stop_this_cpu (void * dummy)
 {
 	local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
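
The native_* functions above can drop their external linkage because generic code now reaches them through the smp_ops indirection table rather than by direct symbol reference. A minimal sketch of the native-side registration, assuming an smp_ops definition along these lines elsewhere in smp.c (the member list here is illustrative; the hunks above do not show it):

	struct smp_ops smp_ops = {
		/* dispatch targets for the generic SMP entry points */
		.smp_send_reschedule    = native_smp_send_reschedule,
		.smp_call_function_mask = native_smp_call_function_mask,
		.smp_send_stop          = native_smp_send_stop,
	};
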
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b92cc4e8b3bb..08f07a74a9d3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
 #define alloc_idle_task(cpu) fork_idle(cpu)
 #endif
 
-/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x80 | DESCTYPE_S | 0x2, 0x8);
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
new file mode 100644
index 000000000000..1868ae18eb4d
--- /dev/null
+++ b/arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
+   (still using the master per-cpu area), or a CPU doing it for a
+   secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+			__per_cpu_offset[cpu], 0xFFFFF,
+			0x80 | DESCTYPE_S | 0x2, 0x8);
+
+	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+	per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
+{
+	/* prevent preemption and reschedule on another processor */
+	int ret;
+	int me = get_cpu();
+	if (cpu == me) {
+		WARN_ON(1);
+		put_cpu();
+		return -EBUSY;
+	}
+
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
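
For context, the helpers moved into this file keep their documented calling convention: they may only be used with interrupts enabled, never from hard-IRQ or bottom-half context, and @nonatomic is ignored. A minimal usage sketch (the callback and wrapper names here are illustrative, not part of this patch):

	static void flush_local_stats(void *unused)
	{
		/* runs on each remote CPU from IPI context:
		 * must be fast and non-blocking */
	}

	static void flush_all_stats(void)
	{
		/* run on every other online CPU, waiting for completion */
		smp_call_function(flush_local_stats, NULL, 0, 1);
		/* ... then on this CPU */
		flush_local_stats(NULL);
	}
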
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index c05e7e861b29..90da0575fcff 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -733,11 +733,6 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
 	 */
 	if (nmi_watchdog_tick(regs, reason))
 		return;
-#endif
-	if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
-	    == NOTIFY_STOP)
-		return;
-#ifdef CONFIG_X86_LOCAL_APIC
 	if (!do_nmi_callback(regs, smp_processor_id()))
 #endif
 		unknown_nmi_error(reason, regs);
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 50d9c52070b1..b87f8548e75a 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -27,7 +27,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
-#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,7 +421,7 @@ find_smp_config(void)
 		VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	write_pda(cpu_number, boot_cpu_id);
+	x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -435,7 +434,7 @@ smp_store_cpu_info(int id)
 
 	*c = boot_cpu_data;
 
-	identify_cpu(c);
+	identify_secondary_cpu(c);
 }
 
 /* set up the trampoline and return the physical address of the code */
@@ -459,7 +458,7 @@ start_secondary(void *unused)
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
 
-	secondary_cpu_init();
+	cpu_init();
 
 	/* OK, we're in the routine */
 	ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +571,9 @@ do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
-	init_gdt(cpu, idle);
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
 
 	/* Note: Don't modify initial ss override */
@@ -859,8 +860,8 @@ smp_invalidate_interrupt(void)
 
 /* This routine is called with a physical cpu mask */
 static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
-		  unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+			  unsigned long va)
 {
 	int stuck = 50000;
 
@@ -912,7 +913,7 @@ flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -934,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm)
 		leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -955,7 +956,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	}
 
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, va);
+		voyager_flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
 }
@@ -1044,10 +1045,12 @@ smp_call_function_interrupt(void)
 }
 
 static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
-			  int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask,
+				void (*func) (void *info), void *info,
+				int wait)
 {
 	struct call_data_struct data;
+	u32 mask = cpus_addr(cpumask)[0];
 
 	mask &= ~(1<<smp_processor_id());
 
@@ -1083,47 +1086,6 @@ __smp_call_function_mask (void (*func) (void *info), void *info, int retry,
 	return 0;
 }
 
-/* Call this function on all CPUs using the function_interrupt above
-    <func> The function to run. This must be fast and non-blocking.
-    <info> An arbitrary pointer to pass to the function.
-    <retry> If true, keep retrying until ready.
-    <wait> If true, wait until function has completed on other CPUs.
-    [RETURNS] 0 on success, else a negative status code. Does not return until
-    remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
-		  int wait)
-{
-	__u32 mask = cpus_addr(cpu_online_map)[0];
-
-	return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			 int nonatomic, int wait)
-{
-	__u32 mask = 1 << cpu;
-
-	return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 /* Sorry about the name.  In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt.  This is used
  * by linux to reschedule the processor.  Voyager doesn't have this,
@@ -1237,8 +1199,8 @@ smp_alloc_memory(void)
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
 {
 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
 }
@@ -1267,8 +1229,8 @@ safe_smp_processor_id(void)
 }
 
 /* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
 {
 	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 }
@@ -1930,23 +1892,26 @@ smp_voyager_power_off(void *dummy)
 	smp_stop_cpu_function(NULL);
 }
 
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
 {
 	/* FIXME: ignore max_cpus for now */
 	smp_boot_cpus();
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
 {
+	init_gdt(smp_processor_id());
+	switch_to_new_gdt();
+
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
 	cpu_set(smp_processor_id(), cpu_possible_map);
 	cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
 	if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1927,8 @@ __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
 {
 	zap_low_mappings();
 }
@@ -1972,5 +1937,16 @@ void __init
 smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	write_pda(cpu_number, hard_smp_processor_id());
+	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = voyager_smp_prepare_cpus,
+	.cpu_up = voyager_cpu_up,
+	.smp_cpus_done = voyager_smp_cpus_done,
+
+	.smp_send_stop = voyager_smp_send_stop,
+	.smp_send_reschedule = voyager_smp_send_reschedule,
+	.smp_call_function_mask = voyager_smp_call_function_mask,
+};
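
With this table in place, Voyager no longer defines the generic SMP entry points itself; the common wrappers dispatch through smp_ops instead. Roughly, a sketch of that indirection (the header side of the change lives in include/asm-i386/smp.h and is outside this diffstat, so the exact wrapper bodies are assumptions):

	static inline void smp_send_reschedule(int cpu)
	{
		/* dispatch to native or voyager implementation */
		smp_ops.smp_send_reschedule(cpu);
	}

	static inline int smp_call_function_mask(cpumask_t mask,
						 void (*func)(void *), void *info,
						 int wait)
	{
		return smp_ops.smp_call_function_mask(mask, func, info, wait);
	}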