diff options
author | Sam Ravnborg <sam@ravnborg.org> | 2012-05-14 09:14:36 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-05-14 17:05:07 -0400 |
commit | 4ba22b16bbf354822b7988ec5b4b35774dcd479f (patch) | |
tree | 3476a58b0d8788b89b575f4926e294f4b8bbdd43 /arch/sparc/kernel/smp_32.c | |
parent | c68e5d39a502d01421cbc70d25c377e9215facef (diff) |
sparc32: move smp ipi to method ops
I ended up renaming set_cpu_int to send_ipi to
be consistent all the way around.
send_ipi was moved to the *_smp.c files so
we could call the relevant method directly,
without any _ops indirection.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r-- | arch/sparc/kernel/smp_32.c | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 8cd5c79f6193..57713758079e 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -40,6 +40,8 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; | |||
40 | 40 | ||
41 | cpumask_t smp_commenced_mask = CPU_MASK_NONE; | 41 | cpumask_t smp_commenced_mask = CPU_MASK_NONE; |
42 | 42 | ||
43 | const struct sparc32_ipi_ops *sparc32_ipi_ops; | ||
44 | |||
43 | /* The only guaranteed locking primitive available on all Sparc | 45 | /* The only guaranteed locking primitive available on all Sparc |
44 | * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically | 46 | * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically |
45 | * places the current byte at the effective address into dest_reg and | 47 | * places the current byte at the effective address into dest_reg and |
@@ -124,7 +126,7 @@ void smp_send_reschedule(int cpu) | |||
124 | * a single CPU. The trap handler needs only to do trap entry/return | 126 | * a single CPU. The trap handler needs only to do trap entry/return |
125 | * to call schedule. | 127 | * to call schedule. |
126 | */ | 128 | */ |
127 | BTFIXUP_CALL(smp_ipi_resched)(cpu); | 129 | sparc32_ipi_ops->resched(cpu); |
128 | } | 130 | } |
129 | 131 | ||
130 | void smp_send_stop(void) | 132 | void smp_send_stop(void) |
@@ -134,7 +136,7 @@ void smp_send_stop(void) | |||
134 | void arch_send_call_function_single_ipi(int cpu) | 136 | void arch_send_call_function_single_ipi(int cpu) |
135 | { | 137 | { |
136 | /* trigger one IPI single call on one CPU */ | 138 | /* trigger one IPI single call on one CPU */ |
137 | BTFIXUP_CALL(smp_ipi_single)(cpu); | 139 | sparc32_ipi_ops->single(cpu); |
138 | } | 140 | } |
139 | 141 | ||
140 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 142 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
@@ -143,7 +145,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
143 | 145 | ||
144 | /* trigger IPI mask call on each CPU */ | 146 | /* trigger IPI mask call on each CPU */ |
145 | for_each_cpu(cpu, mask) | 147 | for_each_cpu(cpu, mask) |
146 | BTFIXUP_CALL(smp_ipi_mask_one)(cpu); | 148 | sparc32_ipi_ops->mask_one(cpu); |
147 | } | 149 | } |
148 | 150 | ||
149 | void smp_resched_interrupt(void) | 151 | void smp_resched_interrupt(void) |