Diffstat (limited to 'arch/mips')

-rw-r--r--  arch/mips/Kconfig                        1
-rw-r--r--  arch/mips/kernel/irq-rm9000.c            4
-rw-r--r--  arch/mips/kernel/smp.c                 149
-rw-r--r--  arch/mips/kernel/smtc.c                  1
-rw-r--r--  arch/mips/kernel/stacktrace.c            1
-rw-r--r--  arch/mips/mm/c-r4k.c                    18
-rw-r--r--  arch/mips/oprofile/common.c              6
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c     4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c     2
-rw-r--r--  arch/mips/sibyte/cfe/setup.c             2
-rw-r--r--  arch/mips/sibyte/sb1250/prom.c           2
-rw-r--r--  arch/mips/sibyte/swarm/Makefile          1
-rw-r--r--  arch/mips/sibyte/swarm/swarm-i2c.c      37

13 files changed, 74 insertions, 154 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d23204e29e16..d21df5f1b1f3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1657,6 +1657,7 @@ config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
 	select IRQ_PER_CPU
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ed9febe63d72..b47e4615ec12 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
 
 static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
 
 static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cdf87a9dd4ba..4410f172b8ab 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A                               CPU B
- * Disable interrupts
- *                                     smp_call_function()
- *                                     Take call_lock
- *                                     Send IPIs
- *                                     Wait for all cpus to acknowledge IPI
- *                                     CPU A has not responded, spin waiting
- *                                     for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock                            Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
 }
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
-EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-				     wait);
-
-	put_cpu();
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
 static void stop_this_cpu(void *dummy)
 {
@@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
-	smp_call_function(func, info, 1, 1);
+	smp_call_function(func, info, 1);
 #endif
 }
 
@@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		.addr2 = end,
 	};
 
-	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
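
The caller-visible effect of the smp.c conversion above is a signature change: smp_call_function() and on_each_cpu() drop the old retry argument and keep only wait, while the architecture now supplies only arch_send_call_function_ipi(), arch_send_call_function_single_ipi() and the shared IPI handler; the queueing and completion waiting move to the generic helpers in kernel/smp.c. Below is a minimal sketch of a caller after the change — the do_flush() callback and flush_everywhere() wrapper are made-up names for illustration, not code from this commit.

#include <linux/smp.h>

/* Hypothetical per-CPU callback; runs on every CPU with interrupts disabled. */
static void do_flush(void *info)
{
	/* ... per-CPU work ... */
}

/* Hypothetical caller, showing the post-conversion argument list. */
static void flush_everywhere(void)
{
	/*
	 * Before: on_each_cpu(do_flush, NULL, 0, 1);  -- (func, info, retry, wait)
	 * After:  the retry argument is gone; 1 means wait for completion.
	 */
	on_each_cpu(do_flush, NULL, 1);
}
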
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3e863186cd22..a516286532ab 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void)
 	/* Return from interrupt should be enough to cause scheduler check */
 }
 
-
 static void ipi_call_interrupt(void)
 {
 	/* Invoke generic function invocation code in smp.c */
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index ebd9db8d1ece..5eb4681a73d2 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -73,3 +73,4 @@ void save_stack_trace(struct stack_trace *trace)
 	prepare_frametrace(regs);
 	save_context_stack(trace, regs);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 27096751ddce..71df3390c07b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
  * primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, retry, wait);
+	smp_call_function(func, info, wait);
 #endif
 	func(info);
 	preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1, 1);
+				1);
 }
 
 struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
 	instruction_hazard();
 }
 
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b5f6f71b27bc..dd2fbd6645c1 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
 	model->reg_setup(ctr);
 
 	/* Configure the registers on all cpus. */
-	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+	on_each_cpu(model->cpu_setup, NULL, 1);
 
 	return 0;
 }
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
 
 static int op_mips_start(void)
 {
-	on_each_cpu(model->cpu_start, NULL, 0, 1);
+	on_each_cpu(model->cpu_start, NULL, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static int op_mips_start(void)
 static void op_mips_stop(void)
 {
 	/* Disable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_stop, NULL, 0, 1);
+	on_each_cpu(model->cpu_stop, NULL, 1);
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index b40df7d2cf44..54759f1669d3 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -313,7 +313,7 @@ static int __init mipsxx_init(void)
 	if (!cpu_has_mipsmt_pertccounters)
 		counters = counters_total_to_per_cpu(counters);
 #endif
-	on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
 	op_model_mipsxx_ops.num_counters = counters;
 	switch (current_cpu_type()) {
@@ -382,7 +382,7 @@ static void mipsxx_exit(void)
 	int counters = op_model_mipsxx_ops.num_counters;
 
 	counters = counters_per_cpu_to_total(counters);
-	on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
 	perf_irq = save_perf_irq;
 }
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435846a6..cf4c868715ac 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
 #ifdef CONFIG_SMP
 	if (smp_processor_id())
 		/* CPU 1 */
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
 	prom_cpu0_exit(NULL);
 }
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce826f8bf..fd9604d5555a 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
 		if (!reboot_smp) {
 			/* Get CPU 0 to do the cfe_exit */
 			reboot_smp = 1;
-			smp_call_function(cfe_linux_exit, arg, 1, 0);
+			smp_call_function(cfe_linux_exit, arg, 0);
 		}
 	} else {
 		printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3de86c..65b1af66b674 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
 {
 #ifdef CONFIG_SMP
 	if (smp_processor_id()) {
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 	}
 #endif
 	while(1);
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index 1775755a2619..255d692bfa18 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
 obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o
 
+obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
 obj-$(CONFIG_KGDB) += dbg_io.o
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c
new file mode 100644
index 000000000000..4282ac9d01d2
--- /dev/null
+++ b/arch/mips/sibyte/swarm/swarm-i2c.c
@@ -0,0 +1,37 @@
+/*
+ * arch/mips/sibyte/swarm/swarm-i2c.c
+ *
+ * Broadcom BCM91250A (SWARM), etc. I2C platform setup.
+ *
+ * Copyright (c) 2008  Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+
+static struct i2c_board_info swarm_i2c_info1[] __initdata = {
+	{
+		I2C_BOARD_INFO("m41t81", 0x68),
+	},
+};
+
+static int __init swarm_i2c_init(void)
+{
+	int err;
+
+	err = i2c_register_board_info(1, swarm_i2c_info1,
+				      ARRAY_SIZE(swarm_i2c_info1));
+	if (err < 0)
+		printk(KERN_ERR
+		       "swarm-i2c: cannot register board I2C devices\n");
+	return err;
+}
+
+arch_initcall(swarm_i2c_init);
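
For reference, i2c_register_board_info() takes the adapter (bus) number, an array of i2c_board_info entries and the array length, so further pre-declared devices on the same SMBus channel would simply be additional array entries. A hedged sketch follows — the second entry, its "lm90" name and 0x4c address are purely illustrative assumptions, not something this commit registers:

/* Hypothetical extension of the table added above; only the m41t81 entry
 * comes from the commit, the lm90 sensor entry is an illustrative assumption. */
static struct i2c_board_info swarm_i2c_info1[] __initdata = {
	{
		I2C_BOARD_INFO("m41t81", 0x68),	/* RTC, as in the new file above */
	},
	{
		I2C_BOARD_INFO("lm90", 0x4c),	/* hypothetical extra sensor */
	},
};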