Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                           1
-rw-r--r--  arch/mips/basler/excite/excite_iodev.c      9
-rw-r--r--  arch/mips/kernel/irq-rm9000.c               4
-rw-r--r--  arch/mips/kernel/rtlx.c                     7
-rw-r--r--  arch/mips/kernel/smp.c                    149
-rw-r--r--  arch/mips/kernel/smtc.c                     1
-rw-r--r--  arch/mips/kernel/stacktrace.c               1
-rw-r--r--  arch/mips/kernel/vpe.c                     12
-rw-r--r--  arch/mips/mm/c-r3k.c                        6
-rw-r--r--  arch/mips/mm/c-r4k.c                       18
-rw-r--r--  arch/mips/mm/page.c                        61
-rw-r--r--  arch/mips/mm/sc-rm7k.c                      4
-rw-r--r--  arch/mips/oprofile/common.c                 6
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c        4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c        2
-rw-r--r--  arch/mips/sibyte/cfe/setup.c                2
-rw-r--r--  arch/mips/sibyte/common/sb_tbprof.c        25
-rw-r--r--  arch/mips/sibyte/sb1250/prom.c              2
-rw-r--r--  arch/mips/sibyte/swarm/Makefile             1
-rw-r--r--  arch/mips/sibyte/swarm/swarm-i2c.c         37
20 files changed, 152 insertions, 200 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 24c5dee91768..d2be3ffca280 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1763,6 +1763,7 @@ config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
 	select IRQ_PER_CPU
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c
index 476d20e08d0e..a1e3526b4a94 100644
--- a/arch/mips/basler/excite/excite_iodev.c
+++ b/arch/mips/basler/excite/excite_iodev.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
 
 #include "excite_iodev.h"
 
@@ -110,8 +111,14 @@ static int __exit iodev_remove(struct device *dev)
 
 static int iodev_open(struct inode *i, struct file *f)
 {
-	return request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
+	int ret;
+
+	lock_kernel();
+	ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
 			   iodev_name, &miscdev);
+	unlock_kernel();
+
+	return ret;
 }
 
 static int iodev_release(struct inode *i, struct file *f)
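
The hunk above is one instance of the BKL pushdown applied to the character-device drivers touched by this diff (excite_iodev, rtlx, vpe, sb_tbprof): the VFS no longer takes the Big Kernel Lock around ->open(), so a driver that still relies on it takes lock_kernel()/unlock_kernel() around its own open path. A minimal sketch of the pattern, using a hypothetical driver rather than code from this diff:

	#include <linux/fs.h>
	#include <linux/smp_lock.h>

	static int foo_open(struct inode *inode, struct file *filp)
	{
		int err;

		lock_kernel();			/* serialize against other BKL users */
		err = foo_setup(iminor(inode), filp);	/* hypothetical helper doing the real work */
		unlock_kernel();

		return err;
	}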
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ed9febe63d72..b47e4615ec12 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
 
 static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
 
 static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b88f1c18ff4d..b55641961232 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/elf.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <linux/moduleloader.h>
 #include <linux/interrupt.h>
@@ -392,8 +393,12 @@ out:
 static int file_open(struct inode *inode, struct file *filp)
 {
 	int minor = iminor(inode);
+	int err;
 
-	return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+	lock_kernel();
+	err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+	unlock_kernel();
+	return err;
 }
 
 static int file_release(struct inode *inode, struct file *filp)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cdf87a9dd4ba..4410f172b8ab 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- *		CPU A			CPU B
- *		Disable interrupts
- *					smp_call_function()
- *					Take call_lock
- *					Send IPIs
- *					Wait for all cpus to acknowledge IPI
- *					CPU A has not responded, spin waiting
- *					for cpu A to respond, holding call_lock
- *		smp_call_function()
- *		Spin waiting for call_lock
- *		Deadlock		Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
 }
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
-EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-		wait);
-
-	put_cpu();
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
 static void stop_this_cpu(void *dummy)
 {
@@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
-	smp_call_function(func, info, 1, 1);
+	smp_call_function(func, info, 1);
 #endif
 }
 
@@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		.addr2 = end,
 	};
 
-	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
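
The smp.c rewrite above is the core of this series: with USE_GENERIC_SMP_HELPERS selected in the Kconfig hunk, the cross-CPU call bookkeeping (call_data, smp_call_lock, the started/finished counters) moves into the generic kernel/smp.c code, and the architecture only has to send the IPI (arch_send_call_function_ipi(), arch_send_call_function_single_ipi()) and dispatch the shared vector to the two generic handlers from smp_call_function_interrupt(). A visible side effect is that smp_call_function(), smp_call_function_single() and on_each_cpu() lose their unused retry argument, which is what the one-line changes in the remaining files adjust for. A small sketch of a caller before and after, with hypothetical function names:

	#include <linux/smp.h>

	static void flush_one(void *info)
	{
		/* per-CPU work; runs in IPI context, must not sleep */
	}

	static void flush_everywhere(void)
	{
		/* before this series: on_each_cpu(flush_one, NULL, 0, 1);  (func, info, retry, wait) */
		on_each_cpu(flush_one, NULL, 1);	/* now: (func, info, wait) */
	}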
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3e863186cd22..a516286532ab 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void)
 	/* Return from interrupt should be enough to cause scheduler check */
 }
 
-
 static void ipi_call_interrupt(void)
 {
 	/* Invoke generic function invocation code in smp.c */
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index ebd9db8d1ece..5eb4681a73d2 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -73,3 +73,4 @@ void save_stack_trace(struct stack_trace *trace)
 		prepare_frametrace(regs);
 	save_context_stack(trace, regs);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2794501ff302..972b2d2b8401 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/elf.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <linux/moduleloader.h>
 #include <linux/interrupt.h>
@@ -1050,17 +1051,20 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	enum vpe_state state;
 	struct vpe_notifications *not;
 	struct vpe *v;
-	int ret;
+	int ret, err = 0;
 
+	lock_kernel();
 	if (minor != iminor(inode)) {
 		/* assume only 1 device at the moment. */
 		printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
 	}
 
 	if ((v = get_vpe(tclimit)) == NULL) {
 		printk(KERN_WARNING "VPE loader: unable to get vpe\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1100,6 +1104,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	v->shared_ptr = NULL;
 	v->__start = 0;
 
+out:
+	unlock_kernel();
 	return 0;
 }
 
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 76935e320214..27a5b466c85c 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
 static unsigned long icache_size, dcache_size;		/* Size in bytes */
 static unsigned long icache_lsize, dcache_lsize;	/* Size in bytes */
 
-unsigned long __init r3k_cache_size(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
 {
 	unsigned long flags, status, dummy, size;
 	volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
 	return size * sizeof(*p);
 }
 
-unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
+unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
 {
 	unsigned long flags, status, lsize, i;
 	volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
 	return lsize * sizeof(*p);
 }
 
-static void __init r3k_probe_cache(void)
+static void __cpuinit r3k_probe_cache(void)
 {
 	dcache_size = r3k_cache_size(ST0_ISC);
 	if (dcache_size)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 27096751ddce..71df3390c07b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
  * primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, retry, wait);
+	smp_call_function(func, info, wait);
 #endif
 	func(info);
 	preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1, 1);
+				1);
 }
 
 struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
 	instruction_hazard();
 }
 
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 1edf0cbbeede..1417c6494858 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
 	}
 	/*
 	 * Too much unrolling will overflow the available space in
-	 * clear_space_array / copy_page_array. 8 words sounds generous,
-	 * but a R4000 with 128 byte L2 line length can exceed even that.
+	 * clear_space_array / copy_page_array.
 	 */
-	half_clear_loop_size = min(8 * clear_word_size,
+	half_clear_loop_size = min(16 * clear_word_size,
 				   max(cache_line_size >> 1,
 				       4 * clear_word_size));
-	half_copy_loop_size = min(8 * copy_word_size,
+	half_copy_loop_size = min(16 * copy_word_size,
 				   max(cache_line_size >> 1,
 				       4 * copy_word_size));
 }
@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
 	if (pref_bias_clear_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_clear_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
 	}
 }
 
 void __cpuinit build_clear_page(void)
@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 	if (pref_bias_copy_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_copy_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
 	}
 }
 
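
The page.c hunks appear to tighten when the Create_Dirty_Excl_D/_SD cache ops may be emitted: they are now generated only when cache_line_size equals twice the half-loop size, i.e. when one full pass of the unrolled clear/copy loop writes exactly one cache line, so the dirty-exclusive allocation cannot cover bytes the loop will not overwrite; raising the unrolling cap from 8 to 16 words keeps that condition reachable on large-line CPUs. As a rough worked example (assuming 32-bit stores, so clear_word_size = 4, on a CPU with 128-byte lines): under the old cap, half_clear_loop_size = min(8 * 4, max(128 >> 1, 4 * 4)) = 32, so a full pass writes only 64 bytes of the line and the new check would disable the cache ops, while the 16-word cap gives min(16 * 4, 64) = 64, a full pass covers the whole 128-byte line, and the ops remain usable; presumably that is why the old comment about a 128-byte-line R4000 is dropped.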
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index fc227f3b1199..e3abfb2d7e86 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
 /*
  * This function is executed in uncached address space.
  */
-static __init void __rm7k_sc_enable(void)
+static __cpuinit void __rm7k_sc_enable(void)
 {
 	int i;
 
@@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
 	}
 }
 
-static __init void rm7k_sc_enable(void)
+static __cpuinit void rm7k_sc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_SE)
 		return;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b5f6f71b27bc..dd2fbd6645c1 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
 	model->reg_setup(ctr);
 
 	/* Configure the registers on all cpus. */
-	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+	on_each_cpu(model->cpu_setup, NULL, 1);
 
 	return 0;
 }
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
 
 static int op_mips_start(void)
 {
-	on_each_cpu(model->cpu_start, NULL, 0, 1);
+	on_each_cpu(model->cpu_start, NULL, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static int op_mips_start(void)
 static void op_mips_stop(void)
 {
 	/* Disable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_stop, NULL, 0, 1);
+	on_each_cpu(model->cpu_stop, NULL, 1);
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index b40df7d2cf44..54759f1669d3 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -313,7 +313,7 @@ static int __init mipsxx_init(void)
 	if (!cpu_has_mipsmt_pertccounters)
 		counters = counters_total_to_per_cpu(counters);
 #endif
-	on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
 	op_model_mipsxx_ops.num_counters = counters;
 	switch (current_cpu_type()) {
@@ -382,7 +382,7 @@ static void mipsxx_exit(void)
 	int counters = op_model_mipsxx_ops.num_counters;
 
 	counters = counters_per_cpu_to_total(counters);
-	on_each_cpu(reset_counters, (void *)(long)counters, 0, 1);
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
 	perf_irq = save_perf_irq;
 }
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435846a6..cf4c868715ac 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
 #ifdef CONFIG_SMP
 	if (smp_processor_id())
 		/* CPU 1 */
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
 	prom_cpu0_exit(NULL);
 }
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce826f8bf..fd9604d5555a 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
 		if (!reboot_smp) {
 			/* Get CPU 0 to do the cfe_exit */
 			reboot_smp = 1;
-			smp_call_function(cfe_linux_exit, arg, 1, 0);
+			smp_call_function(cfe_linux_exit, arg, 0);
 		}
 	} else {
 		printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 63b444eaf01e..28b012ab8dcb 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/errno.h>
@@ -402,18 +403,26 @@ static int sbprof_zbprof_stop(void)
 static int sbprof_tb_open(struct inode *inode, struct file *filp)
 {
 	int minor;
+	int err = 0;
 
+	lock_kernel();
 	minor = iminor(inode);
-	if (minor != 0)
-		return -ENODEV;
+	if (minor != 0) {
+		err = -ENODEV;
+		goto out;
+	}
 
-	if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
-		return -EBUSY;
+	if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) {
+		err = -EBUSY;
+		goto out;
+	}
 
 	memset(&sbp, 0, sizeof(struct sbprof_tb));
 	sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
-	if (!sbp.sbprof_tbbuf)
-		return -ENOMEM;
+	if (!sbp.sbprof_tbbuf) {
+		err = -ENOMEM;
+		goto out;
+	}
 	memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
 	init_waitqueue_head(&sbp.tb_sync);
 	init_waitqueue_head(&sbp.tb_read);
@@ -421,7 +430,9 @@ static int sbprof_tb_open(struct inode *inode, struct file *filp)
 
 	sbp.open = SB_OPEN;
 
-	return 0;
+ out:
+	unlock_kernel();
+	return err;
 }
 
 static int sbprof_tb_release(struct inode *inode, struct file *filp)
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3de86c..65b1af66b674 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
 {
 #ifdef CONFIG_SMP
 	if (smp_processor_id()) {
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 	}
 #endif
 	while(1);
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index 1775755a2619..255d692bfa18 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
 obj-y				:= setup.o rtc_xicor1241.o rtc_m41t81.o
 
+obj-$(CONFIG_I2C_BOARDINFO)	+= swarm-i2c.o
 obj-$(CONFIG_KGDB)		+= dbg_io.o
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c
new file mode 100644
index 000000000000..4282ac9d01d2
--- /dev/null
+++ b/arch/mips/sibyte/swarm/swarm-i2c.c
@@ -0,0 +1,37 @@
+/*
+ *	arch/mips/sibyte/swarm/swarm-i2c.c
+ *
+ *	Broadcom BCM91250A (SWARM), etc. I2C platform setup.
+ *
+ *	Copyright (c) 2008 Maciej W. Rozycki
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+
+static struct i2c_board_info swarm_i2c_info1[] __initdata = {
+	{
+		I2C_BOARD_INFO("m41t81", 0x68),
+	},
+};
+
+static int __init swarm_i2c_init(void)
+{
+	int err;
+
+	err = i2c_register_board_info(1, swarm_i2c_info1,
+				      ARRAY_SIZE(swarm_i2c_info1));
+	if (err < 0)
+		printk(KERN_ERR
+		       "swarm-i2c: cannot register board I2C devices\n");
+	return err;
+}
+
+arch_initcall(swarm_i2c_init);
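
The new swarm-i2c.c file only declares board-level I2C devices: i2c_register_board_info() queues the description at arch_initcall() time, and the m41t81 RTC is actually instantiated later, when an adapter claiming bus number 1 (on this board presumably one of the SiByte SMBus channels) registers. Further on-board devices could be declared the same way; a hypothetical sketch, not part of this diff:

	/* Hypothetical second table for bus 0; chip name and address are assumptions. */
	static struct i2c_board_info swarm_i2c_info0[] __initdata = {
		{
			I2C_BOARD_INFO("lm90", 0x4c),	/* hypothetical temperature sensor */
		},
	};

		/* then, inside swarm_i2c_init(): */
		err = i2c_register_board_info(0, swarm_i2c_info0,
					      ARRAY_SIZE(swarm_i2c_info0));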