Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/entry.S          |   6
-rw-r--r--  arch/mips/kernel/genex.S          |  47
-rw-r--r--  arch/mips/kernel/kspd.c           |  22
-rw-r--r--  arch/mips/kernel/linux32.c        |  58
-rw-r--r--  arch/mips/kernel/r2300_switch.S   |  10
-rw-r--r--  arch/mips/kernel/r4k_fpu.S        |  16
-rw-r--r--  arch/mips/kernel/r4k_switch.S     |  10
-rw-r--r--  arch/mips/kernel/rtlx.c           | 104
-rw-r--r--  arch/mips/kernel/signal-common.h  |   3
-rw-r--r--  arch/mips/kernel/signal.c         |  75
-rw-r--r--  arch/mips/kernel/signal32.c       |  56
-rw-r--r--  arch/mips/kernel/signal_n32.c     |   6
-rw-r--r--  arch/mips/kernel/smtc.c           |  65
-rw-r--r--  arch/mips/kernel/traps.c          |  84
14 files changed, 341 insertions(+), 221 deletions(-)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 0b78fcbf044a..686249c5c328 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -121,7 +121,11 @@ FEXPORT(restore_partial) # restore partial frame
 	SAVE_AT
 	SAVE_TEMP
 	LONG_L	v0, PT_STATUS(sp)
-	and	v0, 1
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	and	v0, ST0_IEP
+#else
+	and	v0, ST0_IE
+#endif
 	beqz	v0, 1f
 	jal	trace_hardirqs_on
 	b	2f
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index aacd4a005c5f..297bd56c2347 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -128,6 +128,37 @@ handle_vcei:
 
 	.align	5
 NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/*
+	 * Check to see if the interrupted code has just disabled
+	 * interrupts and ignore this interrupt for now if so.
+	 *
+	 * local_irq_disable() disables interrupts and then calls
+	 * trace_hardirqs_off() to track the state. If an interrupt is taken
+	 * after interrupts are disabled but before the state is updated
+	 * it will appear to restore_all that it is incorrectly returning with
+	 * interrupts disabled
+	 */
+	.set	push
+	.set	noat
+	mfc0	k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	and	k0, ST0_IEP
+	bnez	k0, 1f
+
+	mfc0	k0, CP0_EPC
+	.set	noreorder
+	j	k0
+	rfe
+#else
+	and	k0, ST0_IE
+	bnez	k0, 1f
+
+	eret
+#endif
+1:
+	.set	pop
+#endif
 	SAVE_ALL
 	CLI
 	TRACE_IRQS_OFF
@@ -181,13 +212,13 @@ NESTED(except_vec_vi, 0, sp)
 	 * during service by SMTC kernel, we also want to
 	 * pass the IM value to be cleared.
 	 */
-EXPORT(except_vec_vi_mori)
+FEXPORT(except_vec_vi_mori)
 	ori	a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
-EXPORT(except_vec_vi_lui)
+FEXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
 	j	except_vec_vi_handler
-EXPORT(except_vec_vi_ori)
+FEXPORT(except_vec_vi_ori)
 	ori	v0, 0		/* Patched */
 	.set	pop
 	END(except_vec_vi)
@@ -220,7 +251,17 @@ NESTED(except_vec_vi_handler, 0, sp)
 	_ehb
 #endif /* CONFIG_MIPS_MT_SMTC */
 	CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
+	move	s0, v0
+#ifdef CONFIG_MIPS_MT_SMTC
+	move	s1, a0
+#endif
 	TRACE_IRQS_OFF
+#ifdef CONFIG_MIPS_MT_SMTC
+	move	a0, s1
+#endif
+	move	v0, s0
+#endif
 
 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 5929f883e46b..29eadd404fa5 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -70,6 +70,7 @@ static int sp_stopping = 0;
 #define MTSP_SYSCALL_GETTIME	(MTSP_SYSCALL_BASE + 7)
 #define MTSP_SYSCALL_PIPEFREQ	(MTSP_SYSCALL_BASE + 8)
 #define MTSP_SYSCALL_GETTOD	(MTSP_SYSCALL_BASE + 9)
+#define MTSP_SYSCALL_IOCTL	(MTSP_SYSCALL_BASE + 10)
 
 #define MTSP_O_RDONLY		0x0000
 #define MTSP_O_WRONLY		0x0001
@@ -110,7 +111,8 @@ struct apsp_table syscall_command_table[] = {
 	{ MTSP_SYSCALL_CLOSE, __NR_close },
 	{ MTSP_SYSCALL_READ, __NR_read },
 	{ MTSP_SYSCALL_WRITE, __NR_write },
-	{ MTSP_SYSCALL_LSEEK32, __NR_lseek }
+	{ MTSP_SYSCALL_LSEEK32, __NR_lseek },
+	{ MTSP_SYSCALL_IOCTL, __NR_ioctl }
 };
 
 static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
@@ -189,6 +191,8 @@ void sp_work_handle_request(void)
 	struct mtsp_syscall_generic generic;
 	struct mtsp_syscall_ret ret;
 	struct kspd_notifications *n;
+	unsigned long written;
+	mm_segment_t old_fs;
 	struct timeval tv;
 	struct timezone tz;
 	int cmd;
@@ -199,7 +203,11 @@ void sp_work_handle_request(void)
 
 	ret.retval = -1;
 
-	if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
+		set_fs(old_fs);
 		printk(KERN_ERR "Expected request but nothing to read\n");
 		return;
 	}
@@ -207,7 +215,8 @@ void sp_work_handle_request(void)
 	size = sc.size;
 
 	if (size) {
-		if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
+		if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
+			set_fs(old_fs);
 			printk(KERN_ERR "Expected request but nothing to read\n");
 			return;
 		}
@@ -280,8 +289,11 @@ void sp_work_handle_request(void)
 	if (vpe_getuid(SP_VPE))
 		sp_setfsuidgid( 0, 0);
 
-	if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
-	    < sizeof(struct mtsp_syscall_ret))
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
+	set_fs(old_fs);
+	if (written < sizeof(ret))
 		printk("KSPD: sp_work_handle_request failed to send to SP\n");
 }
 
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 30d433f14f93..37849edd0645 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -311,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 	return ret;
 }
 
+#ifdef CONFIG_SYSVIPC
+
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -368,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 	return err;
 }
 
+#else
+
+asmlinkage long
+sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_SYSVIPC */
+
 #ifdef CONFIG_MIPS32_N32
 asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
 {
@@ -564,49 +576,3 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs)
 	return do_fork(clone_flags, newsp, &regs, 0,
 	               parent_tidptr, child_tidptr);
 }
-
-/*
- * Implement the event wait interface for the eventpoll file. It is the kernel
- * part of the user space epoll_pwait(2).
- */
-asmlinkage long compat_sys_epoll_pwait(int epfd,
-	struct epoll_event __user *events, int maxevents, int timeout,
-	const compat_sigset_t __user *sigmask, size_t sigsetsize)
-{
-	int error;
-	sigset_t ksigmask, sigsaved;
-
-	/*
-	 * If the caller wants a certain signal mask to be set during the wait,
-	 * we apply it here.
-	 */
-	if (sigmask) {
-		if (sigsetsize != sizeof(sigset_t))
-			return -EINVAL;
-		if (!access_ok(VERIFY_READ, sigmask, sizeof(ksigmask)))
-			return -EFAULT;
-		if (__copy_conv_sigset_from_user(&ksigmask, sigmask))
-			return -EFAULT;
-		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
-		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
-	}
-
-	error = sys_epoll_wait(epfd, events, maxevents, timeout);
-
-	/*
-	 * If we changed the signal mask, we need to restore the original one.
-	 * In case we've got a signal while waiting, we do not restore the
-	 * signal mask yet, and we allow do_signal() to deliver the signal on
-	 * the way back to userspace, before the signal mask is restored.
-	 */
-	if (sigmask) {
-		if (error == -EINTR) {
-			memcpy(&current->saved_sigmask, &sigsaved,
-			       sizeof(sigsaved));
-			set_thread_flag(TIF_RESTORE_SIGMASK);
-		} else
-			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-	}
-
-	return error;
-}
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 656bde2e11b1..28c2e2e6af73 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -49,8 +49,7 @@ LEAF(resume)
 #ifndef CONFIG_CPU_HAS_LLSC
 	sw	zero, ll_bit
 #endif
-	mfc0	t1, CP0_STATUS
-	sw	t1, THREAD_STATUS(a0)
+	mfc0	t2, CP0_STATUS
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)
 
@@ -60,8 +59,8 @@ LEAF(resume)
 	lw	t3, TASK_THREAD_INFO(a0)
 	lw	t0, TI_FLAGS(t3)
 	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
+	and	t1, t0
+	beqz	t1, 1f
 	nor	t1, zero, t1
 
 	and	t0, t0, t1
@@ -74,10 +73,13 @@ LEAF(resume)
 	li	t1, ~ST0_CU1
 	and	t0, t0, t1
 	sw	t0, ST_OFF(t3)
+	/* clear thread_struct CU1 bit */
+	and	t2, t1
 
 	fpu_save_single a0, t0			# clobbers t0
 
 1:
+	sw	t2, THREAD_STATUS(a0)
 	/*
 	 * The order of restoring the registers takes care of the race
 	 * updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 59c1577ecbb3..dbd42adc52ed 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -114,14 +114,6 @@ LEAF(_save_fp_context32)
  */
 LEAF(_restore_fp_context)
 	EX	lw t0, SC_FPC_CSR(a0)
-
-	/* Fail if the CSR has exceptions pending */
-	srl	t1, t0, 5
-	and	t1, t0
-	andi	t1, 0x1f << 7
-	bnez	t1, fault
-	 nop
-
 #ifdef CONFIG_64BIT
 	EX	ldc1 $f1, SC_FPREGS+8(a0)
 	EX	ldc1 $f3, SC_FPREGS+24(a0)
@@ -165,14 +157,6 @@ LEAF(_restore_fp_context)
 LEAF(_restore_fp_context32)
 	/* Restore an o32 sigcontext.  */
 	EX	lw t0, SC32_FPC_CSR(a0)
-
-	/* Fail if the CSR has exceptions pending */
-	srl	t1, t0, 5
-	and	t1, t0
-	andi	t1, 0x1f << 7
-	bnez	t1, fault
-	 nop
-
 	EX	ldc1 $f0, SC32_FPREGS+0(a0)
 	EX	ldc1 $f2, SC32_FPREGS+16(a0)
 	EX	ldc1 $f4, SC32_FPREGS+32(a0)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index cc566cf12246..c7698fd9955c 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -48,8 +48,7 @@
 #ifndef CONFIG_CPU_HAS_LLSC
 	sw	zero, ll_bit
 #endif
-	mfc0	t1, CP0_STATUS
-	LONG_S	t1, THREAD_STATUS(a0)
+	mfc0	t2, CP0_STATUS
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
@@ -59,8 +58,8 @@
 	PTR_L	t3, TASK_THREAD_INFO(a0)
 	LONG_L	t0, TI_FLAGS(t3)
 	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
+	and	t1, t0
+	beqz	t1, 1f
 	nor	t1, zero, t1
 
 	and	t0, t0, t1
@@ -73,10 +72,13 @@
 	li	t1, ~ST0_CU1
 	and	t0, t0, t1
 	LONG_S	t0, ST_OFF(t3)
+	/* clear thread_struct CU1 bit */
+	and	t2, t1
 
 	fpu_save_double a0 t0 t1		# c0_status passed in t0
 						# clobbers t1
 1:
+	LONG_S	t2, THREAD_STATUS(a0)
 
 	/*
 	 * The order of restoring the registers takes care of the race
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index e14ae09eda2b..e6e3047151a6 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -54,6 +54,7 @@ static struct chan_waitqueues {
 	wait_queue_head_t rt_queue;
 	wait_queue_head_t lx_queue;
 	atomic_t in_open;
+	struct mutex mutex;
 } channel_wqs[RTLX_CHANNELS];
 
 static struct irqaction irq;
@@ -146,7 +147,7 @@ static void stopping(int vpe)
 
 int rtlx_open(int index, int can_sleep)
 {
-	volatile struct rtlx_info **p;
+	struct rtlx_info **p;
 	struct rtlx_channel *chan;
 	enum rtlx_state state;
 	int ret = 0;
@@ -179,13 +180,24 @@ int rtlx_open(int index, int can_sleep)
 		}
 	}
 
+	smp_rmb();
 	if (*p == NULL) {
 		if (can_sleep) {
-			__wait_event_interruptible(channel_wqs[index].lx_queue,
-			                           *p != NULL,
-			                           ret);
-			if (ret)
+			DEFINE_WAIT(wait);
+
+			for (;;) {
+				prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE);
+				smp_rmb();
+				if (*p != NULL)
+					break;
+				if (!signal_pending(current)) {
+					schedule();
+					continue;
+				}
+				ret = -ERESTARTSYS;
 				goto out_fail;
+			}
+			finish_wait(&channel_wqs[index].lx_queue, &wait);
 		} else {
 			printk(" *vpe_get_shared is NULL. "
 			       "Has an SP program been loaded?\n");
@@ -277,56 +289,52 @@ unsigned int rtlx_write_poll(int index)
 	return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
 }
 
-static inline void copy_to(void *dst, void *src, size_t count, int user)
-{
-	if (user)
-		copy_to_user(dst, src, count);
-	else
-		memcpy(dst, src, count);
-}
-
-static inline void copy_from(void *dst, void *src, size_t count, int user)
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
 {
-	if (user)
-		copy_from_user(dst, src, count);
-	else
-		memcpy(dst, src, count);
-}
-
-ssize_t rtlx_read(int index, void *buff, size_t count, int user)
-{
-	size_t fl = 0L;
+	size_t lx_write, fl = 0L;
 	struct rtlx_channel *lx;
+	unsigned long failed;
 
 	if (rtlx == NULL)
 		return -ENOSYS;
 
 	lx = &rtlx->channel[index];
 
+	mutex_lock(&channel_wqs[index].mutex);
+	smp_rmb();
+	lx_write = lx->lx_write;
+
 	/* find out how much in total */
 	count = min(count,
-		(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
+		(size_t)(lx_write + lx->buffer_size - lx->lx_read)
 		% lx->buffer_size);
 
 	/* then how much from the read pointer onwards */
-	fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
+	fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
 
-	copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
+	failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
+	if (failed)
+		goto out;
 
 	/* and if there is anything left at the beginning of the buffer */
-	if ( count - fl )
-		copy_to (buff + fl, lx->lx_buffer, count - fl, user);
+	if (count - fl)
+		failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
 
-	/* update the index */
-	lx->lx_read += count;
-	lx->lx_read %= lx->buffer_size;
+out:
+	count -= failed;
+
+	smp_wmb();
+	lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
+	smp_wmb();
+	mutex_unlock(&channel_wqs[index].mutex);
 
 	return count;
 }
 
-ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
 {
 	struct rtlx_channel *rt;
+	size_t rt_read;
 	size_t fl;
 
 	if (rtlx == NULL)
@@ -334,24 +342,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
 
 	rt = &rtlx->channel[index];
 
+	mutex_lock(&channel_wqs[index].mutex);
+	smp_rmb();
+	rt_read = rt->rt_read;
+
 	/* total number of bytes to copy */
 	count = min(count,
-		(size_t)write_spacefree(rt->rt_read, rt->rt_write,
-			rt->buffer_size));
+		(size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size));
 
 	/* first bit from write pointer to the end of the buffer, or count */
 	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
 
-	copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
+	failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
+	if (failed)
+		goto out;
 
 	/* if there's any left copy to the beginning of the buffer */
-	if( count - fl )
-		copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
+	if (count - fl) {
+		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+	}
+
+out:
+	count -= failed;
 
-	rt->rt_write += count;
-	rt->rt_write %= rt->buffer_size;
+	smp_wmb();
+	rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
+	smp_wmb();
+	mutex_unlock(&channel_wqs[index].mutex);
 
-	return(count);
+	return count;
 }
 
 
@@ -403,7 +422,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
 		return 0;	// -EAGAIN makes cat whinge
 	}
 
-	return rtlx_read(minor, buffer, count, 1);
+	return rtlx_read(minor, buffer, count);
 }
 
 static ssize_t file_write(struct file *file, const char __user * buffer,
@@ -429,7 +448,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
 		return ret;
 	}
 
-	return rtlx_write(minor, (void *)buffer, count, 1);
+	return rtlx_write(minor, buffer, count);
 }
 
 static const struct file_operations rtlx_fops = {
@@ -468,6 +487,7 @@ static int rtlx_module_init(void)
 		init_waitqueue_head(&channel_wqs[i].rt_queue);
 		init_waitqueue_head(&channel_wqs[i].lx_queue);
 		atomic_set(&channel_wqs[i].in_open, 0);
+		mutex_init(&channel_wqs[i].mutex);
 
 		dev = device_create(mt_class, NULL, MKDEV(major, i),
 		                    "%s%d", module_name, i);
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index fdbdbdc65b54..297dfcb97524 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,4 +31,7 @@ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
  */
 extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
 
+/* Check and clear pending FPU exceptions in saved CSR */
+extern int fpcsr_pending(unsigned int __user *fpcsr);
+
 #endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index f091786187a6..8c3c5a5789b0 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -82,6 +82,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
+	unsigned int used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -104,26 +105,53 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	err |= __put_user(!!used_math(), &sc->sc_used_math);
+	used_math = !!used_math();
+	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math()) {
+	if (used_math) {
 		/*
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-
-		if (!is_fpu_owner()) {
-			own_fpu();
-			restore_fp(current);
-		}
+		own_fpu(1);
+		enable_fp_in_kernel();
 		err |= save_fp_context(sc);
-
-		preempt_enable();
+		disable_fp_in_kernel();
 	}
 	return err;
 }
 
+int fpcsr_pending(unsigned int __user *fpcsr)
+{
+	int err, sig = 0;
+	unsigned int csr, enabled;
+
+	err = __get_user(csr, fpcsr);
+	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
+	/*
+	 * If the signal handler set some FPU exceptions, clear it and
+	 * send SIGFPE.
+	 */
+	if (csr & enabled) {
+		csr &= ~enabled;
+		err |= __put_user(csr, fpcsr);
+		sig = SIGFPE;
+	}
+	return err ?: sig;
+}
+
+static int
+check_and_restore_fp_context(struct sigcontext __user *sc)
+{
+	int err, sig;
+
+	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+	if (err > 0)
+		err = 0;
+	err |= restore_fp_context(sc);
+	return err ?: sig;
+}
+
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	unsigned int used_math;
@@ -157,19 +185,18 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	err |= __get_user(used_math, &sc->sc_used_math);
 	conditional_used_math(used_math);
 
-	preempt_disable();
-
-	if (used_math()) {
+	if (used_math) {
 		/* restore fpu context if we have used it before */
-		own_fpu();
-		err |= restore_fp_context(sc);
+		own_fpu(0);
+		enable_fp_in_kernel();
+		if (!err)
+			err = check_and_restore_fp_context(sc);
+		disable_fp_in_kernel();
 	} else {
 		/* signal handler may have used FPU. Give it up. */
-		lose_fpu();
+		lose_fpu(0);
 	}
 
-	preempt_enable();
-
 	return err;
 }
 
@@ -332,6 +359,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe __user *frame;
 	sigset_t blocked;
+	int sig;
 
 	frame = (struct sigframe __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -345,8 +373,11 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->sf_sc))
+	sig = restore_sigcontext(&regs, &frame->sf_sc);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/*
 	 * Don't let your children do this ...
@@ -368,6 +399,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	struct rt_sigframe __user *frame;
 	sigset_t set;
 	stack_t st;
+	int sig;
 
 	frame = (struct rt_sigframe __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -381,8 +413,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
 		goto badframe;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19bbef001959..151fd2f0893a 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -181,6 +181,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 {
 	int err = 0;
 	int i;
+	u32 used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -200,26 +201,34 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	err |= __put_user(!!used_math(), &sc->sc_used_math);
+	used_math = !!used_math();
+	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math()) {
+	if (used_math) {
 		/*
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-
-		if (!is_fpu_owner()) {
-			own_fpu();
-			restore_fp(current);
-		}
+		own_fpu(1);
+		enable_fp_in_kernel();
 		err |= save_fp_context32(sc);
-
-		preempt_enable();
+		disable_fp_in_kernel();
 	}
 	return err;
 }
 
+static int
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+	int err, sig;
+
+	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+	if (err > 0)
+		err = 0;
+	err |= restore_fp_context32(sc);
+	return err ?: sig;
+}
+
 static int restore_sigcontext32(struct pt_regs *regs,
 	struct sigcontext32 __user *sc)
 {
@@ -250,19 +259,18 @@ static int restore_sigcontext32(struct pt_regs *regs,
 	err |= __get_user(used_math, &sc->sc_used_math);
 	conditional_used_math(used_math);
 
-	preempt_disable();
-
-	if (used_math()) {
+	if (used_math) {
 		/* restore fpu context if we have used it before */
-		own_fpu();
-		err |= restore_fp_context32(sc);
+		own_fpu(0);
+		enable_fp_in_kernel();
+		if (!err)
+			err = check_and_restore_fp_context32(sc);
+		disable_fp_in_kernel();
 	} else {
 		/* signal handler may have used FPU. Give it up. */
-		lose_fpu();
+		lose_fpu(0);
 	}
 
-	preempt_enable();
-
 	return err;
 }
 
@@ -508,6 +516,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe32 __user *frame;
 	sigset_t blocked;
+	int sig;
 
 	frame = (struct sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -521,8 +530,11 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext32(&regs, &frame->sf_sc))
+	sig = restore_sigcontext32(&regs, &frame->sf_sc);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/*
 	 * Don't let your children do this ...
@@ -545,6 +557,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	sigset_t set;
 	stack_t st;
 	s32 sp;
+	int sig;
 
 	frame = (struct rt_sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -558,8 +571,11 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/* The ucontext contains a stack32_t, so we must convert! */
 	if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index ecf1f7ecaad9..a9202fa95987 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -127,6 +127,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	sigset_t set;
 	stack_t st;
 	s32 sp;
+	int sig;
 
 	frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -140,8 +141,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/* The ucontext contains a stack32_t, so we must convert! */
 	if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f253eda27fa3..5dcfab6b288e 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 
 #include <asm/cpu.h>
@@ -14,6 +15,7 @@
 #include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
+#include <asm/mips-boards/maltaint.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <asm/time.h>
@@ -75,7 +77,7 @@ static struct smtc_ipi_q freeIPIq;
 
 void ipi_decode(struct smtc_ipi *);
 static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-static void setup_cross_vpe_interrupts(void);
+static void setup_cross_vpe_interrupts(unsigned int nvpe);
 void init_smtc_stats(void);
 
 /* Global SMTC Status */
@@ -168,7 +170,10 @@ __setup("tintq=", tintq);
 
 int imstuckcount[2][8];
 /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int vpemask[2][8] = {
+	{0, 0, 1, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 0, 0, 1}
+};
 int tcnoprog[NR_CPUS];
 static atomic_t idle_hook_initialized = {0};
 static int clock_hang_reported[NR_CPUS];
@@ -501,8 +506,7 @@ void mipsmt_prepare_cpus(void)
 
 	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
 
-	if (nvpe > 1)
-		setup_cross_vpe_interrupts();
+	setup_cross_vpe_interrupts(nvpe);
 
 	/* Set up queue of free IPI "messages". */
 	nipi = NR_CPUS * IPIBUF_PER_CPU;
@@ -607,7 +611,12 @@ void smtc_cpus_done(void)
 int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 			unsigned long hwmask)
 {
+	unsigned int vpe = current_cpu_data.vpe_id;
+
 	irq_hwmask[irq] = hwmask;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+	vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
+#endif
 
 	return setup_irq(irq, new);
 }
@@ -812,12 +821,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 		smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
+		irq_enter();
+		kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
 		/* Invoke Clock "Interrupt" */
 		ipi_timer_latch[dest_copy] = 0;
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 		clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 		local_timer_interrupt(0, NULL);
+		irq_exit();
 		break;
 	case LINUX_SMP_IPI:
 		switch ((int)arg_copy) {
@@ -965,8 +977,11 @@ static void ipi_irq_dispatch(void)
 
 static struct irqaction irq_ipi;
 
-static void setup_cross_vpe_interrupts(void)
+static void setup_cross_vpe_interrupts(unsigned int nvpe)
 {
+	if (nvpe < 1)
+		return;
+
 	if (!cpu_has_vint)
 		panic("SMTC Kernel requires Vectored Interupt support");
 
@@ -984,10 +999,17 @@ static void setup_cross_vpe_interrupts(void)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled. We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
  */
 
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs. This is subtle.
@@ -1002,17 +1024,30 @@ void smtc_ipi_replay(void)
 	 * is clear, and we'll handle it as a real pseudo-interrupt
 	 * and not a pseudo-pseudo interrupt.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
+	if (IPIQ[cpu].depth > 0) {
+		while (1) {
+			struct smtc_ipi_q *q = &IPIQ[cpu];
+			struct smtc_ipi *pipi;
+			extern void self_ipi(struct smtc_ipi *);
+
+			spin_lock(&q->lock);
+			pipi = __smtc_ipi_dq(q);
+			spin_unlock(&q->lock);
+			if (!pipi)
+				break;
 
-		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
 			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
+			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }
 
+void smtc_ipi_replay(void)
+{
+	raw_local_irq_disable();
+	__smtc_ipi_replay();
+}
+
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1117,7 +1152,13 @@ void smtc_idle_loop_hook(void)
 	 * is in use, there should never be any.
 	 */
 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	smtc_ipi_replay();
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__smtc_ipi_replay();
+		local_irq_restore(flags);
+	}
 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 18f56a9dbcfa..7d76a85422b2 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -610,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	if (fcr31 & FPU_CSR_UNI_X) {
 		int sig;
 
-		preempt_disable();
-
-#ifdef CONFIG_PREEMPT
-		if (!is_fpu_owner()) {
-			/* We might lose fpu before disabling preempt... */
-			own_fpu();
-			BUG_ON(!used_math());
-			restore_fp(current);
-		}
-#endif
 		/*
 		 * Unimplemented operation exception.  If we've got the full
 		 * software emulator on-board, let's use it...
@@ -630,18 +620,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		 * register operands before invoking the emulator, which seems
 		 * a bit extreme for what should be an infrequent event.
 		 */
-		save_fp(current);
 		/* Ensure 'resume' not overwrite saved fp context again. */
-		lose_fpu();
-
-		preempt_enable();
+		lose_fpu(1);
 
 		/* Run the emulator */
 		sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
 
-		preempt_disable();
-
-		own_fpu();	/* Using the FPU again.  */
 		/*
 		 * We can't allow the emulated instruction to leave any of
 		 * the cause bit set in $fcr31.
@@ -649,9 +633,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
 		/* Restore the hardware register state */
-		restore_fp(current);
-
-		preempt_enable();
+		own_fpu(1);	/* Using the FPU again.  */
 
 		/* If something went wrong, signal */
 		if (sig)
@@ -775,12 +757,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
 
-	die_if_kernel("do_cpu invoked from kernel context!", regs);
-
 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
 
 	switch (cpid) {
 	case 0:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		if (!cpu_has_llsc)
 			if (!simulate_llsc(regs))
 				return;
@@ -791,21 +772,30 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 		break;
 
 	case 1:
-		preempt_disable();
-
-		own_fpu();
-		if (used_math()) {	/* Using the FPU again.  */
-			restore_fp(current);
-		} else {		/* First time FPU user.  */
+		if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
+			die_if_kernel("do_cpu invoked from kernel context!",
+				      regs);
+		if (used_math())	/* Using the FPU again.  */
+			own_fpu(1);
+		else {			/* First time FPU user.  */
 			init_fpu();
 			set_used_math();
 		}
 
-		if (cpu_has_fpu) {
-			preempt_enable();
+		if (raw_cpu_has_fpu) {
+			if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
+				local_irq_disable();
+				if (cpu_has_fpu)
+					regs->cp0_status |= ST0_CU1;
+				/*
+				 * We must return without enabling
+				 * interrupts to ensure keep FPU
+				 * ownership until resume.
+				 */
+				return;
+			}
 		} else {
 			int sig;
-			preempt_enable();
 			sig = fpu_emulator_cop1Handler(regs,
 						&current->thread.fpu, 0);
 			if (sig)
@@ -1259,26 +1249,26 @@ static inline void mips_srs_init(void)
 /*
  * This is used by native signal handling
  */
-asmlinkage int (*save_fp_context)(struct sigcontext *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
+asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_fp_context(struct sigcontext *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
 
 #ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext *sc)
+static int smp_save_fp_context(struct sigcontext __user *sc)
 {
-	return cpu_has_fpu
+	return raw_cpu_has_fpu
 	       ? _save_fp_context(sc)
 	       : fpu_emulator_save_context(sc);
 }
 
-static int smp_restore_fp_context(struct sigcontext *sc)
+static int smp_restore_fp_context(struct sigcontext __user *sc)
 {
-	return cpu_has_fpu
+	return raw_cpu_has_fpu
 	       ? _restore_fp_context(sc)
 	       : fpu_emulator_restore_context(sc);
 }
@@ -1306,14 +1296,14 @@ static inline void signal_init(void)
 /*
  * This is used by 32-bit signal stuff on the 64-bit kernel
  */
-asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
+asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
 
 static inline void signal32_init(void)
 {