diff options
Diffstat (limited to 'arch/mips/kernel')
| -rw-r--r-- | arch/mips/kernel/entry.S | 6 | ||||
| -rw-r--r-- | arch/mips/kernel/genex.S | 47 | ||||
| -rw-r--r-- | arch/mips/kernel/kspd.c | 22 | ||||
| -rw-r--r-- | arch/mips/kernel/linux32.c | 12 | ||||
| -rw-r--r-- | arch/mips/kernel/r4k_fpu.S | 16 | ||||
| -rw-r--r-- | arch/mips/kernel/rtlx.c | 104 | ||||
| -rw-r--r-- | arch/mips/kernel/signal-common.h | 12 | ||||
| -rw-r--r-- | arch/mips/kernel/signal.c | 111 | ||||
| -rw-r--r-- | arch/mips/kernel/signal32.c | 94 | ||||
| -rw-r--r-- | arch/mips/kernel/signal_n32.c | 6 | ||||
| -rw-r--r-- | arch/mips/kernel/smtc.c | 65 | ||||
| -rw-r--r-- | arch/mips/kernel/traps.c | 73 |
12 files changed, 396 insertions, 172 deletions
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 0b78fcbf044a..686249c5c328 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
| @@ -121,7 +121,11 @@ FEXPORT(restore_partial) # restore partial frame | |||
| 121 | SAVE_AT | 121 | SAVE_AT |
| 122 | SAVE_TEMP | 122 | SAVE_TEMP |
| 123 | LONG_L v0, PT_STATUS(sp) | 123 | LONG_L v0, PT_STATUS(sp) |
| 124 | and v0, 1 | 124 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) |
| 125 | and v0, ST0_IEP | ||
| 126 | #else | ||
| 127 | and v0, ST0_IE | ||
| 128 | #endif | ||
| 125 | beqz v0, 1f | 129 | beqz v0, 1f |
| 126 | jal trace_hardirqs_on | 130 | jal trace_hardirqs_on |
| 127 | b 2f | 131 | b 2f |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index aacd4a005c5f..297bd56c2347 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
| @@ -128,6 +128,37 @@ handle_vcei: | |||
| 128 | 128 | ||
| 129 | .align 5 | 129 | .align 5 |
| 130 | NESTED(handle_int, PT_SIZE, sp) | 130 | NESTED(handle_int, PT_SIZE, sp) |
| 131 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 132 | /* | ||
| 133 | * Check to see if the interrupted code has just disabled | ||
| 134 | * interrupts and ignore this interrupt for now if so. | ||
| 135 | * | ||
| 136 | * local_irq_disable() disables interrupts and then calls | ||
| 137 | * trace_hardirqs_off() to track the state. If an interrupt is taken | ||
| 138 | * after interrupts are disabled but before the state is updated | ||
| 139 | * it will appear to restore_all that it is incorrectly returning with | ||
| 140 | * interrupts disabled | ||
| 141 | */ | ||
| 142 | .set push | ||
| 143 | .set noat | ||
| 144 | mfc0 k0, CP0_STATUS | ||
| 145 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
| 146 | and k0, ST0_IEP | ||
| 147 | bnez k0, 1f | ||
| 148 | |||
| 149 | mfc0 k0, CP0_EPC | ||
| 150 | .set noreorder | ||
| 151 | j k0 | ||
| 152 | rfe | ||
| 153 | #else | ||
| 154 | and k0, ST0_IE | ||
| 155 | bnez k0, 1f | ||
| 156 | |||
| 157 | eret | ||
| 158 | #endif | ||
| 159 | 1: | ||
| 160 | .set pop | ||
| 161 | #endif | ||
| 131 | SAVE_ALL | 162 | SAVE_ALL |
| 132 | CLI | 163 | CLI |
| 133 | TRACE_IRQS_OFF | 164 | TRACE_IRQS_OFF |
| @@ -181,13 +212,13 @@ NESTED(except_vec_vi, 0, sp) | |||
| 181 | * during service by SMTC kernel, we also want to | 212 | * during service by SMTC kernel, we also want to |
| 182 | * pass the IM value to be cleared. | 213 | * pass the IM value to be cleared. |
| 183 | */ | 214 | */ |
| 184 | EXPORT(except_vec_vi_mori) | 215 | FEXPORT(except_vec_vi_mori) |
| 185 | ori a0, $0, 0 | 216 | ori a0, $0, 0 |
| 186 | #endif /* CONFIG_MIPS_MT_SMTC */ | 217 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 187 | EXPORT(except_vec_vi_lui) | 218 | FEXPORT(except_vec_vi_lui) |
| 188 | lui v0, 0 /* Patched */ | 219 | lui v0, 0 /* Patched */ |
| 189 | j except_vec_vi_handler | 220 | j except_vec_vi_handler |
| 190 | EXPORT(except_vec_vi_ori) | 221 | FEXPORT(except_vec_vi_ori) |
| 191 | ori v0, 0 /* Patched */ | 222 | ori v0, 0 /* Patched */ |
| 192 | .set pop | 223 | .set pop |
| 193 | END(except_vec_vi) | 224 | END(except_vec_vi) |
| @@ -220,7 +251,17 @@ NESTED(except_vec_vi_handler, 0, sp) | |||
| 220 | _ehb | 251 | _ehb |
| 221 | #endif /* CONFIG_MIPS_MT_SMTC */ | 252 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 222 | CLI | 253 | CLI |
| 254 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 255 | move s0, v0 | ||
| 256 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 257 | move s1, a0 | ||
| 258 | #endif | ||
| 223 | TRACE_IRQS_OFF | 259 | TRACE_IRQS_OFF |
| 260 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 261 | move a0, s1 | ||
| 262 | #endif | ||
| 263 | move v0, s0 | ||
| 264 | #endif | ||
| 224 | 265 | ||
| 225 | LONG_L s0, TI_REGS($28) | 266 | LONG_L s0, TI_REGS($28) |
| 226 | LONG_S sp, TI_REGS($28) | 267 | LONG_S sp, TI_REGS($28) |
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 5929f883e46b..29eadd404fa5 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
| @@ -70,6 +70,7 @@ static int sp_stopping = 0; | |||
| 70 | #define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7) | 70 | #define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7) |
| 71 | #define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8) | 71 | #define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8) |
| 72 | #define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9) | 72 | #define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9) |
| 73 | #define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10) | ||
| 73 | 74 | ||
| 74 | #define MTSP_O_RDONLY 0x0000 | 75 | #define MTSP_O_RDONLY 0x0000 |
| 75 | #define MTSP_O_WRONLY 0x0001 | 76 | #define MTSP_O_WRONLY 0x0001 |
| @@ -110,7 +111,8 @@ struct apsp_table syscall_command_table[] = { | |||
| 110 | { MTSP_SYSCALL_CLOSE, __NR_close }, | 111 | { MTSP_SYSCALL_CLOSE, __NR_close }, |
| 111 | { MTSP_SYSCALL_READ, __NR_read }, | 112 | { MTSP_SYSCALL_READ, __NR_read }, |
| 112 | { MTSP_SYSCALL_WRITE, __NR_write }, | 113 | { MTSP_SYSCALL_WRITE, __NR_write }, |
| 113 | { MTSP_SYSCALL_LSEEK32, __NR_lseek } | 114 | { MTSP_SYSCALL_LSEEK32, __NR_lseek }, |
| 115 | { MTSP_SYSCALL_IOCTL, __NR_ioctl } | ||
| 114 | }; | 116 | }; |
| 115 | 117 | ||
| 116 | static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3) | 118 | static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3) |
| @@ -189,6 +191,8 @@ void sp_work_handle_request(void) | |||
| 189 | struct mtsp_syscall_generic generic; | 191 | struct mtsp_syscall_generic generic; |
| 190 | struct mtsp_syscall_ret ret; | 192 | struct mtsp_syscall_ret ret; |
| 191 | struct kspd_notifications *n; | 193 | struct kspd_notifications *n; |
| 194 | unsigned long written; | ||
| 195 | mm_segment_t old_fs; | ||
| 192 | struct timeval tv; | 196 | struct timeval tv; |
| 193 | struct timezone tz; | 197 | struct timezone tz; |
| 194 | int cmd; | 198 | int cmd; |
| @@ -199,7 +203,11 @@ void sp_work_handle_request(void) | |||
| 199 | 203 | ||
| 200 | ret.retval = -1; | 204 | ret.retval = -1; |
| 201 | 205 | ||
| 202 | if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) { | 206 | old_fs = get_fs(); |
| 207 | set_fs(KERNEL_DS); | ||
| 208 | |||
| 209 | if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) { | ||
| 210 | set_fs(old_fs); | ||
| 203 | printk(KERN_ERR "Expected request but nothing to read\n"); | 211 | printk(KERN_ERR "Expected request but nothing to read\n"); |
| 204 | return; | 212 | return; |
| 205 | } | 213 | } |
| @@ -207,7 +215,8 @@ void sp_work_handle_request(void) | |||
| 207 | size = sc.size; | 215 | size = sc.size; |
| 208 | 216 | ||
| 209 | if (size) { | 217 | if (size) { |
| 210 | if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) { | 218 | if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) { |
| 219 | set_fs(old_fs); | ||
| 211 | printk(KERN_ERR "Expected request but nothing to read\n"); | 220 | printk(KERN_ERR "Expected request but nothing to read\n"); |
| 212 | return; | 221 | return; |
| 213 | } | 222 | } |
| @@ -280,8 +289,11 @@ void sp_work_handle_request(void) | |||
| 280 | if (vpe_getuid(SP_VPE)) | 289 | if (vpe_getuid(SP_VPE)) |
| 281 | sp_setfsuidgid( 0, 0); | 290 | sp_setfsuidgid( 0, 0); |
| 282 | 291 | ||
| 283 | if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0)) | 292 | old_fs = get_fs(); |
| 284 | < sizeof(struct mtsp_syscall_ret)) | 293 | set_fs(KERNEL_DS); |
| 294 | written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret)); | ||
| 295 | set_fs(old_fs); | ||
| 296 | if (written < sizeof(ret)) | ||
| 285 | printk("KSPD: sp_work_handle_request failed to send to SP\n"); | 297 | printk("KSPD: sp_work_handle_request failed to send to SP\n"); |
| 286 | } | 298 | } |
| 287 | 299 | ||
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 1df544c1f966..37849edd0645 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
| @@ -311,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid, | |||
| 311 | return ret; | 311 | return ret; |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | #ifdef CONFIG_SYSVIPC | ||
| 315 | |||
| 314 | asmlinkage long | 316 | asmlinkage long |
| 315 | sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) | 317 | sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) |
| 316 | { | 318 | { |
| @@ -368,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) | |||
| 368 | return err; | 370 | return err; |
| 369 | } | 371 | } |
| 370 | 372 | ||
| 373 | #else | ||
| 374 | |||
| 375 | asmlinkage long | ||
| 376 | sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) | ||
| 377 | { | ||
| 378 | return -ENOSYS; | ||
| 379 | } | ||
| 380 | |||
| 381 | #endif /* CONFIG_SYSVIPC */ | ||
| 382 | |||
| 371 | #ifdef CONFIG_MIPS32_N32 | 383 | #ifdef CONFIG_MIPS32_N32 |
| 372 | asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg) | 384 | asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg) |
| 373 | { | 385 | { |
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 59c1577ecbb3..dbd42adc52ed 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
| @@ -114,14 +114,6 @@ LEAF(_save_fp_context32) | |||
| 114 | */ | 114 | */ |
| 115 | LEAF(_restore_fp_context) | 115 | LEAF(_restore_fp_context) |
| 116 | EX lw t0, SC_FPC_CSR(a0) | 116 | EX lw t0, SC_FPC_CSR(a0) |
| 117 | |||
| 118 | /* Fail if the CSR has exceptions pending */ | ||
| 119 | srl t1, t0, 5 | ||
| 120 | and t1, t0 | ||
| 121 | andi t1, 0x1f << 7 | ||
| 122 | bnez t1, fault | ||
| 123 | nop | ||
| 124 | |||
| 125 | #ifdef CONFIG_64BIT | 117 | #ifdef CONFIG_64BIT |
| 126 | EX ldc1 $f1, SC_FPREGS+8(a0) | 118 | EX ldc1 $f1, SC_FPREGS+8(a0) |
| 127 | EX ldc1 $f3, SC_FPREGS+24(a0) | 119 | EX ldc1 $f3, SC_FPREGS+24(a0) |
| @@ -165,14 +157,6 @@ LEAF(_restore_fp_context) | |||
| 165 | LEAF(_restore_fp_context32) | 157 | LEAF(_restore_fp_context32) |
| 166 | /* Restore an o32 sigcontext. */ | 158 | /* Restore an o32 sigcontext. */ |
| 167 | EX lw t0, SC32_FPC_CSR(a0) | 159 | EX lw t0, SC32_FPC_CSR(a0) |
| 168 | |||
| 169 | /* Fail if the CSR has exceptions pending */ | ||
| 170 | srl t1, t0, 5 | ||
| 171 | and t1, t0 | ||
| 172 | andi t1, 0x1f << 7 | ||
| 173 | bnez t1, fault | ||
| 174 | nop | ||
| 175 | |||
| 176 | EX ldc1 $f0, SC32_FPREGS+0(a0) | 160 | EX ldc1 $f0, SC32_FPREGS+0(a0) |
| 177 | EX ldc1 $f2, SC32_FPREGS+16(a0) | 161 | EX ldc1 $f2, SC32_FPREGS+16(a0) |
| 178 | EX ldc1 $f4, SC32_FPREGS+32(a0) | 162 | EX ldc1 $f4, SC32_FPREGS+32(a0) |
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index e14ae09eda2b..e6e3047151a6 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c | |||
| @@ -54,6 +54,7 @@ static struct chan_waitqueues { | |||
| 54 | wait_queue_head_t rt_queue; | 54 | wait_queue_head_t rt_queue; |
| 55 | wait_queue_head_t lx_queue; | 55 | wait_queue_head_t lx_queue; |
| 56 | atomic_t in_open; | 56 | atomic_t in_open; |
| 57 | struct mutex mutex; | ||
| 57 | } channel_wqs[RTLX_CHANNELS]; | 58 | } channel_wqs[RTLX_CHANNELS]; |
| 58 | 59 | ||
| 59 | static struct irqaction irq; | 60 | static struct irqaction irq; |
| @@ -146,7 +147,7 @@ static void stopping(int vpe) | |||
| 146 | 147 | ||
| 147 | int rtlx_open(int index, int can_sleep) | 148 | int rtlx_open(int index, int can_sleep) |
| 148 | { | 149 | { |
| 149 | volatile struct rtlx_info **p; | 150 | struct rtlx_info **p; |
| 150 | struct rtlx_channel *chan; | 151 | struct rtlx_channel *chan; |
| 151 | enum rtlx_state state; | 152 | enum rtlx_state state; |
| 152 | int ret = 0; | 153 | int ret = 0; |
| @@ -179,13 +180,24 @@ int rtlx_open(int index, int can_sleep) | |||
| 179 | } | 180 | } |
| 180 | } | 181 | } |
| 181 | 182 | ||
| 183 | smp_rmb(); | ||
| 182 | if (*p == NULL) { | 184 | if (*p == NULL) { |
| 183 | if (can_sleep) { | 185 | if (can_sleep) { |
| 184 | __wait_event_interruptible(channel_wqs[index].lx_queue, | 186 | DEFINE_WAIT(wait); |
| 185 | *p != NULL, | 187 | |
| 186 | ret); | 188 | for (;;) { |
| 187 | if (ret) | 189 | prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE); |
| 190 | smp_rmb(); | ||
| 191 | if (*p != NULL) | ||
| 192 | break; | ||
| 193 | if (!signal_pending(current)) { | ||
| 194 | schedule(); | ||
| 195 | continue; | ||
| 196 | } | ||
| 197 | ret = -ERESTARTSYS; | ||
| 188 | goto out_fail; | 198 | goto out_fail; |
| 199 | } | ||
| 200 | finish_wait(&channel_wqs[index].lx_queue, &wait); | ||
| 189 | } else { | 201 | } else { |
| 190 | printk(" *vpe_get_shared is NULL. " | 202 | printk(" *vpe_get_shared is NULL. " |
| 191 | "Has an SP program been loaded?\n"); | 203 | "Has an SP program been loaded?\n"); |
| @@ -277,56 +289,52 @@ unsigned int rtlx_write_poll(int index) | |||
| 277 | return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); | 289 | return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); |
| 278 | } | 290 | } |
| 279 | 291 | ||
| 280 | static inline void copy_to(void *dst, void *src, size_t count, int user) | 292 | ssize_t rtlx_read(int index, void __user *buff, size_t count) |
| 281 | { | ||
| 282 | if (user) | ||
| 283 | copy_to_user(dst, src, count); | ||
| 284 | else | ||
| 285 | memcpy(dst, src, count); | ||
| 286 | } | ||
| 287 | |||
| 288 | static inline void copy_from(void *dst, void *src, size_t count, int user) | ||
| 289 | { | 293 | { |
| 290 | if (user) | 294 | size_t lx_write, fl = 0L; |
| 291 | copy_from_user(dst, src, count); | ||
| 292 | else | ||
| 293 | memcpy(dst, src, count); | ||
| 294 | } | ||
| 295 | |||
| 296 | ssize_t rtlx_read(int index, void *buff, size_t count, int user) | ||
| 297 | { | ||
| 298 | size_t fl = 0L; | ||
| 299 | struct rtlx_channel *lx; | 295 | struct rtlx_channel *lx; |
| 296 | unsigned long failed; | ||
| 300 | 297 | ||
| 301 | if (rtlx == NULL) | 298 | if (rtlx == NULL) |
| 302 | return -ENOSYS; | 299 | return -ENOSYS; |
| 303 | 300 | ||
| 304 | lx = &rtlx->channel[index]; | 301 | lx = &rtlx->channel[index]; |
| 305 | 302 | ||
| 303 | mutex_lock(&channel_wqs[index].mutex); | ||
| 304 | smp_rmb(); | ||
| 305 | lx_write = lx->lx_write; | ||
| 306 | |||
| 306 | /* find out how much in total */ | 307 | /* find out how much in total */ |
| 307 | count = min(count, | 308 | count = min(count, |
| 308 | (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) | 309 | (size_t)(lx_write + lx->buffer_size - lx->lx_read) |
| 309 | % lx->buffer_size); | 310 | % lx->buffer_size); |
| 310 | 311 | ||
| 311 | /* then how much from the read pointer onwards */ | 312 | /* then how much from the read pointer onwards */ |
| 312 | fl = min( count, (size_t)lx->buffer_size - lx->lx_read); | 313 | fl = min(count, (size_t)lx->buffer_size - lx->lx_read); |
| 313 | 314 | ||
| 314 | copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user); | 315 | failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); |
| 316 | if (failed) | ||
| 317 | goto out; | ||
| 315 | 318 | ||
| 316 | /* and if there is anything left at the beginning of the buffer */ | 319 | /* and if there is anything left at the beginning of the buffer */ |
| 317 | if ( count - fl ) | 320 | if (count - fl) |
| 318 | copy_to (buff + fl, lx->lx_buffer, count - fl, user); | 321 | failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); |
| 319 | 322 | ||
| 320 | /* update the index */ | 323 | out: |
| 321 | lx->lx_read += count; | 324 | count -= failed; |
| 322 | lx->lx_read %= lx->buffer_size; | 325 | |
| 326 | smp_wmb(); | ||
| 327 | lx->lx_read = (lx->lx_read + count) % lx->buffer_size; | ||
| 328 | smp_wmb(); | ||
| 329 | mutex_unlock(&channel_wqs[index].mutex); | ||
| 323 | 330 | ||
| 324 | return count; | 331 | return count; |
| 325 | } | 332 | } |
| 326 | 333 | ||
| 327 | ssize_t rtlx_write(int index, void *buffer, size_t count, int user) | 334 | ssize_t rtlx_write(int index, const void __user *buffer, size_t count) |
| 328 | { | 335 | { |
| 329 | struct rtlx_channel *rt; | 336 | struct rtlx_channel *rt; |
| 337 | size_t rt_read; ||
| 338 | unsigned long failed; ||
| 330 | size_t fl; | 339 | size_t fl; |
| 331 | 339 | ||
| 332 | if (rtlx == NULL) | 340 | if (rtlx == NULL) |
| @@ -334,24 +342,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user) | |||
| 334 | 342 | ||
| 335 | rt = &rtlx->channel[index]; | 343 | rt = &rtlx->channel[index]; |
| 336 | 344 | ||
| 345 | mutex_lock(&channel_wqs[index].mutex); | ||
| 346 | smp_rmb(); | ||
| 347 | rt_read = rt->rt_read; | ||
| 348 | |||
| 337 | /* total number of bytes to copy */ | 349 | /* total number of bytes to copy */ |
| 338 | count = min(count, | 350 | count = min(count, |
| 339 | (size_t)write_spacefree(rt->rt_read, rt->rt_write, | 351 | (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size)); |
| 340 | rt->buffer_size)); | ||
| 341 | 352 | ||
| 342 | /* first bit from write pointer to the end of the buffer, or count */ | 353 | /* first bit from write pointer to the end of the buffer, or count */ |
| 343 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); | 354 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); |
| 344 | 355 | ||
| 345 | copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user); | 356 | failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); |
| 357 | if (failed) | ||
| 358 | goto out; | ||
| 346 | 359 | ||
| 347 | /* if there's any left copy to the beginning of the buffer */ | 360 | /* if there's any left copy to the beginning of the buffer */ |
| 348 | if( count - fl ) | 361 | if (count - fl) { |
| 349 | copy_from (rt->rt_buffer, buffer + fl, count - fl, user); | 362 | failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); |
| 363 | } | ||
| 364 | |||
| 365 | out: | ||
| 366 | count -= failed; | ||
| 350 | 367 | ||
| 351 | rt->rt_write += count; | 368 | smp_wmb(); |
| 352 | rt->rt_write %= rt->buffer_size; | 369 | rt->rt_write = (rt->rt_write + count) % rt->buffer_size; |
| 370 | smp_wmb(); | ||
| 371 | mutex_unlock(&channel_wqs[index].mutex); | ||
| 353 | 372 | ||
| 354 | return(count); | 373 | return count; |
| 355 | } | 374 | } |
| 356 | 375 | ||
| 357 | 376 | ||
| @@ -403,7 +422,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count, | |||
| 403 | return 0; // -EAGAIN makes cat whinge | 422 | return 0; // -EAGAIN makes cat whinge |
| 404 | } | 423 | } |
| 405 | 424 | ||
| 406 | return rtlx_read(minor, buffer, count, 1); | 425 | return rtlx_read(minor, buffer, count); |
| 407 | } | 426 | } |
| 408 | 427 | ||
| 409 | static ssize_t file_write(struct file *file, const char __user * buffer, | 428 | static ssize_t file_write(struct file *file, const char __user * buffer, |
| @@ -429,7 +448,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer, | |||
| 429 | return ret; | 448 | return ret; |
| 430 | } | 449 | } |
| 431 | 450 | ||
| 432 | return rtlx_write(minor, (void *)buffer, count, 1); | 451 | return rtlx_write(minor, buffer, count); |
| 433 | } | 452 | } |
| 434 | 453 | ||
| 435 | static const struct file_operations rtlx_fops = { | 454 | static const struct file_operations rtlx_fops = { |
| @@ -468,6 +487,7 @@ static int rtlx_module_init(void) | |||
| 468 | init_waitqueue_head(&channel_wqs[i].rt_queue); | 487 | init_waitqueue_head(&channel_wqs[i].rt_queue); |
| 469 | init_waitqueue_head(&channel_wqs[i].lx_queue); | 488 | init_waitqueue_head(&channel_wqs[i].lx_queue); |
| 470 | atomic_set(&channel_wqs[i].in_open, 0); | 489 | atomic_set(&channel_wqs[i].in_open, 0); |
| 490 | mutex_init(&channel_wqs[i].mutex); | ||
| 471 | 491 | ||
| 472 | dev = device_create(mt_class, NULL, MKDEV(major, i), | 492 | dev = device_create(mt_class, NULL, MKDEV(major, i), |
| 473 | "%s%d", module_name, i); | 493 | "%s%d", module_name, i); |
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index fdbdbdc65b54..c0faabd52010 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
| @@ -31,4 +31,16 @@ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | |||
| 31 | */ | 31 | */ |
| 32 | extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); | 32 | extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); |
| 33 | 33 | ||
| 34 | /* Check and clear pending FPU exceptions in saved CSR */ | ||
| 35 | extern int fpcsr_pending(unsigned int __user *fpcsr); | ||
| 36 | |||
| 37 | /* Make sure we will not lose FPU ownership */ | ||
| 38 | #ifdef CONFIG_PREEMPT | ||
| 39 | #define lock_fpu_owner() preempt_disable() | ||
| 40 | #define unlock_fpu_owner() preempt_enable() | ||
| 41 | #else | ||
| 42 | #define lock_fpu_owner() pagefault_disable() | ||
| 43 | #define unlock_fpu_owner() pagefault_enable() | ||
| 44 | #endif | ||
| 45 | |||
| 34 | #endif /* __SIGNAL_COMMON_H */ | 46 | #endif /* __SIGNAL_COMMON_H */ |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index f091786187a6..07d67309451a 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/ptrace.h> | 20 | #include <linux/ptrace.h> |
| 21 | #include <linux/unistd.h> | 21 | #include <linux/unistd.h> |
| 22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
| 23 | #include <linux/uaccess.h> | ||
| 23 | 24 | ||
| 24 | #include <asm/abi.h> | 25 | #include <asm/abi.h> |
| 25 | #include <asm/asm.h> | 26 | #include <asm/asm.h> |
| @@ -27,7 +28,6 @@ | |||
| 27 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
| 28 | #include <asm/fpu.h> | 29 | #include <asm/fpu.h> |
| 29 | #include <asm/sim.h> | 30 | #include <asm/sim.h> |
| 30 | #include <asm/uaccess.h> | ||
| 31 | #include <asm/ucontext.h> | 31 | #include <asm/ucontext.h> |
| 32 | #include <asm/cpu-features.h> | 32 | #include <asm/cpu-features.h> |
| 33 | #include <asm/war.h> | 33 | #include <asm/war.h> |
| @@ -78,10 +78,51 @@ struct rt_sigframe { | |||
| 78 | /* | 78 | /* |
| 79 | * Helper routines | 79 | * Helper routines |
| 80 | */ | 80 | */ |
| 81 | static int protected_save_fp_context(struct sigcontext __user *sc) | ||
| 82 | { | ||
| 83 | int err; | ||
| 84 | while (1) { | ||
| 85 | lock_fpu_owner(); | ||
| 86 | own_fpu_inatomic(1); | ||
| 87 | err = save_fp_context(sc); /* this might fail */ | ||
| 88 | unlock_fpu_owner(); | ||
| 89 | if (likely(!err)) | ||
| 90 | break; | ||
| 91 | /* touch the sigcontext and try again */ | ||
| 92 | err = __put_user(0, &sc->sc_fpregs[0]) | | ||
| 93 | __put_user(0, &sc->sc_fpregs[31]) | | ||
| 94 | __put_user(0, &sc->sc_fpc_csr); | ||
| 95 | if (err) | ||
| 96 | break; /* really bad sigcontext */ | ||
| 97 | } | ||
| 98 | return err; | ||
| 99 | } | ||
| 100 | |||
| 101 | static int protected_restore_fp_context(struct sigcontext __user *sc) | ||
| 102 | { | ||
| 103 | int err, tmp; | ||
| 104 | while (1) { | ||
| 105 | lock_fpu_owner(); | ||
| 106 | own_fpu_inatomic(0); | ||
| 107 | err = restore_fp_context(sc); /* this might fail */ | ||
| 108 | unlock_fpu_owner(); | ||
| 109 | if (likely(!err)) | ||
| 110 | break; | ||
| 111 | /* touch the sigcontext and try again */ | ||
| 112 | err = __get_user(tmp, &sc->sc_fpregs[0]) | | ||
| 113 | __get_user(tmp, &sc->sc_fpregs[31]) | | ||
| 114 | __get_user(tmp, &sc->sc_fpc_csr); | ||
| 115 | if (err) | ||
| 116 | break; /* really bad sigcontext */ | ||
| 117 | } | ||
| 118 | return err; | ||
| 119 | } | ||
| 120 | |||
| 81 | int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | 121 | int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) |
| 82 | { | 122 | { |
| 83 | int err = 0; | 123 | int err = 0; |
| 84 | int i; | 124 | int i; |
| 125 | unsigned int used_math; | ||
| 85 | 126 | ||
| 86 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | 127 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); |
| 87 | 128 | ||
| @@ -104,24 +145,48 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | |||
| 104 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | 145 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); |
| 105 | } | 146 | } |
| 106 | 147 | ||
| 107 | err |= __put_user(!!used_math(), &sc->sc_used_math); | 148 | used_math = !!used_math(); |
| 149 | err |= __put_user(used_math, &sc->sc_used_math); | ||
| 108 | 150 | ||
| 109 | if (used_math()) { | 151 | if (used_math) { |
| 110 | /* | 152 | /* |
| 111 | * Save FPU state to signal context. Signal handler | 153 | * Save FPU state to signal context. Signal handler |
| 112 | * will "inherit" current FPU state. | 154 | * will "inherit" current FPU state. |
| 113 | */ | 155 | */ |
| 114 | preempt_disable(); | 156 | err |= protected_save_fp_context(sc); |
| 157 | } | ||
| 158 | return err; | ||
| 159 | } | ||
| 115 | 160 | ||
| 116 | if (!is_fpu_owner()) { | 161 | int fpcsr_pending(unsigned int __user *fpcsr) |
| 117 | own_fpu(); | 162 | { |
| 118 | restore_fp(current); | 163 | int err, sig = 0; |
| 119 | } | 164 | unsigned int csr, enabled; |
| 120 | err |= save_fp_context(sc); | ||
| 121 | 165 | ||
| 122 | preempt_enable(); | 166 | err = __get_user(csr, fpcsr); |
| 167 | enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5); | ||
| 168 | /* | ||
| 169 | * If the signal handler set some FPU exceptions, clear it and | ||
| 170 | * send SIGFPE. | ||
| 171 | */ | ||
| 172 | if (csr & enabled) { | ||
| 173 | csr &= ~enabled; | ||
| 174 | err |= __put_user(csr, fpcsr); | ||
| 175 | sig = SIGFPE; | ||
| 123 | } | 176 | } |
| 124 | return err; | 177 | return err ?: sig; |
| 178 | } | ||
| 179 | |||
| 180 | static int | ||
| 181 | check_and_restore_fp_context(struct sigcontext __user *sc) | ||
| 182 | { | ||
| 183 | int err, sig; | ||
| 184 | |||
| 185 | err = sig = fpcsr_pending(&sc->sc_fpc_csr); | ||
| 186 | if (err > 0) | ||
| 187 | err = 0; | ||
| 188 | err |= protected_restore_fp_context(sc); | ||
| 189 | return err ?: sig; | ||
| 125 | } | 190 | } |
| 126 | 191 | ||
| 127 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | 192 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) |
| @@ -157,19 +222,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | |||
| 157 | err |= __get_user(used_math, &sc->sc_used_math); | 222 | err |= __get_user(used_math, &sc->sc_used_math); |
| 158 | conditional_used_math(used_math); | 223 | conditional_used_math(used_math); |
| 159 | 224 | ||
| 160 | preempt_disable(); | 225 | if (used_math) { |
| 161 | |||
| 162 | if (used_math()) { | ||
| 163 | /* restore fpu context if we have used it before */ | 226 | /* restore fpu context if we have used it before */ |
| 164 | own_fpu(); | 227 | if (!err) |
| 165 | err |= restore_fp_context(sc); | 228 | err = check_and_restore_fp_context(sc); |
| 166 | } else { | 229 | } else { |
| 167 | /* signal handler may have used FPU. Give it up. */ | 230 | /* signal handler may have used FPU. Give it up. */ |
| 168 | lose_fpu(); | 231 | lose_fpu(0); |
| 169 | } | 232 | } |
| 170 | 233 | ||
| 171 | preempt_enable(); | ||
| 172 | |||
| 173 | return err; | 234 | return err; |
| 174 | } | 235 | } |
| 175 | 236 | ||
| @@ -332,6 +393,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 332 | { | 393 | { |
| 333 | struct sigframe __user *frame; | 394 | struct sigframe __user *frame; |
| 334 | sigset_t blocked; | 395 | sigset_t blocked; |
| 396 | int sig; | ||
| 335 | 397 | ||
| 336 | frame = (struct sigframe __user *) regs.regs[29]; | 398 | frame = (struct sigframe __user *) regs.regs[29]; |
| 337 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 399 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
| @@ -345,8 +407,11 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 345 | recalc_sigpending(); | 407 | recalc_sigpending(); |
| 346 | spin_unlock_irq(¤t->sighand->siglock); | 408 | spin_unlock_irq(¤t->sighand->siglock); |
| 347 | 409 | ||
| 348 | if (restore_sigcontext(®s, &frame->sf_sc)) | 410 | sig = restore_sigcontext(®s, &frame->sf_sc); |
| 411 | if (sig < 0) | ||
| 349 | goto badframe; | 412 | goto badframe; |
| 413 | else if (sig) | ||
| 414 | force_sig(sig, current); | ||
| 350 | 415 | ||
| 351 | /* | 416 | /* |
| 352 | * Don't let your children do this ... | 417 | * Don't let your children do this ... |
| @@ -368,6 +433,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 368 | struct rt_sigframe __user *frame; | 433 | struct rt_sigframe __user *frame; |
| 369 | sigset_t set; | 434 | sigset_t set; |
| 370 | stack_t st; | 435 | stack_t st; |
| 436 | int sig; | ||
| 371 | 437 | ||
| 372 | frame = (struct rt_sigframe __user *) regs.regs[29]; | 438 | frame = (struct rt_sigframe __user *) regs.regs[29]; |
| 373 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 439 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
| @@ -381,8 +447,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 381 | recalc_sigpending(); | 447 | recalc_sigpending(); |
| 382 | spin_unlock_irq(¤t->sighand->siglock); | 448 | spin_unlock_irq(¤t->sighand->siglock); |
| 383 | 449 | ||
| 384 | if (restore_sigcontext(®s, &frame->rs_uc.uc_mcontext)) | 450 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); |
| 451 | if (sig < 0) | ||
| 385 | goto badframe; | 452 | goto badframe; |
| 453 | else if (sig) | ||
| 454 | force_sig(sig, current); | ||
| 386 | 455 | ||
| 387 | if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) | 456 | if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) |
| 388 | goto badframe; | 457 | goto badframe; |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19bbef001959..b9a014411f83 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
| 23 | #include <linux/suspend.h> | 23 | #include <linux/suspend.h> |
| 24 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
| 25 | #include <linux/uaccess.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/abi.h> | 27 | #include <asm/abi.h> |
| 27 | #include <asm/asm.h> | 28 | #include <asm/asm.h> |
| @@ -29,7 +30,6 @@ | |||
| 29 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
| 30 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
| 31 | #include <asm/sim.h> | 32 | #include <asm/sim.h> |
| 32 | #include <asm/uaccess.h> | ||
| 33 | #include <asm/ucontext.h> | 33 | #include <asm/ucontext.h> |
| 34 | #include <asm/system.h> | 34 | #include <asm/system.h> |
| 35 | #include <asm/fpu.h> | 35 | #include <asm/fpu.h> |
| @@ -176,11 +176,52 @@ struct rt_sigframe32 { | |||
| 176 | /* | 176 | /* |
| 177 | * sigcontext handlers | 177 | * sigcontext handlers |
| 178 | */ | 178 | */ |
| 179 | static int protected_save_fp_context32(struct sigcontext32 __user *sc) | ||
| 180 | { | ||
| 181 | int err; | ||
| 182 | while (1) { | ||
| 183 | lock_fpu_owner(); | ||
| 184 | own_fpu_inatomic(1); | ||
| 185 | err = save_fp_context32(sc); /* this might fail */ | ||
| 186 | unlock_fpu_owner(); | ||
| 187 | if (likely(!err)) | ||
| 188 | break; | ||
| 189 | /* touch the sigcontext and try again */ | ||
| 190 | err = __put_user(0, &sc->sc_fpregs[0]) | | ||
| 191 | __put_user(0, &sc->sc_fpregs[31]) | | ||
| 192 | __put_user(0, &sc->sc_fpc_csr); | ||
| 193 | if (err) | ||
| 194 | break; /* really bad sigcontext */ | ||
| 195 | } | ||
| 196 | return err; | ||
| 197 | } | ||
| 198 | |||
| 199 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) | ||
| 200 | { | ||
| 201 | int err, tmp; | ||
| 202 | while (1) { | ||
| 203 | lock_fpu_owner(); | ||
| 204 | own_fpu_inatomic(0); | ||
| 205 | err = restore_fp_context32(sc); /* this might fail */ | ||
| 206 | unlock_fpu_owner(); | ||
| 207 | if (likely(!err)) | ||
| 208 | break; | ||
| 209 | /* touch the sigcontext and try again */ | ||
| 210 | err = __get_user(tmp, &sc->sc_fpregs[0]) | | ||
| 211 | __get_user(tmp, &sc->sc_fpregs[31]) | | ||
| 212 | __get_user(tmp, &sc->sc_fpc_csr); | ||
| 213 | if (err) | ||
| 214 | break; /* really bad sigcontext */ | ||
| 215 | } | ||
| 216 | return err; | ||
| 217 | } | ||
| 218 | |||
| 179 | static int setup_sigcontext32(struct pt_regs *regs, | 219 | static int setup_sigcontext32(struct pt_regs *regs, |
| 180 | struct sigcontext32 __user *sc) | 220 | struct sigcontext32 __user *sc) |
| 181 | { | 221 | { |
| 182 | int err = 0; | 222 | int err = 0; |
| 183 | int i; | 223 | int i; |
| 224 | u32 used_math; | ||
| 184 | 225 | ||
| 185 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | 226 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); |
| 186 | 227 | ||
| @@ -200,26 +241,31 @@ static int setup_sigcontext32(struct pt_regs *regs, | |||
| 200 | err |= __put_user(mflo3(), &sc->sc_lo3); | 241 | err |= __put_user(mflo3(), &sc->sc_lo3); |
| 201 | } | 242 | } |
| 202 | 243 | ||
| 203 | err |= __put_user(!!used_math(), &sc->sc_used_math); | 244 | used_math = !!used_math(); |
| 245 | err |= __put_user(used_math, &sc->sc_used_math); | ||
| 204 | 246 | ||
| 205 | if (used_math()) { | 247 | if (used_math) { |
| 206 | /* | 248 | /* |
| 207 | * Save FPU state to signal context. Signal handler | 249 | * Save FPU state to signal context. Signal handler |
| 208 | * will "inherit" current FPU state. | 250 | * will "inherit" current FPU state. |
| 209 | */ | 251 | */ |
| 210 | preempt_disable(); | 252 | err |= protected_save_fp_context32(sc); |
| 211 | |||
| 212 | if (!is_fpu_owner()) { | ||
| 213 | own_fpu(); | ||
| 214 | restore_fp(current); | ||
| 215 | } | ||
| 216 | err |= save_fp_context32(sc); | ||
| 217 | |||
| 218 | preempt_enable(); | ||
| 219 | } | 253 | } |
| 220 | return err; | 254 | return err; |
| 221 | } | 255 | } |
| 222 | 256 | ||
| 257 | static int | ||
| 258 | check_and_restore_fp_context32(struct sigcontext32 __user *sc) | ||
| 259 | { | ||
| 260 | int err, sig; | ||
| 261 | |||
| 262 | err = sig = fpcsr_pending(&sc->sc_fpc_csr); | ||
| 263 | if (err > 0) | ||
| 264 | err = 0; | ||
| 265 | err |= protected_restore_fp_context32(sc); | ||
| 266 | return err ?: sig; | ||
| 267 | } | ||
| 268 | |||
| 223 | static int restore_sigcontext32(struct pt_regs *regs, | 269 | static int restore_sigcontext32(struct pt_regs *regs, |
| 224 | struct sigcontext32 __user *sc) | 270 | struct sigcontext32 __user *sc) |
| 225 | { | 271 | { |
| @@ -250,19 +296,15 @@ static int restore_sigcontext32(struct pt_regs *regs, | |||
| 250 | err |= __get_user(used_math, &sc->sc_used_math); | 296 | err |= __get_user(used_math, &sc->sc_used_math); |
| 251 | conditional_used_math(used_math); | 297 | conditional_used_math(used_math); |
| 252 | 298 | ||
| 253 | preempt_disable(); | 299 | if (used_math) { |
| 254 | |||
| 255 | if (used_math()) { | ||
| 256 | /* restore fpu context if we have used it before */ | 300 | /* restore fpu context if we have used it before */ |
| 257 | own_fpu(); | 301 | if (!err) |
| 258 | err |= restore_fp_context32(sc); | 302 | err = check_and_restore_fp_context32(sc); |
| 259 | } else { | 303 | } else { |
| 260 | /* signal handler may have used FPU. Give it up. */ | 304 | /* signal handler may have used FPU. Give it up. */ |
| 261 | lose_fpu(); | 305 | lose_fpu(0); |
| 262 | } | 306 | } |
| 263 | 307 | ||
| 264 | preempt_enable(); | ||
| 265 | |||
| 266 | return err; | 308 | return err; |
| 267 | } | 309 | } |
| 268 | 310 | ||
| @@ -508,6 +550,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 508 | { | 550 | { |
| 509 | struct sigframe32 __user *frame; | 551 | struct sigframe32 __user *frame; |
| 510 | sigset_t blocked; | 552 | sigset_t blocked; |
| 553 | int sig; | ||
| 511 | 554 | ||
| 512 | frame = (struct sigframe32 __user *) regs.regs[29]; | 555 | frame = (struct sigframe32 __user *) regs.regs[29]; |
| 513 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 556 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
| @@ -521,8 +564,11 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 521 | recalc_sigpending(); | 564 | recalc_sigpending(); |
| 522 | spin_unlock_irq(¤t->sighand->siglock); | 565 | spin_unlock_irq(¤t->sighand->siglock); |
| 523 | 566 | ||
| 524 | if (restore_sigcontext32(®s, &frame->sf_sc)) | 567 | sig = restore_sigcontext32(®s, &frame->sf_sc); |
| 568 | if (sig < 0) | ||
| 525 | goto badframe; | 569 | goto badframe; |
| 570 | else if (sig) | ||
| 571 | force_sig(sig, current); | ||
| 526 | 572 | ||
| 527 | /* | 573 | /* |
| 528 | * Don't let your children do this ... | 574 | * Don't let your children do this ... |
| @@ -545,6 +591,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 545 | sigset_t set; | 591 | sigset_t set; |
| 546 | stack_t st; | 592 | stack_t st; |
| 547 | s32 sp; | 593 | s32 sp; |
| 594 | int sig; | ||
| 548 | 595 | ||
| 549 | frame = (struct rt_sigframe32 __user *) regs.regs[29]; | 596 | frame = (struct rt_sigframe32 __user *) regs.regs[29]; |
| 550 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 597 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
| @@ -558,8 +605,11 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 558 | recalc_sigpending(); | 605 | recalc_sigpending(); |
| 559 | spin_unlock_irq(¤t->sighand->siglock); | 606 | spin_unlock_irq(¤t->sighand->siglock); |
| 560 | 607 | ||
| 561 | if (restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext)) | 608 | sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); |
| 609 | if (sig < 0) | ||
| 562 | goto badframe; | 610 | goto badframe; |
| 611 | else if (sig) | ||
| 612 | force_sig(sig, current); | ||
| 563 | 613 | ||
| 564 | /* The ucontext contains a stack32_t, so we must convert! */ | 614 | /* The ucontext contains a stack32_t, so we must convert! */ |
| 565 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) | 615 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) |
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ecf1f7ecaad9..a9202fa95987 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
| @@ -127,6 +127,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 127 | sigset_t set; | 127 | sigset_t set; |
| 128 | stack_t st; | 128 | stack_t st; |
| 129 | s32 sp; | 129 | s32 sp; |
| 130 | int sig; | ||
| 130 | 131 | ||
| 131 | frame = (struct rt_sigframe_n32 __user *) regs.regs[29]; | 132 | frame = (struct rt_sigframe_n32 __user *) regs.regs[29]; |
| 132 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 133 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
| @@ -140,8 +141,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
| 140 | recalc_sigpending(); | 141 | recalc_sigpending(); |
| 141 | spin_unlock_irq(¤t->sighand->siglock); | 142 | spin_unlock_irq(¤t->sighand->siglock); |
| 142 | 143 | ||
| 143 | if (restore_sigcontext(®s, &frame->rs_uc.uc_mcontext)) | 144 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); |
| 145 | if (sig < 0) | ||
| 144 | goto badframe; | 146 | goto badframe; |
| 147 | else if (sig) | ||
| 148 | force_sig(sig, current); | ||
| 145 | 149 | ||
| 146 | /* The ucontext contains a stack32_t, so we must convert! */ | 150 | /* The ucontext contains a stack32_t, so we must convert! */ |
| 147 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) | 151 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index f253eda27fa3..5dcfab6b288e 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 5 | #include <linux/cpumask.h> | 5 | #include <linux/cpumask.h> |
| 6 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
| 7 | #include <linux/kernel_stat.h> | ||
| 7 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 8 | 9 | ||
| 9 | #include <asm/cpu.h> | 10 | #include <asm/cpu.h> |
| @@ -14,6 +15,7 @@ | |||
| 14 | #include <asm/hazards.h> | 15 | #include <asm/hazards.h> |
| 15 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
| 16 | #include <asm/smp.h> | 17 | #include <asm/smp.h> |
| 18 | #include <asm/mips-boards/maltaint.h> | ||
| 17 | #include <asm/mipsregs.h> | 19 | #include <asm/mipsregs.h> |
| 18 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
| 19 | #include <asm/time.h> | 21 | #include <asm/time.h> |
| @@ -75,7 +77,7 @@ static struct smtc_ipi_q freeIPIq; | |||
| 75 | 77 | ||
| 76 | void ipi_decode(struct smtc_ipi *); | 78 | void ipi_decode(struct smtc_ipi *); |
| 77 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); | 79 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); |
| 78 | static void setup_cross_vpe_interrupts(void); | 80 | static void setup_cross_vpe_interrupts(unsigned int nvpe); |
| 79 | void init_smtc_stats(void); | 81 | void init_smtc_stats(void); |
| 80 | 82 | ||
| 81 | /* Global SMTC Status */ | 83 | /* Global SMTC Status */ |
| @@ -168,7 +170,10 @@ __setup("tintq=", tintq); | |||
| 168 | 170 | ||
| 169 | int imstuckcount[2][8]; | 171 | int imstuckcount[2][8]; |
| 170 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ | 172 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ |
| 171 | int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}}; | 173 | int vpemask[2][8] = { |
| 174 | {0, 0, 1, 0, 0, 0, 0, 1}, | ||
| 175 | {0, 0, 0, 0, 0, 0, 0, 1} | ||
| 176 | }; | ||
| 172 | int tcnoprog[NR_CPUS]; | 177 | int tcnoprog[NR_CPUS]; |
| 173 | static atomic_t idle_hook_initialized = {0}; | 178 | static atomic_t idle_hook_initialized = {0}; |
| 174 | static int clock_hang_reported[NR_CPUS]; | 179 | static int clock_hang_reported[NR_CPUS]; |
| @@ -501,8 +506,7 @@ void mipsmt_prepare_cpus(void) | |||
| 501 | 506 | ||
| 502 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ | 507 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ |
| 503 | 508 | ||
| 504 | if (nvpe > 1) | 509 | setup_cross_vpe_interrupts(nvpe); |
| 505 | setup_cross_vpe_interrupts(); | ||
| 506 | 510 | ||
| 507 | /* Set up queue of free IPI "messages". */ | 511 | /* Set up queue of free IPI "messages". */ |
| 508 | nipi = NR_CPUS * IPIBUF_PER_CPU; | 512 | nipi = NR_CPUS * IPIBUF_PER_CPU; |
| @@ -607,7 +611,12 @@ void smtc_cpus_done(void) | |||
| 607 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, | 611 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, |
| 608 | unsigned long hwmask) | 612 | unsigned long hwmask) |
| 609 | { | 613 | { |
| 614 | unsigned int vpe = current_cpu_data.vpe_id; | ||
| 615 | |||
| 610 | irq_hwmask[irq] = hwmask; | 616 | irq_hwmask[irq] = hwmask; |
| 617 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | ||
| 618 | vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1; | ||
| 619 | #endif | ||
| 611 | 620 | ||
| 612 | return setup_irq(irq, new); | 621 | return setup_irq(irq, new); |
| 613 | } | 622 | } |
| @@ -812,12 +821,15 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
| 812 | smtc_ipi_nq(&freeIPIq, pipi); | 821 | smtc_ipi_nq(&freeIPIq, pipi); |
| 813 | switch (type_copy) { | 822 | switch (type_copy) { |
| 814 | case SMTC_CLOCK_TICK: | 823 | case SMTC_CLOCK_TICK: |
| 824 | irq_enter(); | ||
| 825 | kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++; | ||
| 815 | /* Invoke Clock "Interrupt" */ | 826 | /* Invoke Clock "Interrupt" */ |
| 816 | ipi_timer_latch[dest_copy] = 0; | 827 | ipi_timer_latch[dest_copy] = 0; |
| 817 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | 828 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG |
| 818 | clock_hang_reported[dest_copy] = 0; | 829 | clock_hang_reported[dest_copy] = 0; |
| 819 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 830 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
| 820 | local_timer_interrupt(0, NULL); | 831 | local_timer_interrupt(0, NULL); |
| 832 | irq_exit(); | ||
| 821 | break; | 833 | break; |
| 822 | case LINUX_SMP_IPI: | 834 | case LINUX_SMP_IPI: |
| 823 | switch ((int)arg_copy) { | 835 | switch ((int)arg_copy) { |
| @@ -965,8 +977,11 @@ static void ipi_irq_dispatch(void) | |||
| 965 | 977 | ||
| 966 | static struct irqaction irq_ipi; | 978 | static struct irqaction irq_ipi; |
| 967 | 979 | ||
| 968 | static void setup_cross_vpe_interrupts(void) | 980 | static void setup_cross_vpe_interrupts(unsigned int nvpe) |
| 969 | { | 981 | { |
| 982 | if (nvpe < 1) | ||
| 983 | return; | ||
| 984 | |||
| 970 | if (!cpu_has_vint) | 985 | if (!cpu_has_vint) |
| 971 | panic("SMTC Kernel requires Vectored Interupt support"); | 986 | panic("SMTC Kernel requires Vectored Interupt support"); |
| 972 | 987 | ||
| @@ -984,10 +999,17 @@ static void setup_cross_vpe_interrupts(void) | |||
| 984 | 999 | ||
| 985 | /* | 1000 | /* |
| 986 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1001 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
| 1002 | * | ||
| 1003 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
| 1004 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
| 1005 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
| 1006 | * result in a recursive call to raw_local_irq_restore(). | ||
| 987 | */ | 1007 | */ |
| 988 | 1008 | ||
| 989 | void smtc_ipi_replay(void) | 1009 | static void __smtc_ipi_replay(void) |
| 990 | { | 1010 | { |
| 1011 | unsigned int cpu = smp_processor_id(); | ||
| 1012 | |||
| 991 | /* | 1013 | /* |
| 992 | * To the extent that we've ever turned interrupts off, | 1014 | * To the extent that we've ever turned interrupts off, |
| 993 | * we may have accumulated deferred IPIs. This is subtle. | 1015 | * we may have accumulated deferred IPIs. This is subtle. |
| @@ -1002,17 +1024,30 @@ void smtc_ipi_replay(void) | |||
| 1002 | * is clear, and we'll handle it as a real pseudo-interrupt | 1024 | * is clear, and we'll handle it as a real pseudo-interrupt |
| 1003 | * and not a pseudo-pseudo interrupt. | 1025 | * and not a pseudo-pseudo interrupt. |
| 1004 | */ | 1026 | */ |
| 1005 | if (IPIQ[smp_processor_id()].depth > 0) { | 1027 | if (IPIQ[cpu].depth > 0) { |
| 1006 | struct smtc_ipi *pipi; | 1028 | while (1) { |
| 1007 | extern void self_ipi(struct smtc_ipi *); | 1029 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
| 1030 | struct smtc_ipi *pipi; | ||
| 1031 | extern void self_ipi(struct smtc_ipi *); | ||
| 1032 | |||
| 1033 | spin_lock(&q->lock); | ||
| 1034 | pipi = __smtc_ipi_dq(q); | ||
| 1035 | spin_unlock(&q->lock); | ||
| 1036 | if (!pipi) | ||
| 1037 | break; | ||
| 1008 | 1038 | ||
| 1009 | while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) { | ||
| 1010 | self_ipi(pipi); | 1039 | self_ipi(pipi); |
| 1011 | smtc_cpu_stats[smp_processor_id()].selfipis++; | 1040 | smtc_cpu_stats[cpu].selfipis++; |
| 1012 | } | 1041 | } |
| 1013 | } | 1042 | } |
| 1014 | } | 1043 | } |
| 1015 | 1044 | ||
| 1045 | void smtc_ipi_replay(void) | ||
| 1046 | { | ||
| 1047 | raw_local_irq_disable(); | ||
| 1048 | __smtc_ipi_replay(); | ||
| 1049 | } | ||
| 1050 | |||
| 1016 | EXPORT_SYMBOL(smtc_ipi_replay); | 1051 | EXPORT_SYMBOL(smtc_ipi_replay); |
| 1017 | 1052 | ||
| 1018 | void smtc_idle_loop_hook(void) | 1053 | void smtc_idle_loop_hook(void) |
| @@ -1117,7 +1152,13 @@ void smtc_idle_loop_hook(void) | |||
| 1117 | * is in use, there should never be any. | 1152 | * is in use, there should never be any. |
| 1118 | */ | 1153 | */ |
| 1119 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | 1154 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY |
| 1120 | smtc_ipi_replay(); | 1155 | { |
| 1156 | unsigned long flags; | ||
| 1157 | |||
| 1158 | local_irq_save(flags); | ||
| 1159 | __smtc_ipi_replay(); | ||
| 1160 | local_irq_restore(flags); | ||
| 1161 | } | ||
| 1121 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | 1162 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ |
| 1122 | } | 1163 | } |
| 1123 | 1164 | ||
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 18f56a9dbcfa..493cb29b8a42 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -610,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
| 610 | if (fcr31 & FPU_CSR_UNI_X) { | 610 | if (fcr31 & FPU_CSR_UNI_X) { |
| 611 | int sig; | 611 | int sig; |
| 612 | 612 | ||
| 613 | preempt_disable(); | ||
| 614 | |||
| 615 | #ifdef CONFIG_PREEMPT | ||
| 616 | if (!is_fpu_owner()) { | ||
| 617 | /* We might lose fpu before disabling preempt... */ | ||
| 618 | own_fpu(); | ||
| 619 | BUG_ON(!used_math()); | ||
| 620 | restore_fp(current); | ||
| 621 | } | ||
| 622 | #endif | ||
| 623 | /* | 613 | /* |
| 624 | * Unimplemented operation exception. If we've got the full | 614 | * Unimplemented operation exception. If we've got the full |
| 625 | * software emulator on-board, let's use it... | 615 | * software emulator on-board, let's use it... |
| @@ -630,18 +620,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
| 630 | * register operands before invoking the emulator, which seems | 620 | * register operands before invoking the emulator, which seems |
| 631 | * a bit extreme for what should be an infrequent event. | 621 | * a bit extreme for what should be an infrequent event. |
| 632 | */ | 622 | */ |
| 633 | save_fp(current); | ||
| 634 | /* Ensure 'resume' not overwrite saved fp context again. */ | 623 | /* Ensure 'resume' not overwrite saved fp context again. */ |
| 635 | lose_fpu(); | 624 | lose_fpu(1); |
| 636 | |||
| 637 | preempt_enable(); | ||
| 638 | 625 | ||
| 639 | /* Run the emulator */ | 626 | /* Run the emulator */ |
| 640 | sig = fpu_emulator_cop1Handler (regs, ¤t->thread.fpu, 1); | 627 | sig = fpu_emulator_cop1Handler (regs, ¤t->thread.fpu, 1); |
| 641 | 628 | ||
| 642 | preempt_disable(); | ||
| 643 | |||
| 644 | own_fpu(); /* Using the FPU again. */ | ||
| 645 | /* | 629 | /* |
| 646 | * We can't allow the emulated instruction to leave any of | 630 | * We can't allow the emulated instruction to leave any of |
| 647 | * the cause bit set in $fcr31. | 631 | * the cause bit set in $fcr31. |
| @@ -649,9 +633,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
| 649 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | 633 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; |
| 650 | 634 | ||
| 651 | /* Restore the hardware register state */ | 635 | /* Restore the hardware register state */ |
| 652 | restore_fp(current); | 636 | own_fpu(1); /* Using the FPU again. */ |
| 653 | |||
| 654 | preempt_enable(); | ||
| 655 | 637 | ||
| 656 | /* If something went wrong, signal */ | 638 | /* If something went wrong, signal */ |
| 657 | if (sig) | 639 | if (sig) |
| @@ -668,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
| 668 | unsigned int opcode, bcode; | 650 | unsigned int opcode, bcode; |
| 669 | siginfo_t info; | 651 | siginfo_t info; |
| 670 | 652 | ||
| 671 | if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 653 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) |
| 672 | goto out_sigsegv; | 654 | goto out_sigsegv; |
| 673 | 655 | ||
| 674 | /* | 656 | /* |
| @@ -718,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs) | |||
| 718 | unsigned int opcode, tcode = 0; | 700 | unsigned int opcode, tcode = 0; |
| 719 | siginfo_t info; | 701 | siginfo_t info; |
| 720 | 702 | ||
| 721 | if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 703 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) |
| 722 | goto out_sigsegv; | 704 | goto out_sigsegv; |
| 723 | 705 | ||
| 724 | /* Immediate versions don't provide a code. */ | 706 | /* Immediate versions don't provide a code. */ |
| @@ -791,21 +773,15 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 791 | break; | 773 | break; |
| 792 | 774 | ||
| 793 | case 1: | 775 | case 1: |
| 794 | preempt_disable(); | 776 | if (used_math()) /* Using the FPU again. */ |
| 795 | 777 | own_fpu(1); | |
| 796 | own_fpu(); | 778 | else { /* First time FPU user. */ |
| 797 | if (used_math()) { /* Using the FPU again. */ | ||
| 798 | restore_fp(current); | ||
| 799 | } else { /* First time FPU user. */ | ||
| 800 | init_fpu(); | 779 | init_fpu(); |
| 801 | set_used_math(); | 780 | set_used_math(); |
| 802 | } | 781 | } |
| 803 | 782 | ||
| 804 | if (cpu_has_fpu) { | 783 | if (!raw_cpu_has_fpu) { |
| 805 | preempt_enable(); | ||
| 806 | } else { | ||
| 807 | int sig; | 784 | int sig; |
| 808 | preempt_enable(); | ||
| 809 | sig = fpu_emulator_cop1Handler(regs, | 785 | sig = fpu_emulator_cop1Handler(regs, |
| 810 | ¤t->thread.fpu, 0); | 786 | ¤t->thread.fpu, 0); |
| 811 | if (sig) | 787 | if (sig) |
| @@ -846,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 846 | 822 | ||
| 847 | case 2: | 823 | case 2: |
| 848 | case 3: | 824 | case 3: |
| 849 | die_if_kernel("do_cpu invoked from kernel context!", regs); | ||
| 850 | break; | 825 | break; |
| 851 | } | 826 | } |
| 852 | 827 | ||
| @@ -1259,26 +1234,26 @@ static inline void mips_srs_init(void) | |||
| 1259 | /* | 1234 | /* |
| 1260 | * This is used by native signal handling | 1235 | * This is used by native signal handling |
| 1261 | */ | 1236 | */ |
| 1262 | asmlinkage int (*save_fp_context)(struct sigcontext *sc); | 1237 | asmlinkage int (*save_fp_context)(struct sigcontext __user *sc); |
| 1263 | asmlinkage int (*restore_fp_context)(struct sigcontext *sc); | 1238 | asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc); |
| 1264 | 1239 | ||
| 1265 | extern asmlinkage int _save_fp_context(struct sigcontext *sc); | 1240 | extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); |
| 1266 | extern asmlinkage int _restore_fp_context(struct sigcontext *sc); | 1241 | extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); |
| 1267 | 1242 | ||
| 1268 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); | 1243 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); |
| 1269 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); | 1244 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); |
| 1270 | 1245 | ||
| 1271 | #ifdef CONFIG_SMP | 1246 | #ifdef CONFIG_SMP |
| 1272 | static int smp_save_fp_context(struct sigcontext *sc) | 1247 | static int smp_save_fp_context(struct sigcontext __user *sc) |
| 1273 | { | 1248 | { |
| 1274 | return cpu_has_fpu | 1249 | return raw_cpu_has_fpu |
| 1275 | ? _save_fp_context(sc) | 1250 | ? _save_fp_context(sc) |
| 1276 | : fpu_emulator_save_context(sc); | 1251 | : fpu_emulator_save_context(sc); |
| 1277 | } | 1252 | } |
| 1278 | 1253 | ||
| 1279 | static int smp_restore_fp_context(struct sigcontext *sc) | 1254 | static int smp_restore_fp_context(struct sigcontext __user *sc) |
| 1280 | { | 1255 | { |
| 1281 | return cpu_has_fpu | 1256 | return raw_cpu_has_fpu |
| 1282 | ? _restore_fp_context(sc) | 1257 | ? _restore_fp_context(sc) |
| 1283 | : fpu_emulator_restore_context(sc); | 1258 | : fpu_emulator_restore_context(sc); |
| 1284 | } | 1259 | } |
| @@ -1306,14 +1281,14 @@ static inline void signal_init(void) | |||
| 1306 | /* | 1281 | /* |
| 1307 | * This is used by 32-bit signal stuff on the 64-bit kernel | 1282 | * This is used by 32-bit signal stuff on the 64-bit kernel |
| 1308 | */ | 1283 | */ |
| 1309 | asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc); | 1284 | asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc); |
| 1310 | asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc); | 1285 | asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc); |
| 1311 | 1286 | ||
| 1312 | extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc); | 1287 | extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); |
| 1313 | extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc); | 1288 | extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); |
| 1314 | 1289 | ||
| 1315 | extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc); | 1290 | extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); |
| 1316 | extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc); | 1291 | extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); |
| 1317 | 1292 | ||
| 1318 | static inline void signal32_init(void) | 1293 | static inline void signal32_init(void) |
| 1319 | { | 1294 | { |
