diff options
Diffstat (limited to 'arch/powerpc/kernel/signal_32.c')
-rw-r--r-- | arch/powerpc/kernel/signal_32.c | 184 |
1 files changed, 164 insertions, 20 deletions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index ad6943468ee9..3e80aa32b8b0 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -68,6 +68,13 @@ | |||
68 | #define ucontext ucontext32 | 68 | #define ucontext ucontext32 |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Userspace code may pass a ucontext which doesn't include VSX added | ||
72 | * at the end. We need to check for this case. | ||
73 | */ | ||
74 | #define UCONTEXTSIZEWITHOUTVSX \ | ||
75 | (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32)) | ||
76 | |||
77 | /* | ||
71 | * Returning 0 means we return to userspace via | 78 | * Returning 0 means we return to userspace via |
72 | * ret_from_except and thus restore all user | 79 | * ret_from_except and thus restore all user |
73 | * registers from *regs. This is what we need | 80 | * registers from *regs. This is what we need |
@@ -243,7 +250,7 @@ long sys_sigsuspend(old_sigset_t mask) | |||
243 | 250 | ||
244 | current->state = TASK_INTERRUPTIBLE; | 251 | current->state = TASK_INTERRUPTIBLE; |
245 | schedule(); | 252 | schedule(); |
246 | set_thread_flag(TIF_RESTORE_SIGMASK); | 253 | set_restore_sigmask(); |
247 | return -ERESTARTNOHAND; | 254 | return -ERESTARTNOHAND; |
248 | } | 255 | } |
249 | 256 | ||
@@ -328,6 +335,75 @@ struct rt_sigframe { | |||
328 | int abigap[56]; | 335 | int abigap[56]; |
329 | }; | 336 | }; |
330 | 337 | ||
338 | #ifdef CONFIG_VSX | ||
339 | unsigned long copy_fpr_to_user(void __user *to, | ||
340 | struct task_struct *task) | ||
341 | { | ||
342 | double buf[ELF_NFPREG]; | ||
343 | int i; | ||
344 | |||
345 | /* save FPR copy to local buffer then write to userspace */ | ||
346 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | ||
347 | buf[i] = task->thread.TS_FPR(i); | ||
348 | memcpy(&buf[i], &task->thread.fpscr, sizeof(double)); | ||
349 | return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); | ||
350 | } | ||
351 | |||
352 | unsigned long copy_fpr_from_user(struct task_struct *task, | ||
353 | void __user *from) | ||
354 | { | ||
355 | double buf[ELF_NFPREG]; | ||
356 | int i; | ||
357 | |||
358 | if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) | ||
359 | return 1; | ||
360 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | ||
361 | task->thread.TS_FPR(i) = buf[i]; | ||
362 | memcpy(&task->thread.fpscr, &buf[i], sizeof(double)); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | unsigned long copy_vsx_to_user(void __user *to, | ||
368 | struct task_struct *task) | ||
369 | { | ||
370 | double buf[ELF_NVSRHALFREG]; | ||
371 | int i; | ||
372 | |||
373 | /* save VSR lower-half copy to local buffer then write to userspace */ | ||
374 | for (i = 0; i < ELF_NVSRHALFREG; i++) | ||
375 | buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET]; | ||
376 | return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); | ||
377 | } | ||
378 | |||
379 | unsigned long copy_vsx_from_user(struct task_struct *task, | ||
380 | void __user *from) | ||
381 | { | ||
382 | double buf[ELF_NVSRHALFREG]; | ||
383 | int i; | ||
384 | |||
385 | if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) | ||
386 | return 1; | ||
387 | for (i = 0; i < ELF_NVSRHALFREG ; i++) | ||
388 | task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; | ||
389 | return 0; | ||
390 | } | ||
391 | #else | ||
392 | inline unsigned long copy_fpr_to_user(void __user *to, | ||
393 | struct task_struct *task) | ||
394 | { | ||
395 | return __copy_to_user(to, task->thread.fpr, | ||
396 | ELF_NFPREG * sizeof(double)); | ||
397 | } | ||
398 | |||
399 | inline unsigned long copy_fpr_from_user(struct task_struct *task, | ||
400 | void __user *from) | ||
401 | { | ||
402 | return __copy_from_user(task->thread.fpr, from, | ||
403 | ELF_NFPREG * sizeof(double)); | ||
404 | } | ||
405 | #endif | ||
406 | |||
331 | /* | 407 | /* |
332 | * Save the current user registers on the user stack. | 408 | * Save the current user registers on the user stack. |
333 | * We only save the altivec/spe registers if the process has used | 409 | * We only save the altivec/spe registers if the process has used |
@@ -336,13 +412,13 @@ struct rt_sigframe { | |||
336 | static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | 412 | static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, |
337 | int sigret) | 413 | int sigret) |
338 | { | 414 | { |
415 | unsigned long msr = regs->msr; | ||
416 | |||
339 | /* Make sure floating point registers are stored in regs */ | 417 | /* Make sure floating point registers are stored in regs */ |
340 | flush_fp_to_thread(current); | 418 | flush_fp_to_thread(current); |
341 | 419 | ||
342 | /* save general and floating-point registers */ | 420 | /* save general registers */ |
343 | if (save_general_regs(regs, frame) || | 421 | if (save_general_regs(regs, frame)) |
344 | __copy_to_user(&frame->mc_fregs, current->thread.fpr, | ||
345 | ELF_NFPREG * sizeof(double))) | ||
346 | return 1; | 422 | return 1; |
347 | 423 | ||
348 | #ifdef CONFIG_ALTIVEC | 424 | #ifdef CONFIG_ALTIVEC |
@@ -354,8 +430,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | |||
354 | return 1; | 430 | return 1; |
355 | /* set MSR_VEC in the saved MSR value to indicate that | 431 | /* set MSR_VEC in the saved MSR value to indicate that |
356 | frame->mc_vregs contains valid data */ | 432 | frame->mc_vregs contains valid data */ |
357 | if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR])) | 433 | msr |= MSR_VEC; |
358 | return 1; | ||
359 | } | 434 | } |
360 | /* else assert((regs->msr & MSR_VEC) == 0) */ | 435 | /* else assert((regs->msr & MSR_VEC) == 0) */ |
361 | 436 | ||
@@ -367,7 +442,22 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | |||
367 | if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) | 442 | if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) |
368 | return 1; | 443 | return 1; |
369 | #endif /* CONFIG_ALTIVEC */ | 444 | #endif /* CONFIG_ALTIVEC */ |
370 | 445 | if (copy_fpr_to_user(&frame->mc_fregs, current)) | |
446 | return 1; | ||
447 | #ifdef CONFIG_VSX | ||
448 | /* | ||
449 | * Copy VSR 0-31 upper half from thread_struct to local | ||
450 | * buffer, then write that to userspace. Also set MSR_VSX in | ||
451 | * the saved MSR value to indicate that frame->mc_vregs | ||
452 | * contains valid data | ||
453 | */ | ||
454 | if (current->thread.used_vsr) { | ||
455 | __giveup_vsx(current); | ||
456 | if (copy_vsx_to_user(&frame->mc_vsregs, current)) | ||
457 | return 1; | ||
458 | msr |= MSR_VSX; | ||
459 | } | ||
460 | #endif /* CONFIG_VSX */ | ||
371 | #ifdef CONFIG_SPE | 461 | #ifdef CONFIG_SPE |
372 | /* save spe registers */ | 462 | /* save spe registers */ |
373 | if (current->thread.used_spe) { | 463 | if (current->thread.used_spe) { |
@@ -377,8 +467,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | |||
377 | return 1; | 467 | return 1; |
378 | /* set MSR_SPE in the saved MSR value to indicate that | 468 | /* set MSR_SPE in the saved MSR value to indicate that |
379 | frame->mc_vregs contains valid data */ | 469 | frame->mc_vregs contains valid data */ |
380 | if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR])) | 470 | msr |= MSR_SPE; |
381 | return 1; | ||
382 | } | 471 | } |
383 | /* else assert((regs->msr & MSR_SPE) == 0) */ | 472 | /* else assert((regs->msr & MSR_SPE) == 0) */ |
384 | 473 | ||
@@ -387,6 +476,8 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | |||
387 | return 1; | 476 | return 1; |
388 | #endif /* CONFIG_SPE */ | 477 | #endif /* CONFIG_SPE */ |
389 | 478 | ||
479 | if (__put_user(msr, &frame->mc_gregs[PT_MSR])) | ||
480 | return 1; | ||
390 | if (sigret) { | 481 | if (sigret) { |
391 | /* Set up the sigreturn trampoline: li r0,sigret; sc */ | 482 | /* Set up the sigreturn trampoline: li r0,sigret; sc */ |
392 | if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) | 483 | if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) |
@@ -409,6 +500,9 @@ static long restore_user_regs(struct pt_regs *regs, | |||
409 | long err; | 500 | long err; |
410 | unsigned int save_r2 = 0; | 501 | unsigned int save_r2 = 0; |
411 | unsigned long msr; | 502 | unsigned long msr; |
503 | #ifdef CONFIG_VSX | ||
504 | int i; | ||
505 | #endif | ||
412 | 506 | ||
413 | /* | 507 | /* |
414 | * restore general registers but not including MSR or SOFTE. Also | 508 | * restore general registers but not including MSR or SOFTE. Also |
@@ -436,16 +530,11 @@ static long restore_user_regs(struct pt_regs *regs, | |||
436 | */ | 530 | */ |
437 | discard_lazy_cpu_state(); | 531 | discard_lazy_cpu_state(); |
438 | 532 | ||
439 | /* force the process to reload the FP registers from | ||
440 | current->thread when it next does FP instructions */ | ||
441 | regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); | ||
442 | if (__copy_from_user(current->thread.fpr, &sr->mc_fregs, | ||
443 | sizeof(sr->mc_fregs))) | ||
444 | return 1; | ||
445 | |||
446 | #ifdef CONFIG_ALTIVEC | 533 | #ifdef CONFIG_ALTIVEC |
447 | /* force the process to reload the altivec registers from | 534 | /* |
448 | current->thread when it next does altivec instructions */ | 535 | * Force the process to reload the altivec registers from |
536 | * current->thread when it next does altivec instructions | ||
537 | */ | ||
449 | regs->msr &= ~MSR_VEC; | 538 | regs->msr &= ~MSR_VEC; |
450 | if (msr & MSR_VEC) { | 539 | if (msr & MSR_VEC) { |
451 | /* restore altivec registers from the stack */ | 540 | /* restore altivec registers from the stack */ |
@@ -459,6 +548,31 @@ static long restore_user_regs(struct pt_regs *regs, | |||
459 | if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) | 548 | if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) |
460 | return 1; | 549 | return 1; |
461 | #endif /* CONFIG_ALTIVEC */ | 550 | #endif /* CONFIG_ALTIVEC */ |
551 | if (copy_fpr_from_user(current, &sr->mc_fregs)) | ||
552 | return 1; | ||
553 | |||
554 | #ifdef CONFIG_VSX | ||
555 | /* | ||
556 | * Force the process to reload the VSX registers from | ||
557 | * current->thread when it next does VSX instruction. | ||
558 | */ | ||
559 | regs->msr &= ~MSR_VSX; | ||
560 | if (msr & MSR_VSX) { | ||
561 | /* | ||
562 | * Restore VSX registers from the stack to a local | ||
563 | * buffer, then write this out to the thread_struct | ||
564 | */ | ||
565 | if (copy_vsx_from_user(current, &sr->mc_vsregs)) | ||
566 | return 1; | ||
567 | } else if (current->thread.used_vsr) | ||
568 | for (i = 0; i < 32 ; i++) | ||
569 | current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; | ||
570 | #endif /* CONFIG_VSX */ | ||
571 | /* | ||
572 | * force the process to reload the FP registers from | ||
573 | * current->thread when it next does FP instructions | ||
574 | */ | ||
575 | regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); | ||
462 | 576 | ||
463 | #ifdef CONFIG_SPE | 577 | #ifdef CONFIG_SPE |
464 | /* force the process to reload the spe registers from | 578 | /* force the process to reload the spe registers from |
@@ -823,12 +937,42 @@ long sys_swapcontext(struct ucontext __user *old_ctx, | |||
823 | { | 937 | { |
824 | unsigned char tmp; | 938 | unsigned char tmp; |
825 | 939 | ||
940 | #ifdef CONFIG_PPC64 | ||
941 | unsigned long new_msr = 0; | ||
942 | |||
943 | if (new_ctx && | ||
944 | __get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR])) | ||
945 | return -EFAULT; | ||
946 | /* | ||
947 | * Check that the context is not smaller than the original | ||
948 | * size (with VMX but without VSX) | ||
949 | */ | ||
950 | if (ctx_size < UCONTEXTSIZEWITHOUTVSX) | ||
951 | return -EINVAL; | ||
952 | /* | ||
953 | * If the new context state sets the MSR VSX bits but | ||
954 | * it doesn't provide VSX state. | ||
955 | */ | ||
956 | if ((ctx_size < sizeof(struct ucontext)) && | ||
957 | (new_msr & MSR_VSX)) | ||
958 | return -EINVAL; | ||
959 | #ifdef CONFIG_VSX | ||
960 | /* | ||
961 | * If userspace doesn't provide enough room for VSX data, | ||
962 | * but current thread has used VSX, we don't have anywhere | ||
963 | * to store the full context back into. | ||
964 | */ | ||
965 | if ((ctx_size < sizeof(struct ucontext)) && | ||
966 | (current->thread.used_vsr && old_ctx)) | ||
967 | return -EINVAL; | ||
968 | #endif | ||
969 | #else | ||
826 | /* Context size is for future use. Right now, we only make sure | 970 | /* Context size is for future use. Right now, we only make sure |
827 | * we are passed something we understand | 971 | * we are passed something we understand |
828 | */ | 972 | */ |
829 | if (ctx_size < sizeof(struct ucontext)) | 973 | if (ctx_size < sizeof(struct ucontext)) |
830 | return -EINVAL; | 974 | return -EINVAL; |
831 | 975 | #endif | |
832 | if (old_ctx != NULL) { | 976 | if (old_ctx != NULL) { |
833 | struct mcontext __user *mctx; | 977 | struct mcontext __user *mctx; |
834 | 978 | ||