author	Michael Neuling <mikey@neuling.org>	2013-02-13 11:21:41 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-02-15 01:02:23 -0500
commit	2b0a576d15e0e14751f00f9c87e46bad27f217e7 (patch)
tree	bd160f2f67cbae826fe0216bb9e217d9c3a290c8
parent	bc2a9408fa65195288b41751016c36fd00a75a85 (diff)
powerpc: Add new transactional memory state to the signal context

This adds the new transactional memory architected state to the signal
context in both 32 and 64 bit.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/reg.h	|   1
-rw-r--r--	arch/powerpc/kernel/signal.h	|   8
-rw-r--r--	arch/powerpc/kernel/signal_32.c	| 500
-rw-r--r--	arch/powerpc/kernel/signal_64.c	| 337
4 files changed, 830 insertions(+), 16 deletions(-)
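For orientation before the diff: with this patch, a signal delivered while a transaction is active carries a second ucontext reachable through uc_link, holding the transactional register state, while the usual uc_mcontext keeps the checkpointed (rolled-back) state. Below is a minimal userspace sketch of a TM-aware handler, illustrative only and not part of the patch; it assumes the 64-bit frame laid out by handle_rt_signal64() and the gp_regs/PT_NIP names from the powerpc64 headers.

/*
 * Illustrative only -- not part of this patch.  A TM-aware handler can
 * follow uc_link to the second (transactional) ucontext; a handler that
 * ignores uc_link keeps seeing the checkpointed state as before.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <asm/ptrace.h>		/* PT_NIP (assumed powerpc64 layout) */

static void handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;			/* checkpointed register state */
	ucontext_t *uc_tm = uc->uc_link;	/* transactional state, if any */

	(void)info;
	/* printf() is not async-signal-safe; acceptable for a demo only */
	if (uc_tm)
		printf("sig %d inside a transaction: speculative NIP 0x%lx, checkpointed NIP 0x%lx\n",
		       sig,
		       (unsigned long)uc_tm->uc_mcontext.gp_regs[PT_NIP],
		       (unsigned long)uc->uc_mcontext.gp_regs[PT_NIP]);
	else
		printf("sig %d outside any transaction\n", sig);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* three-argument handler form */
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}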
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index eee2a60994bf..7035e608f3fa 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -120,6 +120,7 @@
 #define TM_CAUSE_FAC_UNAV	0xfa
 #define TM_CAUSE_SYSCALL	0xf9 /* Persistent */
 #define TM_CAUSE_MISC		0xf6
+#define TM_CAUSE_SIGNAL		0xf4
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index e00acb413934..ec84c901ceab 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -25,13 +25,21 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 
 extern unsigned long copy_fpr_to_user(void __user *to,
 				      struct task_struct *task);
+extern unsigned long copy_transact_fpr_to_user(void __user *to,
+						struct task_struct *task);
 extern unsigned long copy_fpr_from_user(struct task_struct *task,
 					void __user *from);
+extern unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+						  void __user *from);
 #ifdef CONFIG_VSX
 extern unsigned long copy_vsx_to_user(void __user *to,
 				      struct task_struct *task);
+extern unsigned long copy_transact_vsx_to_user(void __user *to,
+						struct task_struct *task);
 extern unsigned long copy_vsx_from_user(struct task_struct *task,
 					void __user *from);
+extern unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+						  void __user *from);
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 804e323c139d..e4a88d340de6 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -43,6 +43,7 @@
 #include <asm/sigcontext.h>
 #include <asm/vdso.h>
 #include <asm/switch_to.h>
+#include <asm/tm.h>
 #ifdef CONFIG_PPC64
 #include "ppc32.h"
 #include <asm/unistd.h>
@@ -293,6 +294,10 @@ long sys_sigaction(int sig, struct old_sigaction __user *act,
 struct sigframe {
 	struct sigcontext sctx;		/* the sigcontext */
 	struct mcontext	mctx;		/* all the register values */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	struct sigcontext sctx_transact;
+	struct mcontext	mctx_transact;
+#endif
 	/*
 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
 	 * regs and 18 fp regs below sp before decrementing it.
@@ -321,6 +326,9 @@ struct rt_sigframe {
 	struct siginfo info;
 #endif
 	struct ucontext	uc;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	struct ucontext	uc_transact;
+#endif
 	/*
 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
 	 * regs and 18 fp regs below sp before decrementing it.
@@ -381,6 +389,61 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
 		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 	return 0;
 }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+unsigned long copy_transact_fpr_to_user(void __user *to,
+					struct task_struct *task)
+{
+	double buf[ELF_NFPREG];
+	int i;
+
+	/* save FPR copy to local buffer then write to the thread_struct */
+	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+		buf[i] = task->thread.TS_TRANS_FPR(i);
+	memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+					  void __user *from)
+{
+	double buf[ELF_NFPREG];
+	int i;
+
+	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+		return 1;
+	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+		task->thread.TS_TRANS_FPR(i) = buf[i];
+	memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+
+	return 0;
+}
+
+unsigned long copy_transact_vsx_to_user(void __user *to,
+					struct task_struct *task)
+{
+	double buf[ELF_NVSRHALFREG];
+	int i;
+
+	/* save FPR copy to local buffer then write to the thread_struct */
+	for (i = 0; i < ELF_NVSRHALFREG; i++)
+		buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+					  void __user *from)
+{
+	double buf[ELF_NVSRHALFREG];
+	int i;
+
+	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+		return 1;
+	for (i = 0; i < ELF_NVSRHALFREG ; i++)
+		task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+	return 0;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #else
 inline unsigned long copy_fpr_to_user(void __user *to,
 				      struct task_struct *task)
@@ -395,6 +458,22 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
 	return __copy_from_user(task->thread.fpr, from,
 			      ELF_NFPREG * sizeof(double));
 }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+inline unsigned long copy_transact_fpr_to_user(void __user *to,
+					 struct task_struct *task)
+{
+	return __copy_to_user(to, task->thread.transact_fpr,
+			      ELF_NFPREG * sizeof(double));
+}
+
+inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+						 void __user *from)
+{
+	return __copy_from_user(task->thread.transact_fpr, from,
+				ELF_NFPREG * sizeof(double));
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #endif
 
 /*
@@ -483,6 +562,156 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Save the current user registers on the user stack.
+ * We only save the altivec/spe registers if the process has used
+ * altivec/spe instructions at some point.
+ * We also save the transactional registers to a second ucontext in the
+ * frame.
+ *
+ * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
+ */
+static int save_tm_user_regs(struct pt_regs *regs,
+			     struct mcontext __user *frame,
+			     struct mcontext __user *tm_frame, int sigret)
+{
+	unsigned long msr = regs->msr;
+
+	/* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
+	 * thread.transact_fpr[], thread.transact_vr[], etc.
+	 */
+	tm_enable();
+	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
+
+	/* Make sure floating point registers are stored in regs */
+	flush_fp_to_thread(current);
+
+	/* Save both sets of general registers */
+	if (save_general_regs(&current->thread.ckpt_regs, frame)
+	    || save_general_regs(regs, tm_frame))
+		return 1;
+
+	/* Stash the top half of the 64bit MSR into the 32bit MSR word
+	 * of the transactional mcontext.  This way we have a backward-compatible
+	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
+	 * also look at what type of transaction (T or S) was active at the
+	 * time of the signal.
+	 */
+	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
+		return 1;
+
+#ifdef CONFIG_ALTIVEC
+	/* save altivec registers */
+	if (current->thread.used_vr) {
+		flush_altivec_to_thread(current);
+		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+				   ELF_NVRREG * sizeof(vector128)))
+			return 1;
+		if (msr & MSR_VEC) {
+			if (__copy_to_user(&tm_frame->mc_vregs,
+					   current->thread.transact_vr,
+					   ELF_NVRREG * sizeof(vector128)))
+				return 1;
+		} else {
+			if (__copy_to_user(&tm_frame->mc_vregs,
+					   current->thread.vr,
+					   ELF_NVRREG * sizeof(vector128)))
+				return 1;
+		}
+
+		/* set MSR_VEC in the saved MSR value to indicate that
+		 * frame->mc_vregs contains valid data
+		 */
+		msr |= MSR_VEC;
+	}
+
+	/* We always copy to/from vrsave, it's 0 if we don't have or don't
+	 * use altivec. Since VSCR only contains 32 bits saved in the least
+	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
+	 * most significant bits of that same vector. --BenH
+	 */
+	if (__put_user(current->thread.vrsave,
+		       (u32 __user *)&frame->mc_vregs[32]))
+		return 1;
+	if (msr & MSR_VEC) {
+		if (__put_user(current->thread.transact_vrsave,
+			       (u32 __user *)&tm_frame->mc_vregs[32]))
+			return 1;
+	} else {
+		if (__put_user(current->thread.vrsave,
+			       (u32 __user *)&tm_frame->mc_vregs[32]))
+			return 1;
+	}
+#endif /* CONFIG_ALTIVEC */
+
+	if (copy_fpr_to_user(&frame->mc_fregs, current))
+		return 1;
+	if (msr & MSR_FP) {
+		if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
+			return 1;
+	} else {
+		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
+			return 1;
+	}
+
+#ifdef CONFIG_VSX
+	/*
+	 * Copy VSR 0-31 upper half from thread_struct to local
+	 * buffer, then write that to userspace.  Also set MSR_VSX in
+	 * the saved MSR value to indicate that frame->mc_vregs
+	 * contains valid data
+	 */
+	if (current->thread.used_vsr) {
+		__giveup_vsx(current);
+		if (copy_vsx_to_user(&frame->mc_vsregs, current))
+			return 1;
+		if (msr & MSR_VSX) {
+			if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
+						      current))
+				return 1;
+		} else {
+			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
+				return 1;
+		}
+
+		msr |= MSR_VSX;
+	}
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+	/* SPE regs are not checkpointed with TM, so this section is
+	 * simply the same as in save_user_regs().
+	 */
+	if (current->thread.used_spe) {
+		flush_spe_to_thread(current);
+		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
+				   ELF_NEVRREG * sizeof(u32)))
+			return 1;
+		/* set MSR_SPE in the saved MSR value to indicate that
+		 * frame->mc_vregs contains valid data */
+		msr |= MSR_SPE;
+	}
+
+	/* We always copy to/from spefscr */
+	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
+		return 1;
+#endif /* CONFIG_SPE */
+
+	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
+		return 1;
+	if (sigret) {
+		/* Set up the sigreturn trampoline: li r0,sigret; sc */
+		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
+		    || __put_user(0x44000002UL, &frame->tramp[1]))
+			return 1;
+		flush_icache_range((unsigned long) &frame->tramp[0],
+				   (unsigned long) &frame->tramp[2]);
+	}
+
+	return 0;
+}
+#endif
+
 /*
  * Restore the current user register values from the user stack,
  * (except for MSR).
@@ -588,6 +817,139 @@ static long restore_user_regs(struct pt_regs *regs,
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Restore the current user register values from the user stack, except for
+ * MSR, and recheckpoint the original checkpointed register state for processes
+ * in transactions.
+ */
+static long restore_tm_user_regs(struct pt_regs *regs,
+				 struct mcontext __user *sr,
+				 struct mcontext __user *tm_sr)
+{
+	long err;
+	unsigned long msr;
+#ifdef CONFIG_VSX
+	int i;
+#endif
+
+	/*
+	 * restore general registers but not including MSR or SOFTE. Also
+	 * take care of keeping r2 (TLS) intact if not a signal.
+	 * See comment in signal_64.c:restore_tm_sigcontexts();
+	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
+	 * were set by the signal delivery.
+	 */
+	err = restore_general_regs(regs, tm_sr);
+	err |= restore_general_regs(&current->thread.ckpt_regs, sr);
+
+	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
+
+	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
+	if (err)
+		return 1;
+
+	/* Restore the previous little-endian mode */
+	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+	/*
+	 * Do this before updating the thread state in
+	 * current->thread.fpr/vr/evr.  That way, if we get preempted
+	 * and another task grabs the FPU/Altivec/SPE, it won't be
+	 * tempted to save the current CPU state into the thread_struct
+	 * and corrupt what we are writing there.
+	 */
+	discard_lazy_cpu_state();
+
+#ifdef CONFIG_ALTIVEC
+	regs->msr &= ~MSR_VEC;
+	if (msr & MSR_VEC) {
+		/* restore altivec registers from the stack */
+		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+				     sizeof(sr->mc_vregs)) ||
+		    __copy_from_user(current->thread.transact_vr,
+				     &tm_sr->mc_vregs,
+				     sizeof(sr->mc_vregs)))
+			return 1;
+	} else if (current->thread.used_vr) {
+		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+		memset(current->thread.transact_vr, 0,
+		       ELF_NVRREG * sizeof(vector128));
+	}
+
+	/* Always get VRSAVE back */
+	if (__get_user(current->thread.vrsave,
+		       (u32 __user *)&sr->mc_vregs[32]) ||
+	    __get_user(current->thread.transact_vrsave,
+		       (u32 __user *)&tm_sr->mc_vregs[32]))
+		return 1;
+#endif /* CONFIG_ALTIVEC */
+
+	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+
+	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
+	    copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
+		return 1;
+
+#ifdef CONFIG_VSX
+	regs->msr &= ~MSR_VSX;
+	if (msr & MSR_VSX) {
+		/*
+		 * Restore altivec registers from the stack to a local
+		 * buffer, then write this out to the thread_struct
+		 */
+		if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
+		    copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
+			return 1;
+	} else if (current->thread.used_vsr)
+		for (i = 0; i < 32 ; i++) {
+			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+		}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_SPE
+	/* SPE regs are not checkpointed with TM, so this section is
+	 * simply the same as in restore_user_regs().
+	 */
+	regs->msr &= ~MSR_SPE;
+	if (msr & MSR_SPE) {
+		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
+				     ELF_NEVRREG * sizeof(u32)))
+			return 1;
+	} else if (current->thread.used_spe)
+		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+
+	/* Always get SPEFSCR back */
+	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
+		       + ELF_NEVRREG))
+		return 1;
+#endif /* CONFIG_SPE */
+
+	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
+	 * registers, including FP and V[S]Rs.  After recheckpointing, the
+	 * transactional versions should be loaded.
+	 */
+	tm_enable();
+	/* This loads the checkpointed FP/VEC state, if used */
+	tm_recheckpoint(&current->thread, msr);
+	/* The task has moved into TM state S, so ensure MSR reflects this */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+
+	/* This loads the speculative FP/VEC state, if used */
+	if (msr & MSR_FP) {
+		do_load_up_transact_fpu(&current->thread);
+		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+	}
+	if (msr & MSR_VEC) {
+		do_load_up_transact_altivec(&current->thread);
+		regs->msr |= MSR_VEC;
+	}
+
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PPC64
 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
 		struct sigaction32 __user *oact, size_t sigsetsize)
@@ -827,6 +1189,8 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	struct mcontext __user *frame;
 	void __user *addr;
 	unsigned long newsp = 0;
+	int sigret;
+	unsigned long tramp;
 
 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
@@ -838,7 +1202,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	/* Put the siginfo & fill in most of the ucontext */
 	if (copy_siginfo_to_user(&rt_sf->info, info)
 	    || __put_user(0, &rt_sf->uc.uc_flags)
-	    || __put_user(0, &rt_sf->uc.uc_link)
 	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
 	    || __put_user(sas_ss_flags(regs->gpr[1]),
 			  &rt_sf->uc.uc_stack.ss_flags)
@@ -852,14 +1215,37 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	frame = &rt_sf->uc.uc_mcontext;
 	addr = frame;
 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-		if (save_user_regs(regs, frame, 0, 1))
-			goto badframe;
-		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+		sigret = 0;
+		tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
-		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
+		sigret = __NR_rt_sigreturn;
+		tramp = (unsigned long) frame->tramp;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
+				      &rt_sf->uc_transact.uc_mcontext, sigret))
 			goto badframe;
-		regs->link = (unsigned long) frame->tramp;
 	}
+	else
+#endif
+	if (save_user_regs(regs, frame, sigret, 1))
+		goto badframe;
+	regs->link = tramp;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		if (__put_user((unsigned long)&rt_sf->uc_transact,
+			       &rt_sf->uc.uc_link)
+		    || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
+			       &rt_sf->uc_transact.uc_regs))
+			goto badframe;
+	}
+	else
+#endif
+	if (__put_user(0, &rt_sf->uc.uc_link))
+		goto badframe;
 
 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
@@ -878,6 +1264,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	regs->nip = (unsigned long) ka->sa.sa_handler;
 	/* enter the signal handler in big-endian mode */
 	regs->msr &= ~MSR_LE;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+	 * just indicates to userland that we were doing a transaction, but we
+	 * don't want to return in transactional state:
+	 */
+	regs->msr &= ~MSR_TS_MASK;
+#endif
 	return 1;
 
 badframe:
@@ -925,6 +1318,35 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static int do_setcontext_tm(struct ucontext __user *ucp,
+			    struct ucontext __user *tm_ucp,
+			    struct pt_regs *regs)
+{
+	sigset_t set;
+	struct mcontext __user *mcp;
+	struct mcontext __user *tm_mcp;
+	u32 cmcp;
+	u32 tm_cmcp;
+
+	if (get_sigset_t(&set, &ucp->uc_sigmask))
+		return -EFAULT;
+
+	if (__get_user(cmcp, &ucp->uc_regs) ||
+	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
+		return -EFAULT;
+	mcp = (struct mcontext __user *)(u64)cmcp;
+	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
+	/* no need to check access_ok(mcp), since mcp < 4GB */
+
+	set_current_blocked(&set);
+	if (restore_tm_user_regs(regs, mcp, tm_mcp))
+		return -EFAULT;
+
+	return 0;
+}
+#endif
+
 long sys_swapcontext(struct ucontext __user *old_ctx,
 		       struct ucontext __user *new_ctx,
 		       int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
@@ -1020,7 +1442,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 		     struct pt_regs *regs)
 {
 	struct rt_sigframe __user *rt_sf;
-
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	struct ucontext __user *uc_transact;
+	unsigned long msr_hi;
+	unsigned long tmp;
+	int tm_restore = 0;
+#endif
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
@@ -1028,6 +1455,34 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
 		goto bad;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (__get_user(tmp, &rt_sf->uc.uc_link))
+		goto bad;
+	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
+	if (uc_transact) {
+		u32 cmcp;
+		struct mcontext __user *mcp;
+
+		if (__get_user(cmcp, &uc_transact->uc_regs))
+			return -EFAULT;
+		mcp = (struct mcontext __user *)(u64)cmcp;
+		/* The top 32 bits of the MSR are stashed in the transactional
+		 * ucontext. */
+		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
+			goto bad;
+
+		if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+			/* We only recheckpoint on return if we're
+			 * transaction.
+			 */
+			tm_restore = 1;
+			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
+				goto bad;
+		}
+	}
+	if (!tm_restore)
+		/* Fall through, for non-TM restore */
+#endif
 	if (do_setcontext(&rt_sf->uc, regs, 1))
 		goto bad;
 
@@ -1179,6 +1634,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 	struct sigcontext __user *sc;
 	struct sigframe __user *frame;
 	unsigned long newsp = 0;
+	int sigret;
+	unsigned long tramp;
 
 	/* Set up Signal Frame */
 	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
@@ -1201,14 +1658,25 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		goto badframe;
 
 	if (vdso32_sigtramp && current->mm->context.vdso_base) {
-		if (save_user_regs(regs, &frame->mctx, 0, 1))
-			goto badframe;
-		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
+		sigret = 0;
+		tramp = current->mm->context.vdso_base + vdso32_sigtramp;
 	} else {
-		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
+		sigret = __NR_sigreturn;
+		tramp = (unsigned long) frame->mctx.tramp;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+				      sigret))
 			goto badframe;
-		regs->link = (unsigned long) frame->mctx.tramp;
 	}
+	else
+#endif
+	if (save_user_regs(regs, &frame->mctx, sigret, 1))
+		goto badframe;
+
+	regs->link = tramp;
 
 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
@@ -1223,7 +1691,13 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 	regs->nip = (unsigned long) ka->sa.sa_handler;
 	/* enter the signal handler in big-endian mode */
 	regs->msr &= ~MSR_LE;
-
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+	 * just indicates to userland that we were doing a transaction, but we
+	 * don't want to return in transactional state:
+	 */
+	regs->msr &= ~MSR_TS_MASK;
+#endif
 	return 1;
 
 badframe:
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 1ca045d44324..7a76ee48a952 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -34,6 +34,7 @@
 #include <asm/syscalls.h>
 #include <asm/vdso.h>
 #include <asm/switch_to.h>
+#include <asm/tm.h>
 
 #include "signal.h"
 
@@ -56,6 +57,9 @@
 struct rt_sigframe {
 	/* sys_rt_sigreturn requires the ucontext be the first field */
 	struct ucontext uc;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	struct ucontext uc_transact;
+#endif
 	unsigned long _unused[2];
 	unsigned int tramp[TRAMP_SIZE];
 	struct siginfo __user *pinfo;
@@ -145,6 +149,145 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	return err;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * As above, but Transactional Memory is in use, so deliver sigcontexts
+ * containing checkpointed and transactional register states.
+ *
+ * To do this, we treclaim to gather both sets of registers and set up the
+ * 'normal' sigcontext registers with rolled-back register values such that a
+ * simple signal handler sees a correct checkpointed register state.
+ * If interested, a TM-aware sighandler can examine the transactional registers
+ * in the 2nd sigcontext to determine the real origin of the signal.
+ */
+static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+				 struct sigcontext __user *tm_sc,
+				 struct pt_regs *regs,
+				 int signr, sigset_t *set, unsigned long handler)
+{
+	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
+	 * the context). This is very important because we must ensure we
+	 * don't lose the VRSAVE content that may have been set prior to
+	 * the process doing its first vector operation
+	 * Userland shall check AT_HWCAP to know wether it can rely on the
+	 * v_regs pointer or not.
+	 */
+#ifdef CONFIG_ALTIVEC
+	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
+		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
+		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
+#endif
+	unsigned long msr = regs->msr;
+	long err = 0;
+
+	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+
+	/* tm_reclaim rolls back all reg states, saving checkpointed (older)
+	 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
+	 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
+	 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
+	 * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
+	 * stack, in *regs.
+	 */
+	tm_enable();
+	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
+
+	flush_fp_to_thread(current);
+
+#ifdef CONFIG_ALTIVEC
+	err |= __put_user(v_regs, &sc->v_regs);
+	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+
+	/* save altivec registers */
+	if (current->thread.used_vr) {
+		flush_altivec_to_thread(current);
+		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
+		err |= __copy_to_user(v_regs, current->thread.vr,
+				      33 * sizeof(vector128));
+		/* If VEC was enabled there are transactional VRs valid too,
+		 * else they're a copy of the checkpointed VRs.
+		 */
+		if (msr & MSR_VEC)
+			err |= __copy_to_user(tm_v_regs,
+					      current->thread.transact_vr,
+					      33 * sizeof(vector128));
+		else
+			err |= __copy_to_user(tm_v_regs,
+					      current->thread.vr,
+					      33 * sizeof(vector128));
+
+		/* set MSR_VEC in the MSR value in the frame to indicate
+		 * that sc->v_reg contains valid data.
+		 */
+		msr |= MSR_VEC;
+	}
+	/* We always copy to/from vrsave, it's 0 if we don't have or don't
+	 * use altivec.
+	 */
+	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+	if (msr & MSR_VEC)
+		err |= __put_user(current->thread.transact_vrsave,
+				  (u32 __user *)&tm_v_regs[33]);
+	else
+		err |= __put_user(current->thread.vrsave,
+				  (u32 __user *)&tm_v_regs[33]);
+
+#else /* CONFIG_ALTIVEC */
+	err |= __put_user(0, &sc->v_regs);
+	err |= __put_user(0, &tm_sc->v_regs);
+#endif /* CONFIG_ALTIVEC */
+
+	/* copy fpr regs and fpscr */
+	err |= copy_fpr_to_user(&sc->fp_regs, current);
+	if (msr & MSR_FP)
+		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
+	else
+		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);
+
+#ifdef CONFIG_VSX
+	/*
+	 * Copy VSX low doubleword to local buffer for formatting,
+	 * then out to userspace.  Update v_regs to point after the
+	 * VMX data.
+	 */
+	if (current->thread.used_vsr) {
+		__giveup_vsx(current);
+		v_regs += ELF_NVRREG;
+		tm_v_regs += ELF_NVRREG;
+
+		err |= copy_vsx_to_user(v_regs, current);
+
+		if (msr & MSR_VSX)
+			err |= copy_transact_vsx_to_user(tm_v_regs, current);
+		else
+			err |= copy_vsx_to_user(tm_v_regs, current);
+
+		/* set MSR_VSX in the MSR value in the frame to
+		 * indicate that sc->vs_reg) contains valid data.
+		 */
+		msr |= MSR_VSX;
+	}
+#endif /* CONFIG_VSX */
+
+	err |= __put_user(&sc->gp_regs, &sc->regs);
+	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
+	WARN_ON(!FULL_REGS(regs));
+	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
+	err |= __copy_to_user(&sc->gp_regs,
+			      &current->thread.ckpt_regs, GP_REGS_SIZE);
+	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
+	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
+	err |= __put_user(signr, &sc->signal);
+	err |= __put_user(handler, &sc->handler);
+	if (set != NULL)
+		err |=  __put_user(set->sig[0], &sc->oldmask);
+
+	return err;
+}
+#endif
+
 /*
  * Restore the sigcontext from the signal frame.
  */
@@ -241,6 +384,153 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	return err;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Restore the two sigcontexts from the frame of a transactional processes.
+ */
+
+static long restore_tm_sigcontexts(struct pt_regs *regs,
+				   struct sigcontext __user *sc,
+				   struct sigcontext __user *tm_sc)
+{
+#ifdef CONFIG_ALTIVEC
+	elf_vrreg_t __user *v_regs, *tm_v_regs;
+#endif
+	unsigned long err = 0;
+	unsigned long msr;
+#ifdef CONFIG_VSX
+	int i;
+#endif
+	/* copy the GPRs */
+	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
+	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
+				sizeof(regs->gpr));
+
+	/*
+	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
+	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
+	 * Users doing anything abhorrent like thread-switching w/ signals for
+	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
+	 * For the case of getting a signal and simply returning from it,
+	 * we don't need to re-copy them here.
+	 */
+	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
+	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
+
+	/* get MSR separately, transfer the LE bit if doing signal return */
+	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
+	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
+	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
+	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
+	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
+	err |= __get_user(current->thread.ckpt_regs.ctr,
+			  &sc->gp_regs[PT_CTR]);
+	err |= __get_user(current->thread.ckpt_regs.link,
+			  &sc->gp_regs[PT_LNK]);
+	err |= __get_user(current->thread.ckpt_regs.xer,
+			  &sc->gp_regs[PT_XER]);
+	err |= __get_user(current->thread.ckpt_regs.ccr,
+			  &sc->gp_regs[PT_CCR]);
+
+	/* These regs are not checkpointed; they can go in 'regs'. */
+	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
+	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
+	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
+
+	/*
+	 * Do this before updating the thread state in
+	 * current->thread.fpr/vr.  That way, if we get preempted
+	 * and another task grabs the FPU/Altivec, it won't be
+	 * tempted to save the current CPU state into the thread_struct
+	 * and corrupt what we are writing there.
+	 */
+	discard_lazy_cpu_state();
+
+	/*
+	 * Force reload of FP/VEC.
+	 * This has to be done before copying stuff into current->thread.fpr/vr
+	 * for the reasons explained in the previous comment.
+	 */
+	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+
+#ifdef CONFIG_ALTIVEC
+	err |= __get_user(v_regs, &sc->v_regs);
+	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
+	if (err)
+		return err;
+	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+		return -EFAULT;
+	if (tm_v_regs && !access_ok(VERIFY_READ,
+				    tm_v_regs, 34 * sizeof(vector128)))
+		return -EFAULT;
+	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
+	if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) {
+		err |= __copy_from_user(current->thread.vr, v_regs,
+					33 * sizeof(vector128));
+		err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+					33 * sizeof(vector128));
+	}
+	else if (current->thread.used_vr) {
+		memset(current->thread.vr, 0, 33 * sizeof(vector128));
+		memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+	}
+	/* Always get VRSAVE back */
+	if (v_regs != 0 && tm_v_regs != 0) {
+		err |= __get_user(current->thread.vrsave,
+				  (u32 __user *)&v_regs[33]);
+		err |= __get_user(current->thread.transact_vrsave,
+				  (u32 __user *)&tm_v_regs[33]);
+	}
+	else {
+		current->thread.vrsave = 0;
+		current->thread.transact_vrsave = 0;
+	}
+#endif /* CONFIG_ALTIVEC */
+	/* restore floating point */
+	err |= copy_fpr_from_user(current, &sc->fp_regs);
+	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
+#ifdef CONFIG_VSX
+	/*
+	 * Get additional VSX data. Update v_regs to point after the
+	 * VMX data.  Copy VSX low doubleword from userspace to local
+	 * buffer for formatting, then into the taskstruct.
+	 */
+	if (v_regs && ((msr & MSR_VSX) != 0)) {
+		v_regs += ELF_NVRREG;
+		tm_v_regs += ELF_NVRREG;
+		err |= copy_vsx_from_user(current, v_regs);
+		err |= copy_transact_vsx_from_user(current, tm_v_regs);
+	} else {
+		for (i = 0; i < 32 ; i++) {
+			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+		}
+	}
+#endif
+	tm_enable();
+	/* This loads the checkpointed FP/VEC state, if used */
+	tm_recheckpoint(&current->thread, msr);
+	/* The task has moved into TM state S, so ensure MSR reflects this: */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
+
+	/* This loads the speculative FP/VEC state, if used */
+	if (msr & MSR_FP) {
+		do_load_up_transact_fpu(&current->thread);
+		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+	}
+	if (msr & MSR_VEC) {
+		do_load_up_transact_altivec(&current->thread);
+		regs->msr |= MSR_VEC;
+	}
+
+	return err;
+}
+#endif
+
 /*
  * Setup the trampoline code on the stack
  */
@@ -355,6 +645,9 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 {
 	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
 	sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	unsigned long msr;
+#endif
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -365,6 +658,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
 		goto badframe;
 	set_current_blocked(&set);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
+		goto badframe;
+	if (MSR_TM_SUSPENDED(msr)) {
+		/* We recheckpoint on return. */
+		struct ucontext __user *uc_transact;
+		if (__get_user(uc_transact, &uc->uc_link))
+			goto badframe;
+		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
+					   &uc_transact->uc_mcontext))
+			goto badframe;
+	}
+	else
+	/* Fall through, for non-TM restore */
+#endif
 	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
 		goto badframe;
 
@@ -415,19 +723,42 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Create the ucontext.  */
 	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
 	err |= __put_user(sas_ss_flags(regs->gpr[1]),
 			  &frame->uc.uc_stack.ss_flags);
 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL,
-				(unsigned long)ka->sa.sa_handler, 1);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		/* The ucontext_t passed to userland points to the second
+		 * ucontext_t (for transactional state) with its uc_link ptr.
+		 */
+		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
+		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+					    &frame->uc_transact.uc_mcontext,
+					    regs, signr,
+					    NULL,
+					    (unsigned long)ka->sa.sa_handler);
+	} else
+#endif
+	{
+		err |= __put_user(0, &frame->uc.uc_link);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr,
+					NULL, (unsigned long)ka->sa.sa_handler,
+					1);
+	}
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
 		goto badframe;
 
 	/* Make sure signal handler doesn't get spurious FP exceptions */
 	current->thread.fpscr.val = 0;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+	 * just indicates to userland that we were doing a transaction, but we
+	 * don't want to return in transactional state:
+	 */
+	regs->msr &= ~MSR_TS_MASK;
+#endif
 
 	/* Set up to return from userspace. */
 	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {