-rw-r--r--	arch/powerpc/include/asm/signal.h	  2
-rw-r--r--	arch/powerpc/kernel/signal.c	 41
-rw-r--r--	arch/powerpc/kernel/signal.h	 10
-rw-r--r--	arch/powerpc/kernel/signal_32.c	 31
-rw-r--r--	arch/powerpc/kernel/signal_64.c	201
5 files changed, 159 insertions, 126 deletions
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 9322c28aebd2..5ff77722a52d 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -5,6 +5,4 @@
 #include <uapi/asm/signal.h>
 #include <uapi/asm/ptrace.h>
 
-extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
-
 #endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cb64d6feb45a..bbe77aed198d 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -99,22 +99,24 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 	}
 }
 
-static void do_signal(struct pt_regs *regs)
+static void do_signal(struct task_struct *tsk)
 {
 	sigset_t *oldset = sigmask_to_save();
 	struct ksignal ksig;
 	int ret;
 	int is32 = is_32bit_task();
 
+	BUG_ON(tsk != current);
+
 	get_signal(&ksig);
 
 	/* Is there any syscall restart business here ? */
-	check_syscall_restart(regs, &ksig.ka, ksig.sig > 0);
+	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
 
 	if (ksig.sig <= 0) {
 		/* No signal to deliver -- put the saved sigmask back */
 		restore_saved_sigmask();
-		regs->trap = 0;
+		tsk->thread.regs->trap = 0;
 		return;	/* no signals delivered */
 	}
 
@@ -124,23 +126,22 @@ static void do_signal(struct pt_regs *regs)
 	 * user space. The DABR will have been cleared if it
 	 * triggered inside the kernel.
 	 */
-	if (current->thread.hw_brk.address &&
-	    current->thread.hw_brk.type)
-		__set_breakpoint(&current->thread.hw_brk);
+	if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
+		__set_breakpoint(&tsk->thread.hw_brk);
 #endif
 	/* Re-enable the breakpoints for the signal stack */
-	thread_change_pc(current, regs);
+	thread_change_pc(tsk, tsk->thread.regs);
 
 	if (is32) {
 		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
-			ret = handle_rt_signal32(&ksig, oldset, regs);
+			ret = handle_rt_signal32(&ksig, oldset, tsk);
 		else
-			ret = handle_signal32(&ksig, oldset, regs);
+			ret = handle_signal32(&ksig, oldset, tsk);
 	} else {
-		ret = handle_rt_signal64(&ksig, oldset, regs);
+		ret = handle_rt_signal64(&ksig, oldset, tsk);
 	}
 
-	regs->trap = 0;
+	tsk->thread.regs->trap = 0;
 	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
@@ -151,8 +152,10 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
-	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs);
+	if (thread_info_flags & _TIF_SIGPENDING) {
+		BUG_ON(regs != current->thread.regs);
+		do_signal(current);
+	}
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
@@ -162,7 +165,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	user_enter();
 }
 
-unsigned long get_tm_stackpointer(struct pt_regs *regs)
+unsigned long get_tm_stackpointer(struct task_struct *tsk)
 {
 	/* When in an active transaction that takes a signal, we need to be
 	 * careful with the stack. It's possible that the stack has moved back
@@ -187,11 +190,13 @@ unsigned long get_tm_stackpointer(struct pt_regs *regs)
 	 */
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	if (MSR_TM_ACTIVE(regs->msr)) {
+	BUG_ON(tsk != current);
+
+	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
 		tm_reclaim_current(TM_CAUSE_SIGNAL);
-		if (MSR_TM_TRANSACTIONAL(regs->msr))
-			return current->thread.ckpt_regs.gpr[1];
+		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+			return tsk->thread.ckpt_regs.gpr[1];
 	}
 #endif
-	return regs->gpr[1];
+	return tsk->thread.regs->gpr[1];
 }
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index be305c858e51..254ca074504f 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -16,10 +16,10 @@ extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
 				  size_t frame_size, int is_32);
 
 extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
-			   struct pt_regs *regs);
+			   struct task_struct *tsk);
 
 extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
-			      struct pt_regs *regs);
+			      struct task_struct *tsk);
 
 extern unsigned long copy_fpr_to_user(void __user *to,
 				      struct task_struct *task);
@@ -29,6 +29,8 @@ extern unsigned long copy_fpr_from_user(struct task_struct *task,
 					 void __user *from);
 extern unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 						 void __user *from);
+extern unsigned long get_tm_stackpointer(struct task_struct *tsk);
+
 #ifdef CONFIG_VSX
 extern unsigned long copy_vsx_to_user(void __user *to,
 				      struct task_struct *task);
@@ -43,12 +45,12 @@ extern unsigned long copy_transact_vsx_from_user(struct task_struct *task,
 #ifdef CONFIG_PPC64
 
 extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
-			      struct pt_regs *regs);
+			      struct task_struct *tsk);
 
 #else /* CONFIG_PPC64 */
 
 static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
-				     struct pt_regs *regs)
+				     struct task_struct *tsk)
 {
 	return -EFAULT;
 }
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d2745375f27e..9637f8eb5204 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -978,7 +978,7 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
  * (one which gets siginfo).
  */
 int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
-		       struct pt_regs *regs)
+		       struct task_struct *tsk)
 {
 	struct rt_sigframe __user *rt_sf;
 	struct mcontext __user *frame;
@@ -987,10 +987,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	unsigned long newsp = 0;
 	int sigret;
 	unsigned long tramp;
+	struct pt_regs *regs = tsk->thread.regs;
+
+	BUG_ON(tsk != current);
 
 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
-	rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
+	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
 	addr = rt_sf;
 	if (unlikely(rt_sf == NULL))
 		goto badframe;
@@ -1007,9 +1010,9 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	/* Save user registers on the stack */
 	frame = &rt_sf->uc.uc_mcontext;
 	addr = frame;
-	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
+	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
 		sigret = 0;
-		tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
 		sigret = __NR_rt_sigreturn;
 		tramp = (unsigned long) frame->tramp;
@@ -1036,7 +1039,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	}
 	regs->link = tramp;
 
-	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
+	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
 
 	/* create a stack frame for the caller of the handler */
 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1061,7 +1064,7 @@ badframe:
 		printk_ratelimited(KERN_INFO
 				   "%s[%d]: bad frame in handle_rt_signal32: "
 				   "%p nip %08lx lr %08lx\n",
-				   current->comm, current->pid,
+				   tsk->comm, tsk->pid,
 				   addr, regs->nip, regs->link);
 
 	return 1;
@@ -1417,7 +1420,8 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 /*
  * OK, we're invoking a handler
  */
-int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs)
+int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+		    struct task_struct *tsk)
 {
 	struct sigcontext __user *sc;
 	struct sigframe __user *frame;
@@ -1425,9 +1429,12 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
 	unsigned long newsp = 0;
 	int sigret;
 	unsigned long tramp;
+	struct pt_regs *regs = tsk->thread.regs;
+
+	BUG_ON(tsk != current);
 
 	/* Set up Signal Frame */
-	frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1);
+	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
 	if (unlikely(frame == NULL))
 		goto badframe;
 	sc = (struct sigcontext __user *) &frame->sctx;
@@ -1446,9 +1453,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
 	    || __put_user(ksig->sig, &sc->signal))
 		goto badframe;
 
-	if (vdso32_sigtramp && current->mm->context.vdso_base) {
+	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
 		sigret = 0;
-		tramp = current->mm->context.vdso_base + vdso32_sigtramp;
+		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
 	} else {
 		sigret = __NR_sigreturn;
 		tramp = (unsigned long) frame->mctx.tramp;
@@ -1470,7 +1477,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
 
 	regs->link = tramp;
 
-	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
+	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
 
 	/* create a stack frame for the caller of the handler */
 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
@@ -1490,7 +1497,7 @@ badframe:
 		printk_ratelimited(KERN_INFO
 				   "%s[%d]: bad frame in handle_signal32: "
 				   "%p nip %08lx lr %08lx\n",
-				   current->comm, current->pid,
+				   tsk->comm, tsk->pid,
 				   frame, regs->nip, regs->link);
 
 	return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 6faa8240b7c9..befa10af0c17 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -91,9 +91,9 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
  * Set up the sigcontext for the signal frame.
  */
 
-static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-		 int signr, sigset_t *set, unsigned long handler,
-		 int ctx_has_vsx_region)
+static long setup_sigcontext(struct sigcontext __user *sc,
+		 struct task_struct *tsk, int signr, sigset_t *set,
+		 unsigned long handler, int ctx_has_vsx_region)
 {
 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
 	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -107,17 +107,20 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
 	unsigned long vrsave;
 #endif
+	struct pt_regs *regs = tsk->thread.regs;
 	unsigned long msr = regs->msr;
 	long err = 0;
 
+	BUG_ON(tsk != current);
+
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
 
 	/* save altivec registers */
-	if (current->thread.used_vr) {
-		flush_altivec_to_thread(current);
+	if (tsk->thread.used_vr) {
+		flush_altivec_to_thread(tsk);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, &current->thread.vr_state,
+		err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
 				      33 * sizeof(vector128));
 		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
 		 * contains valid data.
@@ -130,16 +133,16 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	vrsave = 0;
 	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
 		vrsave = mfspr(SPRN_VRSAVE);
-		current->thread.vrsave = vrsave;
+		tsk->thread.vrsave = vrsave;
 	}
 
 	err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
 #else /* CONFIG_ALTIVEC */
 	err |= __put_user(0, &sc->v_regs);
 #endif /* CONFIG_ALTIVEC */
-	flush_fp_to_thread(current);
+	flush_fp_to_thread(tsk);
 	/* copy fpr regs and fpscr */
-	err |= copy_fpr_to_user(&sc->fp_regs, current);
+	err |= copy_fpr_to_user(&sc->fp_regs, tsk);
 
 	/*
 	 * Clear the MSR VSX bit to indicate there is no valid state attached
@@ -152,10 +155,10 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * then out to userspace. Update v_regs to point after the
 	 * VMX data.
 	 */
-	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		flush_vsx_to_thread(current);
+	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
+		flush_vsx_to_thread(tsk);
 		v_regs += ELF_NVRREG;
-		err |= copy_vsx_to_user(v_regs, current);
+		err |= copy_vsx_to_user(v_regs, tsk);
 		/* set MSR_VSX in the MSR value in the frame to
 		 * indicate that sc->vs_reg) contains valid data.
 		 */
@@ -188,7 +191,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 				 struct sigcontext __user *tm_sc,
-				 struct pt_regs *regs,
+				 struct task_struct *tsk,
 				 int signr, sigset_t *set, unsigned long handler)
 {
 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
@@ -203,9 +206,12 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
 	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
 #endif
-	unsigned long msr = regs->msr;
+	struct pt_regs *regs = tsk->thread.regs;
+	unsigned long msr = tsk->thread.ckpt_regs.msr;
 	long err = 0;
 
+	BUG_ON(tsk != current);
+
 	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
 
 	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
@@ -215,28 +221,28 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 */
 	regs->msr &= ~MSR_TS_MASK;
 
-	flush_fp_to_thread(current);
+	flush_fp_to_thread(tsk);
 
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
 	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
 
 	/* save altivec registers */
-	if (current->thread.used_vr) {
-		flush_altivec_to_thread(current);
+	if (tsk->thread.used_vr) {
+		flush_altivec_to_thread(tsk);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, &current->thread.vr_state,
+		err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
 				      33 * sizeof(vector128));
 		/* If VEC was enabled there are transactional VRs valid too,
 		 * else they're a copy of the checkpointed VRs.
 		 */
 		if (msr & MSR_VEC)
 			err |= __copy_to_user(tm_v_regs,
-					      &current->thread.transact_vr,
+					      &tsk->thread.transact_vr,
 					      33 * sizeof(vector128));
 		else
 			err |= __copy_to_user(tm_v_regs,
-					      &current->thread.vr_state,
+					      &tsk->thread.vr_state,
 					      33 * sizeof(vector128));
 
 		/* set MSR_VEC in the MSR value in the frame to indicate
@@ -248,13 +254,13 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 * use altivec.
 	 */
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		current->thread.vrsave = mfspr(SPRN_VRSAVE);
-	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+		tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
+	err |= __put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
 	if (msr & MSR_VEC)
-		err |= __put_user(current->thread.transact_vrsave,
+		err |= __put_user(tsk->thread.transact_vrsave,
 				  (u32 __user *)&tm_v_regs[33]);
 	else
-		err |= __put_user(current->thread.vrsave,
+		err |= __put_user(tsk->thread.vrsave,
 				  (u32 __user *)&tm_v_regs[33]);
 
 #else /* CONFIG_ALTIVEC */
@@ -263,11 +269,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 #endif /* CONFIG_ALTIVEC */
 
 	/* copy fpr regs and fpscr */
-	err |= copy_fpr_to_user(&sc->fp_regs, current);
+	err |= copy_fpr_to_user(&sc->fp_regs, tsk);
 	if (msr & MSR_FP)
-		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
+		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, tsk);
 	else
-		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);
+		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
 
 #ifdef CONFIG_VSX
 	/*
@@ -275,17 +281,17 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 * then out to userspace. Update v_regs to point after the
 	 * VMX data.
 	 */
-	if (current->thread.used_vsr) {
-		flush_vsx_to_thread(current);
+	if (tsk->thread.used_vsr) {
+		flush_vsx_to_thread(tsk);
 		v_regs += ELF_NVRREG;
 		tm_v_regs += ELF_NVRREG;
 
-		err |= copy_vsx_to_user(v_regs, current);
+		err |= copy_vsx_to_user(v_regs, tsk);
 
 		if (msr & MSR_VSX)
-			err |= copy_transact_vsx_to_user(tm_v_regs, current);
+			err |= copy_transact_vsx_to_user(tm_v_regs, tsk);
 		else
-			err |= copy_vsx_to_user(tm_v_regs, current);
+			err |= copy_vsx_to_user(tm_v_regs, tsk);
 
 		/* set MSR_VSX in the MSR value in the frame to
 		 * indicate that sc->vs_reg) contains valid data.
@@ -299,7 +305,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	WARN_ON(!FULL_REGS(regs));
 	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
 	err |= __copy_to_user(&sc->gp_regs,
-			      &current->thread.ckpt_regs, GP_REGS_SIZE);
+			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
 	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
 	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
 	err |= __put_user(signr, &sc->signal);
@@ -315,7 +321,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
  * Restore the sigcontext from the signal frame.
  */
 
-static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
+static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
 			       struct sigcontext __user *sc)
 {
 #ifdef CONFIG_ALTIVEC
@@ -324,10 +330,13 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	unsigned long err = 0;
 	unsigned long save_r13 = 0;
 	unsigned long msr;
+	struct pt_regs *regs = tsk->thread.regs;
 #ifdef CONFIG_VSX
 	int i;
 #endif
 
+	BUG_ON(tsk != current);
+
 	/* If this is not a signal return, we preserve the TLS in r13 */
 	if (!sig)
 		save_r13 = regs->gpr[13];
@@ -357,7 +366,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 
 	/*
 	 * Force reload of FP/VEC.
-	 * This has to be done before copying stuff into current->thread.fpr/vr
+	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
@@ -370,22 +379,22 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
-		err |= __copy_from_user(&current->thread.vr_state, v_regs,
+		err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
-		current->thread.used_vr = true;
+		tsk->thread.used_vr = true;
+	} else if (tsk->thread.used_vr) {
+		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
 	}
-	else if (current->thread.used_vr)
-		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
 	/* Always get VRSAVE back */
 	if (v_regs != NULL)
-		err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+		err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
 	else
-		current->thread.vrsave = 0;
+		tsk->thread.vrsave = 0;
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		mtspr(SPRN_VRSAVE, current->thread.vrsave);
+		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
 	/* restore floating point */
-	err |= copy_fpr_from_user(current, &sc->fp_regs);
+	err |= copy_fpr_from_user(tsk, &sc->fp_regs);
 #ifdef CONFIG_VSX
 	/*
 	 * Get additional VSX data. Update v_regs to point after the
@@ -394,11 +403,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	 */
 	v_regs += ELF_NVRREG;
 	if ((msr & MSR_VSX) != 0) {
-		err |= copy_vsx_from_user(current, v_regs);
-		current->thread.used_vsr = true;
-	} else
+		err |= copy_vsx_from_user(tsk, v_regs);
+		tsk->thread.used_vsr = true;
+	} else {
 		for (i = 0; i < 32 ; i++)
-			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+	}
 #endif
 	return err;
 }
@@ -408,7 +418,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
  * Restore the two sigcontexts from the frame of a transactional processes.
  */
 
-static long restore_tm_sigcontexts(struct pt_regs *regs,
+static long restore_tm_sigcontexts(struct task_struct *tsk,
 				   struct sigcontext __user *sc,
 				   struct sigcontext __user *tm_sc)
 {
@@ -417,12 +427,16 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 #endif
 	unsigned long err = 0;
 	unsigned long msr;
+	struct pt_regs *regs = tsk->thread.regs;
 #ifdef CONFIG_VSX
 	int i;
 #endif
+
+	BUG_ON(tsk != current);
+
 	/* copy the GPRs */
 	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
-	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
+	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
 				sizeof(regs->gpr));
 
 	/*
@@ -434,7 +448,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 	 * we don't need to re-copy them here.
 	 */
 	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
-	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
+	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
 
 	/* get MSR separately, transfer the LE bit if doing signal return */
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -453,13 +467,13 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
 	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
 	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
-	err |= __get_user(current->thread.ckpt_regs.ctr,
+	err |= __get_user(tsk->thread.ckpt_regs.ctr,
 			  &sc->gp_regs[PT_CTR]);
-	err |= __get_user(current->thread.ckpt_regs.link,
+	err |= __get_user(tsk->thread.ckpt_regs.link,
 			  &sc->gp_regs[PT_LNK]);
-	err |= __get_user(current->thread.ckpt_regs.xer,
+	err |= __get_user(tsk->thread.ckpt_regs.xer,
 			  &sc->gp_regs[PT_XER]);
-	err |= __get_user(current->thread.ckpt_regs.ccr,
+	err |= __get_user(tsk->thread.ckpt_regs.ccr,
 			  &sc->gp_regs[PT_CCR]);
 
 	/* These regs are not checkpointed; they can go in 'regs'. */
@@ -470,7 +484,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 
 	/*
 	 * Force reload of FP/VEC.
-	 * This has to be done before copying stuff into current->thread.fpr/vr
+	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
@@ -487,33 +501,33 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
-		err |= __copy_from_user(&current->thread.vr_state, v_regs,
+		err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
-		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
+		err |= __copy_from_user(&tsk->thread.transact_vr, tm_v_regs,
 					33 * sizeof(vector128));
 		current->thread.used_vr = true;
 	}
-	else if (current->thread.used_vr) {
-		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
-		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
+	else if (tsk->thread.used_vr) {
+		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
+		memset(&tsk->thread.transact_vr, 0, 33 * sizeof(vector128));
 	}
 	/* Always get VRSAVE back */
 	if (v_regs != NULL && tm_v_regs != NULL) {
-		err |= __get_user(current->thread.vrsave,
+		err |= __get_user(tsk->thread.vrsave,
 				  (u32 __user *)&v_regs[33]);
-		err |= __get_user(current->thread.transact_vrsave,
+		err |= __get_user(tsk->thread.transact_vrsave,
 				  (u32 __user *)&tm_v_regs[33]);
 	}
 	else {
-		current->thread.vrsave = 0;
-		current->thread.transact_vrsave = 0;
+		tsk->thread.vrsave = 0;
+		tsk->thread.transact_vrsave = 0;
 	}
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		mtspr(SPRN_VRSAVE, current->thread.vrsave);
+		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
 	/* restore floating point */
-	err |= copy_fpr_from_user(current, &sc->fp_regs);
-	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
+	err |= copy_fpr_from_user(tsk, &sc->fp_regs);
+	err |= copy_transact_fpr_from_user(tsk, &tm_sc->fp_regs);
 #ifdef CONFIG_VSX
 	/*
 	 * Get additional VSX data. Update v_regs to point after the
@@ -523,30 +537,30 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 	if (v_regs && ((msr & MSR_VSX) != 0)) {
 		v_regs += ELF_NVRREG;
 		tm_v_regs += ELF_NVRREG;
-		err |= copy_vsx_from_user(current, v_regs);
-		err |= copy_transact_vsx_from_user(current, tm_v_regs);
-		current->thread.used_vsr = true;
+		err |= copy_vsx_from_user(tsk, v_regs);
+		err |= copy_transact_vsx_from_user(tsk, tm_v_regs);
+		tsk->thread.used_vsr = true;
 	} else {
 		for (i = 0; i < 32 ; i++) {
-			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
-			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
+			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+			tsk->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
 		}
 	}
 #endif
 	tm_enable();
 	/* Make sure the transaction is marked as failed */
-	current->thread.tm_texasr |= TEXASR_FS;
+	tsk->thread.tm_texasr |= TEXASR_FS;
 	/* This loads the checkpointed FP/VEC state, if used */
-	tm_recheckpoint(&current->thread, msr);
+	tm_recheckpoint(&tsk->thread, msr);
 
 	/* This loads the speculative FP/VEC state, if used */
 	if (msr & MSR_FP) {
-		do_load_up_transact_fpu(&current->thread);
-		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+		do_load_up_transact_fpu(&tsk->thread);
+		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
 	}
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
-		do_load_up_transact_altivec(&current->thread);
+		do_load_up_transact_altivec(&tsk->thread);
 		regs->msr |= MSR_VEC;
 	}
 #endif
@@ -600,6 +614,8 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 	unsigned long new_msr = 0;
 	int ctx_has_vsx_region = 0;
 
+	BUG_ON(regs != current->thread.regs);
+
 	if (new_ctx &&
 	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
 		return -EFAULT;
@@ -622,7 +638,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 
 	if (old_ctx != NULL) {
 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
-		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
+		    || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
 					ctx_has_vsx_region)
 		    || __copy_to_user(&old_ctx->uc_sigmask,
 				      &current->blocked, sizeof(sigset_t)))
@@ -650,7 +666,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
 		do_exit(SIGSEGV);
 	set_current_blocked(&set);
-	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
+	if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
 		do_exit(SIGSEGV);
 
 	/* This returns like rt_sigreturn */
@@ -673,6 +689,8 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	unsigned long msr;
 #endif
 
+	BUG_ON(current->thread.regs != regs);
+
 	/* Always make any pending restarted system calls return -EINTR */
 	current->restart_block.fn = do_no_restart_syscall;
 
@@ -704,14 +722,14 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 		struct ucontext __user *uc_transact;
 		if (__get_user(uc_transact, &uc->uc_link))
 			goto badframe;
-		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
+		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
 					   &uc_transact->uc_mcontext))
 			goto badframe;
 	}
 	else
 	/* Fall through, for non-TM restore */
 #endif
-	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
+	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
 		goto badframe;
 
 	if (restore_altstack(&uc->uc_stack))
@@ -730,13 +748,17 @@ badframe:
 	return 0;
 }
 
-int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+		       struct task_struct *tsk)
 {
 	struct rt_sigframe __user *frame;
 	unsigned long newsp = 0;
 	long err = 0;
+	struct pt_regs *regs = tsk->thread.regs;
+
+	BUG_ON(tsk != current);
 
-	frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
+	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
 	if (unlikely(frame == NULL))
 		goto badframe;
 
@@ -757,14 +779,13 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
 		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
 		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
 					    &frame->uc_transact.uc_mcontext,
-					    regs, ksig->sig,
-					    NULL,
+					    tsk, ksig->sig, NULL,
 					    (unsigned long)ksig->ka.sa.sa_handler);
 	} else
 #endif
 	{
 		err |= __put_user(0, &frame->uc.uc_link);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
 					NULL, (unsigned long)ksig->ka.sa.sa_handler,
 					1);
 	}
@@ -773,11 +794,11 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
 		goto badframe;
 
 	/* Make sure signal handler doesn't get spurious FP exceptions */
-	current->thread.fp_state.fpscr = 0;
+	tsk->thread.fp_state.fpscr = 0;
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
+		regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
@@ -827,7 +848,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
 badframe:
 	if (show_unhandled_signals)
 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-				   current->comm, current->pid, "setup_rt_frame",
+				   tsk->comm, tsk->pid, "setup_rt_frame",
 				   (long)frame, regs->nip, regs->link);
 
 	return 1;