author		Paul Mackerras <paulus@samba.org>	2013-09-10 06:20:42 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-10-11 02:26:49 -0400
commit		de79f7b9f6f92ec1bd6f61fa1f20de60728a5b5e (patch)
tree		452b24060a36bf7c57a3a484c6ff981539259ea2
parent		8e0a1611cb891e72a9affc4a8ee4795c634896a6 (diff)
powerpc: Put FP/VSX and VR state into structures
This creates new 'thread_fp_state' and 'thread_vr_state' structures
to store FP/VSX state (including FPSCR) and Altivec/VSX state
(including VSCR), and uses them in the thread_struct.  In the
thread_fp_state, the FPRs and VSRs are represented as u64 rather
than double, since we rarely perform floating-point computations
on the values, and this will enable the structures to be used in
KVM code as well.  Similarly FPSCR is now a u64 rather than a
structure of two 32-bit values.

This takes the offsets out of the macros such as SAVE_32FPRS,
REST_32FPRS, etc.  This enables the same macros to be used for
normal and transactional state, enabling us to delete the
transactional versions of the macros.  This also removes the
unused do_load_up_fpu and do_load_up_altivec, which were in fact
buggy since they didn't create large enough stack frames to
account for the fact that load_up_fpu and load_up_altivec are
not designed to be called from C and assume that their caller's
stack frame is an interrupt frame.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
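Condensed from the processor.h hunk below, a minimal C sketch of the new layout and of a typical access path through it:

	/* FP and VSX 0-31 register set */
	struct thread_fp_state {
		u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
		u64	fpscr;		/* Floating point status */
	};

	/* Complete AltiVec register set including VSCR */
	struct thread_vr_state {
		vector128	vr[32] __attribute__((aligned(16)));
		vector128	vscr __attribute__((aligned(16)));
	};

	struct thread_struct {
		/* ... */
		struct thread_fp_state	fp_state;	/* was: double fpr[32][TS_FPRWIDTH] plus a pad/val fpscr pair */
		/* ... */
		struct thread_vr_state	vr_state;	/* was: vector128 vr[32] plus vscr, under CONFIG_ALTIVEC */
		/* ... */
		struct thread_fp_state	transact_fp;	/* checkpointed FP state, CONFIG_PPC_TRANSACTIONAL_MEM */
		struct thread_vr_state	transact_vr;	/* checkpointed VR state */
		/* ... */
	};

C code therefore reads the FPSCR as current->thread.fp_state.fpscr rather than current->thread.fpscr.val, and the assembly save/restore macros (SAVE_32FPRS, REST_32VRS, etc.) now take a base pointer to one of these structures, computed with e.g. addi r7,r5,THREAD_FPSTATE, instead of hard-coding a THREAD_FPR0/THREAD_VR0 offset; that is what lets a single set of macros serve both the normal and the transactional (checkpointed) state.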
-rw-r--r--	arch/powerpc/include/asm/ppc_asm.h	|  95
-rw-r--r--	arch/powerpc/include/asm/processor.h	|  40
-rw-r--r--	arch/powerpc/include/asm/sfp-machine.h	|   2
-rw-r--r--	arch/powerpc/kernel/align.c		|   6
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	|  25
-rw-r--r--	arch/powerpc/kernel/fpu.S		|  59
-rw-r--r--	arch/powerpc/kernel/process.c		|   8
-rw-r--r--	arch/powerpc/kernel/ptrace.c		|  49
-rw-r--r--	arch/powerpc/kernel/ptrace32.c		|  11
-rw-r--r--	arch/powerpc/kernel/signal_32.c		|  72
-rw-r--r--	arch/powerpc/kernel/signal_64.c		|  29
-rw-r--r--	arch/powerpc/kernel/tm.S		|  41
-rw-r--r--	arch/powerpc/kernel/traps.c		|  10
-rw-r--r--	arch/powerpc/kernel/vecemu.c		|   6
-rw-r--r--	arch/powerpc/kernel/vector.S		|  50
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c		|  36
-rw-r--r--	arch/powerpc/kvm/booke.c		|  19
17 files changed, 200 insertions(+), 358 deletions(-)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..140f67090f0b 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,123 +98,40 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
-#define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
 #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
 #define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
 #define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
 #define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)	lfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
 #define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
 #define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
 #define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
 #define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
 #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
-#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); stvx n,base,b
+#define SAVE_VR(n,b,base)	li b,16*(n); stvx n,base,b
 #define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
 #define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
 #define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
 #define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
 #define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
 #define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
 #define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
 #define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
 #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base)	stfd n,THREAD_TRANSACT_FPR0+ \
-			8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base)	SAVE_FPR_TRANSACT(n, base); \
-			SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base)	SAVE_2FPRS_TRANSACT(n, base); \
-			SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base)	SAVE_4FPRS_TRANSACT(n, base); \
-			SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base)	SAVE_8FPRS_TRANSACT(n, base); \
-			SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base)	SAVE_16FPRS_TRANSACT(n, base); \
-			SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base)	lfd n,THREAD_TRANSACT_FPR0+ \
-			8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base)	REST_FPR_TRANSACT(n, base); \
-			REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base)	REST_2FPRS_TRANSACT(n, base); \
-			REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base)	REST_4FPRS_TRANSACT(n, base); \
-			REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base)	REST_8FPRS_TRANSACT(n, base); \
-			REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base)	REST_16FPRS_TRANSACT(n, base); \
-			REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \
-			stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base)	SAVE_VR_TRANSACT(n,b,base); \
-			SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base)	SAVE_2VRS_TRANSACT(n,b,base); \
-			SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base)	SAVE_4VRS_TRANSACT(n,b,base); \
-			SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base)	SAVE_8VRS_TRANSACT(n,b,base); \
-			SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base)	SAVE_16VRS_TRANSACT(n,b,base); \
-			SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \
-			lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base)	REST_VR_TRANSACT(n,b,base); \
-			REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base)	REST_2VRS_TRANSACT(n,b,base); \
-			REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base)	REST_4VRS_TRANSACT(n,b,base); \
-			REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base)	REST_8VRS_TRANSACT(n,b,base); \
-			REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base)	REST_16VRS_TRANSACT(n,b,base); \
-			REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-			STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base)	SAVE_VSR_TRANSACT(n,b,base); \
-			SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base)	SAVE_2VSRS_TRANSACT(n,b,base); \
-			SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base)	SAVE_4VSRS_TRANSACT(n,b,base); \
-			SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base)	SAVE_8VSRS_TRANSACT(n,b,base); \
-			SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base)	SAVE_16VSRS_TRANSACT(n,b,base); \
-			SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-			LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base)	REST_VSR_TRANSACT(n,b,base); \
-			REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base)	REST_2VSRS_TRANSACT(n,b,base); \
-			REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base)	REST_4VSRS_TRANSACT(n,b,base); \
-			REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base)	REST_8VSRS_TRANSACT(n,b,base); \
-			REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base)	REST_16VSRS_TRANSACT(n,b,base); \
-			REST_16VSRS_TRANSACT(n+16,b,base)
-
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n));  STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X(n,R##base,R##b)
 #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X(n,R##base,R##b)
 #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ce4de5aed7b5..afe695e9feb8 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -144,8 +144,20 @@ typedef struct {
 
 #define TS_FPROFFSET 0
 #define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+	u64	fpscr;		/* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+	vector128	vr[32] __attribute__((aligned(16)));
+	vector128	vscr __attribute__((aligned(16)));
+};
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
@@ -198,13 +210,7 @@ struct thread_struct {
 	unsigned long	dvc2;
 #endif
 #endif
-	/* FP and VSX 0-31 register set */
-	double		fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
-	struct {
-
-		unsigned int pad;
-		unsigned int val;	/* Floating point status */
-	} fpscr;
+	struct thread_fp_state	fp_state;
 	int		fpexc_mode;	/* floating-point exception mode */
 	unsigned int	align_ctl;	/* alignment handling control */
 #ifdef CONFIG_PPC64
@@ -222,10 +228,7 @@ struct thread_struct {
 	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
 	unsigned long	trap_nr;	/* last trap # on this thread */
 #ifdef CONFIG_ALTIVEC
-	/* Complete AltiVec register set */
-	vector128	vr[32] __attribute__((aligned(16)));
-	/* AltiVec status */
-	vector128	vscr __attribute__((aligned(16)));
+	struct thread_vr_state vr_state;
 	unsigned long	vrsave;
 	int		used_vr;	/* set if process has used altivec */
 #endif /* CONFIG_ALTIVEC */
@@ -262,13 +265,8 @@ struct thread_struct {
 	 * transact_fpr[] is the new set of transactional values.
 	 * VRs work the same way.
 	 */
-	double		transact_fpr[32][TS_FPRWIDTH];
-	struct {
-		unsigned int pad;
-		unsigned int val;	/* Floating point status */
-	} transact_fpscr;
-	vector128	transact_vr[32] __attribute__((aligned(16)));
-	vector128	transact_vscr __attribute__((aligned(16)));
+	struct thread_fp_state transact_fp;
+	struct thread_vr_state transact_vr;
 	unsigned long	transact_vrsave;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -322,8 +320,6 @@ struct thread_struct {
 	.ksp = INIT_SP, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
-	.fpr = {{0}}, \
-	.fpscr = { .val = 0, }, \
 	.fpexc_mode = 0, \
 	.ppr = INIT_PPR, \
 }
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a0d006..d89beaba26ff 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -125,7 +125,7 @@
 #define FP_EX_DIVZERO		(1 << (31 - 5))
 #define FP_EX_INEXACT		(1 << (31 - 6))
 
-#define __FPU_FPSCR	(current->thread.fpscr.val)
+#define __FPU_FPSCR	(current->thread.fp_state.fpscr)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a27ccd5dc6b9..eaa16bc17e9d 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -660,7 +660,7 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 	if (reg < 32)
 		ptr = (char *) &current->thread.TS_FPR(reg);
 	else
-		ptr = (char *) &current->thread.vr[reg - 32];
+		ptr = (char *) &current->thread.vr_state.vr[reg - 32];
 
 	lptr = (unsigned long *) ptr;
 
@@ -897,7 +897,7 @@ int fix_alignment(struct pt_regs *regs)
 			return -EFAULT;
 		}
 	} else if (flags & F) {
-		data.dd = current->thread.TS_FPR(reg);
+		data.ll = current->thread.TS_FPR(reg);
 		if (flags & S) {
 			/* Single-precision FP store requires conversion... */
 #ifdef CONFIG_PPC_FPU
@@ -975,7 +975,7 @@ int fix_alignment(struct pt_regs *regs)
 		if (unlikely(ret))
 			return -EFAULT;
 	} else if (flags & F)
-		current->thread.TS_FPR(reg) = data.dd;
+		current->thread.TS_FPR(reg) = data.ll;
 	else
 		regs->gpr[reg] = data.ll;
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 502c7a4e73f7..8d27b61c95b9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -90,16 +90,15 @@ int main(void)
 	DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
 #endif
 	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
-	DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
-	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+	DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
 #ifdef CONFIG_ALTIVEC
-	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+	DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
 	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
-	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
 	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+	DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
-	DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
 	DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_PPC64
@@ -143,20 +142,12 @@ int main(void)
 	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
 	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
 	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
-	DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
-					 transact_vr[0]));
-	DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
-					  transact_vscr));
+	DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
+						 transact_vr));
 	DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
 					    transact_vrsave));
-	DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
-					  transact_fpr[0]));
-	DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
-					   transact_fpscr));
-#ifdef CONFIG_VSX
-	DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
-					 transact_fpr[0]));
-#endif
+	DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
+						 transact_fp));
 	/* Local pt_regs on stack for Transactional Memory funcs. */
 	DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
 	       sizeof(struct pt_regs) + 16);
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index caeaabf11a2f..34b96e6d2f0d 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
 2:	REST_32VSRS(n,c,base);						\
 3:
 
-#define __REST_32FPVSRS_TRANSACT(n,c,base)				\
-BEGIN_FTR_SECTION							\
-	b	2f;							\
-END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
-	REST_32FPRS_TRANSACT(n,base);					\
-	b	3f;							\
-2:	REST_32VSRS_TRANSACT(n,c,base);					\
-3:
-
 #define __SAVE_32FPVSRS(n,c,base)					\
 BEGIN_FTR_SECTION							\
 	b	2f;							\
@@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
 3:
 #else
 #define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
-#define __REST_32FPVSRS_TRANSACT(n,b,base)	REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
 #endif
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
-#define REST_32FPVSRS_TRANSACT(n,c,base)				\
-	__REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_fpu from C.
- * void do_load_up_fpu(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_fpu)
-	mflr	r0
-	std	r0, 16(r1)
-	stdu	r1, -112(r1)
-
-	subi	r6, r3, STACK_FRAME_OVERHEAD
-	/* load_up_fpu expects r12=MSR, r13=PACA, and returns
-	 * with r12 = new MSR.
-	 */
-	ld	r12,_MSR(r6)
-	GET_PACA(r13)
-
-	bl	load_up_fpu
-	std	r12,_MSR(r6)
-
-	ld	r0, 112+16(r1)
-	addi	r1, r1, 112
-	mtlr	r0
-	blr
-
-
 /* void do_load_up_transact_fpu(struct thread_struct *thread)
  *
  * This is similar to load_up_fpu but for the transactional version of the FP
@@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	SYNC
 	MTMSRD(r5)
 
-	lfd	fr0,THREAD_TRANSACT_FPSCR(r3)
+	addi	r7,r3,THREAD_TRANSACT_FPSTATE
+	lfd	fr0,FPSTATE_FPSCR(r7)
 	MTFSF_L(fr0)
-	REST_32FPVSRS_TRANSACT(0, R4, R3)
+	REST_32FPVSRS(0, R4, R7)
 
 	/* FP/VSX off again */
 	MTMSRD(r6)
@@ -147,9 +111,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	beq	1f
 	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	SAVE_32FPVSRS(0, R5, R4)
+	addi	r8,r4,THREAD_FPSTATE
+	SAVE_32FPVSRS(0, R5, R8)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
+	stfd	fr0,FPSTATE_FPSCR(r8)
 	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
 	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -160,7 +125,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif /* CONFIG_SMP */
 	/* enable use of FP after return */
 #ifdef CONFIG_PPC32
-	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
+	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
 	lwz	r4,THREAD_FPEXC_MODE(r5)
 	ori	r9,r9,MSR_FP		/* enable FP for current */
 	or	r9,r9,r4
@@ -172,9 +137,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR(r5)
+	addi	r7,r5,THREAD_FPSTATE
+	lfd	fr0,FPSTATE_FPSCR(r7)
 	MTFSF_L(fr0)
-	REST_32FPVSRS(0, R4, R5)
+	REST_32FPVSRS(0, R4, R7)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	fromreal(r4)
@@ -208,9 +174,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r5,PT_REGS(r3)
 	PPC_LCMPI	0,r5,0
-	SAVE_32FPVSRS(0, R4 ,R3)
+	addi	r6,r3,THREAD_FPSTATE
+	SAVE_32FPVSRS(0, R4, R6)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r3)
+	stfd	fr0,FPSTATE_FPSCR(r6)
 	beq	1f
 	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96d2fdf3aa9e..7a281416affb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1113,12 +1113,10 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
-	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr.val = 0;
+	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 #ifdef CONFIG_ALTIVEC
-	memset(current->thread.vr, 0, sizeof(current->thread.vr));
-	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
-	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
+	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9a0d24c390a3..238580043d85 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 		   void *kbuf, void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-	double buf[33];
+	u64 buf[33];
 	int i;
 #endif
 	flush_fp_to_thread(target);
@@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 	/* copy to local buffer then write that out */
 	for (i = 0; i < 32 ; i++)
 		buf[i] = target->thread.TS_FPR(i);
-	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+	buf[32] = target->thread.fp_state.fpscr;
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 
 #else
-	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-		     offsetof(struct thread_struct, TS_FPR(32)));
+	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+		     offsetof(struct thread_fp_state, fpr[32][0]));
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpr, 0, -1);
+				   &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-	double buf[33];
+	u64 buf[33];
 	int i;
 #endif
 	flush_fp_to_thread(target);
@@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		return i;
 	for (i = 0; i < 32 ; i++)
 		target->thread.TS_FPR(i) = buf[i];
-	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
+	target->thread.fp_state.fpscr = buf[32];
 	return 0;
 #else
-	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-		     offsetof(struct thread_struct, TS_FPR(32)));
+	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+		     offsetof(struct thread_fp_state, fpr[32][0]));
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpr, 0, -1);
+				  &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 
 	flush_altivec_to_thread(target);
 
-	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-		     offsetof(struct thread_struct, vr[32]));
+	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+		     offsetof(struct thread_vr_state, vr[32]));
 
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.vr, 0,
+				  &target->thread.vr_state, 0,
 				  33 * sizeof(vector128));
 	if (!ret) {
 		/*
@@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 
 	flush_altivec_to_thread(target);
 
-	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-		     offsetof(struct thread_struct, vr[32]));
+	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+		     offsetof(struct thread_vr_state, vr[32]));
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.vr, 0, 33 * sizeof(vector128));
+				 &target->thread.vr_state, 0,
+				 33 * sizeof(vector128));
 	if (!ret && count > 0) {
 		/*
 		 * We use only the first word of vrsave.
@@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
 {
-	double buf[32];
+	u64 buf[32];
 	int ret, i;
 
 	flush_vsx_to_thread(target);
 
 	for (i = 0; i < 32 ; i++)
-		buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  buf, 0, 32 * sizeof(double));
 
@@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   const void *kbuf, const void __user *ubuf)
 {
-	double buf[32];
+	u64 buf[32];
 	int ret,i;
 
 	flush_vsx_to_thread(target);
@@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 buf, 0, 32 * sizeof(double));
 	for (i = 0; i < 32 ; i++)
-		target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+		target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 
 
 	return ret;
@@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				tmp = ((unsigned long *)child->thread.fpr)
+				tmp = ((unsigned long *)child->thread.fp_state.fpr)
 					[fpidx * TS_FPRWIDTH];
 			else
-				tmp = child->thread.fpscr.val;
+				tmp = child->thread.fp_state.fpscr;
 		}
 		ret = put_user(tmp, datalp);
 		break;
@@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				((unsigned long *)child->thread.fpr)
+				((unsigned long *)child->thread.fp_state.fpr)
 					[fpidx * TS_FPRWIDTH] = data;
 			else
-				child->thread.fpscr.val = data;
+				child->thread.fp_state.fpscr = data;
 			ret = 0;
 		}
 		break;
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f51599e941c7..097f8dc426a0 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -43,7 +43,6 @@
 #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
 #define FPRHALF(i) (((i) - PT_FPR0) & 1)
 #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
-#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
 
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			compat_ulong_t caddr, compat_ulong_t cdata)
@@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			 * to be an array of unsigned int (32 bits) - the
 			 * index passed in is based on this assumption.
 			 */
-			tmp = ((unsigned int *)child->thread.fpr)
+			tmp = ((unsigned int *)child->thread.fp_state.fpr)
 				[FPRINDEX(index)];
 		}
 		ret = put_user((unsigned int)tmp, (u32 __user *)data);
@@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (numReg >= PT_FPR0) {
 			flush_fp_to_thread(child);
 			/* get 64 bit FPR */
-			tmp = ((u64 *)child->thread.fpr)
-				[FPRINDEX_3264(numReg)];
+			tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
 		} else { /* register within PT_REGS struct */
 			unsigned long tmp2;
 			ret = ptrace_get_reg(child, numReg, &tmp2);
@@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			 * to be an array of unsigned int (32 bits) - the
 			 * index passed in is based on this assumption.
 			 */
-			((unsigned int *)child->thread.fpr)
+			((unsigned int *)child->thread.fp_state.fpr)
 				[FPRINDEX(index)] = data;
 			ret = 0;
 		}
@@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			u64 *tmp;
 			flush_fp_to_thread(child);
 			/* get 64 bit FPR ... */
-			tmp = &(((u64 *)child->thread.fpr)
-				[FPRINDEX_3264(numReg)]);
+			tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
 			/* ... write the 32 bit part we want */
 			((u32 *)tmp)[index % 2] = data;
 			ret = 0;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bebdf1a1a540..ea25e45ea959 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -265,27 +265,27 @@ struct rt_sigframe {
 unsigned long copy_fpr_to_user(void __user *to,
 			       struct task_struct *task)
 {
-	double buf[ELF_NFPREG];
+	u64 buf[ELF_NFPREG];
 	int i;
 
 	/* save FPR copy to local buffer then write to the thread_struct */
 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 		buf[i] = task->thread.TS_FPR(i);
-	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+	buf[i] = task->thread.fp_state.fpscr;
 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_fpr_from_user(struct task_struct *task,
 				 void __user *from)
 {
-	double buf[ELF_NFPREG];
+	u64 buf[ELF_NFPREG];
 	int i;
 
 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
 		return 1;
 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 		task->thread.TS_FPR(i) = buf[i];
-	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+	task->thread.fp_state.fpscr = buf[i];
 
 	return 0;
 }
@@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task,
 unsigned long copy_vsx_to_user(void __user *to,
 			       struct task_struct *task)
 {
-	double buf[ELF_NVSRHALFREG];
+	u64 buf[ELF_NVSRHALFREG];
 	int i;
 
 	/* save FPR copy to local buffer then write to the thread_struct */
 	for (i = 0; i < ELF_NVSRHALFREG; i++)
-		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_vsx_from_user(struct task_struct *task,
 				 void __user *from)
 {
-	double buf[ELF_NVSRHALFREG];
+	u64 buf[ELF_NVSRHALFREG];
 	int i;
 
 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
 		return 1;
 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
-		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 	return 0;
 }
 
@@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
 unsigned long copy_transact_fpr_to_user(void __user *to,
 					struct task_struct *task)
 {
-	double buf[ELF_NFPREG];
+	u64 buf[ELF_NFPREG];
 	int i;
 
 	/* save FPR copy to local buffer then write to the thread_struct */
 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 		buf[i] = task->thread.TS_TRANS_FPR(i);
-	memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+	buf[i] = task->thread.transact_fp.fpscr;
 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 					  void __user *from)
 {
-	double buf[ELF_NFPREG];
+	u64 buf[ELF_NFPREG];
 	int i;
 
 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
 		return 1;
 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 		task->thread.TS_TRANS_FPR(i) = buf[i];
-	memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+	task->thread.transact_fp.fpscr = buf[i];
 
 	return 0;
 }
@@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 unsigned long copy_transact_vsx_to_user(void __user *to,
 					struct task_struct *task)
 {
-	double buf[ELF_NVSRHALFREG];
+	u64 buf[ELF_NVSRHALFREG];
 	int i;
 
 	/* save FPR copy to local buffer then write to the thread_struct */
 	for (i = 0; i < ELF_NVSRHALFREG; i++)
-		buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+		buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_transact_vsx_from_user(struct task_struct *task,
 					  void __user *from)
 {
-	double buf[ELF_NVSRHALFREG];
+	u64 buf[ELF_NVSRHALFREG];
 	int i;
 
 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
 		return 1;
 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
-		task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+		task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 	return 0;
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
 inline unsigned long copy_fpr_to_user(void __user *to,
 				      struct task_struct *task)
 {
-	return __copy_to_user(to, task->thread.fpr,
+	return __copy_to_user(to, task->thread.fp_state.fpr,
 			      ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_fpr_from_user(struct task_struct *task,
 					void __user *from)
 {
-	return __copy_from_user(task->thread.fpr, from,
+	return __copy_from_user(task->thread.fp_state.fpr, from,
 			      ELF_NFPREG * sizeof(double));
 }
 
@@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
 inline unsigned long copy_transact_fpr_to_user(void __user *to,
 					       struct task_struct *task)
 {
-	return __copy_to_user(to, task->thread.transact_fpr,
+	return __copy_to_user(to, task->thread.transact_fp.fpr,
 			      ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 						 void __user *from)
 {
-	return __copy_from_user(task->thread.transact_fpr, from,
+	return __copy_from_user(task->thread.transact_fp.fpr, from,
 			      ELF_NFPREG * sizeof(double));
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	/* save altivec registers */
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
-		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
 				   ELF_NVRREG * sizeof(vector128)))
 			return 1;
 		/* set MSR_VEC in the saved MSR value to indicate that
@@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
 	/* save altivec registers */
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
-		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
 				   ELF_NVRREG * sizeof(vector128)))
 			return 1;
 		if (msr & MSR_VEC) {
 			if (__copy_to_user(&tm_frame->mc_vregs,
-					   current->thread.transact_vr,
+					   &current->thread.transact_vr,
 					   ELF_NVRREG * sizeof(vector128)))
 				return 1;
 		} else {
 			if (__copy_to_user(&tm_frame->mc_vregs,
-					   current->thread.vr,
+					   &current->thread.vr_state,
 					   ELF_NVRREG * sizeof(vector128)))
 				return 1;
 		}
@@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs,
 		regs->msr &= ~MSR_VEC;
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
-		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
 				     sizeof(sr->mc_vregs)))
 			return 1;
 	} else if (current->thread.used_vr)
-		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+		memset(&current->thread.vr_state, 0,
+		       ELF_NVRREG * sizeof(vector128));
 
 	/* Always get VRSAVE back */
 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
@@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs,
 			return 1;
 	} else if (current->thread.used_vsr)
 		for (i = 0; i < 32 ; i++)
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif /* CONFIG_VSX */
 	/*
 	 * force the process to reload the FP registers from
@@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 		regs->msr &= ~MSR_VEC;
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
-		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
 				     sizeof(sr->mc_vregs)) ||
-		    __copy_from_user(current->thread.transact_vr,
+		    __copy_from_user(&current->thread.transact_vr,
 				     &tm_sr->mc_vregs,
 				     sizeof(sr->mc_vregs)))
 			return 1;
 	} else if (current->thread.used_vr) {
-		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-		memset(current->thread.transact_vr, 0,
+		memset(&current->thread.vr_state, 0,
+		       ELF_NVRREG * sizeof(vector128));
+		memset(&current->thread.transact_vr, 0,
 		       ELF_NVRREG * sizeof(vector128));
 	}
 
@@ -838,8 +840,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 			return 1;
 	} else if (current->thread.used_vsr)
 		for (i = 0; i < 32 ; i++) {
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
 		}
 #endif /* CONFIG_VSX */
 
@@ -1030,7 +1032,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	if (__put_user(0, &rt_sf->uc.uc_link))
 		goto badframe;
 
-	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
+	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
 
 	/* create a stack frame for the caller of the handler */
 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1462,7 +1464,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 
 	regs->link = tramp;
 
-	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
+	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
 
 	/* create a stack frame for the caller of the handler */
 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f93ec2835a13..a3c1ed4b979c 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+		err |= __copy_to_user(v_regs, &current->thread.vr_state,
+				      33 * sizeof(vector128));
 		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
 		 * contains valid data.
 		 */
@@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, current->thread.vr,
+		err |= __copy_to_user(v_regs, &current->thread.vr_state,
 				      33 * sizeof(vector128));
 		/* If VEC was enabled there are transactional VRs valid too,
 		 * else they're a copy of the checkpointed VRs.
 		 */
 		if (msr & MSR_VEC)
 			err |= __copy_to_user(tm_v_regs,
-					      current->thread.transact_vr,
+					      &current->thread.transact_vr,
 					      33 * sizeof(vector128));
 		else
 			err |= __copy_to_user(tm_v_regs,
-					      current->thread.vr,
+					      &current->thread.vr_state,
 					      33 * sizeof(vector128));
 
 		/* set MSR_VEC in the MSR value in the frame to indicate
@@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && (msr & MSR_VEC) != 0)
-		err |= __copy_from_user(current->thread.vr, v_regs,
+		err |= __copy_from_user(&current->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
 	else if (current->thread.used_vr)
-		memset(current->thread.vr, 0, 33 * sizeof(vector128));
+		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
 	/* Always get VRSAVE back */
 	if (v_regs != NULL)
 		err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		err |= copy_vsx_from_user(current, v_regs);
 	else
 		for (i = 0; i < 32 ; i++)
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif
 	return err;
 }
@@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
-		err |= __copy_from_user(current->thread.vr, v_regs,
+		err |= __copy_from_user(&current->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
-		err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
 					33 * sizeof(vector128));
 	}
 	else if (current->thread.used_vr) {
-		memset(current->thread.vr, 0, 33 * sizeof(vector128));
-		memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
 	}
 	/* Always get VRSAVE back */
 	if (v_regs != NULL && tm_v_regs != NULL) {
@@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
507 err |= copy_transact_vsx_from_user(current, tm_v_regs); 508 err |= copy_transact_vsx_from_user(current, tm_v_regs);
508 } else { 509 } else {
509 for (i = 0; i < 32 ; i++) { 510 for (i = 0; i < 32 ; i++) {
510 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 511 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
511 current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; 512 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
512 } 513 }
513 } 514 }
514#endif 515#endif
@@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
747 goto badframe; 748 goto badframe;
748 749
749 /* Make sure signal handler doesn't get spurious FP exceptions */ 750 /* Make sure signal handler doesn't get spurious FP exceptions */
750 current->thread.fpscr.val = 0; 751 current->thread.fp_state.fpscr = 0;
751#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 752#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
752 /* Remove TM bits from thread's MSR. The MSR in the sigcontext 753 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
753 * just indicates to userland that we were doing a transaction, but we 754 * just indicates to userland that we were doing a transaction, but we
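
The signal_64.c changes above are mostly mechanical, but note the pointer discipline: current->thread.vr was an array and decayed to a pointer on its own, while vr_state is an embedded structure whose address must be taken explicitly, hence the new ampersands. The 33 * sizeof(vector128) copy size still works because the structure keeps vr0..vr31 and VSCR contiguous. A sketch of the layout these accesses imply (hedged; the authoritative definitions are in the patch's header changes, not in this part of the diff):

	struct thread_fp_state {
		u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));	/* FPRs / VSR halves */
		u64 fpscr;						/* now a plain u64 */
	};

	struct thread_vr_state {
		vector128 vr[32] __attribute__((aligned(16)));
		vector128 vscr;		/* directly after vr[], giving the 33-vector copy */
	};

vrsave is not part of thread_vr_state; the code above still reads and writes current->thread.vrsave separately.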
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index cd809eaa8b5c..761af4f0a632 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -12,16 +12,15 @@
12#include <asm/reg.h> 12#include <asm/reg.h>
13 13
14#ifdef CONFIG_VSX 14#ifdef CONFIG_VSX
15/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */ 15/* See fpu.S, this is borrowed from there */
16#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ 16#define __SAVE_32FPRS_VSRS(n,c,base) \
17BEGIN_FTR_SECTION \ 17BEGIN_FTR_SECTION \
18 b 2f; \ 18 b 2f; \
19END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 19END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
20 SAVE_32FPRS_TRANSACT(n,base); \ 20 SAVE_32FPRS(n,base); \
21 b 3f; \ 21 b 3f; \
222: SAVE_32VSRS_TRANSACT(n,c,base); \ 222: SAVE_32VSRS(n,c,base); \
233: 233:
24/* ...and this is just plain borrowed from there. */
25#define __REST_32FPRS_VSRS(n,c,base) \ 24#define __REST_32FPRS_VSRS(n,c,base) \
26BEGIN_FTR_SECTION \ 25BEGIN_FTR_SECTION \
27 b 2f; \ 26 b 2f; \
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
312: REST_32VSRS(n,c,base); \ 302: REST_32VSRS(n,c,base); \
323: 313:
33#else 32#else
34#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base) 33#define __SAVE_32FPRS_VSRS(n,c,base) SAVE_32FPRS(n, base)
35#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) 34#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
36#endif 35#endif
37#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ 36#define SAVE_32FPRS_VSRS(n,c,base) \
38 __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base) 37 __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
39#define REST_32FPRS_VSRS(n,c,base) \ 38#define REST_32FPRS_VSRS(n,c,base) \
40 __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base) 39 __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
41 40
@@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim)
157 andis. r0, r4, MSR_VEC@h 156 andis. r0, r4, MSR_VEC@h
158 beq dont_backup_vec 157 beq dont_backup_vec
159 158
160 SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */ 159 addi r7, r3, THREAD_TRANSACT_VRSTATE
160 SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
161 mfvscr vr0 161 mfvscr vr0
162 li r6, THREAD_TRANSACT_VSCR 162 li r6, VRSTATE_VSCR
163 stvx vr0, r3, r6 163 stvx vr0, r7, r6
164dont_backup_vec: 164dont_backup_vec:
165 mfspr r0, SPRN_VRSAVE 165 mfspr r0, SPRN_VRSAVE
166 std r0, THREAD_TRANSACT_VRSAVE(r3) 166 std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -168,10 +168,11 @@ dont_backup_vec:
168 andi. r0, r4, MSR_FP 168 andi. r0, r4, MSR_FP
169 beq dont_backup_fp 169 beq dont_backup_fp
170 170
171 SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */ 171 addi r7, r3, THREAD_TRANSACT_FPSTATE
172 SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */
172 173
173 mffs fr0 174 mffs fr0
174 stfd fr0,THREAD_TRANSACT_FPSCR(r3) 175 stfd fr0,FPSTATE_FPSCR(r7)
175 176
176dont_backup_fp: 177dont_backup_fp:
177 /* The moment we treclaim, ALL of our GPRs will switch 178 /* The moment we treclaim, ALL of our GPRs will switch
@@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
358 andis. r0, r4, MSR_VEC@h 359 andis. r0, r4, MSR_VEC@h
359 beq dont_restore_vec 360 beq dont_restore_vec
360 361
361 li r5, THREAD_VSCR 362 addi r8, r3, THREAD_VRSTATE
362 lvx vr0, r3, r5 363 li r5, VRSTATE_VSCR
364 lvx vr0, r8, r5
363 mtvscr vr0 365 mtvscr vr0
364 REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ 366 REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
365dont_restore_vec: 367dont_restore_vec:
366 ld r5, THREAD_VRSAVE(r3) 368 ld r5, THREAD_VRSAVE(r3)
367 mtspr SPRN_VRSAVE, r5 369 mtspr SPRN_VRSAVE, r5
@@ -370,9 +372,10 @@ dont_restore_vec:
370 andi. r0, r4, MSR_FP 372 andi. r0, r4, MSR_FP
371 beq dont_restore_fp 373 beq dont_restore_fp
372 374
373 lfd fr0, THREAD_FPSCR(r3) 375 addi r8, r3, THREAD_FPSTATE
376 lfd fr0, FPSTATE_FPSCR(r8)
374 MTFSF_L(fr0) 377 MTFSF_L(fr0)
375 REST_32FPRS_VSRS(0, R4, R3) 378 REST_32FPRS_VSRS(0, R4, R8)
376 379
377dont_restore_fp: 380dont_restore_fp:
378 mtmsr r6 /* FP/Vec off again! */ 381 mtmsr r6 /* FP/Vec off again! */
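
The tm.S rewrite shows the payoff of moving the state into structures: instead of macros that bake in a THREAD_TRANSACT_* displacement, tm_reclaim and tm_recheckpoint now materialise a base pointer to whichever state block they want (addi r7, r3, THREAD_TRANSACT_VRSTATE, or addi r8, r3, THREAD_VRSTATE) and run the single generic SAVE_32VRS/REST_32VRS and SAVE_32FPRS_VSRS machinery against that base, reaching VSCR and FPSCR through block-relative offsets. Those symbolic constants are presumably emitted by asm-offsets.c in the usual offsetof() style, along these lines (a sketch, not the patch's exact hunk):

	/* asm-offsets.c, sketch of the constants used in the assembly above */
	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
	DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
	DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct, transact_fp));
	DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct, transact_vr));
	DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
	DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));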
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f783c932faeb..f0a6814007a5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
816 816
817 flush_fp_to_thread(current); 817 flush_fp_to_thread(current);
818 818
819 code = __parse_fpscr(current->thread.fpscr.val); 819 code = __parse_fpscr(current->thread.fp_state.fpscr);
820 820
821 _exception(SIGFPE, regs, code, regs->nip); 821 _exception(SIGFPE, regs, code, regs->nip);
822} 822}
@@ -1069,7 +1069,7 @@ static int emulate_math(struct pt_regs *regs)
1069 return 0; 1069 return 0;
1070 case 1: { 1070 case 1: {
1071 int code = 0; 1071 int code = 0;
1072 code = __parse_fpscr(current->thread.fpscr.val); 1072 code = __parse_fpscr(current->thread.fp_state.fpscr);
1073 _exception(SIGFPE, regs, code, regs->nip); 1073 _exception(SIGFPE, regs, code, regs->nip);
1074 return 0; 1074 return 0;
1075 } 1075 }
@@ -1371,8 +1371,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
1371 1371
1372#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1372#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1373 1373
1374extern void do_load_up_fpu(struct pt_regs *regs);
1375
1376void fp_unavailable_tm(struct pt_regs *regs) 1374void fp_unavailable_tm(struct pt_regs *regs)
1377{ 1375{
1378 /* Note: This does not handle any kind of FP laziness. */ 1376 /* Note: This does not handle any kind of FP laziness. */
@@ -1403,8 +1401,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
1403} 1401}
1404 1402
1405#ifdef CONFIG_ALTIVEC 1403#ifdef CONFIG_ALTIVEC
1406extern void do_load_up_altivec(struct pt_regs *regs);
1407
1408void altivec_unavailable_tm(struct pt_regs *regs) 1404void altivec_unavailable_tm(struct pt_regs *regs)
1409{ 1405{
1410 /* See the comments in fp_unavailable_tm(). This function operates 1406 /* See the comments in fp_unavailable_tm(). This function operates
@@ -1634,7 +1630,7 @@ void altivec_assist_exception(struct pt_regs *regs)
1634 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1630 /* XXX quick hack for now: set the non-Java bit in the VSCR */
1635 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1631 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1636 "in %s at %lx\n", current->comm, regs->nip); 1632 "in %s at %lx\n", current->comm, regs->nip);
1637 current->thread.vscr.u[3] |= 0x10000; 1633 current->thread.vr_state.vscr.u[3] |= 0x10000;
1638 } 1634 }
1639} 1635}
1640#endif /* CONFIG_ALTIVEC */ 1636#endif /* CONFIG_ALTIVEC */
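
In traps.c the FPSCR consumers follow the fpscr.val to fp_state.fpscr rename; since the field is now a bare u64 there is no .val member to pick out, and __parse_fpscr() receives the value directly. For readers unfamiliar with that helper, the gist is mapping enabled-and-raised FPSCR exception bits to a SIGFPE si_code, roughly like this (an illustration, not the kernel's exact code; the FPSCR_* and FPE_* constants come from asm/reg.h and the siginfo headers):

	static int parse_fpscr_sketch(u64 fpscr)
	{
		/* report only exceptions that are both enabled and flagged */
		if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
			return FPE_FLTDIV;	/* divide by zero */
		if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
			return FPE_FLTOVF;	/* overflow */
		if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
			return FPE_FLTUND;	/* underflow */
		if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
			return FPE_FLTRES;	/* inexact */
		return FPE_FLTINV;		/* invalid operation, the catch-all */
	}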
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..c4bfadb2606b 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
271 vb = (instr >> 11) & 0x1f; 271 vb = (instr >> 11) & 0x1f;
272 vc = (instr >> 6) & 0x1f; 272 vc = (instr >> 6) & 0x1f;
273 273
274 vrs = current->thread.vr; 274 vrs = current->thread.vr_state.vr;
275 switch (instr & 0x3f) { 275 switch (instr & 0x3f) {
276 case 10: 276 case 10:
277 switch (vc) { 277 switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
320 case 14: /* vctuxs */ 320 case 14: /* vctuxs */
321 for (i = 0; i < 4; ++i) 321 for (i = 0; i < 4; ++i)
322 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, 322 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
323 &current->thread.vscr.u[3]); 323 &current->thread.vr_state.vscr.u[3]);
324 break; 324 break;
325 case 15: /* vctsxs */ 325 case 15: /* vctsxs */
326 for (i = 0; i < 4; ++i) 326 for (i = 0; i < 4; ++i)
327 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, 327 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
328 &current->thread.vscr.u[3]); 328 &current->thread.vr_state.vscr.u[3]);
329 break; 329 break;
330 default: 330 default:
331 return -EINVAL; 331 return -EINVAL;
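
vecemu.c now pulls its register file out of the embedded structure (vrs = current->thread.vr_state.vr) and hands the saturating conversion helpers a pointer to vr_state.vscr.u[3]. The u[3] index, like the 0x10000 non-Java bit set in traps.c above, relies on vector128 exposing the register as four 32-bit words with VSCR's architected bits (NJ and SAT) in the last word. The shape those accesses imply is roughly (a hedged sketch of the type):

	typedef struct {
		u32 u[4];	/* u[3] carries VSCR's NJ (0x10000) and SAT (0x1) bits */
	} __attribute__((aligned(16))) vector128;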
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9e20999aaef2..a48df870b696 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,29 +8,6 @@
8#include <asm/ptrace.h> 8#include <asm/ptrace.h>
9 9
10#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 10#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
11/*
12 * Wrapper to call load_up_altivec from C.
13 * void do_load_up_altivec(struct pt_regs *regs);
14 */
15_GLOBAL(do_load_up_altivec)
16 mflr r0
17 std r0, 16(r1)
18 stdu r1, -112(r1)
19
20 subi r6, r3, STACK_FRAME_OVERHEAD
21 /* load_up_altivec expects r12=MSR, r13=PACA, and returns
22 * with r12 = new MSR.
23 */
24 ld r12,_MSR(r6)
25 GET_PACA(r13)
26 bl load_up_altivec
27 std r12,_MSR(r6)
28
29 ld r0, 112+16(r1)
30 addi r1, r1, 112
31 mtlr r0
32 blr
33
34/* void do_load_up_transact_altivec(struct thread_struct *thread) 11/* void do_load_up_transact_altivec(struct thread_struct *thread)
35 * 12 *
36 * This is similar to load_up_altivec but for the transactional version of the 13 * This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
46 li r4,1 23 li r4,1
47 stw r4,THREAD_USED_VR(r3) 24 stw r4,THREAD_USED_VR(r3)
48 25
49 li r10,THREAD_TRANSACT_VSCR 26 li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
50 lvx vr0,r10,r3 27 lvx vr0,r10,r3
51 mtvscr vr0 28 mtvscr vr0
52 REST_32VRS_TRANSACT(0,r4,r3) 29 addi r10,r3,THREAD_TRANSACT_VRSTATE
30 REST_32VRS(0,r4,r10)
53 31
54 /* Disable VEC again. */ 32 /* Disable VEC again. */
55 MTMSRD(r6) 33 MTMSRD(r6)
@@ -59,7 +37,6 @@ _GLOBAL(do_load_up_transact_altivec)
59#endif 37#endif
60 38
61/* 39/*
62 * load_up_altivec(unused, unused, tsk)
63 * Disable VMX for the task which had it previously, 40 * Disable VMX for the task which had it previously,
64 * and save its vector registers in its thread_struct. 41 * and save its vector registers in its thread_struct.
65 * Enables the VMX for use in the kernel on return. 42 * Enables the VMX for use in the kernel on return.
@@ -90,10 +67,11 @@ _GLOBAL(load_up_altivec)
90 /* Save VMX state to last_task_used_altivec's THREAD struct */ 67 /* Save VMX state to last_task_used_altivec's THREAD struct */
91 toreal(r4) 68 toreal(r4)
92 addi r4,r4,THREAD 69 addi r4,r4,THREAD
93 SAVE_32VRS(0,r5,r4) 70 addi r7,r4,THREAD_VRSTATE
71 SAVE_32VRS(0,r5,r7)
94 mfvscr vr0 72 mfvscr vr0
95 li r10,THREAD_VSCR 73 li r10,VRSTATE_VSCR
96 stvx vr0,r10,r4 74 stvx vr0,r10,r7
97 /* Disable VMX for last_task_used_altivec */ 75 /* Disable VMX for last_task_used_altivec */
98 PPC_LL r5,PT_REGS(r4) 76 PPC_LL r5,PT_REGS(r4)
99 toreal(r5) 77 toreal(r5)
@@ -125,12 +103,13 @@ _GLOBAL(load_up_altivec)
125 oris r12,r12,MSR_VEC@h 103 oris r12,r12,MSR_VEC@h
126 std r12,_MSR(r1) 104 std r12,_MSR(r1)
127#endif 105#endif
106 addi r7,r5,THREAD_VRSTATE
128 li r4,1 107 li r4,1
129 li r10,THREAD_VSCR 108 li r10,VRSTATE_VSCR
130 stw r4,THREAD_USED_VR(r5) 109 stw r4,THREAD_USED_VR(r5)
131 lvx vr0,r10,r5 110 lvx vr0,r10,r7
132 mtvscr vr0 111 mtvscr vr0
133 REST_32VRS(0,r4,r5) 112 REST_32VRS(0,r4,r7)
134#ifndef CONFIG_SMP 113#ifndef CONFIG_SMP
135 /* Update last_task_used_altivec to 'current' */ 114 /* Update last_task_used_altivec to 'current' */
136 subi r4,r5,THREAD /* Back to 'current' */ 115 subi r4,r5,THREAD /* Back to 'current' */
@@ -165,12 +144,13 @@ _GLOBAL(giveup_altivec)
165 PPC_LCMPI 0,r3,0 144 PPC_LCMPI 0,r3,0
166 beqlr /* if no previous owner, done */ 145 beqlr /* if no previous owner, done */
167 addi r3,r3,THREAD /* want THREAD of task */ 146 addi r3,r3,THREAD /* want THREAD of task */
147 addi r7,r3,THREAD_VRSTATE
168 PPC_LL r5,PT_REGS(r3) 148 PPC_LL r5,PT_REGS(r3)
169 PPC_LCMPI 0,r5,0 149 PPC_LCMPI 0,r5,0
170 SAVE_32VRS(0,r4,r3) 150 SAVE_32VRS(0,r4,r7)
171 mfvscr vr0 151 mfvscr vr0
172 li r4,THREAD_VSCR 152 li r4,VRSTATE_VSCR
173 stvx vr0,r4,r3 153 stvx vr0,r4,r7
174 beq 1f 154 beq 1f
175 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 155 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
176#ifdef CONFIG_VSX 156#ifdef CONFIG_VSX
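
vector.S follows the same base-pointer pattern: load_up_altivec and giveup_altivec compute r7 = thread + THREAD_VRSTATE once, then address both the 32-register block and VSCR relative to r7 (stvx and lvx form their effective address as the sum of the two registers, i.e. &vr_state + VRSTATE_VSCR). Spelled out in C, the save path amounts to something like this (a sketch; read_vr() and read_vscr() are hypothetical stand-ins for the stvx and mfvscr instructions):

	static void save_vr_block(struct thread_vr_state *v)
	{
		int i;

		for (i = 0; i < 32; i++)
			v->vr[i] = read_vr(i);	/* SAVE_32VRS(0, scratch, v) */
		v->vscr = read_vscr();		/* mfvscr vr0; stvx vr0, VRSTATE_VSCR, v */
	}

Passing &thread->vr_state or &thread->transact_vr as v is exactly what lets one macro body serve both the live and the checkpointed state.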
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 27db1e665959..c0b48f96a91c 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -444,7 +444,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
444#ifdef CONFIG_VSX 444#ifdef CONFIG_VSX
445 u64 *vcpu_vsx = vcpu->arch.vsr; 445 u64 *vcpu_vsx = vcpu->arch.vsr;
446#endif 446#endif
447 u64 *thread_fpr = (u64*)t->fpr; 447 u64 *thread_fpr = &t->fp_state.fpr[0][0];
448 int i; 448 int i;
449 449
450 /* 450 /*
@@ -466,14 +466,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
466 /* 466 /*
467 * Note that on CPUs with VSX, giveup_fpu stores 467 * Note that on CPUs with VSX, giveup_fpu stores
468 * both the traditional FP registers and the added VSX 468 * both the traditional FP registers and the added VSX
469 * registers into thread.fpr[]. 469 * registers into thread.fp_state.fpr[].
470 */ 470 */
471 if (current->thread.regs->msr & MSR_FP) 471 if (current->thread.regs->msr & MSR_FP)
472 giveup_fpu(current); 472 giveup_fpu(current);
473 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 473 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
474 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; 474 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
475 475
476 vcpu->arch.fpscr = t->fpscr.val; 476 vcpu->arch.fpscr = t->fp_state.fpscr;
477 477
478#ifdef CONFIG_VSX 478#ifdef CONFIG_VSX
479 if (cpu_has_feature(CPU_FTR_VSX)) 479 if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +486,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
486 if (msr & MSR_VEC) { 486 if (msr & MSR_VEC) {
487 if (current->thread.regs->msr & MSR_VEC) 487 if (current->thread.regs->msr & MSR_VEC)
488 giveup_altivec(current); 488 giveup_altivec(current);
489 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); 489 memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
490 vcpu->arch.vscr = t->vscr; 490 vcpu->arch.vscr = t->vr_state.vscr;
491 } 491 }
492#endif 492#endif
493 493
@@ -539,7 +539,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
539#ifdef CONFIG_VSX 539#ifdef CONFIG_VSX
540 u64 *vcpu_vsx = vcpu->arch.vsr; 540 u64 *vcpu_vsx = vcpu->arch.vsr;
541#endif 541#endif
542 u64 *thread_fpr = (u64*)t->fpr; 542 u64 *thread_fpr = &t->fp_state.fpr[0][0];
543 int i; 543 int i;
544 544
545 /* When we have paired singles, we emulate in software */ 545 /* When we have paired singles, we emulate in software */
@@ -584,15 +584,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
584 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) 584 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
585 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; 585 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
586#endif 586#endif
587 t->fpscr.val = vcpu->arch.fpscr; 587 t->fp_state.fpscr = vcpu->arch.fpscr;
588 t->fpexc_mode = 0; 588 t->fpexc_mode = 0;
589 kvmppc_load_up_fpu(); 589 kvmppc_load_up_fpu();
590 } 590 }
591 591
592 if (msr & MSR_VEC) { 592 if (msr & MSR_VEC) {
593#ifdef CONFIG_ALTIVEC 593#ifdef CONFIG_ALTIVEC
594 memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); 594 memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
595 t->vscr = vcpu->arch.vscr; 595 t->vr_state.vscr = vcpu->arch.vscr;
596 t->vrsave = -1; 596 t->vrsave = -1;
597 kvmppc_load_up_altivec(); 597 kvmppc_load_up_altivec();
598#endif 598#endif
@@ -1116,12 +1116,10 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1116int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1116int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1117{ 1117{
1118 int ret; 1118 int ret;
1119 double fpr[32][TS_FPRWIDTH]; 1119 struct thread_fp_state fp;
1120 unsigned int fpscr;
1121 int fpexc_mode; 1120 int fpexc_mode;
1122#ifdef CONFIG_ALTIVEC 1121#ifdef CONFIG_ALTIVEC
1123 vector128 vr[32]; 1122 struct thread_vr_state vr;
1124 vector128 vscr;
1125 unsigned long uninitialized_var(vrsave); 1123 unsigned long uninitialized_var(vrsave);
1126 int used_vr; 1124 int used_vr;
1127#endif 1125#endif
@@ -1153,8 +1151,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1153 /* Save FPU state in stack */ 1151 /* Save FPU state in stack */
1154 if (current->thread.regs->msr & MSR_FP) 1152 if (current->thread.regs->msr & MSR_FP)
1155 giveup_fpu(current); 1153 giveup_fpu(current);
1156 memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); 1154 fp = current->thread.fp_state;
1157 fpscr = current->thread.fpscr.val;
1158 fpexc_mode = current->thread.fpexc_mode; 1155 fpexc_mode = current->thread.fpexc_mode;
1159 1156
1160#ifdef CONFIG_ALTIVEC 1157#ifdef CONFIG_ALTIVEC
@@ -1163,8 +1160,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1163 if (used_vr) { 1160 if (used_vr) {
1164 if (current->thread.regs->msr & MSR_VEC) 1161 if (current->thread.regs->msr & MSR_VEC)
1165 giveup_altivec(current); 1162 giveup_altivec(current);
1166 memcpy(vr, current->thread.vr, sizeof(current->thread.vr)); 1163 vr = current->thread.vr_state;
1167 vscr = current->thread.vscr;
1168 vrsave = current->thread.vrsave; 1164 vrsave = current->thread.vrsave;
1169 } 1165 }
1170#endif 1166#endif
@@ -1196,15 +1192,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1196 current->thread.regs->msr = ext_msr; 1192 current->thread.regs->msr = ext_msr;
1197 1193
1198 /* Restore FPU/VSX state from stack */ 1194 /* Restore FPU/VSX state from stack */
1199 memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); 1195 current->thread.fp_state = fp;
1200 current->thread.fpscr.val = fpscr;
1201 current->thread.fpexc_mode = fpexc_mode; 1196 current->thread.fpexc_mode = fpexc_mode;
1202 1197
1203#ifdef CONFIG_ALTIVEC 1198#ifdef CONFIG_ALTIVEC
1204 /* Restore Altivec state from stack */ 1199 /* Restore Altivec state from stack */
1205 if (used_vr && current->thread.used_vr) { 1200 if (used_vr && current->thread.used_vr) {
1206 memcpy(current->thread.vr, vr, sizeof(current->thread.vr)); 1201 current->thread.vr_state = vr;
1207 current->thread.vscr = vscr;
1208 current->thread.vrsave = vrsave; 1202 current->thread.vrsave = vrsave;
1209 } 1203 }
1210 current->thread.used_vr = used_vr; 1204 current->thread.used_vr = used_vr;
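
The book3s_pr.c conversion shows the structures paying off on the C side as well. The flattened view of the FPR array is now taken with &t->fp_state.fpr[0][0] rather than a (u64 *) cast of the array name, and kvmppc_vcpu_run's save and restore of userspace state collapses from memcpy plus a separate scalar into whole-structure assignment, which carries FPSCR and VSCR along for free:

	/* the idiom the hunks above converge on (sketch) */
	struct thread_fp_state fp;

	fp = current->thread.fp_state;	/* snapshot FPRs and FPSCR in one copy */
	/* ... enter the guest; its FP usage goes through the thread struct ... */
	current->thread.fp_state = fp;	/* one assignment restores both */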
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 17722d82f1d1..5133199f6cb7 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -656,9 +656,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
656{ 656{
657 int ret, s; 657 int ret, s;
658#ifdef CONFIG_PPC_FPU 658#ifdef CONFIG_PPC_FPU
659 unsigned int fpscr; 659 struct thread_fp_state fp;
660 int fpexc_mode; 660 int fpexc_mode;
661 u64 fpr[32];
662#endif 661#endif
663 662
664 if (!vcpu->arch.sane) { 663 if (!vcpu->arch.sane) {
@@ -677,13 +676,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
677#ifdef CONFIG_PPC_FPU 676#ifdef CONFIG_PPC_FPU
678 /* Save userspace FPU state in stack */ 677 /* Save userspace FPU state in stack */
679 enable_kernel_fp(); 678 enable_kernel_fp();
680 memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); 679 fp = current->thread.fp_state;
681 fpscr = current->thread.fpscr.val;
682 fpexc_mode = current->thread.fpexc_mode; 680 fpexc_mode = current->thread.fpexc_mode;
683 681
684 /* Restore guest FPU state to thread */ 682 /* Restore guest FPU state to thread */
685 memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); 683 memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
686 current->thread.fpscr.val = vcpu->arch.fpscr; 684 sizeof(vcpu->arch.fpr));
685 current->thread.fp_state.fpscr = vcpu->arch.fpscr;
687 686
688 /* 687 /*
689 * Since we can't trap on MSR_FP in GS-mode, we consider the guest 688 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -709,12 +708,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
709 vcpu->fpu_active = 0; 708 vcpu->fpu_active = 0;
710 709
711 /* Save guest FPU state from thread */ 710 /* Save guest FPU state from thread */
712 memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); 711 memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
713 vcpu->arch.fpscr = current->thread.fpscr.val; 712 sizeof(vcpu->arch.fpr));
713 vcpu->arch.fpscr = current->thread.fp_state.fpscr;
714 714
715 /* Restore userspace FPU state from stack */ 715 /* Restore userspace FPU state from stack */
716 memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); 716 current->thread.fp_state = fp;
717 current->thread.fpscr.val = fpscr;
718 current->thread.fpexc_mode = fpexc_mode; 717 current->thread.fpexc_mode = fpexc_mode;
719#endif 718#endif
720 719
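
booke.c gets the same whole-structure snapshot for the userspace state, but keeps memcpy for moving FPRs between vcpu->arch.fpr and fp_state.fpr, since the former is a flat array while the latter is two-dimensional. The sizeof-based copies presumably line up because Book3E kernels are built without VSX, making TS_FPRWIDTH 1 so that fp_state.fpr is effectively u64[32] too; a paranoid build-time check would look like this (an illustration, not part of the patch):

	/* with CONFIG_VSX unset, the 2-D fpr view degenerates to u64[32] */
	BUILD_BUG_ON(sizeof(((struct thread_fp_state *)0)->fpr) != 32 * sizeof(u64));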