author		Roland McGrath <roland@redhat.com>	2008-01-30 07:31:50 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-01-30 07:31:50 -0500
commit		4421011120b2304e5c248ae4165a2704588aedf1
tree		b715f9bd5fbe3493a2c6a14d0c30832c42832e62	/arch/x86/kernel/i387.c
parent		b7b71725fb9584454bfe5f231223bd63421798fb
x86: x86 i387 user_regset
This revamps the i387 code to be shared across 32-bit, 64-bit, and 32-on-64.
It does so by consolidating the code in one place based on the user_regset
accessor interfaces.  This switches 32-bit to using the i387_64.h header and
64-bit to using the i387.c that was previously i387_32.c, but that's what
took the least cleanup in each file.  Here i387.h is stubbed to always
include i387_64.h rather than renaming the file, to keep this diff smaller
and easier to read.

Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
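
For orientation, the sketch below shows roughly how regset accessors of the
kind this patch introduces (fpregs_active/fpregs_get/fpregs_set and the
xfpregs_* equivalents) are typically wired into a user_regset table.  It is
an illustrative sketch only, not part of the patch: the real x86 tables live
in the ptrace code, and the note types and element sizes shown here are
assumptions based on the generic <linux/regset.h> interface of this era.

/*
 * Hypothetical example -- not from this commit.  It assumes the accessor
 * names added below plus the regset and ELF-note definitions from
 * <linux/regset.h> and <linux/elf.h>.
 */
#include <linux/regset.h>
#include <linux/elf.h>

static const struct user_regset example_fp_regsets[] = {
	{
		/* classic i387 environment, dumped as the NT_PRFPREG note */
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.active		= fpregs_active,
		.get		= fpregs_get,
		.set		= fpregs_set,
	},
	{
		/* FXSR state, dumped as the NT_PRXFPREG note */
		.core_note_type	= NT_PRXFPREG,
		.n		= sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.active		= xfpregs_active,
		.get		= xfpregs_get,
		.set		= xfpregs_set,
	},
};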
Diffstat (limited to 'arch/x86/kernel/i387.c')
-rw-r--r--	arch/x86/kernel/i387.c	481
1 file changed, 248 insertions(+), 233 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index bebe0346346..f7f7568dd7b 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -8,6 +8,7 @@
 
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/regset.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/math_emu.h>
@@ -16,13 +17,29 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
+#ifdef CONFIG_X86_64
+
+#include <asm/sigcontext32.h>
+#include <asm/user32.h>
+
+#else
+
+#define save_i387_ia32		save_i387
+#define restore_i387_ia32	restore_i387
+
+#define _fpstate_ia32		_fpstate
+#define user_i387_ia32_struct	user_i387_struct
+#define user32_fxsr_struct	user_fxsr_struct
+
+#endif
+
 #ifdef CONFIG_MATH_EMULATION
 #define HAVE_HWFP (boot_cpu_data.hard_math)
 #else
 #define HAVE_HWFP 1
 #endif
 
-static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 
 void mxcsr_feature_mask_init(void)
 {
@@ -40,6 +57,30 @@ void mxcsr_feature_mask_init(void)
 	stts();
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * Called at bootup to set up the initial FPU state that is later cloned
+ * into all processes.
+ */
+void __cpuinit fpu_init(void)
+{
+	unsigned long oldcr0 = read_cr0();
+	extern void __bad_fxsave_alignment(void);
+
+	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
+		__bad_fxsave_alignment();
+	set_in_cr4(X86_CR4_OSFXSR);
+	set_in_cr4(X86_CR4_OSXMMEXCPT);
+
+	write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
+
+	mxcsr_feature_mask_init();
+	/* clean state in init */
+	current_thread_info()->status = 0;
+	clear_used_math();
+}
+#endif	/* CONFIG_X86_64 */
+
 /*
  * The _current_ task is using the FPU for the first time
  * so initialize it and set the mxcsr to its default
@@ -48,12 +89,18 @@ void mxcsr_feature_mask_init(void)
  */
 void init_fpu(struct task_struct *tsk)
 {
+	if (tsk_used_math(tsk)) {
+		if (tsk == current)
+			unlazy_fpu(tsk);
+		return;
+	}
+
 	if (cpu_has_fxsr) {
 		memset(&tsk->thread.i387.fxsave, 0,
 		       sizeof(struct i387_fxsave_struct));
 		tsk->thread.i387.fxsave.cwd = 0x37f;
 		if (cpu_has_xmm)
-			tsk->thread.i387.fxsave.mxcsr = 0x1f80;
+			tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
 	} else {
 		memset(&tsk->thread.i387.fsave, 0,
 		       sizeof(struct i387_fsave_struct));
@@ -62,27 +109,59 @@ void init_fpu(struct task_struct *tsk)
 		tsk->thread.i387.fsave.twd = 0xffffffffu;
 		tsk->thread.i387.fsave.fos = 0xffff0000u;
 	}
-	/* only the device not available exception
-	 * or ptrace can call init_fpu */
+	/*
+	 * Only the device not available exception or ptrace can call init_fpu.
+	 */
 	set_stopped_child_used_math(tsk);
 }
 
-/*
- * FPU lazy state save handling.
- */
+int fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+	return tsk_used_math(target) ? regset->n : 0;
+}
 
-void kernel_fpu_begin(void)
+int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
-	struct thread_info *thread = current_thread_info();
+	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
+}
 
-	preempt_disable();
-	if (thread->status & TS_USEDFPU) {
-		__save_init_fpu(thread->task);
-		return;
-	}
-	clts();
+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	if (!cpu_has_fxsr)
+		return -ENODEV;
+
+	unlazy_fpu(target);
+
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.i387.fxsave, 0, -1);
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+
+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+
+	if (!cpu_has_fxsr)
+		return -ENODEV;
+
+	unlazy_fpu(target);
+	set_stopped_child_used_math(target);
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &target->thread.i387.fxsave, 0, -1);
+
+	/*
+	 * mxcsr reserved bits must be masked to zero for security reasons.
+	 */
+	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+
+	return ret;
+}
+
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 
 /*
  * FPU tag word conversions.
@@ -94,210 +173,187 @@ static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
 
 	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
 	tmp = ~twd;
-	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
 	/* and move the valid bits to the lower byte. */
 	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
 	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
 	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-
 	return tmp;
 }
 
-static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
-{
-	struct _fpxreg *st = NULL;
-	unsigned long tos = (fxsave->swd >> 11) & 7;
-	unsigned long twd = (unsigned long) fxsave->twd;
-	unsigned long tag;
-	unsigned long ret = 0xffff0000u;
-	int i;
-
 #define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
+#define FP_EXP_TAG_VALID	0
+#define FP_EXP_TAG_ZERO		1
+#define FP_EXP_TAG_SPECIAL	2
+#define FP_EXP_TAG_EMPTY	3
+
+static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
+{
+	struct _fpxreg *st;
+	u32 tos = (fxsave->swd >> 11) & 7;
+	u32 twd = (unsigned long) fxsave->twd;
+	u32 tag;
+	u32 ret = 0xffff0000u;
+	int i;
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++, twd >>= 1) {
 		if (twd & 0x1) {
 			st = FPREG_ADDR(fxsave, (i - tos) & 7);
 
 			switch (st->exponent & 0x7fff) {
 			case 0x7fff:
-				tag = 2;		/* Special */
+				tag = FP_EXP_TAG_SPECIAL;
 				break;
 			case 0x0000:
 				if (!st->significand[0] &&
 				    !st->significand[1] &&
 				    !st->significand[2] &&
-				    !st->significand[3]) {
-					tag = 1;	/* Zero */
-				} else {
-					tag = 2;	/* Special */
-				}
+				    !st->significand[3])
+					tag = FP_EXP_TAG_ZERO;
+				else
+					tag = FP_EXP_TAG_SPECIAL;
 				break;
 			default:
-				if (st->significand[3] & 0x8000) {
-					tag = 0;	/* Valid */
-				} else {
-					tag = 2;	/* Special */
-				}
+				if (st->significand[3] & 0x8000)
+					tag = FP_EXP_TAG_VALID;
+				else
+					tag = FP_EXP_TAG_SPECIAL;
 				break;
 			}
 		} else {
-			tag = 3;			/* Empty */
+			tag = FP_EXP_TAG_EMPTY;
 		}
-		ret |= (tag << (2 * i));
-		twd = twd >> 1;
+		ret |= tag << (2 * i);
 	}
 	return ret;
 }
 
 /*
- * FPU state interaction.
+ * FXSR floating point environment conversions.
  */
 
-unsigned short get_fpu_cwd(struct task_struct *tsk)
+static void convert_from_fxsr(struct user_i387_ia32_struct *env,
+			      struct task_struct *tsk)
 {
-	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.cwd;
-	} else {
-		return (unsigned short)tsk->thread.i387.fsave.cwd;
-	}
-}
+	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
+	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
+	int i;
 
-unsigned short get_fpu_swd(struct task_struct *tsk)
-{
-	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.swd;
+	env->cwd = fxsave->cwd | 0xffff0000u;
+	env->swd = fxsave->swd | 0xffff0000u;
+	env->twd = twd_fxsr_to_i387(fxsave);
+
+#ifdef CONFIG_X86_64
+	env->fip = fxsave->rip;
+	env->foo = fxsave->rdp;
+	if (tsk == current) {
+		/*
+		 * should be actually ds/cs at fpu exception time, but
+		 * that information is not available in 64bit mode.
+		 */
+		asm("mov %%ds,%0" : "=r" (env->fos));
+		asm("mov %%cs,%0" : "=r" (env->fcs));
 	} else {
-		return (unsigned short)tsk->thread.i387.fsave.swd;
+		struct pt_regs *regs = task_pt_regs(tsk);
+		env->fos = 0xffff0000 | tsk->thread.ds;
+		env->fcs = regs->cs;
 	}
-}
-
-#if 0
-unsigned short get_fpu_twd(struct task_struct *tsk)
-{
-	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.twd;
-	} else {
-		return (unsigned short)tsk->thread.i387.fsave.twd;
-	}
-}
-#endif /* 0 */
+#else
+	env->fip = fxsave->fip;
+	env->fcs = fxsave->fcs;
+	env->foo = fxsave->foo;
+	env->fos = fxsave->fos;
+#endif
 
-unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
-	if (cpu_has_xmm) {
-		return tsk->thread.i387.fxsave.mxcsr;
-	} else {
-		return 0x1f80;
-	}
+	for (i = 0; i < 8; ++i)
+		memcpy(&to[i], &from[i], sizeof(to[0]));
 }
 
-#if 0
+static void convert_to_fxsr(struct task_struct *tsk,
+			    const struct user_i387_ia32_struct *env)
 
-void set_fpu_cwd(struct task_struct *tsk, unsigned short cwd)
 {
-	if (cpu_has_fxsr) {
-		tsk->thread.i387.fxsave.cwd = cwd;
-	} else {
-		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
-	}
-}
+	struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
+	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
+	int i;
 
-void set_fpu_swd(struct task_struct *tsk, unsigned short swd)
-{
-	if (cpu_has_fxsr) {
-		tsk->thread.i387.fxsave.swd = swd;
-	} else {
-		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
-	}
-}
+	fxsave->cwd = env->cwd;
+	fxsave->swd = env->swd;
+	fxsave->twd = twd_i387_to_fxsr(env->twd);
+	fxsave->fop = (u16) ((u32) env->fcs >> 16);
+#ifdef CONFIG_X86_64
+	fxsave->rip = env->fip;
+	fxsave->rdp = env->foo;
+	/* cs and ds ignored */
+#else
+	fxsave->fip = env->fip;
+	fxsave->fcs = (env->fcs & 0xffff);
+	fxsave->foo = env->foo;
+	fxsave->fos = env->fos;
+#endif
 
-void set_fpu_twd(struct task_struct *tsk, unsigned short twd)
-{
-	if (cpu_has_fxsr) {
-		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
-	} else {
-		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
-	}
+	for (i = 0; i < 8; ++i)
+		memcpy(&to[i], &from[i], sizeof(from[0]));
 }
 
-#endif /* 0 */
-
-/*
- * FXSR floating point environment conversions.
- */
-
-static int convert_fxsr_to_user(struct _fpstate __user *buf,
-				struct i387_fxsave_struct *fxsave)
+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       void *kbuf, void __user *ubuf)
 {
-	unsigned long env[7];
-	struct _fpreg __user *to;
-	struct _fpxreg *from;
-	int i;
+	struct user_i387_ia32_struct env;
 
-	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
-	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
-	env[2] = twd_fxsr_to_i387(fxsave);
-	env[3] = fxsave->fip;
-	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
-	env[5] = fxsave->foo;
-	env[6] = fxsave->fos;
+	if (!HAVE_HWFP)
+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-	if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
-		return 1;
+	unlazy_fpu(target);
 
-	to = &buf->_st[0];
-	from = (struct _fpxreg *) &fxsave->st_space[0];
-	for (i = 0; i < 8; i++, to++, from++) {
-		unsigned long __user *t = (unsigned long __user *)to;
-		unsigned long *f = (unsigned long *)from;
+	if (!cpu_has_fxsr)
+		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					   &target->thread.i387.fsave, 0, -1);
 
-		if (__put_user(*f, t) ||
-		    __put_user(*(f + 1), t + 1) ||
-		    __put_user(from->exponent, &to->exponent))
-			return 1;
+	if (kbuf && pos == 0 && count == sizeof(env)) {
+		convert_from_fxsr(kbuf, target);
+		return 0;
 	}
-	return 0;
+
+	convert_from_fxsr(&env, target);
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
 }
 
-static int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
-				  struct _fpstate __user *buf)
+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
 {
-	unsigned long env[7];
-	struct _fpxreg *to;
-	struct _fpreg __user *from;
-	int i;
+	struct user_i387_ia32_struct env;
+	int ret;
 
-	if (__copy_from_user(env, buf, 7 * sizeof(long)))
-		return 1;
+	if (!HAVE_HWFP)
+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
-	fxsave->swd = (unsigned short)(env[1] & 0xffff);
-	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
-	fxsave->fip = env[3];
-	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
-	fxsave->fcs = (env[4] & 0xffff);
-	fxsave->foo = env[5];
-	fxsave->fos = env[6];
-
-	to = (struct _fpxreg *) &fxsave->st_space[0];
-	from = &buf->_st[0];
-	for (i = 0; i < 8; i++, to++, from++) {
-		unsigned long *t = (unsigned long *)to;
-		unsigned long __user *f = (unsigned long __user *)from;
-
-		if (__get_user(*t, f) ||
-		    __get_user(*(t + 1), f + 1) ||
-		    __get_user(to->exponent, &from->exponent))
-			return 1;
-	}
-	return 0;
+	unlazy_fpu(target);
+	set_stopped_child_used_math(target);
+
+	if (!cpu_has_fxsr)
+		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					  &target->thread.i387.fsave, 0, -1);
+
+	if (pos > 0 || count < sizeof(env))
+		convert_from_fxsr(&env, target);
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+	if (!ret)
+		convert_to_fxsr(target, &env);
+
+	return ret;
 }
 
 /*
  * Signal frame handlers.
  */
 
-static inline int save_i387_fsave(struct _fpstate __user *buf)
+static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
 
@@ -309,14 +365,16 @@ static inline int save_i387_fsave(struct _fpstate __user *buf)
 	return 1;
 }
 
-static int save_i387_fxsave(struct _fpstate __user *buf)
+static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
+	struct user_i387_ia32_struct env;
 	int err = 0;
 
 	unlazy_fpu(tsk);
 
-	if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave))
+	convert_from_fxsr(&env, tsk);
+	if (__copy_to_user(buf, &env, sizeof(env)))
 		return -1;
 
 	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
@@ -330,7 +388,7 @@ static int save_i387_fxsave(struct _fpstate __user *buf)
 	return 1;
 }
 
-int save_i387(struct _fpstate __user *buf)
+int save_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	if (!used_math())
 		return 0;
@@ -347,11 +405,13 @@ int save_i387(struct _fpstate __user *buf)
 			return save_i387_fsave(buf);
 		}
 	} else {
-		return save_i387_soft(&current->thread.i387.soft, buf);
+		return fpregs_soft_get(current, NULL,
+				       0, sizeof(struct user_i387_ia32_struct),
+				       NULL, buf) ? -1 : 1;
 	}
 }
 
-static inline int restore_i387_fsave(struct _fpstate __user *buf)
+static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
 	clear_fpu(tsk);
@@ -359,19 +419,23 @@ static inline int restore_i387_fsave(struct _fpstate __user *buf)
 			       sizeof(struct i387_fsave_struct));
 }
 
-static int restore_i387_fxsave(struct _fpstate __user *buf)
+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
 	int err;
 	struct task_struct *tsk = current;
+	struct user_i387_ia32_struct env;
 	clear_fpu(tsk);
 	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
 			       sizeof(struct i387_fxsave_struct));
 	/* mxcsr reserved bits must be masked to zero for security reasons */
 	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-	return err ? 1 : convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
+	if (err || __copy_from_user(&env, buf, sizeof(env)))
+		return 1;
+	convert_to_fxsr(tsk, &env);
+	return 0;
 }
 
-int restore_i387(struct _fpstate __user *buf)
+int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	int err;
 
@@ -382,101 +446,52 @@ int restore_i387(struct _fpstate __user *buf)
 			err = restore_i387_fsave(buf);
 		}
 	} else {
-		err = restore_i387_soft(&current->thread.i387.soft, buf);
+		err = fpregs_soft_set(current, NULL,
+				      0, sizeof(struct user_i387_ia32_struct),
+				      NULL, buf) != 0;
 	}
 	set_used_math();
 	return err;
 }
 
-/*
- * ptrace request handlers.
- */
+#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
 
-static inline int get_fpregs_fsave(struct user_i387_struct __user *buf,
-				   struct task_struct *tsk)
-{
-	return __copy_to_user(buf, &tsk->thread.i387.fsave,
-			      sizeof(struct user_i387_struct));
-}
-
-static inline int get_fpregs_fxsave(struct user_i387_struct __user *buf,
-				    struct task_struct *tsk)
-{
-	return convert_fxsr_to_user((struct _fpstate __user *)buf,
-				    &tsk->thread.i387.fxsave);
-}
+#ifdef CONFIG_X86_64
 
 int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
 {
-	if (HAVE_HWFP) {
-		if (cpu_has_fxsr) {
-			return get_fpregs_fxsave(buf, tsk);
-		} else {
-			return get_fpregs_fsave(buf, tsk);
-		}
-	} else {
-		return save_i387_soft(&tsk->thread.i387.soft,
-				      (struct _fpstate __user *)buf);
-	}
+	return xfpregs_get(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
-static inline int set_fpregs_fsave(struct task_struct *tsk,
-				   struct user_i387_struct __user *buf)
+int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
 {
-	return __copy_from_user(&tsk->thread.i387.fsave, buf,
-				sizeof(struct user_i387_struct));
+	return xfpregs_set(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
-static inline int set_fpregs_fxsave(struct task_struct *tsk,
-				    struct user_i387_struct __user *buf)
+#else
+
+int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
 {
-	return convert_fxsr_from_user(&tsk->thread.i387.fxsave,
-				      (struct _fpstate __user *)buf);
+	return fpregs_get(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
 int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
 {
-	if (HAVE_HWFP) {
-		if (cpu_has_fxsr) {
-			return set_fpregs_fxsave(tsk, buf);
-		} else {
-			return set_fpregs_fsave(tsk, buf);
-		}
-	} else {
-		return restore_i387_soft(&tsk->thread.i387.soft,
-					 (struct _fpstate __user *)buf);
-	}
+	return fpregs_set(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
 int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *tsk)
 {
-	if (cpu_has_fxsr) {
-		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
-				   sizeof(struct user_fxsr_struct)))
-			return -EFAULT;
-		return 0;
-	} else {
-		return -EIO;
-	}
+	return xfpregs_get(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
 int set_fpxregs(struct task_struct *tsk, struct user_fxsr_struct __user *buf)
 {
-	int ret = 0;
-
-	if (cpu_has_fxsr) {
-		if (__copy_from_user(&tsk->thread.i387.fxsave, buf,
-				     sizeof(struct user_fxsr_struct)))
-			ret = -EFAULT;
-		/* mxcsr reserved bits must be masked to zero
-		 * for security reasons */
-		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-	} else {
-		ret = -EIO;
-	}
-	return ret;
+	return xfpregs_get(tsk, NULL, 0, sizeof(*buf), NULL, buf);
 }
 
+#endif
+
 /*
  * FPU state for core dumps.
  */
@@ -538,7 +553,7 @@ int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
 }
 
 int dump_task_extended_fpu(struct task_struct *tsk,
-			   struct user_fxsr_struct *fpu)
+			   struct user32_fxsr_struct *fpu)
 {
 	int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;
 