author | Cyrill Gorcunov <gorcunov@gmail.com> | 2008-01-30 07:31:26 -0500
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:31:26 -0500
commit | 3b095a04e71243bd0f1679c04f1e8d73a3c9c5a9 (patch) |
tree | fb6162129ca9a1594496233444a88a4740a7a43c /arch/x86/kernel/i387_32.c |
parent | 3c233d1334ffc8de63a4b6a6a86c40961aed335e (diff) |
x86: cleanup i387_32.c according to checkpatch
clean up checkpatch warnings/errors on i387_32.c
The old and new i387_32.s (asm listings) were checked with diff to
be identical so it's safe to apply this patch.
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
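The verification described in the commit message (run checkpatch, then compare the generated assembly before and after the cleanup) can be reproduced with a sequence along these lines; the make invocation, temporary paths and patch file name are illustrative assumptions, not details recorded in the commit:

    # report the style problems checkpatch finds in the file
    ./scripts/checkpatch.pl --file arch/x86/kernel/i387_32.c

    # build the assembly listing before the change and keep a copy
    make arch/x86/kernel/i387_32.s
    cp arch/x86/kernel/i387_32.s /tmp/i387_32-before.s

    # apply the cleanup (patch file name is hypothetical) and rebuild the listing
    git am x86-cleanup-i387_32-checkpatch.patch
    make arch/x86/kernel/i387_32.s

    # empty diff output means the cleanup did not change code generation
    diff /tmp/i387_32-before.s arch/x86/kernel/i387_32.s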
Diffstat (limited to 'arch/x86/kernel/i387_32.c')
-rw-r--r-- | arch/x86/kernel/i387_32.c | 291 |
1 files changed, 149 insertions, 142 deletions
diff --git a/arch/x86/kernel/i387_32.c b/arch/x86/kernel/i387_32.c
index 7d2e12f6c78b..bebe03463461 100644
--- a/arch/x86/kernel/i387_32.c
+++ b/arch/x86/kernel/i387_32.c
@@ -29,11 +29,13 @@ void mxcsr_feature_mask_init(void)
 	unsigned long mask = 0;
 	clts();
 	if (cpu_has_fxsr) {
-		memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+		memset(&current->thread.i387.fxsave, 0,
+			sizeof(struct i387_fxsave_struct));
 		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
 		mask = current->thread.i387.fxsave.mxcsr_mask;
-		if (mask == 0) mask = 0x0000ffbf;
+		if (mask == 0)
+			mask = 0x0000ffbf;
 	}
 	mxcsr_feature_mask &= mask;
 	stts();
 }
@@ -47,18 +49,21 @@ void mxcsr_feature_mask_init(void)
 void init_fpu(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+		memset(&tsk->thread.i387.fxsave, 0,
+			sizeof(struct i387_fxsave_struct));
 		tsk->thread.i387.fxsave.cwd = 0x37f;
 		if (cpu_has_xmm)
 			tsk->thread.i387.fxsave.mxcsr = 0x1f80;
 	} else {
-		memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
+		memset(&tsk->thread.i387.fsave, 0,
+			sizeof(struct i387_fsave_struct));
 		tsk->thread.i387.fsave.cwd = 0xffff037fu;
 		tsk->thread.i387.fsave.swd = 0xffff0000u;
 		tsk->thread.i387.fsave.twd = 0xffffffffu;
 		tsk->thread.i387.fsave.fos = 0xffff0000u;
 	}
-	/* only the device not available exception or ptrace can call init_fpu */
+	/* only the device not available exception
+	 * or ptrace can call init_fpu */
 	set_stopped_child_used_math(tsk);
 }
 
@@ -83,21 +88,22 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin);
  * FPU tag word conversions.
  */
 
-static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
+static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
 {
 	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
 
 	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
 	tmp = ~twd;
-	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
 	/* and move the valid bits to the lower byte. */
 	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
 	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
 	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-	return tmp;
+
+	return tmp;
 }
 
-static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
+static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 {
 	struct _fpxreg *st = NULL;
 	unsigned long tos = (fxsave->swd >> 11) & 7;
@@ -108,26 +114,26 @@ static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave
 
 #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
 
-	for ( i = 0 ; i < 8 ; i++ ) {
-		if ( twd & 0x1 ) {
-			st = FPREG_ADDR( fxsave, (i - tos) & 7 );
+	for (i = 0; i < 8; i++) {
+		if (twd & 0x1) {
+			st = FPREG_ADDR(fxsave, (i - tos) & 7);
 
-			switch ( st->exponent & 0x7fff ) {
+			switch (st->exponent & 0x7fff) {
 			case 0x7fff:
 				tag = 2; /* Special */
 				break;
 			case 0x0000:
-				if ( !st->significand[0] &&
+				if (!st->significand[0] &&
 				    !st->significand[1] &&
 				    !st->significand[2] &&
-				    !st->significand[3] ) {
+				    !st->significand[3]) {
 					tag = 1; /* Zero */
 				} else {
 					tag = 2; /* Special */
 				}
 				break;
 			default:
-				if ( st->significand[3] & 0x8000 ) {
+				if (st->significand[3] & 0x8000) {
 					tag = 0; /* Valid */
 				} else {
 					tag = 2; /* Special */
@@ -147,18 +153,18 @@ static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave
  * FPU state interaction.
  */
 
-unsigned short get_fpu_cwd( struct task_struct *tsk )
+unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		return tsk->thread.i387.fxsave.cwd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.cwd;
 	}
 }
 
-unsigned short get_fpu_swd( struct task_struct *tsk )
+unsigned short get_fpu_swd(struct task_struct *tsk)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		return tsk->thread.i387.fxsave.swd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.swd;
@@ -166,9 +172,9 @@ unsigned short get_fpu_swd( struct task_struct *tsk )
 }
 
 #if 0
-unsigned short get_fpu_twd( struct task_struct *tsk )
+unsigned short get_fpu_twd(struct task_struct *tsk)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		return tsk->thread.i387.fxsave.twd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.twd;
@@ -176,9 +182,9 @@ unsigned short get_fpu_twd( struct task_struct *tsk )
 }
 #endif /* 0 */
 
-unsigned short get_fpu_mxcsr( struct task_struct *tsk )
+unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
-	if ( cpu_has_xmm ) {
+	if (cpu_has_xmm) {
 		return tsk->thread.i387.fxsave.mxcsr;
 	} else {
 		return 0x1f80;
@@ -187,27 +193,27 @@ unsigned short get_fpu_mxcsr( struct task_struct *tsk )
 
 #if 0
 
-void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
+void set_fpu_cwd(struct task_struct *tsk, unsigned short cwd)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		tsk->thread.i387.fxsave.cwd = cwd;
 	} else {
 		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
 	}
 }
 
-void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
+void set_fpu_swd(struct task_struct *tsk, unsigned short swd)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		tsk->thread.i387.fxsave.swd = swd;
 	} else {
 		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
 	}
 }
 
-void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
+void set_fpu_twd(struct task_struct *tsk, unsigned short twd)
 {
-	if ( cpu_has_fxsr ) {
+	if (cpu_has_fxsr) {
 		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
 	} else {
 		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
@@ -220,8 +226,8 @@ void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
  * FXSR floating point environment conversions.
  */
 
-static int convert_fxsr_to_user( struct _fpstate __user *buf,
-				  struct i387_fxsave_struct *fxsave )
+static int convert_fxsr_to_user(struct _fpstate __user *buf,
+				struct i387_fxsave_struct *fxsave)
 {
 	unsigned long env[7];
 	struct _fpreg __user *to;
@@ -236,32 +242,32 @@ static int convert_fxsr_to_user( struct _fpstate __user *buf,
 	env[5] = fxsave->foo;
 	env[6] = fxsave->fos;
 
-	if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
+	if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
 		return 1;
 
 	to = &buf->_st[0];
 	from = (struct _fpxreg *) &fxsave->st_space[0];
-	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
+	for (i = 0; i < 8; i++, to++, from++) {
 		unsigned long __user *t = (unsigned long __user *)to;
 		unsigned long *f = (unsigned long *)from;
 
 		if (__put_user(*f, t) ||
 		    __put_user(*(f + 1), t + 1) ||
 		    __put_user(from->exponent, &to->exponent))
 			return 1;
 	}
 	return 0;
 }
 
-static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
-				    struct _fpstate __user *buf )
+static int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
+				  struct _fpstate __user *buf)
 {
 	unsigned long env[7];
 	struct _fpxreg *to;
 	struct _fpreg __user *from;
 	int i;
 
-	if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
+	if (__copy_from_user(env, buf, 7 * sizeof(long)))
 		return 1;
 
 	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
@@ -275,13 +281,13 @@ static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
 
 	to = (struct _fpxreg *) &fxsave->st_space[0];
 	from = &buf->_st[0];
-	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
+	for (i = 0; i < 8; i++, to++, from++) {
 		unsigned long *t = (unsigned long *)to;
 		unsigned long __user *f = (unsigned long __user *)from;
 
 		if (__get_user(*t, f) ||
 		    __get_user(*(t + 1), f + 1) ||
 		    __get_user(to->exponent, &from->exponent))
 			return 1;
 	}
 	return 0;
@@ -291,42 +297,42 @@ static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
  * Signal frame handlers.
  */
 
-static inline int save_i387_fsave( struct _fpstate __user *buf )
+static inline int save_i387_fsave(struct _fpstate __user *buf)
 {
 	struct task_struct *tsk = current;
 
-	unlazy_fpu( tsk );
+	unlazy_fpu(tsk);
 	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
-	if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
-			     sizeof(struct i387_fsave_struct) ) )
+	if (__copy_to_user(buf, &tsk->thread.i387.fsave,
+			   sizeof(struct i387_fsave_struct)))
 		return -1;
 	return 1;
 }
 
-static int save_i387_fxsave( struct _fpstate __user *buf )
+static int save_i387_fxsave(struct _fpstate __user *buf)
 {
 	struct task_struct *tsk = current;
 	int err = 0;
 
-	unlazy_fpu( tsk );
+	unlazy_fpu(tsk);
 
-	if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
+	if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave))
 		return -1;
 
-	err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
-	err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
-	if ( err )
+	err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
+	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
+	if (err)
 		return -1;
 
-	if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
-			     sizeof(struct i387_fxsave_struct) ) )
+	if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
+			   sizeof(struct i387_fxsave_struct)))
 		return -1;
 	return 1;
 }
 
-int save_i387( struct _fpstate __user *buf )
+int save_i387(struct _fpstate __user *buf)
 {
-	if ( !used_math() )
+	if (!used_math())
 		return 0;
 
 	/* This will cause a "finit" to be triggered by the next
@@ -334,49 +340,49 @@ int save_i387( struct _fpstate __user *buf )
 	 */
 	clear_used_math();
 
-	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			return save_i387_fxsave( buf );
+	if (HAVE_HWFP) {
+		if (cpu_has_fxsr) {
+			return save_i387_fxsave(buf);
 		} else {
-			return save_i387_fsave( buf );
+			return save_i387_fsave(buf);
 		}
 	} else {
-		return save_i387_soft( &current->thread.i387.soft, buf );
+		return save_i387_soft(&current->thread.i387.soft, buf);
 	}
 }
 
-static inline int restore_i387_fsave( struct _fpstate __user *buf )
+static inline int restore_i387_fsave(struct _fpstate __user *buf)
 {
 	struct task_struct *tsk = current;
-	clear_fpu( tsk );
-	return __copy_from_user( &tsk->thread.i387.fsave, buf,
-				 sizeof(struct i387_fsave_struct) );
+	clear_fpu(tsk);
+	return __copy_from_user(&tsk->thread.i387.fsave, buf,
+				sizeof(struct i387_fsave_struct));
 }
 
-static int restore_i387_fxsave( struct _fpstate __user *buf )
+static int restore_i387_fxsave(struct _fpstate __user *buf)
 {
 	int err;
 	struct task_struct *tsk = current;
-	clear_fpu( tsk );
-	err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
-				sizeof(struct i387_fxsave_struct) );
+	clear_fpu(tsk);
+	err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
+			       sizeof(struct i387_fxsave_struct));
 	/* mxcsr reserved bits must be masked to zero for security reasons */
 	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-	return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
+	return err ? 1 : convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
 }
 
-int restore_i387( struct _fpstate __user *buf )
+int restore_i387(struct _fpstate __user *buf)
 {
 	int err;
 
-	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			err = restore_i387_fxsave( buf );
+	if (HAVE_HWFP) {
+		if (cpu_has_fxsr) {
+			err = restore_i387_fxsave(buf);
 		} else {
-			err = restore_i387_fsave( buf );
+			err = restore_i387_fsave(buf);
 		}
 	} else {
-		err = restore_i387_soft( &current->thread.i387.soft, buf );
+		err = restore_i387_soft(&current->thread.i387.soft, buf);
 	}
 	set_used_math();
 	return err;
@@ -386,67 +392,67 @@ int restore_i387( struct _fpstate __user *buf )
  * ptrace request handlers.
  */
 
-static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
-				    struct task_struct *tsk )
+static inline int get_fpregs_fsave(struct user_i387_struct __user *buf,
+				   struct task_struct *tsk)
 {
-	return __copy_to_user( buf, &tsk->thread.i387.fsave,
-			       sizeof(struct user_i387_struct) );
+	return __copy_to_user(buf, &tsk->thread.i387.fsave,
+			      sizeof(struct user_i387_struct));
 }
 
-static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
-				      struct task_struct *tsk )
+static inline int get_fpregs_fxsave(struct user_i387_struct __user *buf,
+				    struct task_struct *tsk)
 {
-	return convert_fxsr_to_user( (struct _fpstate __user *)buf,
-				     &tsk->thread.i387.fxsave );
+	return convert_fxsr_to_user((struct _fpstate __user *)buf,
+				    &tsk->thread.i387.fxsave);
 }
 
-int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
+int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
 {
-	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			return get_fpregs_fxsave( buf, tsk );
+	if (HAVE_HWFP) {
+		if (cpu_has_fxsr) {
+			return get_fpregs_fxsave(buf, tsk);
 		} else {
-			return get_fpregs_fsave( buf, tsk );
+			return get_fpregs_fsave(buf, tsk);
 		}
 	} else {
-		return save_i387_soft( &tsk->thread.i387.soft,
-				       (struct _fpstate __user *)buf );
+		return save_i387_soft(&tsk->thread.i387.soft,
+				      (struct _fpstate __user *)buf);
 	}
 }
 
-static inline int set_fpregs_fsave( struct task_struct *tsk,
-				    struct user_i387_struct __user *buf )
+static inline int set_fpregs_fsave(struct task_struct *tsk,
+				   struct user_i387_struct __user *buf)
 {
-	return __copy_from_user( &tsk->thread.i387.fsave, buf,
-				 sizeof(struct user_i387_struct) );
+	return __copy_from_user(&tsk->thread.i387.fsave, buf,
+				sizeof(struct user_i387_struct));
 }
 
-static inline int set_fpregs_fxsave( struct task_struct *tsk,
-				     struct user_i387_struct __user *buf )
+static inline int set_fpregs_fxsave(struct task_struct *tsk,
+				    struct user_i387_struct __user *buf)
 {
-	return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
-				       (struct _fpstate __user *)buf );
+	return convert_fxsr_from_user(&tsk->thread.i387.fxsave,
+				      (struct _fpstate __user *)buf);
 }
 
-int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
+int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
 {
-	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			return set_fpregs_fxsave( tsk, buf );
+	if (HAVE_HWFP) {
+		if (cpu_has_fxsr) {
+			return set_fpregs_fxsave(tsk, buf);
 		} else {
-			return set_fpregs_fsave( tsk, buf );
+			return set_fpregs_fsave(tsk, buf);
 		}
 	} else {
-		return restore_i387_soft( &tsk->thread.i387.soft,
-					  (struct _fpstate __user *)buf );
+		return restore_i387_soft(&tsk->thread.i387.soft,
+					 (struct _fpstate __user *)buf);
 	}
 }
 
-int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
+int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *tsk)
 {
-	if ( cpu_has_fxsr ) {
-		if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
-				    sizeof(struct user_fxsr_struct) ))
+	if (cpu_has_fxsr) {
+		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
+				   sizeof(struct user_fxsr_struct)))
 			return -EFAULT;
 		return 0;
 	} else {
@@ -454,15 +460,16 @@ int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
 	}
 }
 
-int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
+int set_fpxregs(struct task_struct *tsk, struct user_fxsr_struct __user *buf)
 {
 	int ret = 0;
 
-	if ( cpu_has_fxsr ) {
-		if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
-				      sizeof(struct user_fxsr_struct) ))
+	if (cpu_has_fxsr) {
+		if (__copy_from_user(&tsk->thread.i387.fxsave, buf,
+				     sizeof(struct user_fxsr_struct)))
 			ret = -EFAULT;
-		/* mxcsr reserved bits must be masked to zero for security reasons */
+		/* mxcsr reserved bits must be masked to zero
+		 * for security reasons */
 		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
 	} else {
 		ret = -EIO;
@@ -474,41 +481,40 @@ int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
  * FPU state for core dumps.
  */
 
-static inline void copy_fpu_fsave( struct task_struct *tsk,
-				   struct user_i387_struct *fpu )
+static inline void copy_fpu_fsave(struct task_struct *tsk,
+				  struct user_i387_struct *fpu)
 {
-	memcpy( fpu, &tsk->thread.i387.fsave,
-		sizeof(struct user_i387_struct) );
+	memcpy(fpu, &tsk->thread.i387.fsave,
+	       sizeof(struct user_i387_struct));
 }
 
-static inline void copy_fpu_fxsave( struct task_struct *tsk,
-				    struct user_i387_struct *fpu )
+static inline void copy_fpu_fxsave(struct task_struct *tsk,
+				   struct user_i387_struct *fpu)
 {
 	unsigned short *to;
 	unsigned short *from;
 	int i;
 
-	memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );
+	memcpy(fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long));
 
 	to = (unsigned short *)&fpu->st_space[0];
 	from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
-	for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
-		memcpy( to, from, 5 * sizeof(unsigned short) );
-	}
+	for (i = 0; i < 8; i++, to += 5, from += 8)
+		memcpy(to, from, 5 * sizeof(unsigned short));
 }
 
-int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
 {
 	int fpvalid;
 	struct task_struct *tsk = current;
 
 	fpvalid = !!used_math();
-	if ( fpvalid ) {
-		unlazy_fpu( tsk );
-		if ( cpu_has_fxsr ) {
-			copy_fpu_fxsave( tsk, fpu );
+	if (fpvalid) {
+		unlazy_fpu(tsk);
+		if (cpu_has_fxsr) {
+			copy_fpu_fxsave(tsk, fpu);
 		} else {
-			copy_fpu_fsave( tsk, fpu );
+			copy_fpu_fsave(tsk, fpu);
 		}
 	}
 
@@ -531,7 +537,8 @@ int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
 	return fpvalid;
 }
 
-int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
+int dump_task_extended_fpu(struct task_struct *tsk,
+			   struct user_fxsr_struct *fpu)
 {
 	int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;
 