author      Arnd Bergmann <arnd@arndb.de>   2012-02-24 07:39:18 -0500
committer   Arnd Bergmann <arnd@arndb.de>   2012-02-24 07:39:18 -0500
commit      fdc24d4ba20499febb90ff17d3b75674026712f8 (patch)
tree        83cebb162add24be7b395090b4daca4bd752641b /arch/x86
parent      a5f17d1f4c2831b9b9bf8b1a537cdbac995d6e13 (diff)
parent      059289b260826deb43601644a7ad39c2608e6861 (diff)
Merge branch 'vexpress-dt-v3.3-rc4' of git://git.linaro.org/people/pawelmoll/linux into next/dt
* 'vexpress-dt-v3.3-rc4' of git://git.linaro.org/people/pawelmoll/linux: (573 commits)
ARM: vexpress: Add Device Tree for V2P-CA15 core tile (TC1 variant)
ARM: vexpress: Add Device Tree for V2P-CA9 core tile
ARM: vexpress: Add Device Tree for V2P-CA5s core tile
ARM: vexpress: Motherboard RS1 memory map support
ARM: vexpress: Add Device Tree support
ARM: vexpress: Use FDT data in platform SMP calls
ARM: versatile: Map local timers using Device Tree when possible
ARM: vexpress: Get rid of MMIO_P2V
This adds full device tree boot support for the versatile express
platform, which has been awaited for a long time.
Conflicts:
arch/arm/mach-vexpress/core.h
The definition of AMBA_DEVICE was removed in one branch, and the
definition of MMIO_P2V was removed in the other branch.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cmpxchg.h              |   6
-rw-r--r--  arch/x86/include/asm/i387.h                 | 284
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h          |  16
-rw-r--r--  arch/x86/include/asm/processor.h            |   1
-rw-r--r--  arch/x86/include/asm/thread_info.h          |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c   |   1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c  |   2
-rw-r--r--  arch/x86/kernel/dumpstack.c                 |   3
-rw-r--r--  arch/x86/kernel/dumpstack_64.c              |   8
-rw-r--r--  arch/x86/kernel/process_32.c                |  25
-rw-r--r--  arch/x86/kernel/process_64.c                |  29
-rw-r--r--  arch/x86/kernel/reboot.c                    |  36
-rw-r--r--  arch/x86/kernel/traps.c                     |  41
-rw-r--r--  arch/x86/kernel/xsave.c                     |  12
-rw-r--r--  arch/x86/kvm/emulate.c                      |  51
-rw-r--r--  arch/x86/kvm/vmx.c                          |   2
-rw-r--r--  arch/x86/kvm/x86.c                          |  45
-rw-r--r--  arch/x86/mm/fault.c                         |   4
-rw-r--r--  arch/x86/pci/xen.c                          |   2
-rw-r--r--  arch/x86/xen/smp.c                          |   7
20 files changed, 425 insertions, 152 deletions
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 0c9fa2745f13..b3b733262909 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
 
 #ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new) \
-	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
+	__cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define sync_cmpxchg(ptr, old, new) \
-	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define cmpxchg_local(ptr, old, new) \
-	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 #endif
 
 /*
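One reason the move from sizeof(*ptr) to sizeof(*(ptr)) in this hunk matters is macro hygiene: once the macro argument is a pointer *expression* rather than a plain identifier, the unparenthesized form parses differently. A minimal userspace sketch of the difference (illustrative only, not part of this commit; the macro names are made up):

    #include <stdio.h>

    #define SIZE_UNWRAPPED(ptr)  sizeof(*ptr)    /* SIZE_UNWRAPPED(p + 1) -> sizeof(*p + 1)   */
    #define SIZE_WRAPPED(ptr)    sizeof(*(ptr))  /* SIZE_WRAPPED(p + 1)   -> sizeof(*(p + 1)) */

    int main(void)
    {
            char buf[8], *p = buf;

            /* *p + 1 undergoes integer promotion to int: typically prints 4 */
            printf("%zu\n", SIZE_UNWRAPPED(p + 1));
            /* *(p + 1) is a char: prints 1 */
            printf("%zu\n", SIZE_WRAPPED(p + 1));
            return 0;
    }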
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345b..a850b4d8d14d 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void __math_state_restore(struct task_struct *);
+extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif	/* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
 		 * xsave header may indicate the init state of the FP.
 		 */
 		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-			return;
+			return 1;
 	} else if (use_fxsr()) {
 		fpu_fxsave(fpu);
 	} else {
 		asm volatile("fnsave %[fx]; fwait"
 			     : [fx] "=m" (fpu->state->fsave));
-		return;
+		return 0;
 	}
 
-	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+	/*
+	 * If exceptions are pending, we need to clear them so
+	 * that we don't randomly get exceptions later.
+	 *
+	 * FIXME! Is this perhaps only true for the old-style
+	 * irq13 case? Maybe we could leave the x87 state
+	 * intact otherwise?
+	 */
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
 		asm volatile("fnclex");
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
+		return 0;
+	}
+	return 1;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
 {
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
+	return fpu_save_init(&tsk->thread.fpu);
 }
 
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 }
 
 /*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+	return tsk->thread.has_fpu;
+}
 
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		__save_init_fpu(tsk);
-		stts();
-	} else
-		tsk->fpu_counter = 0;
+	tsk->thread.has_fpu = 0;
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.has_fpu = 1;
 }
 
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+	__thread_clear_has_fpu(tsk);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+	clts();
+	__thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+#define fpu_lazy_restore(tsk) (0)
+#define fpu_lazy_state_intact(tsk) do { } while (0)
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (__save_init_fpu(old))
+			fpu_lazy_state_intact(old);
+		__thread_clear_has_fpu(old);
+		old->fpu_counter++;
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else
+			stts();
+	} else {
+		old->fpu_counter = 0;
+		if (fpu.preload) {
+			if (fpu_lazy_restore(new))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			__thread_fpu_begin(new);
+		}
+	}
+	return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+	if (fpu.preload)
+		__math_state_restore(new);
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(tsk);
 	}
 }
 
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+	return !__thread_has_fpu(current) &&
+		(read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+	struct pt_regs *regs = get_irq_regs();
+	return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+	return !in_interrupt() ||
+		interrupted_user_mode() ||
+		interrupted_kernel_fpu_idle();
+}
+
 static inline void kernel_fpu_begin(void)
 {
-	struct thread_info *me = current_thread_info();
+	struct task_struct *me = current;
+
+	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
-	if (me->status & TS_USEDFPU)
-		__save_init_fpu(me->task);
-	else
+	if (__thread_has_fpu(me)) {
+		__save_init_fpu(me);
+		__thread_clear_has_fpu(me);
+		/* We do 'stts()' in kernel_fpu_end() */
+	} else
 		clts();
 }
 
@@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
-static inline bool irq_fpu_usable(void)
-{
-	struct pt_regs *regs;
-
-	return !in_interrupt() || !(regs = get_irq_regs()) || \
-		user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
@@ -363,20 +493,64 @@ static inline void irq_ts_restore(int TS_state)
 }
 
 /*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+	return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	__thread_fpu_end(current);
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu())
+		__thread_fpu_begin(current);
+	preempt_enable();
+}
+
+/*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	stts();
+	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk)
 {
 	preempt_disable();
-	__unlazy_fpu(tsk);
+	if (__thread_has_fpu(tsk)) {
+		__save_init_fpu(tsk);
+		__thread_fpu_end(tsk);
+	} else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
 
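For context, the irq_fpu_usable()/kernel_fpu_begin()/kernel_fpu_end() trio reworked above is the interface in-kernel SIMD users (e.g. crypto and RAID code) are expected to follow. A minimal sketch of that pattern (hypothetical driver code, not part of this commit):

    /* Sketch: only touch SSE/AVX registers when the helpers say it is safe */
    static void copy_block(void *dst, const void *src, size_t len)
    {
            if (irq_fpu_usable()) {
                    kernel_fpu_begin();     /* saves the owning task's FPU state; TS is clear after this */
                    /* ... SIMD-accelerated copy would go here ... */
                    kernel_fpu_end();       /* sets CR0.TS again via stts() and re-enables preemption */
            } else {
                    memcpy(dst, src, len);  /* plain integer fallback */
            }
    }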
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ab4092e3214e..7b9cfc4878af 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -190,6 +190,9 @@ struct x86_emulate_ops {
 	int (*intercept)(struct x86_emulate_ctxt *ctxt,
 			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
+
+	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
 			       X86EMUL_MODE_PROT64)
 
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+
 enum x86_intercept_stage {
 	X86_ICTP_NONE = 0,	/* Allow zero-init to not match anything */
 	X86_ICPT_PRE_EXCEPT,
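The X86EMUL_CPUID_VENDOR_* magic numbers are simply the 12-character CPUID vendor string packed into the EBX, EDX, ECX registers as little-endian 32-bit words. A standalone sketch of the round trip (illustrative only, not part of this commit):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order */
            unsigned int ebx = 0x756e6547;  /* "Genu" */
            unsigned int edx = 0x49656e69;  /* "ineI" */
            unsigned int ecx = 0x6c65746e;  /* "ntel" */
            char vendor[13];

            memcpy(vendor + 0, &ebx, 4);
            memcpy(vendor + 4, &edx, 4);
            memcpy(vendor + 8, &ecx, 4);
            vendor[12] = '\0';
            printf("%s\n", vendor);         /* "GenuineIntel" on a little-endian host */
            return 0;
    }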
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
+	unsigned long		has_fpu;
 	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task
-					   this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 #define TS_POLLING		0x0004	/* idle task polling need_resched,
 					   skip sending interrupt */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 73da6b64f5b7..d6bd49faa40c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
-	WARN_ON_ONCE(cpuc->enabled);
 
 	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
 		intel_pmu_lbr_enable(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce96..47a7e63bfe54 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	WARN_ON_ONCE(cpuc->enabled);
-
 	/*
 	 * Reset the LBR stack if we changed task context to
 	 * avoid data leaks.
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 1aae78f775fc..4025fe4f928f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -252,7 +252,8 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 	unsigned short ss;
 	unsigned long sp;
 #endif
-	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+	printk(KERN_DEFAULT
+	       "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
 #endif
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6d728d9284bd..17107bd6e1f0 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -129,7 +129,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	if (!stack) {
 		if (regs)
 			stack = (unsigned long *)regs->sp;
-		else if (task && task != current)
+		else if (task != current)
 			stack = (unsigned long *)task->thread.sp;
 		else
 			stack = &dummy;
@@ -269,11 +269,11 @@ void show_registers(struct pt_regs *regs)
 		unsigned char c;
 		u8 *ip;
 
-		printk(KERN_EMERG "Stack:\n");
+		printk(KERN_DEFAULT "Stack:\n");
 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
-				   0, KERN_EMERG);
+				   0, KERN_DEFAULT);
 
-		printk(KERN_EMERG "Code: ");
+		printk(KERN_DEFAULT "Code: ");
 
 		ip = (u8 *)regs->ip - code_prologue;
 		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
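The KERN_EMERG to KERN_DEFAULT switches here (and the matching ones in dumpstack.c above and mm/fault.c further down) presumably drop the body of an oops dump to the default message loglevel, so it follows the configured console loglevel instead of being forced out as an emergency. For reference, the severity is just a printk() prefix (standard kernel API, shown as a sketch):

    printk(KERN_EMERG   "CR2: %016lx\n", address);  /* highest severity, always hits the console */
    printk(KERN_DEFAULT "Stack:\n");                /* logged at default_message_loglevel instead */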
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cda..80bfe1ab0031 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -299,22 +299,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	__unlazy_fpu(prev_p);
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p);
 
 	/*
 	 * Reload esp0.
@@ -354,11 +343,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		       task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/* If we're going to preload the fpu context, make sure clts
-	   is run while we're batching the cpu state updates. */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -368,15 +352,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
-	if (preload_fpu)
-		__math_state_restore();
-
 	/*
 	 * Restore %gs if needed (which is common)
 	 */
 	if (prev->gs | next->gs)
 		lazy_load_gs(next->gs);
 
+	switch_fpu_finish(next_p, fpu);
+
 	percpu_write(current_task, next_p);
 
 	return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..1fd94bc4279d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,18 +386,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -427,13 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
-	/* Make sure cpu is ready for new context */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -474,6 +458,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
+	switch_fpu_finish(next_p, fpu);
+
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -492,13 +478,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		       task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/*
-	 * Preload the FPU context, now that we've determined that the
-	 * task is likely to be using it.
-	 */
-	if (preload_fpu)
-		__math_state_restore();
-
 	return prev_p;
 }
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 37a458b521a6..d840e69a853c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -39,6 +39,14 @@ static int reboot_mode;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
+/* This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+static int reboot_default = 1;
+
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
 static int reboot_cpu = -1;
 #endif
@@ -67,6 +75,12 @@ bool port_cf9_safe = false;
 static int __init reboot_setup(char *str)
 {
 	for (;;) {
+		/* Having anything passed on the command line via
+		 * reboot= will cause us to disable DMI checking
+		 * below.
+		 */
+		reboot_default = 0;
+
 		switch (*str) {
 		case 'w':
 			reboot_mode = 0x1234;
@@ -295,14 +309,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 	},
-	{	/* Handle problems with rebooting on VersaLogic Menlow boards */
-		.callback = set_bios_reboot,
-		.ident = "VersaLogic Menlow based board",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
-			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
-		},
-	},
 	{	/* Handle reboot issue on Acer Aspire one */
 		.callback = set_kbd_reboot,
 		.ident = "Acer Aspire One A110",
@@ -316,7 +322,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 
 static int __init reboot_init(void)
 {
-	dmi_check_system(reboot_dmi_table);
+	/* Only do the DMI check if reboot_type hasn't been overridden
+	 * on the command line
+	 */
+	if (reboot_default) {
+		dmi_check_system(reboot_dmi_table);
+	}
 	return 0;
 }
 core_initcall(reboot_init);
@@ -465,7 +476,12 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 
 static int __init pci_reboot_init(void)
 {
-	dmi_check_system(pci_reboot_dmi_table);
+	/* Only do the DMI check if reboot_type hasn't been overridden
+	 * on the command line
+	 */
+	if (reboot_default) {
+		dmi_check_system(pci_reboot_dmi_table);
+	}
 	return 0;
 }
 core_initcall(pci_reboot_init);
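The practical effect of the new reboot_default flag: passing any reboot= argument now disables the DMI quirk tables entirely, so a machine broken by a faulty quirk can be overridden from the boot loader rather than by recompiling. For example (standard reboot= syntax; see Documentation/kernel-parameters.txt):

    reboot=acpi    # keep the ACPI method even on a DMI-matched board
    reboot=kbd     # or force the keyboard-controller reset instead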
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af2067..77da5b475ad2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,25 +571,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use.  Used during context switch.
+ * This gets called with the process already owning the
+ * FPU state, and with CR0.TS cleared. It just needs to
+ * restore the FPU register state.
  */
-void __math_state_restore(void)
+void __math_state_restore(struct task_struct *tsk)
 {
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
+	/* We need a safe address that is cheap to find and that is already
+	   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)
+
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending. Clear the x87 state here by setting it to fixed
+	   values. safe_address is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (safe_address));
 
 	/*
 	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
 	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
-		stts();
+		__thread_fpu_end(tsk);
 		force_sig(SIGSEGV, tsk);
 		return;
 	}
-
-	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-	tsk->fpu_counter++;
 }
 
 /*
@@ -599,13 +608,12 @@ void __math_state_restore(void)
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (eg with local
+ * local interrupts as in the case of do_device_not_available).
  */
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
 {
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 
 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
@@ -622,9 +630,10 @@ asmlinkage void math_state_restore(void)
 		local_irq_disable();
 	}
 
-	clts();				/* Allow maths ops (or we recurse) */
+	__thread_fpu_begin(tsk);
+	__math_state_restore(tsk);
 
-	__math_state_restore();
+	tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	if (!fx)
 		return;
 
-	BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+	BUG_ON(__thread_has_fpu(tsk));
 
 	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
 	if (!used_math())
 		return 0;
 
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (user_has_fpu()) {
 		if (use_xsave())
 			err = xsave_user(buf);
 		else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
 
 		if (err)
 			return err;
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		user_fpu_end();
 	} else {
 		sanitize_i387_state(tsk);
 		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
 			return err;
 	}
 
-	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-		clts();
-		task_thread_info(current)->status |= TS_USEDFPU;
-	}
+	user_fpu_begin();
 	if (use_xsave())
 		err = restore_user_xstate(buf);
 	else
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05a562b85025..0982507b962a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 	ss->p = 1;
 }
 
+static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
+{
+	struct x86_emulate_ops *ops = ctxt->ops;
+	u32 eax, ebx, ecx, edx;
+
+	/*
+	 * syscall should always be enabled in longmode - so only become
+	 * vendor specific (cpuid) if other modes are active...
+	 */
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return true;
+
+	eax = 0x00000000;
+	ecx = 0x00000000;
+	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
+		/*
+		 * Intel ("GenuineIntel")
+		 * remark: Intel CPUs only support "syscall" in 64bit
+		 * longmode. Also an 64bit guest with a
+		 * 32bit compat-app running will #UD !! While this
+		 * behaviour can be fixed (by emulating) into AMD
+		 * response - CPUs of AMD can't behave like Intel.
+		 */
+		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+			return false;
+
+		/* AMD ("AuthenticAMD") */
+		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+			return true;
+
+		/* AMD ("AMDisbetter!") */
+		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+			return true;
+	}
+
+	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
+	return false;
+}
+
 static int em_syscall(struct x86_emulate_ctxt *ctxt)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 	    ctxt->mode == X86EMUL_MODE_VM86)
 		return emulate_ud(ctxt);
 
+	if (!(em_syscall_is_enabled(ctxt)))
+		return emulate_ud(ctxt);
+
 	ops->get_msr(ctxt, MSR_EFER, &efer);
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
+	if (!(efer & EFER_SCE))
+		return emulate_ud(ctxt);
+
 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
 	msr_data >>= 32;
 	cs_sel = (u16)(msr_data & 0xfffc);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (current_thread_info()->status & TS_USEDFPU)
+	if (__thread_has_fpu(current))
 		clts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 14d6cadc4ba6..9cbfc0698118 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	bool pr = false;
+
 	switch (msr) {
 	case MSR_EFER:
 		return set_efer(vcpu, data);
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
 			"0x%x data 0x%llx\n", msr, data);
 		break;
+	case MSR_P6_PERFCTR0:
+	case MSR_P6_PERFCTR1:
+		pr = true;
+	case MSR_P6_EVNTSEL0:
+	case MSR_P6_EVNTSEL1:
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_set_msr(vcpu, msr, data);
+
+		if (pr || data != 0)
+			pr_unimpl(vcpu, "disabled perfctr wrmsr: "
+				"0x%x data 0x%llx\n", msr, data);
+		break;
 	case MSR_K7_CLK_CTL:
 		/*
 		 * Ignore all writes to this no longer documented MSR.
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_FAM10H_MMIO_CONF_BASE:
 		data = 0;
 		break;
+	case MSR_P6_PERFCTR0:
+	case MSR_P6_PERFCTR1:
+	case MSR_P6_EVNTSEL0:
+	case MSR_P6_EVNTSEL1:
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
+		data = 0;
+		break;
 	case MSR_IA32_UCODE_REV:
 		data = 0x100000000ULL;
 		break;
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
 }
 
+static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+	struct kvm_cpuid_entry2 *cpuid = NULL;
+
+	if (eax && ecx)
+		cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
+					     *eax, *ecx);
+
+	if (cpuid) {
+		*eax = cpuid->eax;
+		*ecx = cpuid->ecx;
+		if (ebx)
+			*ebx = cpuid->ebx;
+		if (edx)
+			*edx = cpuid->edx;
+		return true;
+	}
+
+	return false;
+}
+
 static struct x86_emulate_ops emulate_ops = {
 	.read_std            = kvm_read_guest_virt_system,
 	.write_std           = kvm_write_guest_virt_system,
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = {
 	.get_fpu             = emulator_get_fpu,
 	.put_fpu             = emulator_put_fpu,
 	.intercept           = emulator_intercept,
+	.get_cpuid           = emulator_get_cpuid,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d74824a708d..f0b4caf85c1a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -673,7 +673,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 
 	stackend = end_of_stack(tsk);
 	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
-		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
 	tsk->thread.cr2		= address;
 	tsk->thread.trap_no	= 14;
@@ -684,7 +684,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 		sig = 0;
 
 	/* Executive summary in case the body of the oops scrolled away */
-	printk(KERN_EMERG "CR2: %016lx\n", address);
+	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 
 	oops_end(flags, regs, sig);
 }
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978e..d99346ea8fdb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
 
 int __init pci_xen_hvm_init(void)
 {
-	if (!xen_feature(XENFEAT_hvm_pirqs))
+	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
 		return 0;
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe4..501d4e0244ba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 	cpu_bringup();
+	/*
+	 * Balance out the preempt calls - as we are running in cpu_idle
+	 * loop which has been called at bootup from cpu_bringup_and_idle.
+	 * The cpucpu_bringup_and_idle called cpu_bringup which made a
+	 * preempt_disable() So this preempt_enable will balance it out.
+	 */
+	preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */