Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/i387.h | 307
-rw-r--r-- | arch/x86/include/asm/kvm_emulate.h | 16
-rw-r--r-- | arch/x86/include/asm/perf_event.h | 8
-rw-r--r-- | arch/x86/include/asm/processor.h | 2
-rw-r--r-- | arch/x86/include/asm/thread_info.h | 2
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 5
-rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c | 44
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 3
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.h | 8
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_amd.c | 37
-rw-r--r-- | arch/x86/kernel/entry_64.S | 9
-rw-r--r-- | arch/x86/kernel/microcode_amd.c | 1
-rw-r--r-- | arch/x86/kernel/process_32.c | 26
-rw-r--r-- | arch/x86/kernel/process_64.c | 30
-rw-r--r-- | arch/x86/kernel/traps.c | 43
-rw-r--r-- | arch/x86/kernel/xsave.c | 12
-rw-r--r-- | arch/x86/kvm/emulate.c | 51
-rw-r--r-- | arch/x86/kvm/svm.c | 5
-rw-r--r-- | arch/x86/kvm/vmx.c | 2
-rw-r--r-- | arch/x86/kvm/x86.c | 45
-rw-r--r-- | arch/x86/pci/xen.c | 2
-rw-r--r-- | arch/x86/xen/enlighten.c | 6
-rw-r--r-- | arch/x86/xen/mmu.c | 8
-rw-r--r-- | arch/x86/xen/smp.c | 7
25 files changed, 517 insertions, 164 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345b..247904945d3f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,10 +29,11 @@ extern unsigned int sig_xstate_size; | |||
29 | extern void fpu_init(void); | 29 | extern void fpu_init(void); |
30 | extern void mxcsr_feature_mask_init(void); | 30 | extern void mxcsr_feature_mask_init(void); |
31 | extern int init_fpu(struct task_struct *child); | 31 | extern int init_fpu(struct task_struct *child); |
32 | extern asmlinkage void math_state_restore(void); | 32 | extern void math_state_restore(void); |
33 | extern void __math_state_restore(void); | ||
34 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | 33 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); |
35 | 34 | ||
35 | DECLARE_PER_CPU(struct task_struct *, fpu_owner_task); | ||
36 | |||
36 | extern user_regset_active_fn fpregs_active, xfpregs_active; | 37 | extern user_regset_active_fn fpregs_active, xfpregs_active; |
37 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, | 38 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, |
38 | xstateregs_get; | 39 | xstateregs_get; |
@@ -212,19 +213,11 @@ static inline void fpu_fxsave(struct fpu *fpu) | |||
212 | 213 | ||
213 | #endif /* CONFIG_X86_64 */ | 214 | #endif /* CONFIG_X86_64 */ |
214 | 215 | ||
215 | /* We need a safe address that is cheap to find and that is already | ||
216 | in L1 during context switch. The best choices are unfortunately | ||
217 | different for UP and SMP */ | ||
218 | #ifdef CONFIG_SMP | ||
219 | #define safe_address (__per_cpu_offset[0]) | ||
220 | #else | ||
221 | #define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER]) | ||
222 | #endif | ||
223 | |||
224 | /* | 216 | /* |
225 | * These must be called with preempt disabled | 217 | * These must be called with preempt disabled. Returns |
218 | * 'true' if the FPU state is still intact. | ||
226 | */ | 219 | */ |
227 | static inline void fpu_save_init(struct fpu *fpu) | 220 | static inline int fpu_save_init(struct fpu *fpu) |
228 | { | 221 | { |
229 | if (use_xsave()) { | 222 | if (use_xsave()) { |
230 | fpu_xsave(fpu); | 223 | fpu_xsave(fpu); |
@@ -233,33 +226,33 @@ static inline void fpu_save_init(struct fpu *fpu) | |||
233 | * xsave header may indicate the init state of the FP. | 226 | * xsave header may indicate the init state of the FP. |
234 | */ | 227 | */ |
235 | if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) | 228 | if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) |
236 | return; | 229 | return 1; |
237 | } else if (use_fxsr()) { | 230 | } else if (use_fxsr()) { |
238 | fpu_fxsave(fpu); | 231 | fpu_fxsave(fpu); |
239 | } else { | 232 | } else { |
240 | asm volatile("fnsave %[fx]; fwait" | 233 | asm volatile("fnsave %[fx]; fwait" |
241 | : [fx] "=m" (fpu->state->fsave)); | 234 | : [fx] "=m" (fpu->state->fsave)); |
242 | return; | 235 | return 0; |
243 | } | 236 | } |
244 | 237 | ||
245 | if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) | 238 | /* |
239 | * If exceptions are pending, we need to clear them so | ||
240 | * that we don't randomly get exceptions later. | ||
241 | * | ||
242 | * FIXME! Is this perhaps only true for the old-style | ||
243 | * irq13 case? Maybe we could leave the x87 state | ||
244 | * intact otherwise? | ||
245 | */ | ||
246 | if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) { | ||
246 | asm volatile("fnclex"); | 247 | asm volatile("fnclex"); |
247 | 248 | return 0; | |
248 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 249 | } |
249 | is pending. Clear the x87 state here by setting it to fixed | 250 | return 1; |
250 | values. safe_address is a random variable that should be in L1 */ | ||
251 | alternative_input( | ||
252 | ASM_NOP8 ASM_NOP2, | ||
253 | "emms\n\t" /* clear stack tags */ | ||
254 | "fildl %P[addr]", /* set F?P to defined value */ | ||
255 | X86_FEATURE_FXSAVE_LEAK, | ||
256 | [addr] "m" (safe_address)); | ||
257 | } | 251 | } |
258 | 252 | ||
259 | static inline void __save_init_fpu(struct task_struct *tsk) | 253 | static inline int __save_init_fpu(struct task_struct *tsk) |
260 | { | 254 | { |
261 | fpu_save_init(&tsk->thread.fpu); | 255 | return fpu_save_init(&tsk->thread.fpu); |
262 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
263 | } | 256 | } |
264 | 257 | ||
265 | static inline int fpu_fxrstor_checking(struct fpu *fpu) | 258 | static inline int fpu_fxrstor_checking(struct fpu *fpu) |
@@ -277,44 +270,212 @@ static inline int fpu_restore_checking(struct fpu *fpu) | |||
277 | 270 | ||
278 | static inline int restore_fpu_checking(struct task_struct *tsk) | 271 | static inline int restore_fpu_checking(struct task_struct *tsk) |
279 | { | 272 | { |
273 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
274 | is pending. Clear the x87 state here by setting it to fixed | ||
275 | values. "m" is a random variable that should be in L1 */ | ||
276 | alternative_input( | ||
277 | ASM_NOP8 ASM_NOP2, | ||
278 | "emms\n\t" /* clear stack tags */ | ||
279 | "fildl %P[addr]", /* set F?P to defined value */ | ||
280 | X86_FEATURE_FXSAVE_LEAK, | ||
281 | [addr] "m" (tsk->thread.fpu.has_fpu)); | ||
282 | |||
280 | return fpu_restore_checking(&tsk->thread.fpu); | 283 | return fpu_restore_checking(&tsk->thread.fpu); |
281 | } | 284 | } |
282 | 285 | ||
283 | /* | 286 | /* |
284 | * Signal frame handlers... | 287 | * Software FPU state helpers. Careful: these need to |
288 | * be preemption protection *and* they need to be | ||
289 | * properly paired with the CR0.TS changes! | ||
285 | */ | 290 | */ |
286 | extern int save_i387_xstate(void __user *buf); | 291 | static inline int __thread_has_fpu(struct task_struct *tsk) |
287 | extern int restore_i387_xstate(void __user *buf); | 292 | { |
293 | return tsk->thread.fpu.has_fpu; | ||
294 | } | ||
288 | 295 | ||
289 | static inline void __unlazy_fpu(struct task_struct *tsk) | 296 | /* Must be paired with an 'stts' after! */ |
297 | static inline void __thread_clear_has_fpu(struct task_struct *tsk) | ||
290 | { | 298 | { |
291 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 299 | tsk->thread.fpu.has_fpu = 0; |
292 | __save_init_fpu(tsk); | 300 | percpu_write(fpu_owner_task, NULL); |
293 | stts(); | 301 | } |
294 | } else | 302 | |
295 | tsk->fpu_counter = 0; | 303 | /* Must be paired with a 'clts' before! */ |
304 | static inline void __thread_set_has_fpu(struct task_struct *tsk) | ||
305 | { | ||
306 | tsk->thread.fpu.has_fpu = 1; | ||
307 | percpu_write(fpu_owner_task, tsk); | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Encapsulate the CR0.TS handling together with the | ||
312 | * software flag. | ||
313 | * | ||
314 | * These generally need preemption protection to work, | ||
315 | * do try to avoid using these on their own. | ||
316 | */ | ||
317 | static inline void __thread_fpu_end(struct task_struct *tsk) | ||
318 | { | ||
319 | __thread_clear_has_fpu(tsk); | ||
320 | stts(); | ||
321 | } | ||
322 | |||
323 | static inline void __thread_fpu_begin(struct task_struct *tsk) | ||
324 | { | ||
325 | clts(); | ||
326 | __thread_set_has_fpu(tsk); | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * FPU state switching for scheduling. | ||
331 | * | ||
332 | * This is a two-stage process: | ||
333 | * | ||
334 | * - switch_fpu_prepare() saves the old state and | ||
335 | * sets the new state of the CR0.TS bit. This is | ||
336 | * done within the context of the old process. | ||
337 | * | ||
338 | * - switch_fpu_finish() restores the new state as | ||
339 | * necessary. | ||
340 | */ | ||
341 | typedef struct { int preload; } fpu_switch_t; | ||
342 | |||
343 | /* | ||
344 | * FIXME! We could do a totally lazy restore, but we need to | ||
345 | * add a per-cpu "this was the task that last touched the FPU | ||
346 | * on this CPU" variable, and the task needs to have a "I last | ||
347 | * touched the FPU on this CPU" and check them. | ||
348 | * | ||
349 | * We don't do that yet, so "fpu_lazy_restore()" always returns | ||
350 | * false, but some day.. | ||
351 | */ | ||
352 | static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) | ||
353 | { | ||
354 | return new == percpu_read_stable(fpu_owner_task) && | ||
355 | cpu == new->thread.fpu.last_cpu; | ||
356 | } | ||
357 | |||
358 | static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu) | ||
359 | { | ||
360 | fpu_switch_t fpu; | ||
361 | |||
362 | fpu.preload = tsk_used_math(new) && new->fpu_counter > 5; | ||
363 | if (__thread_has_fpu(old)) { | ||
364 | if (!__save_init_fpu(old)) | ||
365 | cpu = ~0; | ||
366 | old->thread.fpu.last_cpu = cpu; | ||
367 | old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */ | ||
368 | |||
369 | /* Don't change CR0.TS if we just switch! */ | ||
370 | if (fpu.preload) { | ||
371 | new->fpu_counter++; | ||
372 | __thread_set_has_fpu(new); | ||
373 | prefetch(new->thread.fpu.state); | ||
374 | } else | ||
375 | stts(); | ||
376 | } else { | ||
377 | old->fpu_counter = 0; | ||
378 | old->thread.fpu.last_cpu = ~0; | ||
379 | if (fpu.preload) { | ||
380 | new->fpu_counter++; | ||
381 | if (fpu_lazy_restore(new, cpu)) | ||
382 | fpu.preload = 0; | ||
383 | else | ||
384 | prefetch(new->thread.fpu.state); | ||
385 | __thread_fpu_begin(new); | ||
386 | } | ||
387 | } | ||
388 | return fpu; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * By the time this gets called, we've already cleared CR0.TS and | ||
393 | * given the process the FPU if we are going to preload the FPU | ||
394 | * state - all we need to do is to conditionally restore the register | ||
395 | * state itself. | ||
396 | */ | ||
397 | static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu) | ||
398 | { | ||
399 | if (fpu.preload) { | ||
400 | if (unlikely(restore_fpu_checking(new))) | ||
401 | __thread_fpu_end(new); | ||
402 | } | ||
296 | } | 403 | } |
297 | 404 | ||
405 | /* | ||
406 | * Signal frame handlers... | ||
407 | */ | ||
408 | extern int save_i387_xstate(void __user *buf); | ||
409 | extern int restore_i387_xstate(void __user *buf); | ||
410 | |||
298 | static inline void __clear_fpu(struct task_struct *tsk) | 411 | static inline void __clear_fpu(struct task_struct *tsk) |
299 | { | 412 | { |
300 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 413 | if (__thread_has_fpu(tsk)) { |
301 | /* Ignore delayed exceptions from user space */ | 414 | /* Ignore delayed exceptions from user space */ |
302 | asm volatile("1: fwait\n" | 415 | asm volatile("1: fwait\n" |
303 | "2:\n" | 416 | "2:\n" |
304 | _ASM_EXTABLE(1b, 2b)); | 417 | _ASM_EXTABLE(1b, 2b)); |
305 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 418 | __thread_fpu_end(tsk); |
306 | stts(); | ||
307 | } | 419 | } |
308 | } | 420 | } |
309 | 421 | ||
422 | /* | ||
423 | * Were we in an interrupt that interrupted kernel mode? | ||
424 | * | ||
425 | * We can do a kernel_fpu_begin/end() pair *ONLY* if that | ||
426 | * pair does nothing at all: the thread must not have fpu (so | ||
427 | * that we don't try to save the FPU state), and TS must | ||
428 | * be set (so that the clts/stts pair does nothing that is | ||
429 | * visible in the interrupted kernel thread). | ||
430 | */ | ||
431 | static inline bool interrupted_kernel_fpu_idle(void) | ||
432 | { | ||
433 | return !__thread_has_fpu(current) && | ||
434 | (read_cr0() & X86_CR0_TS); | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Were we in user mode (or vm86 mode) when we were | ||
439 | * interrupted? | ||
440 | * | ||
441 | * Doing kernel_fpu_begin/end() is ok if we are running | ||
442 | * in an interrupt context from user mode - we'll just | ||
443 | * save the FPU state as required. | ||
444 | */ | ||
445 | static inline bool interrupted_user_mode(void) | ||
446 | { | ||
447 | struct pt_regs *regs = get_irq_regs(); | ||
448 | return regs && user_mode_vm(regs); | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Can we use the FPU in kernel mode with the | ||
453 | * whole "kernel_fpu_begin/end()" sequence? | ||
454 | * | ||
455 | * It's always ok in process context (ie "not interrupt") | ||
456 | * but it is sometimes ok even from an irq. | ||
457 | */ | ||
458 | static inline bool irq_fpu_usable(void) | ||
459 | { | ||
460 | return !in_interrupt() || | ||
461 | interrupted_user_mode() || | ||
462 | interrupted_kernel_fpu_idle(); | ||
463 | } | ||
464 | |||
310 | static inline void kernel_fpu_begin(void) | 465 | static inline void kernel_fpu_begin(void) |
311 | { | 466 | { |
312 | struct thread_info *me = current_thread_info(); | 467 | struct task_struct *me = current; |
468 | |||
469 | WARN_ON_ONCE(!irq_fpu_usable()); | ||
313 | preempt_disable(); | 470 | preempt_disable(); |
314 | if (me->status & TS_USEDFPU) | 471 | if (__thread_has_fpu(me)) { |
315 | __save_init_fpu(me->task); | 472 | __save_init_fpu(me); |
316 | else | 473 | __thread_clear_has_fpu(me); |
474 | /* We do 'stts()' in kernel_fpu_end() */ | ||
475 | } else { | ||
476 | percpu_write(fpu_owner_task, NULL); | ||
317 | clts(); | 477 | clts(); |
478 | } | ||
318 | } | 479 | } |
319 | 480 | ||
320 | static inline void kernel_fpu_end(void) | 481 | static inline void kernel_fpu_end(void) |
@@ -323,14 +484,6 @@ static inline void kernel_fpu_end(void) | |||
323 | preempt_enable(); | 484 | preempt_enable(); |
324 | } | 485 | } |
325 | 486 | ||
326 | static inline bool irq_fpu_usable(void) | ||
327 | { | ||
328 | struct pt_regs *regs; | ||
329 | |||
330 | return !in_interrupt() || !(regs = get_irq_regs()) || \ | ||
331 | user_mode(regs) || (read_cr0() & X86_CR0_TS); | ||
332 | } | ||
333 | |||
334 | /* | 487 | /* |
335 | * Some instructions like VIA's padlock instructions generate a spurious | 488 | * Some instructions like VIA's padlock instructions generate a spurious |
336 | * DNA fault but don't modify SSE registers. And these instructions | 489 | * DNA fault but don't modify SSE registers. And these instructions |
@@ -363,20 +516,64 @@ static inline void irq_ts_restore(int TS_state) | |||
363 | } | 516 | } |
364 | 517 | ||
365 | /* | 518 | /* |
519 | * The question "does this thread have fpu access?" | ||
520 | * is slightly racy, since preemption could come in | ||
521 | * and revoke it immediately after the test. | ||
522 | * | ||
523 | * However, even in that very unlikely scenario, | ||
524 | * we can just assume we have FPU access - typically | ||
525 | * to save the FP state - we'll just take a #NM | ||
526 | * fault and get the FPU access back. | ||
527 | * | ||
528 | * The actual user_fpu_begin/end() functions | ||
529 | * need to be preemption-safe, though. | ||
530 | * | ||
531 | * NOTE! user_fpu_end() must be used only after you | ||
532 | * have saved the FP state, and user_fpu_begin() must | ||
533 | * be used only immediately before restoring it. | ||
534 | * These functions do not do any save/restore on | ||
535 | * their own. | ||
536 | */ | ||
537 | static inline int user_has_fpu(void) | ||
538 | { | ||
539 | return __thread_has_fpu(current); | ||
540 | } | ||
541 | |||
542 | static inline void user_fpu_end(void) | ||
543 | { | ||
544 | preempt_disable(); | ||
545 | __thread_fpu_end(current); | ||
546 | preempt_enable(); | ||
547 | } | ||
548 | |||
549 | static inline void user_fpu_begin(void) | ||
550 | { | ||
551 | preempt_disable(); | ||
552 | if (!user_has_fpu()) | ||
553 | __thread_fpu_begin(current); | ||
554 | preempt_enable(); | ||
555 | } | ||
556 | |||
557 | /* | ||
366 | * These disable preemption on their own and are safe | 558 | * These disable preemption on their own and are safe |
367 | */ | 559 | */ |
368 | static inline void save_init_fpu(struct task_struct *tsk) | 560 | static inline void save_init_fpu(struct task_struct *tsk) |
369 | { | 561 | { |
562 | WARN_ON_ONCE(!__thread_has_fpu(tsk)); | ||
370 | preempt_disable(); | 563 | preempt_disable(); |
371 | __save_init_fpu(tsk); | 564 | __save_init_fpu(tsk); |
372 | stts(); | 565 | __thread_fpu_end(tsk); |
373 | preempt_enable(); | 566 | preempt_enable(); |
374 | } | 567 | } |
375 | 568 | ||
376 | static inline void unlazy_fpu(struct task_struct *tsk) | 569 | static inline void unlazy_fpu(struct task_struct *tsk) |
377 | { | 570 | { |
378 | preempt_disable(); | 571 | preempt_disable(); |
379 | __unlazy_fpu(tsk); | 572 | if (__thread_has_fpu(tsk)) { |
573 | __save_init_fpu(tsk); | ||
574 | __thread_fpu_end(tsk); | ||
575 | } else | ||
576 | tsk->fpu_counter = 0; | ||
380 | preempt_enable(); | 577 | preempt_enable(); |
381 | } | 578 | } |
382 | 579 | ||
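The reworked <asm/i387.h> API above is meant to be used in matched pairs. As a rough caller-side sketch (the function name and the SSE payload are hypothetical, not part of this patch), kernel code that wants to touch FPU/SSE registers is expected to do something like:

static void example_kernel_sse_work(void)
{
	if (!irq_fpu_usable())
		return;		/* fall back to a non-FPU code path */

	kernel_fpu_begin();	/* preemption off; saves current's FPU state or does clts() */
	/* ... MMX/SSE instructions may be used here ... */
	kernel_fpu_end();	/* stts() and preempt_enable() */
}

User-space state handling follows the same pairing rule: user_fpu_begin() only immediately before restoring register state, user_fpu_end() only after the state has been saved, as the comment block above spells out.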
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ab4092e3214e..7b9cfc4878af 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -190,6 +190,9 @@ struct x86_emulate_ops { | |||
190 | int (*intercept)(struct x86_emulate_ctxt *ctxt, | 190 | int (*intercept)(struct x86_emulate_ctxt *ctxt, |
191 | struct x86_instruction_info *info, | 191 | struct x86_instruction_info *info, |
192 | enum x86_intercept_stage stage); | 192 | enum x86_intercept_stage stage); |
193 | |||
194 | bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, | ||
195 | u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); | ||
193 | }; | 196 | }; |
194 | 197 | ||
195 | typedef u32 __attribute__((vector_size(16))) sse128_t; | 198 | typedef u32 __attribute__((vector_size(16))) sse128_t; |
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt { | |||
298 | #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ | 301 | #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ |
299 | X86EMUL_MODE_PROT64) | 302 | X86EMUL_MODE_PROT64) |
300 | 303 | ||
304 | /* CPUID vendors */ | ||
305 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 | ||
306 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 | ||
307 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 | ||
308 | |||
309 | #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41 | ||
310 | #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 | ||
311 | #define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273 | ||
312 | |||
313 | #define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 | ||
314 | #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e | ||
315 | #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 | ||
316 | |||
301 | enum x86_intercept_stage { | 317 | enum x86_intercept_stage { |
302 | X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ | 318 | X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ |
303 | X86_ICPT_PRE_EXCEPT, | 319 | X86_ICPT_PRE_EXCEPT, |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 9b922c136254..e8fb2c7a5f4f 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -240,4 +240,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) | |||
240 | static inline void perf_events_lapic_init(void) { } | 240 | static inline void perf_events_lapic_init(void) { } |
241 | #endif | 241 | #endif |
242 | 242 | ||
243 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) | ||
244 | extern void amd_pmu_enable_virt(void); | ||
245 | extern void amd_pmu_disable_virt(void); | ||
246 | #else | ||
247 | static inline void amd_pmu_enable_virt(void) { } | ||
248 | static inline void amd_pmu_disable_virt(void) { } | ||
249 | #endif | ||
250 | |||
243 | #endif /* _ASM_X86_PERF_EVENT_H */ | 251 | #endif /* _ASM_X86_PERF_EVENT_H */ |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..58545c97d071 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -374,6 +374,8 @@ union thread_xstate { | |||
374 | }; | 374 | }; |
375 | 375 | ||
376 | struct fpu { | 376 | struct fpu { |
377 | unsigned int last_cpu; | ||
378 | unsigned int has_fpu; | ||
377 | union thread_xstate *state; | 379 | union thread_xstate *state; |
378 | }; | 380 | }; |
379 | 381 | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void) | |||
247 | * ever touches our thread-synchronous status, so we don't | 247 | * ever touches our thread-synchronous status, so we don't |
248 | * have to worry about atomic accesses. | 248 | * have to worry about atomic accesses. |
249 | */ | 249 | */ |
250 | #define TS_USEDFPU 0x0001 /* FPU was used by this task | ||
251 | this quantum (SMP) */ | ||
252 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ | 250 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ |
253 | #define TS_POLLING 0x0004 /* idle task polling need_resched, | 251 | #define TS_POLLING 0x0004 /* idle task polling need_resched, |
254 | skip sending interrupt */ | 252 | skip sending interrupt */ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d43cad74f166..c0f7d68d318f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) = | |||
1044 | 1044 | ||
1045 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; | 1045 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
1046 | 1046 | ||
1047 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); | ||
1048 | EXPORT_PER_CPU_SYMBOL(fpu_owner_task); | ||
1049 | |||
1047 | /* | 1050 | /* |
1048 | * Special IST stacks which the CPU switches to when it calls | 1051 | * Special IST stacks which the CPU switches to when it calls |
1049 | * an IST-marked descriptor entry. Up to 7 stacks (hardware | 1052 | * an IST-marked descriptor entry. Up to 7 stacks (hardware |
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void) | |||
1111 | 1114 | ||
1112 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | 1115 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
1113 | EXPORT_PER_CPU_SYMBOL(current_task); | 1116 | EXPORT_PER_CPU_SYMBOL(current_task); |
1117 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); | ||
1118 | EXPORT_PER_CPU_SYMBOL(fpu_owner_task); | ||
1114 | 1119 | ||
1115 | #ifdef CONFIG_CC_STACKPROTECTOR | 1120 | #ifdef CONFIG_CC_STACKPROTECTOR |
1116 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | 1121 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a901..73d08ed98a64 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | |||
326 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 326 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
327 | } | 327 | } |
328 | 328 | ||
329 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, | 329 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) |
330 | int index) | ||
331 | { | 330 | { |
332 | int node; | 331 | int node; |
333 | 332 | ||
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | |||
725 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) | 724 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) |
726 | 725 | ||
727 | #ifdef CONFIG_SMP | 726 | #ifdef CONFIG_SMP |
728 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 727 | |
728 | static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | ||
729 | { | 729 | { |
730 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 730 | struct _cpuid4_info *this_leaf; |
731 | unsigned long num_threads_sharing; | 731 | int ret, i, sibling; |
732 | int index_msb, i, sibling; | ||
733 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 732 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
734 | 733 | ||
735 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 734 | ret = 0; |
735 | if (index == 3) { | ||
736 | ret = 1; | ||
736 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { | 737 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { |
737 | if (!per_cpu(ici_cpuid4_info, i)) | 738 | if (!per_cpu(ici_cpuid4_info, i)) |
738 | continue; | 739 | continue; |
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
743 | set_bit(sibling, this_leaf->shared_cpu_map); | 744 | set_bit(sibling, this_leaf->shared_cpu_map); |
744 | } | 745 | } |
745 | } | 746 | } |
746 | return; | 747 | } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) { |
748 | ret = 1; | ||
749 | for_each_cpu(i, cpu_sibling_mask(cpu)) { | ||
750 | if (!per_cpu(ici_cpuid4_info, i)) | ||
751 | continue; | ||
752 | this_leaf = CPUID4_INFO_IDX(i, index); | ||
753 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) { | ||
754 | if (!cpu_online(sibling)) | ||
755 | continue; | ||
756 | set_bit(sibling, this_leaf->shared_cpu_map); | ||
757 | } | ||
758 | } | ||
747 | } | 759 | } |
760 | |||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | ||
765 | { | ||
766 | struct _cpuid4_info *this_leaf, *sibling_leaf; | ||
767 | unsigned long num_threads_sharing; | ||
768 | int index_msb, i; | ||
769 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
770 | |||
771 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
772 | if (cache_shared_amd_cpu_map_setup(cpu, index)) | ||
773 | return; | ||
774 | } | ||
775 | |||
748 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 776 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
749 | num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; | 777 | num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; |
750 | 778 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a86322..e4eeaaf58a47 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
528 | 528 | ||
529 | sprintf(name, "threshold_bank%i", bank); | 529 | sprintf(name, "threshold_bank%i", bank); |
530 | 530 | ||
531 | #ifdef CONFIG_SMP | ||
531 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 532 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
532 | i = cpumask_first(cpu_llc_shared_mask(cpu)); | 533 | i = cpumask_first(cpu_llc_shared_mask(cpu)); |
533 | 534 | ||
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
553 | 554 | ||
554 | goto out; | 555 | goto out; |
555 | } | 556 | } |
557 | #endif | ||
556 | 558 | ||
557 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); | 559 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); |
558 | if (!b) { | 560 | if (!b) { |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3c44b712380c..f8bddb5b0600 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -988,6 +988,9 @@ static void x86_pmu_start(struct perf_event *event, int flags) | |||
988 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 988 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
989 | int idx = event->hw.idx; | 989 | int idx = event->hw.idx; |
990 | 990 | ||
991 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
992 | return; | ||
993 | |||
991 | if (WARN_ON_ONCE(idx == -1)) | 994 | if (WARN_ON_ONCE(idx == -1)) |
992 | return; | 995 | return; |
993 | 996 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 513d617b93c4..82db83b5c3bc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events { | |||
147 | /* | 147 | /* |
148 | * AMD specific bits | 148 | * AMD specific bits |
149 | */ | 149 | */ |
150 | struct amd_nb *amd_nb; | 150 | struct amd_nb *amd_nb; |
151 | /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ | ||
152 | u64 perf_ctr_virt_mask; | ||
151 | 153 | ||
152 | void *kfree_on_online; | 154 | void *kfree_on_online; |
153 | }; | 155 | }; |
@@ -425,9 +427,11 @@ void x86_pmu_disable_all(void); | |||
425 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | 427 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, |
426 | u64 enable_mask) | 428 | u64 enable_mask) |
427 | { | 429 | { |
430 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); | ||
431 | |||
428 | if (hwc->extra_reg.reg) | 432 | if (hwc->extra_reg.reg) |
429 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); | 433 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); |
430 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | 434 | wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); |
431 | } | 435 | } |
432 | 436 | ||
433 | void x86_pmu_enable_all(int added); | 437 | void x86_pmu_enable_all(int added); |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/perf_event.h> | 1 | #include <linux/perf_event.h> |
2 | #include <linux/export.h> | ||
2 | #include <linux/types.h> | 3 | #include <linux/types.h> |
3 | #include <linux/init.h> | 4 | #include <linux/init.h> |
4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu) | |||
357 | struct amd_nb *nb; | 358 | struct amd_nb *nb; |
358 | int i, nb_id; | 359 | int i, nb_id; |
359 | 360 | ||
360 | if (boot_cpu_data.x86_max_cores < 2) | 361 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; |
362 | |||
363 | if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15) | ||
361 | return; | 364 | return; |
362 | 365 | ||
363 | nb_id = amd_get_nb_id(cpu); | 366 | nb_id = amd_get_nb_id(cpu); |
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = { | |||
587 | .put_event_constraints = amd_put_event_constraints, | 590 | .put_event_constraints = amd_put_event_constraints, |
588 | 591 | ||
589 | .cpu_prepare = amd_pmu_cpu_prepare, | 592 | .cpu_prepare = amd_pmu_cpu_prepare, |
590 | .cpu_starting = amd_pmu_cpu_starting, | ||
591 | .cpu_dead = amd_pmu_cpu_dead, | 593 | .cpu_dead = amd_pmu_cpu_dead, |
592 | #endif | 594 | #endif |
595 | .cpu_starting = amd_pmu_cpu_starting, | ||
593 | }; | 596 | }; |
594 | 597 | ||
595 | __init int amd_pmu_init(void) | 598 | __init int amd_pmu_init(void) |
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void) | |||
621 | 624 | ||
622 | return 0; | 625 | return 0; |
623 | } | 626 | } |
627 | |||
628 | void amd_pmu_enable_virt(void) | ||
629 | { | ||
630 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
631 | |||
632 | cpuc->perf_ctr_virt_mask = 0; | ||
633 | |||
634 | /* Reload all events */ | ||
635 | x86_pmu_disable_all(); | ||
636 | x86_pmu_enable_all(0); | ||
637 | } | ||
638 | EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); | ||
639 | |||
640 | void amd_pmu_disable_virt(void) | ||
641 | { | ||
642 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
643 | |||
644 | /* | ||
645 | * We only mask out the Host-only bit so that host-only counting works | ||
646 | * when SVM is disabled. If someone sets up a guest-only counter when | ||
647 | * SVM is disabled the Guest-only bits still gets set and the counter | ||
648 | * will not count anything. | ||
649 | */ | ||
650 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | ||
651 | |||
652 | /* Reload all events */ | ||
653 | x86_pmu_disable_all(); | ||
654 | x86_pmu_enable_all(0); | ||
655 | } | ||
656 | EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); | ||
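Together with the __x86_pmu_enable_event() change in arch/x86/kernel/cpu/perf_event.h above, the per-cpu perf_ctr_virt_mask works roughly as follows (illustrative helper, not part of the patch):

/* What ends up in the event-select MSR once the virt mask is applied. */
static u64 effective_evtsel(u64 config, u64 enable_mask, u64 virt_mask)
{
	return (config | enable_mask) & ~virt_mask;
}

With SVM disabled, virt_mask is AMD_PERFMON_EVENTSEL_HOSTONLY, so the Host-only bit is stripped before the wrmsrl() and plain host counting keeps working; a guest-only event retains its Guest-only bit and counts nothing. With SVM enabled, virt_mask is 0 and the host/guest-only bits reach the hardware unmodified.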
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8fb..1333d9851778 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1532,10 +1532,17 @@ ENTRY(nmi) | |||
1532 | pushq_cfi %rdx | 1532 | pushq_cfi %rdx |
1533 | 1533 | ||
1534 | /* | 1534 | /* |
1535 | * If %cs was not the kernel segment, then the NMI triggered in user | ||
1536 | * space, which means it is definitely not nested. | ||
1537 | */ | ||
1538 | cmpl $__KERNEL_CS, 16(%rsp) | ||
1539 | jne first_nmi | ||
1540 | |||
1541 | /* | ||
1535 | * Check the special variable on the stack to see if NMIs are | 1542 | * Check the special variable on the stack to see if NMIs are |
1536 | * executing. | 1543 | * executing. |
1537 | */ | 1544 | */ |
1538 | cmp $1, -8(%rsp) | 1545 | cmpl $1, -8(%rsp) |
1539 | je nested_nmi | 1546 | je nested_nmi |
1540 | 1547 | ||
1541 | /* | 1548 | /* |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be9131..73465aab28f8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out: | |||
360 | static enum ucode_state | 360 | static enum ucode_state |
361 | request_microcode_user(int cpu, const void __user *buf, size_t size) | 361 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
362 | { | 362 | { |
363 | pr_info("AMD microcode update via /dev/cpu/microcode not supported\n"); | ||
364 | return UCODE_ERROR; | 363 | return UCODE_ERROR; |
365 | } | 364 | } |
366 | 365 | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cda..c08d1ff12b7c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
214 | 214 | ||
215 | task_user_gs(p) = get_user_gs(regs); | 215 | task_user_gs(p) = get_user_gs(regs); |
216 | 216 | ||
217 | p->fpu_counter = 0; | ||
217 | p->thread.io_bitmap_ptr = NULL; | 218 | p->thread.io_bitmap_ptr = NULL; |
218 | tsk = current; | 219 | tsk = current; |
219 | err = -ENOMEM; | 220 | err = -ENOMEM; |
@@ -299,22 +300,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
299 | *next = &next_p->thread; | 300 | *next = &next_p->thread; |
300 | int cpu = smp_processor_id(); | 301 | int cpu = smp_processor_id(); |
301 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 302 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
302 | bool preload_fpu; | 303 | fpu_switch_t fpu; |
303 | 304 | ||
304 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | 305 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ |
305 | 306 | ||
306 | /* | 307 | fpu = switch_fpu_prepare(prev_p, next_p, cpu); |
307 | * If the task has used fpu the last 5 timeslices, just do a full | ||
308 | * restore of the math state immediately to avoid the trap; the | ||
309 | * chances of needing FPU soon are obviously high now | ||
310 | */ | ||
311 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
312 | |||
313 | __unlazy_fpu(prev_p); | ||
314 | |||
315 | /* we're going to use this soon, after a few expensive things */ | ||
316 | if (preload_fpu) | ||
317 | prefetch(next->fpu.state); | ||
318 | 308 | ||
319 | /* | 309 | /* |
320 | * Reload esp0. | 310 | * Reload esp0. |
@@ -354,11 +344,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
354 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | 344 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) |
355 | __switch_to_xtra(prev_p, next_p, tss); | 345 | __switch_to_xtra(prev_p, next_p, tss); |
356 | 346 | ||
357 | /* If we're going to preload the fpu context, make sure clts | ||
358 | is run while we're batching the cpu state updates. */ | ||
359 | if (preload_fpu) | ||
360 | clts(); | ||
361 | |||
362 | /* | 347 | /* |
363 | * Leave lazy mode, flushing any hypercalls made here. | 348 | * Leave lazy mode, flushing any hypercalls made here. |
364 | * This must be done before restoring TLS segments so | 349 | * This must be done before restoring TLS segments so |
@@ -368,15 +353,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
368 | */ | 353 | */ |
369 | arch_end_context_switch(next_p); | 354 | arch_end_context_switch(next_p); |
370 | 355 | ||
371 | if (preload_fpu) | ||
372 | __math_state_restore(); | ||
373 | |||
374 | /* | 356 | /* |
375 | * Restore %gs if needed (which is common) | 357 | * Restore %gs if needed (which is common) |
376 | */ | 358 | */ |
377 | if (prev->gs | next->gs) | 359 | if (prev->gs | next->gs) |
378 | lazy_load_gs(next->gs); | 360 | lazy_load_gs(next->gs); |
379 | 361 | ||
362 | switch_fpu_finish(next_p, fpu); | ||
363 | |||
380 | percpu_write(current_task, next_p); | 364 | percpu_write(current_task, next_p); |
381 | 365 | ||
382 | return prev_p; | 366 | return prev_p; |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..cfa5c90c01db 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
286 | 286 | ||
287 | set_tsk_thread_flag(p, TIF_FORK); | 287 | set_tsk_thread_flag(p, TIF_FORK); |
288 | 288 | ||
289 | p->fpu_counter = 0; | ||
289 | p->thread.io_bitmap_ptr = NULL; | 290 | p->thread.io_bitmap_ptr = NULL; |
290 | 291 | ||
291 | savesegment(gs, p->thread.gsindex); | 292 | savesegment(gs, p->thread.gsindex); |
@@ -386,18 +387,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
386 | int cpu = smp_processor_id(); | 387 | int cpu = smp_processor_id(); |
387 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 388 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
388 | unsigned fsindex, gsindex; | 389 | unsigned fsindex, gsindex; |
389 | bool preload_fpu; | 390 | fpu_switch_t fpu; |
390 | 391 | ||
391 | /* | 392 | fpu = switch_fpu_prepare(prev_p, next_p, cpu); |
392 | * If the task has used fpu the last 5 timeslices, just do a full | ||
393 | * restore of the math state immediately to avoid the trap; the | ||
394 | * chances of needing FPU soon are obviously high now | ||
395 | */ | ||
396 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
397 | |||
398 | /* we're going to use this soon, after a few expensive things */ | ||
399 | if (preload_fpu) | ||
400 | prefetch(next->fpu.state); | ||
401 | 393 | ||
402 | /* | 394 | /* |
403 | * Reload esp0, LDT and the page table pointer: | 395 | * Reload esp0, LDT and the page table pointer: |
@@ -427,13 +419,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
427 | 419 | ||
428 | load_TLS(next, cpu); | 420 | load_TLS(next, cpu); |
429 | 421 | ||
430 | /* Must be after DS reload */ | ||
431 | __unlazy_fpu(prev_p); | ||
432 | |||
433 | /* Make sure cpu is ready for new context */ | ||
434 | if (preload_fpu) | ||
435 | clts(); | ||
436 | |||
437 | /* | 422 | /* |
438 | * Leave lazy mode, flushing any hypercalls made here. | 423 | * Leave lazy mode, flushing any hypercalls made here. |
439 | * This must be done before restoring TLS segments so | 424 | * This must be done before restoring TLS segments so |
@@ -474,6 +459,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
474 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 459 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
475 | prev->gsindex = gsindex; | 460 | prev->gsindex = gsindex; |
476 | 461 | ||
462 | switch_fpu_finish(next_p, fpu); | ||
463 | |||
477 | /* | 464 | /* |
478 | * Switch the PDA and FPU contexts. | 465 | * Switch the PDA and FPU contexts. |
479 | */ | 466 | */ |
@@ -492,13 +479,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
492 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) | 479 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) |
493 | __switch_to_xtra(prev_p, next_p, tss); | 480 | __switch_to_xtra(prev_p, next_p, tss); |
494 | 481 | ||
495 | /* | ||
496 | * Preload the FPU context, now that we've determined that the | ||
497 | * task is likely to be using it. | ||
498 | */ | ||
499 | if (preload_fpu) | ||
500 | __math_state_restore(); | ||
501 | |||
502 | return prev_p; | 482 | return prev_p; |
503 | } | 483 | } |
504 | 484 | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af2067..4bbe04d96744 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,41 +571,18 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) | |||
571 | } | 571 | } |
572 | 572 | ||
573 | /* | 573 | /* |
574 | * __math_state_restore assumes that cr0.TS is already clear and the | ||
575 | * fpu state is all ready for use. Used during context switch. | ||
576 | */ | ||
577 | void __math_state_restore(void) | ||
578 | { | ||
579 | struct thread_info *thread = current_thread_info(); | ||
580 | struct task_struct *tsk = thread->task; | ||
581 | |||
582 | /* | ||
583 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
584 | */ | ||
585 | if (unlikely(restore_fpu_checking(tsk))) { | ||
586 | stts(); | ||
587 | force_sig(SIGSEGV, tsk); | ||
588 | return; | ||
589 | } | ||
590 | |||
591 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | ||
592 | tsk->fpu_counter++; | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * 'math_state_restore()' saves the current math information in the | 574 | * 'math_state_restore()' saves the current math information in the |
597 | * old math state array, and gets the new ones from the current task | 575 | * old math state array, and gets the new ones from the current task |
598 | * | 576 | * |
599 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 577 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
600 | * Don't touch unless you *really* know how it works. | 578 | * Don't touch unless you *really* know how it works. |
601 | * | 579 | * |
602 | * Must be called with kernel preemption disabled (in this case, | 580 | * Must be called with kernel preemption disabled (eg with local |
603 | * local interrupts are disabled at the call-site in entry.S). | 581 | * local interrupts as in the case of do_device_not_available). |
604 | */ | 582 | */ |
605 | asmlinkage void math_state_restore(void) | 583 | void math_state_restore(void) |
606 | { | 584 | { |
607 | struct thread_info *thread = current_thread_info(); | 585 | struct task_struct *tsk = current; |
608 | struct task_struct *tsk = thread->task; | ||
609 | 586 | ||
610 | if (!tsk_used_math(tsk)) { | 587 | if (!tsk_used_math(tsk)) { |
611 | local_irq_enable(); | 588 | local_irq_enable(); |
@@ -622,9 +599,17 @@ asmlinkage void math_state_restore(void) | |||
622 | local_irq_disable(); | 599 | local_irq_disable(); |
623 | } | 600 | } |
624 | 601 | ||
625 | clts(); /* Allow maths ops (or we recurse) */ | 602 | __thread_fpu_begin(tsk); |
603 | /* | ||
604 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
605 | */ | ||
606 | if (unlikely(restore_fpu_checking(tsk))) { | ||
607 | __thread_fpu_end(tsk); | ||
608 | force_sig(SIGSEGV, tsk); | ||
609 | return; | ||
610 | } | ||
626 | 611 | ||
627 | __math_state_restore(); | 612 | tsk->fpu_counter++; |
628 | } | 613 | } |
629 | EXPORT_SYMBOL_GPL(math_state_restore); | 614 | EXPORT_SYMBOL_GPL(math_state_restore); |
630 | 615 | ||
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk) | |||
47 | if (!fx) | 47 | if (!fx) |
48 | return; | 48 | return; |
49 | 49 | ||
50 | BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); | 50 | BUG_ON(__thread_has_fpu(tsk)); |
51 | 51 | ||
52 | xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; | 52 | xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; |
53 | 53 | ||
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf) | |||
168 | if (!used_math()) | 168 | if (!used_math()) |
169 | return 0; | 169 | return 0; |
170 | 170 | ||
171 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 171 | if (user_has_fpu()) { |
172 | if (use_xsave()) | 172 | if (use_xsave()) |
173 | err = xsave_user(buf); | 173 | err = xsave_user(buf); |
174 | else | 174 | else |
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf) | |||
176 | 176 | ||
177 | if (err) | 177 | if (err) |
178 | return err; | 178 | return err; |
179 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 179 | user_fpu_end(); |
180 | stts(); | ||
181 | } else { | 180 | } else { |
182 | sanitize_i387_state(tsk); | 181 | sanitize_i387_state(tsk); |
183 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, | 182 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, |
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf) | |||
292 | return err; | 291 | return err; |
293 | } | 292 | } |
294 | 293 | ||
295 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | 294 | user_fpu_begin(); |
296 | clts(); | ||
297 | task_thread_info(current)->status |= TS_USEDFPU; | ||
298 | } | ||
299 | if (use_xsave()) | 295 | if (use_xsave()) |
300 | err = restore_user_xstate(buf); | 296 | err = restore_user_xstate(buf); |
301 | else | 297 | else |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05a562b85025..0982507b962a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, | |||
1891 | ss->p = 1; | 1891 | ss->p = 1; |
1892 | } | 1892 | } |
1893 | 1893 | ||
1894 | static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) | ||
1895 | { | ||
1896 | struct x86_emulate_ops *ops = ctxt->ops; | ||
1897 | u32 eax, ebx, ecx, edx; | ||
1898 | |||
1899 | /* | ||
1900 | * syscall should always be enabled in longmode - so only become | ||
1901 | * vendor specific (cpuid) if other modes are active... | ||
1902 | */ | ||
1903 | if (ctxt->mode == X86EMUL_MODE_PROT64) | ||
1904 | return true; | ||
1905 | |||
1906 | eax = 0x00000000; | ||
1907 | ecx = 0x00000000; | ||
1908 | if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { | ||
1909 | /* | ||
1910 | * Intel ("GenuineIntel") | ||
1911 | * remark: Intel CPUs only support "syscall" in 64bit | ||
1912 | * longmode. Also an 64bit guest with a | ||
1913 | * 32bit compat-app running will #UD !! While this | ||
1914 | * behaviour can be fixed (by emulating) into AMD | ||
1915 | * response - CPUs of AMD can't behave like Intel. | ||
1916 | */ | ||
1917 | if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && | ||
1918 | ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && | ||
1919 | edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) | ||
1920 | return false; | ||
1921 | |||
1922 | /* AMD ("AuthenticAMD") */ | ||
1923 | if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && | ||
1924 | ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && | ||
1925 | edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) | ||
1926 | return true; | ||
1927 | |||
1928 | /* AMD ("AMDisbetter!") */ | ||
1929 | if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && | ||
1930 | ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && | ||
1931 | edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) | ||
1932 | return true; | ||
1933 | } | ||
1934 | |||
1935 | /* default: (not Intel, not AMD), apply Intel's stricter rules... */ | ||
1936 | return false; | ||
1937 | } | ||
1938 | |||
1894 | static int em_syscall(struct x86_emulate_ctxt *ctxt) | 1939 | static int em_syscall(struct x86_emulate_ctxt *ctxt) |
1895 | { | 1940 | { |
1896 | struct x86_emulate_ops *ops = ctxt->ops; | 1941 | struct x86_emulate_ops *ops = ctxt->ops; |
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
1904 | ctxt->mode == X86EMUL_MODE_VM86) | 1949 | ctxt->mode == X86EMUL_MODE_VM86) |
1905 | return emulate_ud(ctxt); | 1950 | return emulate_ud(ctxt); |
1906 | 1951 | ||
1952 | if (!(em_syscall_is_enabled(ctxt))) | ||
1953 | return emulate_ud(ctxt); | ||
1954 | |||
1907 | ops->get_msr(ctxt, MSR_EFER, &efer); | 1955 | ops->get_msr(ctxt, MSR_EFER, &efer); |
1908 | setup_syscalls_segments(ctxt, &cs, &ss); | 1956 | setup_syscalls_segments(ctxt, &cs, &ss); |
1909 | 1957 | ||
1958 | if (!(efer & EFER_SCE)) | ||
1959 | return emulate_ud(ctxt); | ||
1960 | |||
1910 | ops->get_msr(ctxt, MSR_STAR, &msr_data); | 1961 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
1911 | msr_data >>= 32; | 1962 | msr_data >>= 32; |
1912 | cs_sel = (u16)(msr_data & 0xfffc); | 1963 | cs_sel = (u16)(msr_data & 0xfffc); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..e385214711cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/ftrace_event.h> | 29 | #include <linux/ftrace_event.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <asm/perf_event.h> | ||
32 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
33 | #include <asm/desc.h> | 34 | #include <asm/desc.h> |
34 | #include <asm/kvm_para.h> | 35 | #include <asm/kvm_para.h> |
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage) | |||
575 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | 576 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); |
576 | 577 | ||
577 | cpu_svm_disable(); | 578 | cpu_svm_disable(); |
579 | |||
580 | amd_pmu_disable_virt(); | ||
578 | } | 581 | } |
579 | 582 | ||
580 | static int svm_hardware_enable(void *garbage) | 583 | static int svm_hardware_enable(void *garbage) |
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage) | |||
622 | 625 | ||
623 | svm_init_erratum_383(); | 626 | svm_init_erratum_383(); |
624 | 627 | ||
628 | amd_pmu_enable_virt(); | ||
629 | |||
625 | return 0; | 630 | return 0; |
626 | } | 631 | } |
627 | 632 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
1457 | #ifdef CONFIG_X86_64 | 1457 | #ifdef CONFIG_X86_64 |
1458 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | 1458 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
1459 | #endif | 1459 | #endif |
1460 | if (current_thread_info()->status & TS_USEDFPU) | 1460 | if (__thread_has_fpu(current)) |
1461 | clts(); | 1461 | clts(); |
1462 | load_gdt(&__get_cpu_var(host_gdt)); | 1462 | load_gdt(&__get_cpu_var(host_gdt)); |
1463 | } | 1463 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 14d6cadc4ba6..9cbfc0698118 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu) | |||
1495 | 1495 | ||
1496 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 1496 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
1497 | { | 1497 | { |
1498 | bool pr = false; | ||
1499 | |||
1498 | switch (msr) { | 1500 | switch (msr) { |
1499 | case MSR_EFER: | 1501 | case MSR_EFER: |
1500 | return set_efer(vcpu, data); | 1502 | return set_efer(vcpu, data); |
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
1635 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " | 1637 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " |
1636 | "0x%x data 0x%llx\n", msr, data); | 1638 | "0x%x data 0x%llx\n", msr, data); |
1637 | break; | 1639 | break; |
1640 | case MSR_P6_PERFCTR0: | ||
1641 | case MSR_P6_PERFCTR1: | ||
1642 | pr = true; | ||
1643 | case MSR_P6_EVNTSEL0: | ||
1644 | case MSR_P6_EVNTSEL1: | ||
1645 | if (kvm_pmu_msr(vcpu, msr)) | ||
1646 | return kvm_pmu_set_msr(vcpu, msr, data); | ||
1647 | |||
1648 | if (pr || data != 0) | ||
1649 | pr_unimpl(vcpu, "disabled perfctr wrmsr: " | ||
1650 | "0x%x data 0x%llx\n", msr, data); | ||
1651 | break; | ||
1638 | case MSR_K7_CLK_CTL: | 1652 | case MSR_K7_CLK_CTL: |
1639 | /* | 1653 | /* |
1640 | * Ignore all writes to this no longer documented MSR. | 1654 | * Ignore all writes to this no longer documented MSR. |
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
1835 | case MSR_FAM10H_MMIO_CONF_BASE: | 1849 | case MSR_FAM10H_MMIO_CONF_BASE: |
1836 | data = 0; | 1850 | data = 0; |
1837 | break; | 1851 | break; |
1852 | case MSR_P6_PERFCTR0: | ||
1853 | case MSR_P6_PERFCTR1: | ||
1854 | case MSR_P6_EVNTSEL0: | ||
1855 | case MSR_P6_EVNTSEL1: | ||
1856 | if (kvm_pmu_msr(vcpu, msr)) | ||
1857 | return kvm_pmu_get_msr(vcpu, msr, pdata); | ||
1858 | data = 0; | ||
1859 | break; | ||
1838 | case MSR_IA32_UCODE_REV: | 1860 | case MSR_IA32_UCODE_REV: |
1839 | data = 0x100000000ULL; | 1861 | data = 0x100000000ULL; |
1840 | break; | 1862 | break; |
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, | |||
4180 | return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); | 4202 | return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); |
4181 | } | 4203 | } |
4182 | 4204 | ||
4205 | static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, | ||
4206 | u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) | ||
4207 | { | ||
4208 | struct kvm_cpuid_entry2 *cpuid = NULL; | ||
4209 | |||
4210 | if (eax && ecx) | ||
4211 | cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), | ||
4212 | *eax, *ecx); | ||
4213 | |||
4214 | if (cpuid) { | ||
4215 | *eax = cpuid->eax; | ||
4216 | *ecx = cpuid->ecx; | ||
4217 | if (ebx) | ||
4218 | *ebx = cpuid->ebx; | ||
4219 | if (edx) | ||
4220 | *edx = cpuid->edx; | ||
4221 | return true; | ||
4222 | } | ||
4223 | |||
4224 | return false; | ||
4225 | } | ||
4226 | |||
4183 | static struct x86_emulate_ops emulate_ops = { | 4227 | static struct x86_emulate_ops emulate_ops = { |
4184 | .read_std = kvm_read_guest_virt_system, | 4228 | .read_std = kvm_read_guest_virt_system, |
4185 | .write_std = kvm_write_guest_virt_system, | 4229 | .write_std = kvm_write_guest_virt_system, |
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = { | |||
4211 | .get_fpu = emulator_get_fpu, | 4255 | .get_fpu = emulator_get_fpu, |
4212 | .put_fpu = emulator_put_fpu, | 4256 | .put_fpu = emulator_put_fpu, |
4213 | .intercept = emulator_intercept, | 4257 | .intercept = emulator_intercept, |
4258 | .get_cpuid = emulator_get_cpuid, | ||
4214 | }; | 4259 | }; |
4215 | 4260 | ||
4216 | static void cache_all_regs(struct kvm_vcpu *vcpu) | 4261 | static void cache_all_regs(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978e..d99346ea8fdb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void) | |||
374 | 374 | ||
375 | int __init pci_xen_hvm_init(void) | 375 | int __init pci_xen_hvm_init(void) |
376 | { | 376 | { |
377 | if (!xen_feature(XENFEAT_hvm_pirqs)) | 377 | if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) |
378 | return 0; | 378 | return 0; |
379 | 379 | ||
380 | #ifdef CONFIG_ACPI | 380 | #ifdef CONFIG_ACPI |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 12eb07bfb267..4172af8ceeb3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1141,7 +1141,9 @@ asmlinkage void __init xen_start_kernel(void) | |||
1141 | 1141 | ||
1142 | /* Prevent unwanted bits from being set in PTEs. */ | 1142 | /* Prevent unwanted bits from being set in PTEs. */ |
1143 | __supported_pte_mask &= ~_PAGE_GLOBAL; | 1143 | __supported_pte_mask &= ~_PAGE_GLOBAL; |
1144 | #if 0 | ||
1144 | if (!xen_initial_domain()) | 1145 | if (!xen_initial_domain()) |
1146 | #endif | ||
1145 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | 1147 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); |
1146 | 1148 | ||
1147 | __supported_pte_mask |= _PAGE_IOMAP; | 1149 | __supported_pte_mask |= _PAGE_IOMAP; |
@@ -1204,10 +1206,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1204 | 1206 | ||
1205 | pgd = (pgd_t *)xen_start_info->pt_base; | 1207 | pgd = (pgd_t *)xen_start_info->pt_base; |
1206 | 1208 | ||
1207 | if (!xen_initial_domain()) | ||
1208 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | ||
1209 | |||
1210 | __supported_pte_mask |= _PAGE_IOMAP; | ||
1211 | /* Don't do the full vcpu_info placement stuff until we have a | 1209 | /* Don't do the full vcpu_info placement stuff until we have a |
1212 | possible map and a non-dummy shared_info. */ | 1210 | possible map and a non-dummy shared_info. */ |
1213 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; | 1211 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 58a0e46c404d..95c1cf60c669 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -415,13 +415,13 @@ static pteval_t iomap_pte(pteval_t val) | |||
415 | static pteval_t xen_pte_val(pte_t pte) | 415 | static pteval_t xen_pte_val(pte_t pte) |
416 | { | 416 | { |
417 | pteval_t pteval = pte.pte; | 417 | pteval_t pteval = pte.pte; |
418 | 418 | #if 0 | |
419 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ | 419 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ |
420 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { | 420 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { |
421 | WARN_ON(!pat_enabled); | 421 | WARN_ON(!pat_enabled); |
422 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; | 422 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; |
423 | } | 423 | } |
424 | 424 | #endif | |
425 | if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) | 425 | if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) |
426 | return pteval; | 426 | return pteval; |
427 | 427 | ||
@@ -463,7 +463,7 @@ void xen_set_pat(u64 pat) | |||
463 | static pte_t xen_make_pte(pteval_t pte) | 463 | static pte_t xen_make_pte(pteval_t pte) |
464 | { | 464 | { |
465 | phys_addr_t addr = (pte & PTE_PFN_MASK); | 465 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
466 | 466 | #if 0 | |
467 | /* If Linux is trying to set a WC pte, then map to the Xen WC. | 467 | /* If Linux is trying to set a WC pte, then map to the Xen WC. |
468 | * If _PAGE_PAT is set, then it probably means it is really | 468 | * If _PAGE_PAT is set, then it probably means it is really |
469 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope | 469 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope |
@@ -476,7 +476,7 @@ static pte_t xen_make_pte(pteval_t pte) | |||
476 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) | 476 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) |
477 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; | 477 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; |
478 | } | 478 | } |
479 | 479 | #endif | |
480 | /* | 480 | /* |
481 | * Unprivileged domains are allowed to do IOMAPpings for | 481 | * Unprivileged domains are allowed to do IOMAPpings for |
482 | * PCI passthrough, but not map ISA space. The ISA | 482 | * PCI passthrough, but not map ISA space. The ISA |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe4..501d4e0244ba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | |||
409 | play_dead_common(); | 409 | play_dead_common(); |
410 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 410 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
411 | cpu_bringup(); | 411 | cpu_bringup(); |
412 | /* | ||
413 | * Balance out the preempt calls - as we are running in cpu_idle | ||
414 | * loop which has been called at bootup from cpu_bringup_and_idle. | ||
415 | * The cpucpu_bringup_and_idle called cpu_bringup which made a | ||
416 | * preempt_disable() So this preempt_enable will balance it out. | ||
417 | */ | ||
418 | preempt_enable(); | ||
412 | } | 419 | } |
413 | 420 | ||
414 | #else /* !CONFIG_HOTPLUG_CPU */ | 421 | #else /* !CONFIG_HOTPLUG_CPU */ |