author      Thomas Gleixner <tglx@linutronix.de>    2012-03-13 11:34:48 -0400
committer   Thomas Gleixner <tglx@linutronix.de>    2012-03-13 11:35:16 -0400
commit      df8d291f28aa1e8437c8f7816328a6516379c71b (patch)
tree        26ae585aa259584ddd0d9088933547c065c37086 /arch/x86
parent      5234ffb9f74cfa8993d174782bc861dd9b7b5bfb (diff)
parent      fde7d9049e55ab85a390be7f415d74c9f62dd0f9 (diff)
Merge branch 'linus' into irq/core
Reason: Get upstream fixes integrated before further modifications.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 14
-rw-r--r--  arch/x86/include/asm/i387.h | 53
-rw-r--r--  arch/x86/include/asm/perf_event.h | 8
-rw-r--r--  arch/x86/include/asm/processor.h | 3
-rw-r--r--  arch/x86/kernel/cpu/common.c | 5
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 44
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 37
-rw-r--r--  arch/x86/kernel/entry_64.S | 9
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 1
-rw-r--r--  arch/x86/kernel/process_32.c | 3
-rw-r--r--  arch/x86/kernel/process_64.c | 3
-rw-r--r--  arch/x86/kernel/traps.c | 40
-rw-r--r--  arch/x86/kvm/svm.c | 5
-rw-r--r--  arch/x86/lib/delay.c | 4
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 4
-rw-r--r--  arch/x86/pci/acpi.c | 22
-rw-r--r--  arch/x86/xen/enlighten.c | 6
-rw-r--r--  arch/x86/xen/mmu.c | 8
20 files changed, 192 insertions, 87 deletions
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index fd843877e841..39e49091f648 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -315,6 +315,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
 	current->mm->cached_hole_size = 0;
 
+	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+	if (retval < 0) {
+		/* Someone check-me: is this error path enough? */
+		send_sig(SIGKILL, current, 0);
+		return retval;
+	}
+
 	install_exec_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 
@@ -410,13 +417,6 @@ beyond_if:
 
 	set_brk(current->mm->start_brk, current->mm->brk);
 
-	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
-	if (retval < 0) {
-		/* Someone check-me: is this error path enough? */
-		send_sig(SIGKILL, current, 0);
-		return retval;
-	}
-
 	current->mm->start_stack =
 		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
 	/* start thread */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a850b4d8d14d..247904945d3f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,10 +29,11 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern void __math_state_restore(struct task_struct *);
 extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
 				xstateregs_get;
@@ -269,6 +270,16 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending. Clear the x87 state here by setting it to fixed
+	   values. "m" is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (tsk->thread.fpu.has_fpu));
+
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
@@ -279,19 +290,21 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
  */
 static inline int __thread_has_fpu(struct task_struct *tsk)
 {
-	return tsk->thread.has_fpu;
+	return tsk->thread.fpu.has_fpu;
 }
 
 /* Must be paired with an 'stts' after! */
 static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	tsk->thread.has_fpu = 0;
+	tsk->thread.fpu.has_fpu = 0;
+	percpu_write(fpu_owner_task, NULL);
 }
 
 /* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
-	tsk->thread.has_fpu = 1;
+	tsk->thread.fpu.has_fpu = 1;
+	percpu_write(fpu_owner_task, tsk);
 }
 
 /*
@@ -336,30 +349,36 @@ typedef struct { int preload; } fpu_switch_t;
 * We don't do that yet, so "fpu_lazy_restore()" always returns
 * false, but some day..
 */
-#define fpu_lazy_restore(tsk) (0)
-#define fpu_lazy_state_intact(tsk) do { } while (0)
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+	return new == percpu_read_stable(fpu_owner_task) &&
+		cpu == new->thread.fpu.last_cpu;
+}
 
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 {
 	fpu_switch_t fpu;
 
 	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
 	if (__thread_has_fpu(old)) {
-		if (__save_init_fpu(old))
-			fpu_lazy_state_intact(old);
-		__thread_clear_has_fpu(old);
-		old->fpu_counter++;
+		if (!__save_init_fpu(old))
+			cpu = ~0;
+		old->thread.fpu.last_cpu = cpu;
+		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
+			new->fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else
 			stts();
 	} else {
 		old->fpu_counter = 0;
+		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
-			if (fpu_lazy_restore(new))
+			new->fpu_counter++;
+			if (fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
@@ -377,8 +396,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 */
 static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
-	if (fpu.preload)
-		__math_state_restore(new);
+	if (fpu.preload) {
+		if (unlikely(restore_fpu_checking(new)))
+			__thread_fpu_end(new);
+	}
 }
 
 /*
@@ -451,8 +472,10 @@ static inline void kernel_fpu_begin(void)
 		__save_init_fpu(me);
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
-	} else
+	} else {
+		percpu_write(fpu_owner_task, NULL);
 		clts();
+	}
 }
 
 static inline void kernel_fpu_end(void)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 096c975e099f..461ce432b1c2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -242,4 +242,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+static inline void amd_pmu_enable_virt(void) { }
+static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f7c89e231c6c..58545c97d071 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -374,6 +374,8 @@ union thread_xstate {
 };
 
 struct fpu {
+	unsigned int last_cpu;
+	unsigned int has_fpu;
 	union thread_xstate *state;
 };
 
@@ -454,7 +456,6 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
-	unsigned long		has_fpu;
 	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d43cad74f166..c0f7d68d318f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
+
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void)
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a901..73d08ed98a64 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
-					int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;
 
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+
+static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
-	struct _cpuid4_info *this_leaf, *sibling_leaf;
-	unsigned long num_threads_sharing;
-	int index_msb, i, sibling;
+	struct _cpuid4_info *this_leaf;
+	int ret, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+	ret = 0;
+	if (index == 3) {
+		ret = 1;
 		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 				set_bit(sibling, this_leaf->shared_cpu_map);
 			}
 		}
-		return;
+	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
+		ret = 1;
+		for_each_cpu(i, cpu_sibling_mask(cpu)) {
+			if (!per_cpu(ici_cpuid4_info, i))
+				continue;
+			this_leaf = CPUID4_INFO_IDX(i, index);
+			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
+		}
 	}
+
+	return ret;
+}
+
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+	struct _cpuid4_info *this_leaf, *sibling_leaf;
+	unsigned long num_threads_sharing;
+	int index_msb, i;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		if (cache_shared_amd_cpu_map_setup(cpu, index))
+			return;
+	}
+
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a86322..e4eeaaf58a47 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = cpumask_first(cpu_llc_shared_mask(cpu));
 
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 		goto out;
 	}
+#endif
 
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8944062f46e2..c30c807ddc72 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
+	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+	u64			perf_ctr_virt_mask;
 
 	void			*kfree_on_online;
 };
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
+	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
 	if (hwc->extra_reg.reg)
 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter when
+	 * SVM is disabled the Guest-only bits still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8fb..1333d9851778 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1532,10 +1532,17 @@ ENTRY(nmi)
 	pushq_cfi %rdx
 
 	/*
+	 * If %cs was not the kernel segment, then the NMI triggered in user
+	 * space, which means it is definitely not nested.
+	 */
+	cmpl $__KERNEL_CS, 16(%rsp)
+	jne first_nmi
+
+	/*
 	 * Check the special variable on the stack to see if NMIs are
 	 * executing.
 	 */
-	cmp $1, -8(%rsp)
+	cmpl $1, -8(%rsp)
 	je nested_nmi
 
 	/*
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be9131..73465aab28f8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out:
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-	pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
 	return UCODE_ERROR;
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 80bfe1ab0031..c08d1ff12b7c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	task_user_gs(p) = get_user_gs(regs);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;
@@ -303,7 +304,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	fpu = switch_fpu_prepare(prev_p, next_p);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1fd94bc4279d..cfa5c90c01db 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
@@ -388,7 +389,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
 
-	fpu = switch_fpu_prepare(prev_p, next_p);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 77da5b475ad2..4bbe04d96744 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,37 +571,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * This gets called with the process already owning the
- * FPU state, and with CR0.TS cleared. It just needs to
- * restore the FPU register state.
- */
-void __math_state_restore(struct task_struct *tsk)
-{
-	/* We need a safe address that is cheap to find and that is already
-	   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
-#define safe_address (tsk->thread.has_fpu)
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		__thread_fpu_end(tsk);
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
-}
-
-/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
@@ -631,7 +600,14 @@ void math_state_restore(void)
 	}
 
 	__thread_fpu_begin(tsk);
-	__math_state_restore(tsk);
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		__thread_fpu_end(tsk);
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
 
 	tsk->fpu_counter++;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..e385214711cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
+
+	amd_pmu_disable_virt();
 }
 
 static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
 
 	svm_init_erratum_383();
 
+	amd_pmu_enable_virt();
+
 	return 0;
 }
 
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index fc45ba887d05..e395693abdb1 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
 }
 
 /* TSC based delay: */
-static void delay_tsc(unsigned long loops)
+static void delay_tsc(unsigned long __loops)
 {
-	unsigned long bclock, now;
+	u32 bclock, now, loops = __loops;
 	int cpu;
 
 	preempt_disable();
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f581a18c0d4d..8ecbb4bba4b3 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -333,13 +333,15 @@ try_again:
 	 * Lookup failure means no vma is above this address,
 	 * i.e. return with success:
 	 */
-	if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+	vma = find_vma(mm, addr);
+	if (!vma)
 		return addr;
 
 	/*
 	 * new region fits between prev_vma->vm_end and
 	 * vma->vm_start, use it:
 	 */
+	prev_vma = vma->vm_prev;
 	if (addr + len <= vma->vm_start &&
 	    (!prev_vma || (addr >= prev_vma->vm_end))) {
 		/* remember the address as a hint for next time */
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index a312e76063a7..49a5cb55429b 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -60,6 +60,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
 			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
 		},
 	},
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
+	{
+		.callback = set_use_crs,
+		.ident = "MSI MS-7253",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+			DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+		},
+	},
 
 	/* Now for the blacklist.. */
 
@@ -282,9 +292,6 @@ static void add_resources(struct pci_root_info *info)
 	int i;
 	struct resource *res, *root, *conflict;
 
-	if (!pci_use_crs)
-		return;
-
 	coalesce_windows(info, IORESOURCE_MEM);
 	coalesce_windows(info, IORESOURCE_IO);
 
@@ -336,8 +343,13 @@ get_current_resources(struct acpi_device *device, int busnum,
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
 				&info);
 
-	add_resources(&info);
-	return;
+	if (pci_use_crs) {
+		add_resources(&info);
+
+		return;
+	}
+
+	kfree(info.name);
 
 name_alloc_fail:
 	kfree(info.res);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 12eb07bfb267..4172af8ceeb3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1141,7 +1141,9 @@ asmlinkage void __init xen_start_kernel(void)
 
 	/* Prevent unwanted bits from being set in PTEs. */
 	__supported_pte_mask &= ~_PAGE_GLOBAL;
+#if 0
 	if (!xen_initial_domain())
+#endif
 		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
 
 	__supported_pte_mask |= _PAGE_IOMAP;
@@ -1204,10 +1206,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
-	if (!xen_initial_domain())
-		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
-	__supported_pte_mask |= _PAGE_IOMAP;
 	/* Don't do the full vcpu_info placement stuff until we have a
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 58a0e46c404d..95c1cf60c669 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -415,13 +415,13 @@ static pteval_t iomap_pte(pteval_t val)
 static pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
-
+#if 0
 	/* If this is a WC pte, convert back from Xen WC to Linux WC */
 	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
 		WARN_ON(!pat_enabled);
 		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
 	}
-
+#endif
 	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
 		return pteval;
 
@@ -463,7 +463,7 @@ void xen_set_pat(u64 pat)
 static pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
-
+#if 0
 	/* If Linux is trying to set a WC pte, then map to the Xen WC.
 	 * If _PAGE_PAT is set, then it probably means it is really
 	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
@@ -476,7 +476,7 @@ static pte_t xen_make_pte(pteval_t pte)
 	if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
 		pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
 	}
-
+#endif
 	/*
 	 * Unprivileged domains are allowed to do IOMAPpings for
 	 * PCI passthrough, but not map ISA space. The ISA