author     Arnd Bergmann <arnd@arndb.de>    2012-03-19 16:46:32 -0400
committer  Arnd Bergmann <arnd@arndb.de>    2012-03-20 05:41:43 -0400
commit     b2f1df8d2fc14bf7e6d9d967043d4b60c2efd8dc (patch)
tree       065282434bde6ef9b4357c042705c5fcef3782ea /arch/x86/kernel
parent     c0206e228e34d8b414fcc63db45b831843adea06 (diff)
parent     5cd9eb2736a572a9ef2689829f47ffd4262adc00 (diff)
Merge branch 'renesas/timer' into next/timer
Conflicts:
arch/arm/mach-shmobile/timer.c
This resolves a nonobvious merge conflict between the renesas timer
changes in the global timer branch, the changes from the renesas soc
branch, and the last-minute bug fixes that went into v3.3.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  5
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  | 44
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c   |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h       |  8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c   | 37
-rw-r--r--  arch/x86/kernel/entry_64.S             |  9
-rw-r--r--  arch/x86/kernel/microcode_amd.c        |  1
-rw-r--r--  arch/x86/kernel/process_32.c           |  3
-rw-r--r--  arch/x86/kernel/process_64.c           |  3
-rw-r--r--  arch/x86/kernel/traps.c                | 40
10 files changed, 104 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d43cad74f166..c0f7d68d318f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1044,6 +1044,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
+
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1111,6 +1114,8 @@ void debug_stack_reset(void)
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
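
The new per-CPU variable tracks which task's FPU state is currently loaded
on each CPU. Its payoff, in the i387 rework this merge pulls in, is a
lazy-restore test at context switch; a minimal sketch, with the helper name
and the fpu.last_cpu field assumed from that rework:

	/*
	 * Sketch only: if this CPU still holds the incoming task's FPU
	 * state, the register restore can be skipped entirely.
	 */
	static inline int fpu_lazy_restore(struct task_struct *next, unsigned int cpu)
	{
		return next == this_cpu_read(fpu_owner_task) &&
		       cpu == next->thread.fpu.last_cpu;
	}

This is also why switch_fpu_prepare() grows a cpu argument in the
process_32.c and process_64.c hunks below.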
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a901..73d08ed98a64 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
-					int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;
 
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+
+static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
-	struct _cpuid4_info *this_leaf, *sibling_leaf;
-	unsigned long num_threads_sharing;
-	int index_msb, i, sibling;
+	struct _cpuid4_info *this_leaf;
+	int ret, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+	ret = 0;
+	if (index == 3) {
+		ret = 1;
 		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 				set_bit(sibling, this_leaf->shared_cpu_map);
 			}
 		}
-		return;
+	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
+		ret = 1;
+		for_each_cpu(i, cpu_sibling_mask(cpu)) {
+			if (!per_cpu(ici_cpuid4_info, i))
+				continue;
+			this_leaf = CPUID4_INFO_IDX(i, index);
+			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
+		}
 	}
+
+	return ret;
+}
+
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+	struct _cpuid4_info *this_leaf, *sibling_leaf;
+	unsigned long num_threads_sharing;
+	int index_msb, i;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		if (cache_shared_amd_cpu_map_setup(cpu, index))
+			return;
+	}
+
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
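
The refactoring gives AMD its own sharing-map setup, which returns 1 when it
handled the index: index 3 (the L3) is shared across the last-level-cache
domain, while on family 0x15 indices 1 and 2 (the L1i and L2, shared within a
compute unit) use the thread-sibling mask. A condensed sketch of that
dispatch, using an illustrative helper that is not in the patch:

	/* Illustrative only: which mask describes sharing for an AMD cache index. */
	static const struct cpumask *amd_cache_sharing_mask(struct cpuinfo_x86 *c,
							    unsigned int cpu, int index)
	{
		if (index == 3)					/* L3 */
			return cpu_llc_shared_mask(cpu);
		if (c->x86 == 0x15 && (index == 1 || index == 2))
			return cpu_sibling_mask(cpu);		/* L1i/L2 */
		return NULL;	/* fall back to the generic CPUID-based path */
	}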
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a86322..e4eeaaf58a47 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = cpumask_first(cpu_llc_shared_mask(cpu));
 
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 		goto out;
 	}
+#endif
 
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8944062f46e2..c30c807ddc72 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb			*amd_nb;
+	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+	u64				perf_ctr_virt_mask;
 
 	void				*kfree_on_online;
 };
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
+	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
 	if (hwc->extra_reg.reg)
 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
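
The new mask is pure bit arithmetic: whatever perf_ctr_virt_mask holds is
stripped from the event-select value just before it is written to the MSR.
A small userspace demonstration of the effect, with bit positions assumed
for illustration (the AMD Host-only bit is taken to be bit 41 and the
enable bit to be bit 22):

	#include <stdio.h>
	#include <stdint.h>

	#define HOSTONLY	(1ULL << 41)	/* assumed AMD_PERFMON_EVENTSEL_HOSTONLY */
	#define ENABLE		(1ULL << 22)	/* event-select enable bit */

	int main(void)
	{
		uint64_t config = 0x76 | HOSTONLY;	/* a host-only event */
		uint64_t mask;

		mask = HOSTONLY;	/* SVM disabled: strip the bit, keep counting */
		printf("svm off: 0x%llx\n",
		       (unsigned long long)((config | ENABLE) & ~mask));

		mask = 0;		/* SVM enabled: the bit reaches the hardware */
		printf("svm on:  0x%llx\n",
		       (unsigned long long)((config | ENABLE) & ~mask));
		return 0;
	}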
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter when
+	 * SVM is disabled the Guest-only bits still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
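
These two exports are the toggle for the mask: while SVM is disabled the
Host-only bit is masked out so host-only events still count, and once
guests can actually run the mask is dropped so the bit takes effect in
hardware. A sketch of the intended pairing — the function names here are
illustrative, though in mainline the calls sit in KVM's SVM hardware
enable/disable paths:

	/* Illustrative pairing only, not the actual KVM code. */
	static void example_svm_hardware_enable(void)
	{
		/* ... set EFER.SVME, program the host save area ... */
		amd_pmu_enable_virt();		/* mask cleared, events reloaded */
	}

	static void example_svm_hardware_disable(void)
	{
		amd_pmu_disable_virt();		/* Host-only bit masked again */
		/* ... clear EFER.SVME ... */
	}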
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8fb..1333d9851778 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1532,10 +1532,17 @@ ENTRY(nmi)
 	pushq_cfi %rdx
 
 	/*
+	 * If %cs was not the kernel segment, then the NMI triggered in user
+	 * space, which means it is definitely not nested.
+	 */
+	cmpl $__KERNEL_CS, 16(%rsp)
+	jne first_nmi
+
+	/*
 	 * Check the special variable on the stack to see if NMIs are
 	 * executing.
 	 */
-	cmp $1, -8(%rsp)
+	cmpl $1, -8(%rsp)
 	je nested_nmi
 
 	/*
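
The added test lets an NMI that interrupted user space skip the nested-NMI
bookkeeping entirely: a user-space %cs proves the CPU was not already inside
an NMI handler. A C model of the decision — the offsets and labels belong to
the assembly above, and the model is only illustrative:

	/* Saved %cs lives at 16(%rsp); the "NMI executing" variable at -8(%rsp). */
	enum nmi_path { FIRST_NMI, NESTED_NMI };

	static enum nmi_path classify_nmi(unsigned long saved_cs, long nmi_executing)
	{
		if (saved_cs != __KERNEL_CS)	/* came from user space */
			return FIRST_NMI;	/* definitely not nested */
		return nmi_executing == 1 ? NESTED_NMI : FIRST_NMI;
	}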
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be9131..73465aab28f8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out:
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-	pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
 	return UCODE_ERROR;
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 80bfe1ab0031..c08d1ff12b7c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	task_user_gs(p) = get_user_gs(regs);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;
@@ -303,7 +304,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	fpu = switch_fpu_prepare(prev_p, next_p);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1fd94bc4279d..cfa5c90c01db 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
@@ -388,7 +389,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
 
-	fpu = switch_fpu_prepare(prev_p, next_p);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 77da5b475ad2..4bbe04d96744 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,37 +571,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * This gets called with the process already owning the
- * FPU state, and with CR0.TS cleared. It just needs to
- * restore the FPU register state.
- */
-void __math_state_restore(struct task_struct *tsk)
-{
-	/* We need a safe address that is cheap to find and that is already
-	   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
-#define safe_address (tsk->thread.has_fpu)
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
-	if (unlikely(restore_fpu_checking(tsk))) {
-		__thread_fpu_end(tsk);
-		force_sig(SIGSEGV, tsk);
-		return;
-	}
-}
-
-/*
  * 'math_state_restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
@@ -631,7 +600,14 @@ void math_state_restore(void)
 	}
 
 	__thread_fpu_begin(tsk);
-	__math_state_restore(tsk);
+	/*
+	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+	 */
+	if (unlikely(restore_fpu_checking(tsk))) {
+		__thread_fpu_end(tsk);
+		force_sig(SIGSEGV, tsk);
+		return;
+	}
 
 	tsk->fpu_counter++;
 }