Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  | 44
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c   |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h       |  8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c   | 37
-rw-r--r--  arch/x86/kernel/entry_64.S             |  9
-rw-r--r--  arch/x86/kernel/microcode_amd.c        |  1
6 files changed, 87 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6b45e5e7a901..73d08ed98a64 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
-					int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;
 
@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+
+static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
-	struct _cpuid4_info *this_leaf, *sibling_leaf;
-	unsigned long num_threads_sharing;
-	int index_msb, i, sibling;
+	struct _cpuid4_info *this_leaf;
+	int ret, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+	ret = 0;
+	if (index == 3) {
+		ret = 1;
 		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 				set_bit(sibling, this_leaf->shared_cpu_map);
 			}
 		}
-		return;
+	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
+		ret = 1;
+		for_each_cpu(i, cpu_sibling_mask(cpu)) {
+			if (!per_cpu(ici_cpuid4_info, i))
+				continue;
+			this_leaf = CPUID4_INFO_IDX(i, index);
+			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
+		}
 	}
+
+	return ret;
+}
+
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+	struct _cpuid4_info *this_leaf, *sibling_leaf;
+	unsigned long num_threads_sharing;
+	int index_msb, i;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		if (cache_shared_amd_cpu_map_setup(cpu, index))
+			return;
+	}
+
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
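For illustration only, a minimal user-space sketch of the dispatch pattern this change introduces: a vendor-specific helper fills in the shared-CPU map and returns nonzero when it handled the cache level, so the generic CPUID-derived path runs only as a fallback. Every name below (fake_cpu, setup_amd, setup_generic, ...) is hypothetical; only the control flow mirrors the patch.

/*
 * Illustration only -- not part of the commit.  Models the "vendor helper
 * returns nonzero if it handled the level" pattern used above.
 */
#include <stdio.h>

struct fake_cpu {
	int vendor_is_amd;
	int family;
};

static int setup_amd(const struct fake_cpu *c, int index)
{
	if (index == 3)
		return 1;	/* L3 is shared across the last-level-cache mask */
	if (c->family == 0x15 && (index == 1 || index == 2))
		return 1;	/* fam 15h: L1I/L2 shared inside a compute unit */
	return 0;		/* not handled, fall back to the generic path */
}

static void setup_generic(int index)
{
	printf("index %d: using CPUID num_threads_sharing\n", index);
}

static void cache_shared_map_setup(const struct fake_cpu *c, int index)
{
	if (c->vendor_is_amd && setup_amd(c, index)) {
		printf("index %d: handled by AMD-specific topology\n", index);
		return;
	}
	setup_generic(index);
}

int main(void)
{
	struct fake_cpu fam15h = { .vendor_is_amd = 1, .family = 0x15 };

	for (int i = 0; i <= 3; i++)
		cache_shared_map_setup(&fam15h, i);
	return 0;
}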
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 786e76a86322..e4eeaaf58a47 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -528,6 +528,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = cpumask_first(cpu_llc_shared_mask(cpu));
 
@@ -553,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 		goto out;
 	}
+#endif
 
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8944062f46e2..c30c807ddc72 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -147,7 +147,9 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
+	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+	u64			perf_ctr_virt_mask;
 
 	void			*kfree_on_online;
 };
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
+	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
 	if (hwc->extra_reg.reg)
 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
 void x86_pmu_enable_all(int added);
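For illustration only, a small stand-alone program showing how the inverted perf_ctr_virt_mask folds into the final event-select value: with virtualization off the Host-only bit stays in the mask and is therefore cleared from every programmed event, while a zero mask lets it pass through unchanged. The bit positions and the "cycles" event code below are reproduced purely for the example.

/* Illustration only -- not part of the commit. */
#include <stdint.h>
#include <stdio.h>

#define EVSEL_HOSTONLY	(1ULL << 41)	/* example Host-only bit position */
#define EVSEL_ENABLE	(1ULL << 22)	/* example enable bit */

static uint64_t final_ctl(uint64_t config, uint64_t enable_mask,
			  uint64_t virt_mask)
{
	/* same expression the patched __x86_pmu_enable_event() now uses */
	return (config | enable_mask) & ~virt_mask;
}

int main(void)
{
	uint64_t cfg = 0x76 | EVSEL_HOSTONLY;	/* host-only "cycles" event */

	printf("virt off: %#llx\n",
	       (unsigned long long)final_ctl(cfg, EVSEL_ENABLE, EVSEL_HOSTONLY));
	printf("virt on:  %#llx\n",
	       (unsigned long long)final_ctl(cfg, EVSEL_ENABLE, 0));
	return 0;
}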
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 0397b23be8e9..67250a52430b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter when
+	 * SVM is disabled the Guest-only bits still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
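For illustration only: the two exported functions simply flip the per-CPU mask and reprogram every active counter so the new mask takes effect at once. This diff is limited to arch/x86/kernel, so the consumer is not shown here; presumably a hypervisor's hardware enable/disable path would bracket SVM bring-up with the pair, roughly as in this hypothetical stand-in.

/* Illustration only -- all names here are stand-ins, not kernel APIs. */
#include <stdio.h>

static unsigned long long perf_ctr_virt_mask;	/* models the per-CPU field */
#define HOSTONLY_BIT (1ULL << 41)		/* example Host-only bit */

static void reload_all_events(void)
{
	/* models the x86_pmu_disable_all()/x86_pmu_enable_all(0) pair */
	printf("reprogram counters with virt mask %#llx\n", perf_ctr_virt_mask);
}

static void pmu_enable_virt(void)
{
	perf_ctr_virt_mask = 0;			/* let Host-only events count */
	reload_all_events();
}

static void pmu_disable_virt(void)
{
	perf_ctr_virt_mask = HOSTONLY_BIT;	/* strip Host-only while virt is off */
	reload_all_events();
}

int main(void)
{
	/* hypothetical caller: bracket hardware virt bring-up with the pair */
	pmu_disable_virt();	/* boot state: virtualization not active */
	pmu_enable_virt();	/* e.g. when the hypervisor enables SVM */
	pmu_disable_virt();	/* and back again when it is torn down */
	return 0;
}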
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3fe8239fd8fb..1333d9851778 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1532,10 +1532,17 @@ ENTRY(nmi)
 	pushq_cfi %rdx
 
 	/*
+	 * If %cs was not the kernel segment, then the NMI triggered in user
+	 * space, which means it is definitely not nested.
+	 */
+	cmpl $__KERNEL_CS, 16(%rsp)
+	jne first_nmi
+
+	/*
 	 * Check the special variable on the stack to see if NMIs are
 	 * executing.
 	 */
-	cmp $1, -8(%rsp)
+	cmpl $1, -8(%rsp)
 	je nested_nmi
 
 	/*
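For illustration only, a C rendering of the control flow these new lines add to the NMI entry path: an NMI whose saved CS is not the kernel code segment arrived from user space and can never be a nested NMI, so the stack-variable check is skipped. The segment values and field names below are illustrative stand-ins, not the real frame layout.

/* Illustration only -- a model of the check, not the asm itself. */
#include <stdio.h>

#define KERNEL_CS 0x10			/* illustrative value for __KERNEL_CS */

struct nmi_frame {
	unsigned long saved_cs;		/* CS pushed by the CPU, 16(%rsp) */
	unsigned long nmi_executing;	/* the special variable at -8(%rsp) */
};

static const char *classify_nmi(const struct nmi_frame *f)
{
	if (f->saved_cs != KERNEL_CS)
		return "first_nmi (came from user space, cannot be nested)";
	if (f->nmi_executing == 1)
		return "nested_nmi";
	return "first_nmi";
}

int main(void)
{
	struct nmi_frame from_user   = { .saved_cs = 0x33, .nmi_executing = 1 };
	struct nmi_frame from_kernel = { .saved_cs = KERNEL_CS, .nmi_executing = 1 };

	printf("%s\n", classify_nmi(&from_user));
	printf("%s\n", classify_nmi(&from_kernel));
	return 0;
}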
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ac0417be9131..73465aab28f8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -360,7 +360,6 @@ out:
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-	pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
 	return UCODE_ERROR;
 }
 