author     Dave Hansen <dave.hansen@linux.intel.com>    2016-06-02 20:19:29 -0400
committer  Ingo Molnar <mingo@kernel.org>               2016-06-08 06:05:58 -0400
commit     ef5f9f47d4ec4cf42bac48c7c4dafacc1b9f0630
tree       0e16ebc7a5a04c455a3a42ce46eab44822a3b86c /arch/x86/events/intel/core.c
parent     020d704c3e107eba4b416b9013179437a128b454
perf/x86/intel: Use Intel family macros for core perf events
Use the new model number macros instead of spelling things out
in the comments.
Note that this is missing a Nehalem model that is mentioned in
intel_idle, which is fixed up in a later patch.
The resulting binary (arch/x86/events/intel/core.o) is exactly
the same with and without this patch modulo some harmless changes
to restoring %esi in the return path of functions, even those
untouched by this patch.
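
For readers unfamiliar with the macros: <asm/intel-family.h> gives each
x86 family-6 model number a symbolic name, so a model check compares
against a named constant instead of a bare integer. A minimal sketch of
the idea follows; the excerpt is illustrative rather than a verbatim copy
of the header, whose authoritative definitions live in
arch/x86/include/asm/intel-family.h:

	/* Sketch only: symbolic names for family-6 model numbers. */
	#define INTEL_FAM6_SANDYBRIDGE		0x2A	/* model 42, was "case 42" */
	#define INTEL_FAM6_SANDYBRIDGE_X	0x2D	/* model 45, was "case 45" */

	/* A model check in the perf code then reads as: */
	if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
		x86_pmu.extra_regs = intel_snbep_extra_regs;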
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: jacob.jun.pan@intel.com
Link: http://lkml.kernel.org/r/20160603001929.C5F1C079@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/events/intel/core.c')
 arch/x86/events/intel/core.c | 87
 1 file changed, 44 insertions(+), 43 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 5081b4cdad0d..3ed528c2370c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -16,6 +16,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
+#include <asm/intel-family.h>
 #include <asm/apic.h>
 
 #include "../perf_event.h"
@@ -3319,11 +3320,11 @@ static int intel_snb_pebs_broken(int cpu)
 	u32 rev = UINT_MAX; /* default to broken for unknown models */
 
 	switch (cpu_data(cpu).x86_model) {
-	case 42: /* SNB */
+	case INTEL_FAM6_SANDYBRIDGE:
 		rev = 0x28;
 		break;
 
-	case 45: /* SNB-EP */
+	case INTEL_FAM6_SANDYBRIDGE_X:
 		switch (cpu_data(cpu).x86_mask) {
 		case 6: rev = 0x618; break;
 		case 7: rev = 0x70c; break;
@@ -3573,15 +3574,15 @@ __init int intel_pmu_init(void)
 	 * Install the hw-cache-events table:
 	 */
 	switch (boot_cpu_data.x86_model) {
-	case 14: /* 65nm Core "Yonah" */
+	case INTEL_FAM6_CORE_YONAH:
 		pr_cont("Core events, ");
 		break;
 
-	case 15: /* 65nm Core2 "Merom" */
+	case INTEL_FAM6_CORE2_MEROM:
 		x86_add_quirk(intel_clovertown_quirk);
-	case 22: /* 65nm Core2 "Merom-L" */
-	case 23: /* 45nm Core2 "Penryn" */
-	case 29: /* 45nm Core2 "Dunnington (MP) */
+	case INTEL_FAM6_CORE2_MEROM_L:
+	case INTEL_FAM6_CORE2_PENRYN:
+	case INTEL_FAM6_CORE2_DUNNINGTON:
 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -3592,9 +3593,9 @@ __init int intel_pmu_init(void)
 		pr_cont("Core2 events, ");
 		break;
 
-	case 30: /* 45nm Nehalem */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_NEHALEM_EP:
+	case INTEL_FAM6_NEHALEM_EX:
 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3622,11 +3623,11 @@ __init int intel_pmu_init(void)
 		pr_cont("Nehalem events, ");
 		break;
 
-	case 28: /* 45nm Atom "Pineview" */
-	case 38: /* 45nm Atom "Lincroft" */
-	case 39: /* 32nm Atom "Penwell" */
-	case 53: /* 32nm Atom "Cloverview" */
-	case 54: /* 32nm Atom "Cedarview" */
+	case INTEL_FAM6_ATOM_PINEVIEW:
+	case INTEL_FAM6_ATOM_LINCROFT:
+	case INTEL_FAM6_ATOM_PENWELL:
+	case INTEL_FAM6_ATOM_CLOVERVIEW:
+	case INTEL_FAM6_ATOM_CEDARVIEW:
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -3638,9 +3639,9 @@ __init int intel_pmu_init(void)
 		pr_cont("Atom events, ");
 		break;
 
-	case 55: /* 22nm Atom "Silvermont" */
-	case 76: /* 14nm Atom "Airmont" */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+	case INTEL_FAM6_ATOM_SILVERMONT1:
+	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_AIRMONT:
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3656,8 +3657,8 @@ __init int intel_pmu_init(void)
 		pr_cont("Silvermont events, ");
 		break;
 
-	case 92: /* 14nm Atom "Goldmont" */
-	case 95: /* 14nm Atom "Goldmont Denverton" */
+	case INTEL_FAM6_ATOM_GOLDMONT:
+	case INTEL_FAM6_ATOM_DENVERTON:
 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -3680,9 +3681,9 @@ __init int intel_pmu_init(void)
 		pr_cont("Goldmont events, ");
 		break;
 
-	case 37: /* 32nm Westmere */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_WESTMERE_EP:
+	case INTEL_FAM6_WESTMERE_EX:
 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3709,8 +3710,8 @@ __init int intel_pmu_init(void)
 		pr_cont("Westmere events, ");
 		break;
 
-	case 42: /* 32nm SandyBridge */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_SANDYBRIDGE_X:
 		x86_add_quirk(intel_sandybridge_quirk);
 		x86_add_quirk(intel_ht_bug);
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -3723,7 +3724,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		if (boot_cpu_data.x86_model == 45)
+		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
 			x86_pmu.extra_regs = intel_snbep_extra_regs;
 		else
 			x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3745,8 +3746,8 @@ __init int intel_pmu_init(void)
 		pr_cont("SandyBridge events, ");
 		break;
 
-	case 58: /* 22nm IvyBridge */
-	case 62: /* 22nm IvyBridge-EP/EX */
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE_X:
 		x86_add_quirk(intel_ht_bug);
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
@@ -3762,7 +3763,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
 		x86_pmu.pebs_prec_dist = true;
-		if (boot_cpu_data.x86_model == 62)
+		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
 			x86_pmu.extra_regs = intel_snbep_extra_regs;
 		else
 			x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3780,10 +3781,10 @@ __init int intel_pmu_init(void)
 		break;
 
 
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_X:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
 		x86_add_quirk(intel_ht_bug);
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -3807,10 +3808,10 @@ __init int intel_pmu_init(void)
 		pr_cont("Haswell events, ");
 		break;
 
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_XEON_D:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_X:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3843,7 +3844,7 @@ __init int intel_pmu_init(void)
 		pr_cont("Broadwell events, ");
 		break;
 
-	case 87: /* Knights Landing Xeon Phi */
+	case INTEL_FAM6_XEON_PHI_KNL:
 		memcpy(hw_cache_event_ids,
 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs,
@@ -3861,11 +3862,11 @@ __init int intel_pmu_init(void)
 		pr_cont("Knights Landing events, ");
 		break;
 
-	case 142: /* 14nm Kabylake Mobile */
-	case 158: /* 14nm Kabylake Desktop */
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-	case 85: /* 14nm Skylake Server */
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_SKYLAKE_X:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));