-rw-r--r--  arch/x86/events/core.c        34
-rw-r--r--  arch/x86/events/intel/core.c  66
-rw-r--r--  arch/x86/events/perf_event.h   3
3 files changed, 91 insertions, 12 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index af12e294caed..d5f98095a155 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -487,22 +487,28 @@ static inline int precise_br_compat(struct perf_event *event)
 	return m == b;
 }
 
+int x86_pmu_max_precise(void)
+{
+	int precise = 0;
+
+	/* Support for constant skid */
+	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
+		precise++;
+
+		/* Support for IP fixup */
+		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
+			precise++;
+
+		if (x86_pmu.pebs_prec_dist)
+			precise++;
+	}
+	return precise;
+}
+
 int x86_pmu_hw_config(struct perf_event *event)
 {
 	if (event->attr.precise_ip) {
-		int precise = 0;
-
-		/* Support for constant skid */
-		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
-			precise++;
-
-			/* Support for IP fixup */
-			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
-				precise++;
-
-			if (x86_pmu.pebs_prec_dist)
-				precise++;
-		}
+		int precise = x86_pmu_max_precise();
 
 		if (event->attr.precise_ip > precise)
 			return -EOPNOTSUPP;
@@ -1752,6 +1758,10 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
 
 static struct attribute_group x86_pmu_attr_group;
 
+static struct attribute_group x86_pmu_caps_group = {
+	.name = "caps",
+};
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1798,6 +1808,7 @@ static int __init init_hw_perf_events(void)
 				   0, x86_pmu.num_counters, 0, 0);
 
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
+	x86_pmu_caps_group.attrs = x86_pmu.caps_attrs;
 
 	if (x86_pmu.event_attrs)
 		x86_pmu_events_group.attrs = x86_pmu.event_attrs;
@@ -2217,6 +2228,7 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
 	&x86_pmu_attr_group,
 	&x86_pmu_format_group,
 	&x86_pmu_events_group,
+	&x86_pmu_caps_group,
 	NULL,
 };
 
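Note: the caps group added above is not registered by hand; it is picked up through x86_pmu_attr_groups[] when the PMU registers. As an illustration only (the "demo" names below are hypothetical and not part of this patch), this is the general sysfs pattern the patch relies on: a NULL-terminated attribute array wrapped in a named attribute_group shows up as a subdirectory of the owning device.

/*
 * Minimal sketch, not kernel code from this patch: a read-only attribute
 * placed in a named group so it appears under <device>/caps/ in sysfs.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(demo_value);		/* defines dev_attr_demo_value */

static struct attribute *demo_caps_attrs[] = {
	&dev_attr_demo_value.attr,
	NULL,
};

/* .name = "caps" makes the attributes appear in a caps/ subdirectory */
static const struct attribute_group demo_caps_group = {
	.name	= "caps",
	.attrs	= demo_caps_attrs,
};

/* e.g. from a probe path: sysfs_create_group(&dev->kobj, &demo_caps_group); */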
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index b00f1353a026..8fa2abd9c8b6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3795,6 +3795,46 @@ done:
 
 static DEVICE_ATTR_RW(freeze_on_smi);
 
+static ssize_t branches_show(struct device *cdev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
+}
+
+static DEVICE_ATTR_RO(branches);
+
+static struct attribute *lbr_attrs[] = {
+	&dev_attr_branches.attr,
+	NULL
+};
+
+static char pmu_name_str[30];
+
+static ssize_t pmu_name_show(struct device *cdev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
+}
+
+static DEVICE_ATTR_RO(pmu_name);
+
+static ssize_t max_precise_show(struct device *cdev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
+}
+
+static DEVICE_ATTR_RO(max_precise);
+
+static struct attribute *intel_pmu_caps_attrs[] = {
+	&dev_attr_pmu_name.attr,
+	&dev_attr_max_precise.attr,
+	NULL
+};
+
 static struct attribute *intel_pmu_attrs[] = {
 	&dev_attr_freeze_on_smi.attr,
 	NULL,
@@ -3810,6 +3850,7 @@ __init int intel_pmu_init(void)
 	struct extra_reg *er;
 	int version, i;
 	struct attribute **extra_attr = NULL;
+	char *name;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -3877,6 +3918,7 @@ __init int intel_pmu_init(void)
 	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_CORE_YONAH:
 		pr_cont("Core events, ");
+		name = "core";
 		break;
 
 	case INTEL_FAM6_CORE2_MEROM:
@@ -3892,6 +3934,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_core2_event_constraints;
 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
 		pr_cont("Core2 events, ");
+		name = "core2";
 		break;
 
 	case INTEL_FAM6_NEHALEM:
@@ -3924,6 +3967,7 @@ __init int intel_pmu_init(void)
 		extra_attr = nhm_format_attr;
 
 		pr_cont("Nehalem events, ");
+		name = "nehalem";
 		break;
 
 	case INTEL_FAM6_ATOM_PINEVIEW:
@@ -3940,6 +3984,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
 		pr_cont("Atom events, ");
+		name = "bonnell";
 		break;
 
 	case INTEL_FAM6_ATOM_SILVERMONT1:
@@ -3959,6 +4004,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = slm_events_attrs;
 		extra_attr = slm_format_attr;
 		pr_cont("Silvermont events, ");
+		name = "silvermont";
 		break;
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
@@ -3985,6 +4031,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = glm_events_attrs;
 		extra_attr = slm_format_attr;
 		pr_cont("Goldmont events, ");
+		name = "goldmont";
 		break;
 
 	case INTEL_FAM6_ATOM_GEMINI_LAKE:
@@ -4012,6 +4059,7 @@ __init int intel_pmu_init(void)
 		event_attr_td_total_slots_scale_glm.event_str = "4";
 		extra_attr = slm_format_attr;
 		pr_cont("Goldmont plus events, ");
+		name = "goldmont_plus";
 		break;
 
 	case INTEL_FAM6_WESTMERE:
@@ -4042,6 +4090,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_nhm();
 		extra_attr = nhm_format_attr;
 		pr_cont("Westmere events, ");
+		name = "westmere";
 		break;
 
 	case INTEL_FAM6_SANDYBRIDGE:
@@ -4080,6 +4129,7 @@ __init int intel_pmu_init(void)
 		extra_attr = nhm_format_attr;
 
 		pr_cont("SandyBridge events, ");
+		name = "sandybridge";
 		break;
 
 	case INTEL_FAM6_IVYBRIDGE:
@@ -4116,6 +4166,7 @@ __init int intel_pmu_init(void)
 		extra_attr = nhm_format_attr;
 
 		pr_cont("IvyBridge events, ");
+		name = "ivybridge";
 		break;
 
 
@@ -4146,6 +4197,7 @@ __init int intel_pmu_init(void)
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
+		name = "haswell";
 		break;
 
 	case INTEL_FAM6_BROADWELL_CORE:
@@ -4184,6 +4236,7 @@ __init int intel_pmu_init(void)
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
 		pr_cont("Broadwell events, ");
+		name = "broadwell";
 		break;
 
 	case INTEL_FAM6_XEON_PHI_KNL:
@@ -4203,6 +4256,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 		extra_attr = slm_format_attr;
 		pr_cont("Knights Landing/Mill events, ");
+		name = "knights-landing";
 		break;
 
 	case INTEL_FAM6_SKYLAKE_MOBILE:
@@ -4239,6 +4293,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
 		pr_cont("Skylake events, ");
+		name = "skylake";
 		break;
 
 	default:
@@ -4246,6 +4301,7 @@ __init int intel_pmu_init(void)
 		case 1:
 			x86_pmu.event_constraints = intel_v1_event_constraints;
 			pr_cont("generic architected perfmon v1, ");
+			name = "generic_arch_v1";
 			break;
 		default:
 			/*
@@ -4253,10 +4309,13 @@ __init int intel_pmu_init(void)
 			 */
 			x86_pmu.event_constraints = intel_gen_event_constraints;
 			pr_cont("generic architected perfmon, ");
+			name = "generic_arch_v2+";
 			break;
 		}
 	}
 
+	snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name);
+
 	if (version >= 2 && extra_attr) {
 		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
 						  extra_attr);
@@ -4309,8 +4368,13 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_nr = 0;
 	}
 
-	if (x86_pmu.lbr_nr)
+	x86_pmu.caps_attrs = intel_pmu_caps_attrs;
+
+	if (x86_pmu.lbr_nr) {
+		x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
+	}
+
 	/*
 	 * Access extra MSR may cause #GP under certain circumstances.
 	 * E.g. KVM doesn't support offcore event
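merge_attr() used above is an existing helper in arch/x86/events/core.c. As a rough sketch only (helper name and details below are assumptions, not the kernel's exact code), merging two NULL-terminated attribute arrays typically amounts to allocating a combined array and copying both lists:

/*
 * Illustrative only: concatenate two NULL-terminated attribute arrays
 * into a freshly allocated one, as an attribute merge like the one used
 * for caps_attrs + lbr_attrs would do.
 */
#include <linux/slab.h>
#include <linux/sysfs.h>

static struct attribute **concat_attrs(struct attribute **a, struct attribute **b)
{
	struct attribute **new;
	int i, j, k;

	for (i = 0; a[i]; i++)
		;
	for (j = 0; b[j]; j++)
		;

	/* one extra slot for the terminating NULL */
	new = kmalloc_array(i + j + 1, sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	for (k = 0; k < i; k++)
		new[k] = a[k];
	for (k = 0; k < j; k++)
		new[i + k] = b[k];
	new[i + j] = NULL;

	return new;
}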
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 0f7dad8bd358..9337589014cc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -558,6 +558,7 @@ struct x86_pmu {
 	int		attr_rdpmc;
 	struct attribute **format_attrs;
 	struct attribute **event_attrs;
+	struct attribute **caps_attrs;
 
 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
 	struct attribute **cpu_events;
@@ -742,6 +743,8 @@ int x86_reserve_hardware(void);
 
 void x86_release_hardware(void);
 
+int x86_pmu_max_precise(void);
+
 void hw_perf_lbr_event_destroy(struct perf_event *event);
 
 int x86_setup_perfctr(struct perf_event *event);
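For context, a possible userspace consumer of the new caps files (the sysfs path and helper below are assumptions for illustration, not defined by this patch): read /sys/devices/cpu/caps/max_precise on an x86 box and use it to pick perf_event_attr.precise_ip, falling back to 0 on kernels without the caps directory.

/* Hypothetical userspace sketch, not part of the patch. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int read_max_precise(void)
{
	FILE *f = fopen("/sys/devices/cpu/caps/max_precise", "r");
	int val = 0;

	if (!f)
		return 0;	/* no caps/ directory: assume no precise support */
	if (fscanf(f, "%d", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	struct perf_event_attr attr;
	int max_precise = read_max_precise();
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.exclude_kernel = 1;	/* friendlier to unprivileged runs */
	/* request the most precise sampling advertised (precise_ip is 0..3) */
	attr.precise_ip = max_precise > 3 ? 3 : max_precise;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("opened cycles event with precise_ip=%d\n", attr.precise_ip);
	close(fd);
	return 0;
}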