path: root/arch/x86
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-02-22 15:11:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-02-22 15:11:54 -0500
commit		9b3e7c9b9ab5c2827c1ecd45327b851a1bd01c2a (patch)
tree		f90e1f70b73dd04c5f0c820f41b9d3cc307ad85a /arch/x86
parent		0f0ca14386e0431fbedaae5efc550d46cf93b9cf (diff)
parent		a9d3f94ec7708427b9f05a65246d3fd6e287fa51 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc fixlets from all around the place"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/uncore: Fix IVT/SNB-EP uncore CBOX NID filter table
  perf/x86: Correctly use FEATURE_PDCM
  perf, nmi: Fix unknown NMI warning
  perf trace: Fix ioctl 'request' beautifier build problems on !(i386 || x86_64) arches
  perf trace: Add fallback definition of EFD_SEMAPHORE
  perf list: Fix checking for supported events on older kernels
  perf tools: Handle PERF_RECORD_HEADER_EVENT_TYPE properly
  perf probe: Do not add offset twice to uprobe address
  perf/x86: Fix Userspace RDPMC switch
  perf/x86/intel/p6: Add userspace RDPMC quirk for PPro
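Several of these fixes revolve around the userspace RDPMC switch: when /sys/devices/cpu/rdpmc is enabled, CR4.PCE is set on every CPU and user code can read PMU counters directly with the RDPMC instruction. As a minimal sketch of what that switch gates (an illustration, not code from this merge; the counter index would come from elsewhere, e.g. perf_event_mmap_page):

	#include <stdint.h>

	/*
	 * Read performance counter `counter` directly from user space.
	 * RDPMC faults with #GP unless CR4.PCE is set, which is exactly
	 * what the rdpmc sysfs attribute toggles on each CPU.
	 */
	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t lo, hi;

		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
		return ((uint64_t)hi << 32) | lo;
	}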
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c		8
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h		1
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c		11
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c	10
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c		48
5 files changed, 52 insertions, 26 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b88645191fe5..895604f2e916 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1521,6 +1521,8 @@ static int __init init_hw_perf_events(void)
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
+	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
@@ -1534,7 +1536,6 @@ static int __init init_hw_perf_events(void)
 	__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
 			   0, x86_pmu.num_counters, 0, 0);
 
-	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
 	if (x86_pmu.event_attrs)
@@ -1820,9 +1821,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (ret)
 		return ret;
 
+	if (x86_pmu.attr_rdpmc_broken)
+		return -ENOTSUPP;
+
 	if (!!val != !!x86_pmu.attr_rdpmc) {
 		x86_pmu.attr_rdpmc = !!val;
-		smp_call_function(change_rdpmc, (void *)val, 1);
+		on_each_cpu(change_rdpmc, (void *)val, 1);
 	}
 
 	return count;
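Two things change in set_attr_rdpmc() above: writes now fail with -ENOTSUPP once a quirk has marked RDPMC broken, and the CR4.PCE update switches from smp_call_function() to on_each_cpu(). The difference matters because smp_call_function() runs the callback only on the *other* CPUs, so the CPU that handled the sysfs write kept a stale CR4.PCE. A sketch of the callback being invoked, modelled on the kernel's change_rdpmc() of this era (illustrative, not part of this diff):

	static void change_rdpmc(void *info)
	{
		bool enable = !!(unsigned long)info;

		/* Toggle CR4.PCE on this CPU; PCE permits RDPMC at CPL > 0. */
		if (enable)
			set_in_cr4(X86_CR4_PCE);
		else
			clear_in_cr4(X86_CR4_PCE);
	}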
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index c1a861829d81..4972c244d0bc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -409,6 +409,7 @@ struct x86_pmu {
 	/*
 	 * sysfs attrs
 	 */
+	int		attr_rdpmc_broken;
 	int		attr_rdpmc;
 	struct attribute **format_attrs;
 	struct attribute **event_attrs;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 0fa4f242f050..aa333d966886 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
-	if (!status) {
-		intel_pmu_enable_all(0);
-		return handled;
-	}
+	if (!status)
+		goto done;
 
 	loops = 0;
 again:
@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
 	if (version > 1)
 		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
-	/*
-	 * v2 and above have a perf capabilities MSR
-	 */
-	if (version > 1) {
+	if (boot_cpu_has(X86_FEATURE_PDCM)) {
 		u64 capabilities;
 
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
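The second hunk replaces a `version > 1` test with a feature check: a PMU version above 1 does not by itself guarantee that MSR_IA32_PERF_CAPABILITIES exists (virtualized CPUs in particular may advertise one without the other), and the architectural indicator is the PDCM bit, CPUID.01H:ECX[15]. A user-space sketch of the same test, using GCC's <cpuid.h> instead of the kernel's boot_cpu_has() (hypothetical helper name):

	#include <cpuid.h>

	/*
	 * Return non-zero if CPUID.01H:ECX[15] (PDCM) is set, i.e. the CPU
	 * exposes IA32_PERF_CAPABILITIES; mirrors boot_cpu_has(X86_FEATURE_PDCM).
	 */
	static int has_pdcm(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 0;
		return (ecx >> 15) & 1;
	}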
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 29c248799ced..c88f7f4b03ee 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index b1e2fe115323..7c1a0c07b607 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
 
 };
 
+static __init void p6_pmu_rdpmc_quirk(void)
+{
+	if (boot_cpu_data.x86_mask < 9) {
+		/*
+		 * PPro erratum 26; fixed in stepping 9 and above.
+		 */
+		pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
+		x86_pmu.attr_rdpmc_broken = 1;
+		x86_pmu.attr_rdpmc = 0;
+	}
+}
+
 __init int p6_pmu_init(void)
 {
+	x86_pmu = p6_pmu;
+
 	switch (boot_cpu_data.x86_model) {
-	case 1:
-	case 3:  /* Pentium Pro */
-	case 5:
-	case 6:  /* Pentium II */
-	case 7:
-	case 8:
-	case 11: /* Pentium III */
-	case 9:
-	case 13:
-		/* Pentium M */
+	case 1: /* Pentium Pro */
+		x86_add_quirk(p6_pmu_rdpmc_quirk);
+		break;
+
+	case 3: /* Pentium II - Klamath */
+	case 5: /* Pentium II - Deschutes */
+	case 6: /* Pentium II - Mendocino */
 		break;
+
+	case 7: /* Pentium III - Katmai */
+	case 8: /* Pentium III - Coppermine */
+	case 10: /* Pentium III Xeon */
+	case 11: /* Pentium III - Tualatin */
+		break;
+
+	case 9: /* Pentium M - Banias */
+	case 13: /* Pentium M - Dothan */
+		break;
+
 	default:
-		pr_cont("unsupported p6 CPU model %d ",
-			boot_cpu_data.x86_model);
+		pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
 		return -ENODEV;
 	}
 
-	x86_pmu = p6_pmu;
-
 	memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
 		sizeof(hw_cache_event_ids));
 
-
 	return 0;
 }
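The quirk arrangement relies on the ordering fix in the first perf_event.c hunk: init_hw_perf_events() now sets attr_rdpmc = 1 before walking the quirk list, so p6_pmu_rdpmc_quirk() can clear it without being overwritten. It is also why `x86_pmu = p6_pmu` moves ahead of the switch, since x86_add_quirk() stores into x86_pmu.quirks. A sketch of the registration, modelled on the x86_add_quirk() macro in perf_event.h (see the header for the exact definition):

	struct x86_pmu_quirk {
		struct x86_pmu_quirk *next;
		void (*func)(void);
	};

	/*
	 * Prepend a one-shot init-time quirk onto x86_pmu.quirks; the list
	 * is walked, and each func() run once, during init_hw_perf_events().
	 */
	#define x86_add_quirk(handler)					\
	do {								\
		static struct x86_pmu_quirk __quirk __initdata = {	\
			.func = (handler),				\
		};							\
		__quirk.next = x86_pmu.quirks;				\
		x86_pmu.quirks = &__quirk;				\
	} while (0)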