 arch/powerpc/perf/hv-24x7.c            |   6
 arch/powerpc/perf/hv-gpci.c            |   6
 arch/s390/kernel/perf_cpum_sf.c        |   6
 arch/x86/Kconfig                       |   4
 arch/x86/kernel/cpu/Makefile           |   7
 arch/x86/kernel/cpu/perf_event.c       |  14
 arch/x86/kernel/cpu/perf_event.h       |   1
 arch/x86/kernel/cpu/perf_event_intel.c | 173
 include/uapi/linux/perf_event.h        |  14
 kernel/events/core.c                   |  15
 kernel/events/hw_breakpoint.c          |   7
 tools/perf/builtin-diff.c              |   5
 tools/perf/builtin-probe.c             |   2
 tools/perf/perf-sys.h                  |  30
 tools/perf/util/header.c               |  27
 tools/perf/util/sort.c                 | 102
 tools/perf/util/thread.c               |   3
 tools/perf/util/unwind-libunwind.c     |  12
 tools/perf/util/unwind.h               |   3
 19 files changed, 158 insertions(+), 279 deletions(-)
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 6c8710dd90c9..dba34088da28 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
 	return 0;
 }
 
-static int h_24x7_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 static struct pmu h_24x7_pmu = {
 	.task_ctx_nr = perf_invalid_context,
 
@@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = {
 	.start = h_24x7_event_start,
 	.stop = h_24x7_event_stop,
 	.read = h_24x7_event_update,
-	.event_idx = h_24x7_event_idx,
 };
 
 static int hv_24x7_init(void)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 15fc76c93022..a051fe946c63 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event)
 	return 0;
 }
 
-static int h_gpci_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 static struct pmu h_gpci_pmu = {
 	.task_ctx_nr = perf_invalid_context,
 
@@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = {
 	.start = h_gpci_event_start,
 	.stop = h_gpci_event_stop,
 	.read = h_gpci_event_update,
-	.event_idx = h_gpci_event_idx,
 };
 
 static int hv_gpci_init(void)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 08e761318c17..b878f12a9597 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
 	perf_pmu_enable(event->pmu);
 }
 
-static int cpumsf_pmu_event_idx(struct perf_event *event)
-{
-	return event->hw.idx;
-}
-
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
@@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = {
 	.stop = cpumsf_pmu_stop,
 	.read = cpumsf_pmu_read,
 
-	.event_idx = cpumsf_pmu_event_idx,
 	.attr_groups = cpumsf_pmu_attr_groups,
 };
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f2327e88e07c..ded8a6774ac9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -142,6 +142,10 @@ config INSTRUCTION_DECODER
 	def_bool y
 	depends on KPROBES || PERF_EVENTS || UPROBES
 
+config PERF_EVENTS_INTEL_UNCORE
+	def_bool y
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
+
 config OUTPUT_FORMAT
 	string
 	default "elf32-i386" if X86_32
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 01d5453b5502..e27b49d7c922 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
 endif
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
+
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
+					   perf_event_intel_uncore_snb.o \
+					   perf_event_intel_uncore_snbep.o \
+					   perf_event_intel_uncore_nhmex.o
 endif
 
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1b8299dd3d91..143e5f5dc855 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,8 +243,9 @@ static bool check_hw_exists(void)
 
 msr_fail:
 	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-	printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR
-	       "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
+	printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+	       boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
+	       reg, val_new);
 
 	return false;
 }
@@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event)
 	if (event->attr.type == PERF_TYPE_RAW)
 		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
-			return -EINVAL;
-	}
-
 	return x86_setup_perfctr(event);
 }
 
@@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event)
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
-
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
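The check_hw_exists() hunk above fixes a classic C pitfall: adjacent string literals concatenate before the ternary operator applies, so the old call parsed as cond ? KERN_INFO : (KERN_ERR "Failed ..."), and whenever the condition was true the format string was just the bare KERN_INFO prefix and the message vanished. A minimal user-space sketch of the same pitfall (printf standing in for printk; the level macro values are illustrative):

	#include <stdio.h>

	#define KERN_INFO "<6>"
	#define KERN_ERR  "<3>"

	int main(void)
	{
		int on_hyp = 1;

		/* Old pattern: concatenation binds first, so this is
		 * on_hyp ? "<6>" : "<3>Failed: %d\n" -- with on_hyp set,
		 * the message and its argument silently disappear. */
		printf(on_hyp ? KERN_INFO : KERN_ERR "Failed: %d\n", 42);

		/* Fixed pattern: pass the level as an ordinary "%s" argument. */
		printf("%sFailed: %d\n", on_hyp ? KERN_INFO : KERN_ERR, 42);
		return 0;
	}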
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d98a34d435d7..fc5eb390b368 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -445,7 +445,6 @@ struct x86_pmu {
 	struct x86_pmu_quirk *quirks;
 	int perfctr_second_write;
 	bool late_ack;
-	unsigned (*limit_period)(struct perf_event *event, unsigned l);
 
 	/*
 	 * sysfs attrs
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a73947c53b65..944bf019b74f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	EVENT_CONSTRAINT_END
 };
 
-static struct event_constraint intel_bdw_event_constraints[] = {
-	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
-	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
-	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
-	INTEL_EVENT_CONSTRAINT(0xa3, 0x4),	/* CYCLE_ACTIVITY.* */
-	EVENT_CONSTRAINT_END
-};
-
 static u64 intel_pmu_event_map(int hw_event)
 {
 	return intel_perfmon_event_map[hw_event];
@@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids
 
 };
 
-static __initconst const u64 hsw_hw_cache_event_ids
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(L1I ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(LL  ) ] = {
-	[ C(OP_READ) ] = {
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-		[ C(RESULT_ACCESS) ] = 0x1b7,
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-		   L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x1b7,
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE:ALL_RFO */
-		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x1b7,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(DTLB) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
- [ C(ITLB) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
-		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
- },
- [ C(BPU ) ] = {
-	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
-		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = -1,
-		[ C(RESULT_MISS)   ] = -1,
-	},
- },
-};
-
-static __initconst const u64 hsw_hw_cache_extra_regs
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(LL  ) ] = {
-	[ C(OP_READ) ] = {
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
-		[ C(RESULT_ACCESS) ] = 0x2d5,
-		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
-		   L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x3fbc0202d5ull,
-	},
-	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x122,	/* OFFCORE_RESPONSE:ALL_RFO */
-		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
-		[ C(RESULT_MISS)   ] = 0x3fbc020122ull,
-	},
-	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0,
-		[ C(RESULT_MISS)   ] = 0x0,
-	},
- },
-};
-
 static __initconst const u64 westmere_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	return c;
 }
 
-/*
- * Broadwell:
- * The INST_RETIRED.ALL period always needs to have lowest
- * 6bits cleared (BDM57). It shall not use a period smaller
- * than 100 (BDM11). We combine the two to enforce
- * a min-period of 128.
- */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
-{
-	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
-			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fu;
-	}
-	return left;
-}
-
 PMU_FORMAT_ATTR(event, "config:0-7");
 PMU_FORMAT_ATTR(umask, "config:8-15");
 PMU_FORMAT_ATTR(edge, "config:18");
@@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void)
 	case 69: /* 22nm Haswell ULT */
 	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
 		x86_pmu.late_ack = true;
-		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
 
 		intel_pmu_lbr_init_snb();
 
@@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void)
 		pr_cont("Haswell events, ");
 		break;
 
-	case 61: /* 14nm Broadwell Core-M */
-		x86_pmu.late_ack = true;
-		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-
-		intel_pmu_lbr_init_snb();
-
-		x86_pmu.event_constraints = intel_bdw_event_constraints;
-		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
-		x86_pmu.extra_regs = intel_snbep_extra_regs;
-		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		/* all extra regs are per-cpu when HT is on */
-		x86_pmu.er_flags |= ERF_HAS_RSP_1;
-		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
-
-		x86_pmu.hw_config = hsw_hw_config;
-		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
-		x86_pmu.limit_period = bdw_limit_period;
-		pr_cont("Broadwell events, ");
-		break;
-
 	default:
 		switch (x86_pmu.version) {
 		case 1:
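For reference, the removed bdw_limit_period() combined erratum BDM57 (INST_RETIRED.ALL periods need the low 6 bits clear) with BDM11 (no period below 100) by rounding down to a multiple of 64 with a floor of 128. A small stand-alone illustration of that arithmetic (not kernel code):

	#include <stdio.h>

	static unsigned limit(unsigned left)
	{
		if (left < 128)
			left = 128;
		return left & ~0x3fu;	/* clear the low 6 bits */
	}

	int main(void)
	{
		printf("%u %u %u\n", limit(100), limit(128), limit(200));
		/* prints: 128 128 192 */
		return 0;
	}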
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9269de254874..9d845404d875 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -364,7 +364,7 @@ struct perf_event_mmap_page {
 	/*
 	 * Bits needed to read the hw events in user-space.
 	 *
-	 *   u32 seq, time_mult, time_shift, idx, width;
+	 *   u32 seq, time_mult, time_shift, index, width;
 	 *   u64 count, enabled, running;
 	 *   u64 cyc, time_offset;
 	 *   s64 pmc = 0;
@@ -383,11 +383,11 @@ struct perf_event_mmap_page {
 	 *       time_shift = pc->time_shift;
 	 *     }
 	 *
-	 *     idx = pc->index;
+	 *     index = pc->index;
 	 *     count = pc->offset;
-	 *     if (pc->cap_usr_rdpmc && idx) {
+	 *     if (pc->cap_user_rdpmc && index) {
 	 *       width = pc->pmc_width;
-	 *       pmc = rdpmc(idx - 1);
+	 *       pmc = rdpmc(index - 1);
 	 *     }
 	 *
 	 *     barrier();
@@ -415,7 +415,7 @@ struct perf_event_mmap_page {
 	};
 
 	/*
-	 * If cap_usr_rdpmc this field provides the bit-width of the value
+	 * If cap_user_rdpmc this field provides the bit-width of the value
 	 * read using the rdpmc() or equivalent instruction. This can be used
 	 * to sign extend the result like:
 	 *
@@ -439,10 +439,10 @@ struct perf_event_mmap_page {
 	 *
 	 * Where time_offset,time_mult,time_shift and cyc are read in the
 	 * seqcount loop described above. This delta can then be added to
-	 * enabled and possible running (if idx), improving the scaling:
+	 * enabled and possible running (if index), improving the scaling:
 	 *
 	 *   enabled += delta;
-	 *   if (idx)
+	 *   if (index)
 	 *     running += delta;
 	 *
 	 *   quot = count / running;
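The renamed field matters to user space: pc->index is the rdpmc index plus one, and zero means the counter cannot be read from user space, which is why the PMUs in this series drop their stub event_idx callbacks and rely on the (fixed) default. A compilable x86 sketch of the read sequence the comment documents, assuming pc points at the mmap()ed first page of a perf_event fd (this is an illustration, not the kernel's code):

	#include <stdint.h>
	#include <linux/perf_event.h>

	static uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
	{
		uint32_t seq, index;
		int64_t count;

		do {
			seq = pc->lock;
			__sync_synchronize();		/* barrier() */

			index = pc->index;		/* 0: no rdpmc access */
			count = pc->offset;
			if (pc->cap_user_rdpmc && index) {
				uint32_t lo, hi;
				int64_t pmc;

				/* rdpmc wants the raw counter number, hence -1 */
				asm volatile("rdpmc" : "=a"(lo), "=d"(hi)
						     : "c"(index - 1));
				pmc = ((int64_t)hi << 32) | lo;

				/* sign-extend from pc->pmc_width bits */
				pmc <<= 64 - pc->pmc_width;
				pmc >>= 64 - pc->pmc_width;
				count += pmc;
			}

			__sync_synchronize();
		} while (pc->lock != seq);

		return count;
	}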
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1425d07018de..2b02c9fda790 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6071,11 +6071,6 @@ static int perf_swevent_init(struct perf_event *event)
 	return 0;
 }
 
-static int perf_swevent_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 static struct pmu perf_swevent = {
 	.task_ctx_nr = perf_sw_context,
 
@@ -6085,8 +6080,6 @@ static struct pmu perf_swevent = {
 	.start = perf_swevent_start,
 	.stop = perf_swevent_stop,
 	.read = perf_swevent_read,
-
-	.event_idx = perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -6204,8 +6197,6 @@ static struct pmu perf_tracepoint = {
 	.start = perf_swevent_start,
 	.stop = perf_swevent_stop,
 	.read = perf_swevent_read,
-
-	.event_idx = perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -6431,8 +6422,6 @@ static struct pmu perf_cpu_clock = {
 	.start = cpu_clock_event_start,
 	.stop = cpu_clock_event_stop,
 	.read = cpu_clock_event_read,
-
-	.event_idx = perf_swevent_event_idx,
 };
 
 /*
@@ -6511,8 +6500,6 @@ static struct pmu perf_task_clock = {
 	.start = task_clock_event_start,
 	.stop = task_clock_event_stop,
 	.read = task_clock_event_read,
-
-	.event_idx = perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -6542,7 +6529,7 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 
 static int perf_event_idx_default(struct perf_event *event)
 {
-	return event->hw.idx + 1;
+	return 0;
 }
 
 /*
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 1559fb0b9296..9803a6600d49 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -605,11 +605,6 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
 	bp->hw.state = PERF_HES_STOPPED;
 }
 
-static int hw_breakpoint_event_idx(struct perf_event *bp)
-{
-	return 0;
-}
-
 static struct pmu perf_breakpoint = {
 	.task_ctx_nr = perf_sw_context, /* could eventually get its own */
 
@@ -619,8 +614,6 @@ static struct pmu perf_breakpoint = {
 	.start = hw_breakpoint_start,
 	.stop = hw_breakpoint_stop,
 	.read = hw_breakpoint_pmu_read,
-
-	.event_idx = hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 8c5c11ca8c53..25114c9a6801 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1142,6 +1142,11 @@ static int data_init(int argc, const char **argv)
 
 int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 {
+	int ret = hists__init();
+
+	if (ret < 0)
+		return ret;
+
 	perf_config(perf_default_config, NULL);
 
 	argc = parse_options(argc, argv, options, diff_usage, 0);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 04412b4770a2..7af26acf06d9 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -375,7 +375,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 	OPT_CALLBACK('x', "exec", NULL, "executable|path",
 		"target executable name or path", opt_set_target),
 	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
-		    "Disable symbol demangling"),
+		    "Enable symbol demangling"),
 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
 		    "Enable kernel symbol demangling"),
 	OPT_END()
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 937e4324ad94..a3b13d7dc1d4 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -13,7 +13,7 @@
 #define wmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
-#define CPUINFO_PROC	"model name"
+#define CPUINFO_PROC	{"model name"}
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 336
 #endif
@@ -30,7 +30,7 @@
 #define wmb()		asm volatile("sfence" ::: "memory")
 #define rmb()		asm volatile("lfence" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
-#define CPUINFO_PROC	"model name"
+#define CPUINFO_PROC	{"model name"}
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 298
 #endif
@@ -47,14 +47,14 @@
 #define mb()		asm volatile ("sync" ::: "memory")
 #define wmb()		asm volatile ("sync" ::: "memory")
 #define rmb()		asm volatile ("sync" ::: "memory")
-#define CPUINFO_PROC	"cpu"
+#define CPUINFO_PROC	{"cpu"}
 #endif
 
 #ifdef __s390__
 #define mb()		asm volatile("bcr 15,0" ::: "memory")
 #define wmb()		asm volatile("bcr 15,0" ::: "memory")
 #define rmb()		asm volatile("bcr 15,0" ::: "memory")
-#define CPUINFO_PROC	"vendor_id"
+#define CPUINFO_PROC	{"vendor_id"}
 #endif
 
 #ifdef __sh__
@@ -67,14 +67,14 @@
 # define wmb()		asm volatile("" ::: "memory")
 # define rmb()		asm volatile("" ::: "memory")
 #endif
-#define CPUINFO_PROC	"cpu type"
+#define CPUINFO_PROC	{"cpu type"}
 #endif
 
 #ifdef __hppa__
 #define mb()		asm volatile("" ::: "memory")
 #define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define CPUINFO_PROC	"cpu"
+#define CPUINFO_PROC	{"cpu"}
 #endif
 
 #ifdef __sparc__
@@ -87,14 +87,14 @@
 #endif
 #define wmb()		asm volatile("":::"memory")
 #define rmb()		asm volatile("":::"memory")
-#define CPUINFO_PROC	"cpu"
+#define CPUINFO_PROC	{"cpu"}
 #endif
 
 #ifdef __alpha__
 #define mb()		asm volatile("mb" ::: "memory")
 #define wmb()		asm volatile("wmb" ::: "memory")
 #define rmb()		asm volatile("mb" ::: "memory")
-#define CPUINFO_PROC	"cpu model"
+#define CPUINFO_PROC	{"cpu model"}
 #endif
 
 #ifdef __ia64__
@@ -102,7 +102,7 @@
 #define wmb()		asm volatile ("mf" ::: "memory")
 #define rmb()		asm volatile ("mf" ::: "memory")
 #define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
-#define CPUINFO_PROC	"model name"
+#define CPUINFO_PROC	{"model name"}
 #endif
 
 #ifdef __arm__
@@ -113,7 +113,7 @@
 #define mb()		((void(*)(void))0xffff0fa0)()
 #define wmb()		((void(*)(void))0xffff0fa0)()
 #define rmb()		((void(*)(void))0xffff0fa0)()
-#define CPUINFO_PROC	"Processor"
+#define CPUINFO_PROC	{"model name", "Processor"}
 #endif
 
 #ifdef __aarch64__
@@ -133,28 +133,28 @@
 		: "memory")
 #define wmb()		mb()
 #define rmb()		mb()
-#define CPUINFO_PROC	"cpu model"
+#define CPUINFO_PROC	{"cpu model"}
 #endif
 
 #ifdef __arc__
 #define mb()		asm volatile("" ::: "memory")
 #define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define CPUINFO_PROC	"Processor"
+#define CPUINFO_PROC	{"Processor"}
 #endif
 
 #ifdef __metag__
 #define mb()		asm volatile("" ::: "memory")
 #define wmb()		asm volatile("" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define CPUINFO_PROC	"CPU"
+#define CPUINFO_PROC	{"CPU"}
 #endif
 
 #ifdef __xtensa__
 #define mb()		asm volatile("memw" ::: "memory")
 #define wmb()		asm volatile("memw" ::: "memory")
 #define rmb()		asm volatile("" ::: "memory")
-#define CPUINFO_PROC	"core ID"
+#define CPUINFO_PROC	{"core ID"}
 #endif
 
 #ifdef __tile__
@@ -162,7 +162,7 @@
 #define wmb()		asm volatile ("mf" ::: "memory")
 #define rmb()		asm volatile ("mf" ::: "memory")
 #define cpu_relax()	asm volatile ("mfspr zero, PASS" ::: "memory")
-#define CPUINFO_PROC	"model name"
+#define CPUINFO_PROC	{"model name"}
 #endif
 
 #define barrier() asm volatile ("" ::: "memory")
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ce0de00399da..26f5b2fe5dc8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -579,16 +579,12 @@ static int write_version(int fd, struct perf_header *h __maybe_unused,
 	return do_write_string(fd, perf_version_string);
 }
 
-static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
-			 struct perf_evlist *evlist __maybe_unused)
+static int __write_cpudesc(int fd, const char *cpuinfo_proc)
 {
-#ifndef CPUINFO_PROC
-#define CPUINFO_PROC NULL
-#endif
 	FILE *file;
 	char *buf = NULL;
 	char *s, *p;
-	const char *search = CPUINFO_PROC;
+	const char *search = cpuinfo_proc;
 	size_t len = 0;
 	int ret = -1;
 
@@ -638,6 +634,25 @@ done:
 	return ret;
 }
 
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+			 struct perf_evlist *evlist __maybe_unused)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC {"model name", }
+#endif
+	const char *cpuinfo_procs[] = CPUINFO_PROC;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
+		int ret;
+		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
+		if (ret >= 0)
+			return ret;
+	}
+	return -1;
+}
+
+
 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
 			struct perf_evlist *evlist __maybe_unused)
 {
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4906cd81cb56..9402885a77f3 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -373,6 +373,9 @@ struct sort_entry sort_cpu = {
 static int64_t
 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return _sort__dso_cmp(left->branch_info->from.map,
 			      right->branch_info->from.map);
 }
@@ -380,13 +383,19 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
 					 size_t size, unsigned int width)
 {
-	return _hist_entry__dso_snprintf(he->branch_info->from.map,
-					 bf, size, width);
+	if (he->branch_info)
+		return _hist_entry__dso_snprintf(he->branch_info->from.map,
+						 bf, size, width);
+	else
+		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return _sort__dso_cmp(left->branch_info->to.map,
 			      right->branch_info->to.map);
 }
@@ -394,8 +403,11 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
 				       size_t size, unsigned int width)
 {
-	return _hist_entry__dso_snprintf(he->branch_info->to.map,
-					 bf, size, width);
+	if (he->branch_info)
+		return _hist_entry__dso_snprintf(he->branch_info->to.map,
+						 bf, size, width);
+	else
+		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
@@ -404,6 +416,12 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
 	struct addr_map_symbol *from_l = &left->branch_info->from;
 	struct addr_map_symbol *from_r = &right->branch_info->from;
 
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
+	from_l = &left->branch_info->from;
+	from_r = &right->branch_info->from;
+
 	if (!from_l->sym && !from_r->sym)
 		return _sort__addr_cmp(from_l->addr, from_r->addr);
 
@@ -413,8 +431,13 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int64_t
 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-	struct addr_map_symbol *to_l = &left->branch_info->to;
-	struct addr_map_symbol *to_r = &right->branch_info->to;
+	struct addr_map_symbol *to_l, *to_r;
+
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
+	to_l = &left->branch_info->to;
+	to_r = &right->branch_info->to;
 
 	if (!to_l->sym && !to_r->sym)
 		return _sort__addr_cmp(to_l->addr, to_r->addr);
@@ -425,19 +448,27 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
 					 size_t size, unsigned int width)
 {
-	struct addr_map_symbol *from = &he->branch_info->from;
-	return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
-					 he->level, bf, size, width);
+	if (he->branch_info) {
+		struct addr_map_symbol *from = &he->branch_info->from;
 
+		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
+						 he->level, bf, size, width);
+	}
+
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
 				       size_t size, unsigned int width)
 {
-	struct addr_map_symbol *to = &he->branch_info->to;
-	return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
-					 he->level, bf, size, width);
+	if (he->branch_info) {
+		struct addr_map_symbol *to = &he->branch_info->to;
 
+		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
+						 he->level, bf, size, width);
+	}
+
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 struct sort_entry sort_dso_from = {
@@ -471,11 +502,13 @@ struct sort_entry sort_sym_to = {
 static int64_t
 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-	const unsigned char mp = left->branch_info->flags.mispred !=
-		right->branch_info->flags.mispred;
-	const unsigned char p = left->branch_info->flags.predicted !=
-		right->branch_info->flags.predicted;
+	unsigned char mp, p;
+
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
 
+	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
+	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
 	return mp || p;
 }
 
@@ -483,10 +516,12 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
 				    size_t size, unsigned int width){
 	static const char *out = "N/A";
 
-	if (he->branch_info->flags.predicted)
-		out = "N";
-	else if (he->branch_info->flags.mispred)
-		out = "Y";
+	if (he->branch_info) {
+		if (he->branch_info->flags.predicted)
+			out = "N";
+		else if (he->branch_info->flags.mispred)
+			out = "Y";
+	}
 
 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
 }
@@ -989,6 +1024,9 @@ struct sort_entry sort_mem_dcacheline = {
 static int64_t
 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return left->branch_info->flags.abort !=
 		right->branch_info->flags.abort;
 }
@@ -996,10 +1034,15 @@ sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
 				      size_t size, unsigned int width)
 {
-	static const char *out = ".";
+	static const char *out = "N/A";
+
+	if (he->branch_info) {
+		if (he->branch_info->flags.abort)
+			out = "A";
+		else
+			out = ".";
+	}
 
-	if (he->branch_info->flags.abort)
-		out = "A";
 	return repsep_snprintf(bf, size, "%-*s", width, out);
 }
 
@@ -1013,6 +1056,9 @@ struct sort_entry sort_abort = {
 static int64_t
 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return left->branch_info->flags.in_tx !=
 		right->branch_info->flags.in_tx;
 }
@@ -1020,10 +1066,14 @@ sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
 				      size_t size, unsigned int width)
 {
-	static const char *out = ".";
+	static const char *out = "N/A";
 
-	if (he->branch_info->flags.in_tx)
-		out = "T";
+	if (he->branch_info) {
+		if (he->branch_info->flags.in_tx)
+			out = "T";
+		else
+			out = ".";
+	}
 
 	return repsep_snprintf(bf, size, "%-*s", width, out);
 }
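All of the new NULL checks funnel through sort.c's existing cmp_null() helper, which gives entries without branch data a stable ordering instead of a crash. Its behavior is roughly the usual tri-state comparison (a sketch, not copied from the tree):

	/* cmp_null(l, r): 0 if both are NULL, -1 if only l is NULL,
	 * 1 otherwise -- so NULL branch_info entries group together. */
	static int64_t cmp_null(const void *l, const void *r)
	{
		if (!l && !r)
			return 0;
		else if (!l)
			return -1;
		else
			return 1;
	}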
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 2b7b2d91c016..c41411726c7a 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -117,6 +117,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
 		if (!new)
 			return -ENOMEM;
 		list_add(&new->list, &thread->comm_list);
+
+		if (exec)
+			unwind__flush_access(thread);
 	}
 
 	thread->comm_set = true;
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index e060386165c5..4d45c0dfe343 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -539,11 +539,23 @@ int unwind__prepare_access(struct thread *thread)
 		return -ENOMEM;
 	}
 
+	unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
 	thread__set_priv(thread, addr_space);
 
 	return 0;
 }
 
+void unwind__flush_access(struct thread *thread)
+{
+	unw_addr_space_t addr_space;
+
+	if (callchain_param.record_mode != CALLCHAIN_DWARF)
+		return;
+
+	addr_space = thread__priv(thread);
+	unw_flush_cache(addr_space, 0, 0);
+}
+
 void unwind__finish_access(struct thread *thread)
 {
 	unw_addr_space_t addr_space;
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index c17c4855bdbc..f50b737235eb 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -23,6 +23,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 #ifdef HAVE_LIBUNWIND_SUPPORT
 int libunwind__arch_reg_id(int regnum);
 int unwind__prepare_access(struct thread *thread);
+void unwind__flush_access(struct thread *thread);
 void unwind__finish_access(struct thread *thread);
 #else
 static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
@@ -30,6 +31,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
 	return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif
 #else
@@ -49,6 +51,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
 	return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
 #endif /* __UNWIND_H */
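The unwind changes work as a pair: unwind__prepare_access() now opts in to libunwind's global caching for speed, and __thread__set_comm() flushes that cache on exec, since the old image's unwind tables are meaningless for the new one. A hedged stand-alone sketch of the underlying libunwind calls (my_accessors is a placeholder for perf's accessor table):

	#include <libunwind.h>

	extern unw_accessors_t my_accessors;	/* placeholder */

	void lifecycle(void)
	{
		unw_addr_space_t as = unw_create_addr_space(&my_accessors, 0);

		/* cache unwind info across samples ... */
		unw_set_caching_policy(as, UNW_CACHE_GLOBAL);

		/* ... and drop it all when the mappings change (exec);
		 * (0, 0) flushes the entire address range */
		unw_flush_cache(as, 0, 0);

		unw_destroy_addr_space(as);
	}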