author    Ingo Molnar <mingo@kernel.org>    2014-12-08 05:50:24 -0500
committer Ingo Molnar <mingo@kernel.org>    2014-12-08 05:50:24 -0500
commit    2a2662bf88e693d477ef08351d03934f7bc0b51c
tree      cef243df159cc12ada7e97998a253df7c0abb2a2 /arch/x86/kernel
parent    b2776bf7149bddd1f4161f14f79520f17fc1d71d
parent    36748b9518a2437beffe861b47dff6d12b736b3f
Merge branch 'perf/core-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into perf/hw_breakpoints
Pull AMD range breakpoints support from Frederic Weisbecker:
" - Extend breakpoint tools and core to support address range through perf
event with initial backend support for AMD extended breakpoints.
Syntax is:
perf record -e mem:addr/len:type
For example set write breakpoint from 0x1000 to 0x1200 (0x1000 + 512)
perf record -e mem:0x1000/512:w
- Clean up a bit breakpoint code validation
It has been acked by Jiri and Oleg. "
Signed-off-by: Ingo Molnar <mingo@kernel.org>
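
For reference, the same kind of range breakpoint can be requested programmatically through perf_event_attr. The sketch below is illustrative only (the address and length mirror the example above); it assumes an AMD CPU with the breakpoint-extension feature, since the backend added in this series rejects non-power-of-two lengths with -EINVAL and returns -EOPNOTSUPP when bpext is not available:

    /* Illustrative user-space sketch, not part of this merge. */
    #include <linux/hw_breakpoint.h>        /* HW_BREAKPOINT_W */
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size    = sizeof(attr);
            attr.type    = PERF_TYPE_BREAKPOINT;
            attr.bp_type = HW_BREAKPOINT_W;     /* write breakpoint */
            attr.bp_addr = 0x1000;              /* start of the watched range */
            attr.bp_len  = 512;                 /* power of two: covers 0x1000-0x11ff */

            /* monitor the calling task on any CPU */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0 ? 1 : 0;
    }
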
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/amd.c                           | 19
 arch/x86/kernel/cpu/perf_event.h                    |  4
 arch/x86/kernel/cpu/perf_event_amd_ibs.c            | 15
 arch/x86/kernel/cpu/perf_event_intel_ds.c           | 81
 arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c |  8
 arch/x86/kernel/hw_breakpoint.c                     | 45
 6 files changed, 115 insertions(+), 57 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 813d29d00a17..abe4ec760db3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -870,3 +870,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
         return false;
 }
+
+void set_dr_addr_mask(unsigned long mask, int dr)
+{
+        if (!cpu_has_bpext)
+                return;
+
+        switch (dr) {
+        case 0:
+                wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
+                break;
+        case 1:
+        case 2:
+        case 3:
+                wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
+                break;
+        default:
+                break;
+        }
+}
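
The helper above programs the AMD family-16h DR0-DR3 address-mask MSRs. As a rough illustration of what the mask means for range matching (inferred from info->mask being set to bp_len - 1 in the hw_breakpoint.c changes below, not spelled out in this hunk): address bits covered by the mask are ignored in the debug-register comparison, so DR0 = 0x1000 with mask 0x1ff matches any access in 0x1000-0x11ff.

    /* Hypothetical predicate, for illustration only. */
    static int bp_range_hit(unsigned long dr_addr, unsigned long mask,
                            unsigned long access_addr)
    {
            /* bits set in the mask are excluded from the comparison */
            return (access_addr & ~mask) == (dr_addr & ~mask);
    }

    /* bp_range_hit(0x1000, 0x1ff, 0x11f8) -> 1; bp_range_hit(0x1000, 0x1ff, 0x1200) -> 0 */
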
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fc5eb390b368..4e6cdb0ddc70 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -253,6 +253,10 @@ struct cpu_hw_events {
 #define INTEL_UEVENT_CONSTRAINT(c, n) \
         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
 
+/* Like UEVENT_CONSTRAINT, but match flags too */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \
+        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
 #define INTEL_PLD_CONSTRAINT(c, n) \
         __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index cbb1be3ed9e4..a61f5c6911da 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -565,6 +565,21 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
                                        perf_ibs->offset_max,
                                        offset + 1);
         } while (offset < offset_max);
+        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+                /*
+                 * Read IbsBrTarget and IbsOpData4 separately
+                 * depending on their availability.
+                 * Can't add to offset_max as they are staggered
+                 */
+                if (ibs_caps & IBS_CAPS_BRNTRGT) {
+                        rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+                        size++;
+                }
+                if (ibs_caps & IBS_CAPS_OPDATA4) {
+                        rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+                        size++;
+                }
+        }
         ibs_data.size = sizeof(u64) * size;
 
         regs = *iregs;
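
The extra MSRs above are appended to the PERF_SAMPLE_RAW payload of IBS op samples when the CPU advertises IBS_CAPS_BRNTRGT / IBS_CAPS_OPDATA4. A minimal user-space sketch of requesting such raw samples follows; the sysfs path for the dynamic ibs_op PMU type is an assumption to verify on the target system, and the exact raw-record layout depends on the running kernel:

    /* Illustrative sketch, not part of this merge. */
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int ibs_op_pmu_type(void)
    {
            FILE *f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
            int type = -1;

            if (f) {
                    if (fscanf(f, "%d", &type) != 1)
                            type = -1;
                    fclose(f);
            }
            return type;    /* dynamic PMU type id, or -1 if IBS is absent */
    }

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size          = sizeof(attr);
            attr.type          = ibs_op_pmu_type();
            attr.sample_type   = PERF_SAMPLE_RAW;   /* per-sample IBS MSR dump */
            attr.sample_period = 100000;

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0 ? 1 : 0;
    }
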
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 46211bcc813e..495ae9793628 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -552,18 +552,18 @@ int intel_pmu_drain_bts_buffer(void)
  * PEBS
  */
 struct event_constraint intel_core2_pebs_event_constraints[] = {
-        INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
-        INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
-        INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
-        INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
-        INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
         EVENT_CONSTRAINT_END
 };
 
 struct event_constraint intel_atom_pebs_event_constraints[] = {
-        INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
-        INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
-        INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
         EVENT_CONSTRAINT_END
 };
 
@@ -577,36 +577,36 @@ struct event_constraint intel_slm_pebs_event_constraints[] = {
 
 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
         INTEL_PLD_CONSTRAINT(0x100b, 0xf),    /* MEM_INST_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
-        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
-        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
         INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
-        INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
-        INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
-        INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
-        INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
         EVENT_CONSTRAINT_END
 };
 
 struct event_constraint intel_westmere_pebs_event_constraints[] = {
         INTEL_PLD_CONSTRAINT(0x100b, 0xf),    /* MEM_INST_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
-        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
-        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
         INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
-        INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
-        INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
-        INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
+        INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
         EVENT_CONSTRAINT_END
 };
 
 struct event_constraint intel_snb_pebs_event_constraints[] = {
-        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
         INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
         INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
         /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
@@ -617,7 +617,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 };
 
 struct event_constraint intel_ivb_pebs_event_constraints[] = {
-        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
         INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
         INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
         /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
@@ -628,7 +628,7 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
 };
 
 struct event_constraint intel_hsw_pebs_event_constraints[] = {
-        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+        INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
         INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
         /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
         INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
@@ -886,6 +886,29 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         regs.bp = pebs->bp;
         regs.sp = pebs->sp;
 
+        if (sample_type & PERF_SAMPLE_REGS_INTR) {
+                regs.ax = pebs->ax;
+                regs.bx = pebs->bx;
+                regs.cx = pebs->cx;
+                regs.dx = pebs->dx;
+                regs.si = pebs->si;
+                regs.di = pebs->di;
+                regs.bp = pebs->bp;
+                regs.sp = pebs->sp;
+
+                regs.flags = pebs->flags;
+#ifndef CONFIG_X86_32
+                regs.r8 = pebs->r8;
+                regs.r9 = pebs->r9;
+                regs.r10 = pebs->r10;
+                regs.r11 = pebs->r11;
+                regs.r12 = pebs->r12;
+                regs.r13 = pebs->r13;
+                regs.r14 = pebs->r14;
+                regs.r15 = pebs->r15;
+#endif
+        }
+
         if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
                 regs.ip = pebs->real_ip;
                 regs.flags |= PERF_EFLAGS_EXACT;
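
The PEBS-captured machine state above is what PERF_SAMPLE_REGS_INTR hands back to user space. A minimal sketch of requesting it on a precise (PEBS) event; the particular event and register selection are illustrative assumptions:

    /* Illustrative sketch, not part of this merge. */
    #include <asm/perf_regs.h>      /* PERF_REG_X86_* */
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size             = sizeof(attr);
            attr.type             = PERF_TYPE_HARDWARE;
            attr.config           = PERF_COUNT_HW_CPU_CYCLES;
            attr.precise_ip       = 2;              /* ask for PEBS */
            attr.sample_period    = 100003;
            attr.sample_type      = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR;
            attr.sample_regs_intr = (1ULL << PERF_REG_X86_IP) |
                                    (1ULL << PERF_REG_X86_SP) |
                                    (1ULL << PERF_REG_X86_AX);

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0 ? 1 : 0;
    }
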
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index f9ed429d6e4f..745b158e9a65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -449,7 +449,11 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = {
 static struct uncore_event_desc snbep_uncore_imc_events[] = {
         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
         { /* end: all zeroes */ },
 };
 
@@ -2036,7 +2040,11 @@ static struct intel_uncore_type hswep_uncore_ha = {
 static struct uncore_event_desc hswep_uncore_imc_events[] = {
         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
+        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
         { /* end: all zeroes */ },
 };
 
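
A note on the scale strings added above: 6.103515625e-5 is exactly 64 / 2^20, so if each CAS transaction corresponds to one 64-byte cache line (the usual reading, not stated explicitly in the patch), cas_count_read * 6.103515625e-5 gives the read traffic in MiB, matching the "MiB" unit string.
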
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 3d5fb509bdeb..7114ba220fd4 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
         *dr7 |= encode_dr7(i, info->len, info->type);
 
         set_debugreg(*dr7, 7);
+        if (info->mask)
+                set_dr_addr_mask(info->mask, i);
 
         return 0;
 }
@@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
         *dr7 &= ~__encode_dr7(i, info->len, info->type);
 
         set_debugreg(*dr7, 7);
-}
-
-static int get_hbp_len(u8 hbp_len)
-{
-        unsigned int len_in_bytes = 0;
-
-        switch (hbp_len) {
-        case X86_BREAKPOINT_LEN_1:
-                len_in_bytes = 1;
-                break;
-        case X86_BREAKPOINT_LEN_2:
-                len_in_bytes = 2;
-                break;
-        case X86_BREAKPOINT_LEN_4:
-                len_in_bytes = 4;
-                break;
-#ifdef CONFIG_X86_64
-        case X86_BREAKPOINT_LEN_8:
-                len_in_bytes = 8;
-                break;
-#endif
-        }
-        return len_in_bytes;
+        if (info->mask)
+                set_dr_addr_mask(0, i);
 }
 
 /*
@@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
         va = info->address;
-        len = get_hbp_len(info->len);
+        len = bp->attr.bp_len;
 
         return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
         }
 
         /* Len */
+        info->mask = 0;
+
         switch (bp->attr.bp_len) {
         case HW_BREAKPOINT_LEN_1:
                 info->len = X86_BREAKPOINT_LEN_1;
@@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
                 break;
 #endif
         default:
-                return -EINVAL;
+                if (!is_power_of_2(bp->attr.bp_len))
+                        return -EINVAL;
+                if (!cpu_has_bpext)
+                        return -EOPNOTSUPP;
+                info->mask = bp->attr.bp_len - 1;
+                info->len = X86_BREAKPOINT_LEN_1;
         }
 
         return 0;
 }
+
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
@@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         if (ret)
                 return ret;
 
-        ret = -EINVAL;
-
         switch (info->len) {
         case X86_BREAKPOINT_LEN_1:
                 align = 0;
+                if (info->mask)
+                        align = info->mask;
                 break;
         case X86_BREAKPOINT_LEN_2:
                 align = 1;
@@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                 break;
 #endif
         default:
-                return ret;
+                WARN_ON_ONCE(1);
         }
 
         /*