diff options
| -rw-r--r-- | arch/x86/events/amd/core.c | 8 | ||||
| -rw-r--r-- | arch/x86/events/core.c | 10 | ||||
| -rw-r--r-- | arch/x86/events/intel/ds.c | 35 | ||||
| -rw-r--r-- | arch/x86/events/intel/uncore.c | 8 | ||||
| -rw-r--r-- | arch/x86/events/intel/uncore_snb.c | 12 | ||||
| -rw-r--r-- | arch/x86/events/perf_event.h | 2 | ||||
| -rw-r--r-- | kernel/events/core.c | 13 |
7 files changed, 50 insertions, 38 deletions
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index f5f4b3fbbbc2..afb222b63cae 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
| @@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void) | |||
| 662 | pr_cont("Fam15h "); | 662 | pr_cont("Fam15h "); |
| 663 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; | 663 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
| 664 | break; | 664 | break; |
| 665 | 665 | case 0x17: | |
| 666 | pr_cont("Fam17h "); | ||
| 667 | /* | ||
| 668 | * In family 17h, there are no event constraints in the PMC hardware. | ||
| 669 | * We fall back to using the default amd_get_event_constraints. | ||
| 670 | */ | ||
| 671 | break; | ||
| 666 | default: | 672 | default: |
| 667 | pr_err("core perfctr but no constraints; unknown hardware!\n"); | 673 | pr_err("core perfctr but no constraints; unknown hardware!\n"); |
| 668 | return -ENODEV; | 674 | return -ENODEV; |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d31735f37ed7..9d4bf3ab049e 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
| @@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent | |||
| 2352 | frame.next_frame = 0; | 2352 | frame.next_frame = 0; |
| 2353 | frame.return_address = 0; | 2353 | frame.return_address = 0; |
| 2354 | 2354 | ||
| 2355 | if (!access_ok(VERIFY_READ, fp, 8)) | 2355 | if (!valid_user_frame(fp, sizeof(frame))) |
| 2356 | break; | 2356 | break; |
| 2357 | 2357 | ||
| 2358 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); | 2358 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); |
| @@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent | |||
| 2362 | if (bytes != 0) | 2362 | if (bytes != 0) |
| 2363 | break; | 2363 | break; |
| 2364 | 2364 | ||
| 2365 | if (!valid_user_frame(fp, sizeof(frame))) | ||
| 2366 | break; | ||
| 2367 | |||
| 2368 | perf_callchain_store(entry, cs_base + frame.return_address); | 2365 | perf_callchain_store(entry, cs_base + frame.return_address); |
| 2369 | fp = compat_ptr(ss_base + frame.next_frame); | 2366 | fp = compat_ptr(ss_base + frame.next_frame); |
| 2370 | } | 2367 | } |
| @@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
| 2413 | frame.next_frame = NULL; | 2410 | frame.next_frame = NULL; |
| 2414 | frame.return_address = 0; | 2411 | frame.return_address = 0; |
| 2415 | 2412 | ||
| 2416 | if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) | 2413 | if (!valid_user_frame(fp, sizeof(frame))) |
| 2417 | break; | 2414 | break; |
| 2418 | 2415 | ||
| 2419 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); | 2416 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); |
| @@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
| 2423 | if (bytes != 0) | 2420 | if (bytes != 0) |
| 2424 | break; | 2421 | break; |
| 2425 | 2422 | ||
| 2426 | if (!valid_user_frame(fp, sizeof(frame))) | ||
| 2427 | break; | ||
| 2428 | |||
| 2429 | perf_callchain_store(entry, frame.return_address); | 2423 | perf_callchain_store(entry, frame.return_address); |
| 2430 | fp = (void __user *)frame.next_frame; | 2424 | fp = (void __user *)frame.next_frame; |
| 2431 | } | 2425 | } |
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 0319311dbdbb..be202390bbd3 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c | |||
| @@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event, | |||
| 1108 | } | 1108 | } |
| 1109 | 1109 | ||
| 1110 | /* | 1110 | /* |
| 1111 | * We use the interrupt regs as a base because the PEBS record | 1111 | * We use the interrupt regs as a base because the PEBS record does not |
| 1112 | * does not contain a full regs set, specifically it seems to | 1112 | * contain a full regs set, specifically it seems to lack segment |
| 1113 | * lack segment descriptors, which get used by things like | 1113 | * descriptors, which get used by things like user_mode(). |
| 1114 | * user_mode(). | ||
| 1115 | * | 1114 | * |
| 1116 | * In the simple case fix up only the IP and BP,SP regs, for | 1115 | * In the simple case fix up only the IP for PERF_SAMPLE_IP. |
| 1117 | * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. | 1116 | * |
| 1118 | * A possible PERF_SAMPLE_REGS will have to transfer all regs. | 1117 | * We must however always use BP,SP from iregs for the unwinder to stay |
| 1118 | * sane; the record BP,SP can point into thin air when the record is | ||
| 1119 | * from a previous PMI context or an (I)RET happened between the record | ||
| 1120 | * and PMI. | ||
| 1119 | */ | 1121 | */ |
| 1120 | *regs = *iregs; | 1122 | *regs = *iregs; |
| 1121 | regs->flags = pebs->flags; | 1123 | regs->flags = pebs->flags; |
| 1122 | set_linear_ip(regs, pebs->ip); | 1124 | set_linear_ip(regs, pebs->ip); |
| 1123 | regs->bp = pebs->bp; | ||
| 1124 | regs->sp = pebs->sp; | ||
| 1125 | 1125 | ||
| 1126 | if (sample_type & PERF_SAMPLE_REGS_INTR) { | 1126 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
| 1127 | regs->ax = pebs->ax; | 1127 | regs->ax = pebs->ax; |
| @@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event, | |||
| 1130 | regs->dx = pebs->dx; | 1130 | regs->dx = pebs->dx; |
| 1131 | regs->si = pebs->si; | 1131 | regs->si = pebs->si; |
| 1132 | regs->di = pebs->di; | 1132 | regs->di = pebs->di; |
| 1133 | regs->bp = pebs->bp; | ||
| 1134 | regs->sp = pebs->sp; | ||
| 1135 | 1133 | ||
| 1136 | regs->flags = pebs->flags; | 1134 | /* |
| 1135 | * Per the above; only set BP,SP if we don't need callchains. | ||
| 1136 | * | ||
| 1137 | * XXX: does this make sense? | ||
| 1138 | */ | ||
| 1139 | if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { | ||
| 1140 | regs->bp = pebs->bp; | ||
| 1141 | regs->sp = pebs->sp; | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | /* | ||
| 1145 | * Preserve PERF_EFLAGS_VM from set_linear_ip(). | ||
| 1146 | */ | ||
| 1147 | regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); | ||
| 1137 | #ifndef CONFIG_X86_32 | 1148 | #ifndef CONFIG_X86_32 |
| 1138 | regs->r8 = pebs->r8; | 1149 | regs->r8 = pebs->r8; |
| 1139 | regs->r9 = pebs->r9; | 1150 | regs->r9 = pebs->r9; |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index efca2685d876..dbaaf7dc8373 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
| @@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, | |||
| 319 | */ | 319 | */ |
| 320 | static int uncore_pmu_event_init(struct perf_event *event); | 320 | static int uncore_pmu_event_init(struct perf_event *event); |
| 321 | 321 | ||
| 322 | static bool is_uncore_event(struct perf_event *event) | 322 | static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) |
| 323 | { | 323 | { |
| 324 | return event->pmu->event_init == uncore_pmu_event_init; | 324 | return &box->pmu->pmu == event->pmu; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | static int | 327 | static int |
| @@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, | |||
| 340 | 340 | ||
| 341 | n = box->n_events; | 341 | n = box->n_events; |
| 342 | 342 | ||
| 343 | if (is_uncore_event(leader)) { | 343 | if (is_box_event(box, leader)) { |
| 344 | box->event_list[n] = leader; | 344 | box->event_list[n] = leader; |
| 345 | n++; | 345 | n++; |
| 346 | } | 346 | } |
| @@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, | |||
| 349 | return n; | 349 | return n; |
| 350 | 350 | ||
| 351 | list_for_each_entry(event, &leader->sibling_list, group_entry) { | 351 | list_for_each_entry(event, &leader->sibling_list, group_entry) { |
| 352 | if (!is_uncore_event(event) || | 352 | if (!is_box_event(box, event) || |
| 353 | event->state <= PERF_EVENT_STATE_OFF) | 353 | event->state <= PERF_EVENT_STATE_OFF) |
| 354 | continue; | 354 | continue; |
| 355 | 355 | ||
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 81195cca7eae..a3dcc12bef4a 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c | |||
| @@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags) | |||
| 490 | 490 | ||
| 491 | snb_uncore_imc_event_start(event, 0); | 491 | snb_uncore_imc_event_start(event, 0); |
| 492 | 492 | ||
| 493 | box->n_events++; | ||
| 494 | |||
| 495 | return 0; | 493 | return 0; |
| 496 | } | 494 | } |
| 497 | 495 | ||
| 498 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) | 496 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) |
| 499 | { | 497 | { |
| 500 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
| 501 | int i; | ||
| 502 | |||
| 503 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); | 498 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); |
| 504 | |||
| 505 | for (i = 0; i < box->n_events; i++) { | ||
| 506 | if (event == box->event_list[i]) { | ||
| 507 | --box->n_events; | ||
| 508 | break; | ||
| 509 | } | ||
| 510 | } | ||
| 511 | } | 499 | } |
| 512 | 500 | ||
| 513 | int snb_pci2phy_map_init(int devid) | 501 | int snb_pci2phy_map_init(int devid) |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 5874d8de1f8d..a77ee026643d 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
| @@ -113,7 +113,7 @@ struct debug_store { | |||
| 113 | * Per register state. | 113 | * Per register state. |
| 114 | */ | 114 | */ |
| 115 | struct er_account { | 115 | struct er_account { |
| 116 | raw_spinlock_t lock; /* per-core: protect structure */ | 116 | raw_spinlock_t lock; /* per-core: protect structure */ |
| 117 | u64 config; /* extra MSR config */ | 117 | u64 config; /* extra MSR config */ |
| 118 | u64 reg; /* extra MSR number */ | 118 | u64 reg; /* extra MSR number */ |
| 119 | atomic_t ref; /* reference count */ | 119 | atomic_t ref; /* reference count */ |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e292132efac..6ee1febdf6ff 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event, | |||
| 902 | * this will always be called from the right CPU. | 902 | * this will always be called from the right CPU. |
| 903 | */ | 903 | */ |
| 904 | cpuctx = __get_cpu_context(ctx); | 904 | cpuctx = __get_cpu_context(ctx); |
| 905 | |||
| 906 | /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */ | ||
| 907 | if (perf_cgroup_from_task(current, ctx) != event->cgrp) { | ||
| 908 | /* | ||
| 909 | * We are removing the last cpu event in this context. | ||
| 910 | * If that event is not active in this cpu, cpuctx->cgrp | ||
| 911 | * should've been cleared by perf_cgroup_switch. | ||
| 912 | */ | ||
| 913 | WARN_ON_ONCE(!add && cpuctx->cgrp); | ||
| 914 | return; | ||
| 915 | } | ||
| 905 | cpuctx->cgrp = add ? event->cgrp : NULL; | 916 | cpuctx->cgrp = add ? event->cgrp : NULL; |
| 906 | } | 917 | } |
| 907 | 918 | ||
| @@ -8018,6 +8029,7 @@ restart: | |||
| 8018 | * if <size> is not specified, the range is treated as a single address. | 8029 | * if <size> is not specified, the range is treated as a single address. |
| 8019 | */ | 8030 | */ |
| 8020 | enum { | 8031 | enum { |
| 8032 | IF_ACT_NONE = -1, | ||
| 8021 | IF_ACT_FILTER, | 8033 | IF_ACT_FILTER, |
| 8022 | IF_ACT_START, | 8034 | IF_ACT_START, |
| 8023 | IF_ACT_STOP, | 8035 | IF_ACT_STOP, |
| @@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = { | |||
| 8041 | { IF_SRC_KERNEL, "%u/%u" }, | 8053 | { IF_SRC_KERNEL, "%u/%u" }, |
| 8042 | { IF_SRC_FILEADDR, "%u@%s" }, | 8054 | { IF_SRC_FILEADDR, "%u@%s" }, |
| 8043 | { IF_SRC_KERNELADDR, "%u" }, | 8055 | { IF_SRC_KERNELADDR, "%u" }, |
| 8056 | { IF_ACT_NONE, NULL }, | ||
| 8044 | }; | 8057 | }; |
| 8045 | 8058 | ||
| 8046 | /* | 8059 | /* |
