author    Linus Torvalds <torvalds@linux-foundation.org>   2019-02-17 11:38:13 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>   2019-02-17 11:38:13 -0500
commit    dd6f29da695dbfe5a8ff84ebfdd2110d68e8f511 (patch)
tree      015ae77d5d6abcb1ec56d79c51541ddf5f07ff46
parent    c5f1ac5e9afb199638414be77cbc22eb68e14d97 (diff)
parent    528871b456026e6127d95b1b2bd8e3a003dc1614 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Two fixes on the kernel side: fix an over-eager condition that failed
larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
for a corner case, found by fuzzing"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Fix impossible ring-buffer sizes warning
perf/x86: Add check_period PMU callback
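
Both kernel-side fixes sit behind interfaces user space can drive directly; the new check_period callback in particular is exercised through the PERF_EVENT_IOC_PERIOD ioctl. A minimal sketch of that path (the event choice and error handling here are illustrative only):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            __u64 period;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;

            /* Count cycles for the current task on any CPU. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* Request a new sample period; after this merge the PMU's
             * check_period() callback may veto the value with -EINVAL. */
            period = 1000;
            if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
                    return 1;

            close(fd);
            return 0;
    }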
 arch/x86/events/core.c       | 14 ++++++++++++++
 arch/x86/events/intel/core.c |  9 +++++++++
 arch/x86/events/perf_event.h | 16 ++++++++++++++--
 include/linux/perf_event.h   |  5 +++++
 kernel/events/core.c         | 16 ++++++++++++++++
 kernel/events/ring_buffer.c  |  2 +-
 6 files changed, 59 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a19712e20..b684f0294f35 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable	= x86_pmu_enable,
 	.pmu_disable	= x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
 	.event_idx	= x86_pmu_event_idx,
 	.sched_task	= x86_pmu_sched_task,
 	.task_ctx_size	= sizeof(struct x86_perf_task_context),
+	.check_period	= x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
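
The x86 wrapper above combines two independent vetoes: the vendor's own check_period() hook, and a sanity check against limit_period(), which some Intel PMUs use to round too-small periods up in hardware. A toy illustration of the second veto, with a hypothetical limit_period enforcing a floor of 128 (not a real vendor hook):

    /* Hypothetical: hardware cannot count fewer than 128 events. */
    static u64 toy_limit_period(struct perf_event *event, u64 period)
    {
            return period < 128 ? 128 : period;
    }

With x86_pmu.limit_period set to this, a PERF_EVENT_IOC_PERIOD request of 100 now fails: toy_limit_period(event, 100) == 128 > 100, so x86_pmu_check_period() returns -EINVAL rather than letting the hardware silently run with a larger period than the user asked for.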
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index daafb893449b..730978dff63f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
 	.cpu_dead		= intel_pmu_cpu_dead,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
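
intel_pmu_check_period() closes the crash the fuzzer found: an event created as an ordinary branch counter could later be converted into a BTS event (branch-instructions with period 1) via PERF_EVENT_IOC_PERIOD, after the BTS machinery had already decided not to set itself up for it. A sketch of that sequence, assuming the reported scenario (error handling elided):

    struct perf_event_attr attr = {
            .size          = sizeof(attr),
            .type          = PERF_TYPE_HARDWARE,
            .config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
            .sample_period = 2,     /* not a BTS event at creation */
    };
    __u64 new_period = 1;           /* period 1 would make it BTS */

    int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

    /* Previously accepted, later crashing in the BTS code;
     * with this fix the ioctl fails with -EINVAL instead. */
    ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);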
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78d7b7031bfc..d46fd6754d92 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
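
The header change splits the BTS predicate so it can be evaluated against a candidate period as well as the event's installed one; intel_pmu_has_bts() keeps its old meaning as a thin wrapper. Roughly, the two kinds of call site now look like this:

    /* Existing users keep testing the event's current period ... */
    if (intel_pmu_has_bts(event))
            /* ... BTS-specific handling ... */;

    /* ... while intel_pmu_check_period() can test a value that has
     * not yet been written to event->hw.sample_period. */
    if (intel_pmu_has_bts_period(event, value))
            return -EINVAL;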
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d5c551a5add..e1a051724f7e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -447,6 +447,11 @@ struct pmu {
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
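
Defaulting the missing callback to a no-op keeps the ioctl path branch-free: perf_event_check_period() calls event->pmu->check_period() unconditionally, with no NULL test. A minimal sketch of a PMU driver relying on the default (hypothetical driver, only the relevant fields shown):

    static struct pmu my_pmu = {
            .task_ctx_nr = perf_sw_context,
            .event_init  = my_event_init,
            .add         = my_add,
            .del         = my_del,
            /* .check_period left NULL: perf_pmu_register() installs
             * perf_event_nop_int(), which accepts every period. */
    };

    perf_pmu_register(&my_pmu, "my_pmu", -1);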
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
-	if (order_base_2(size) >= MAX_ORDER)
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
 		goto fail;
 
 	rb = kzalloc(size, GFP_KERNEL);
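
The ring-buffer fix is a units mismatch: size is in bytes, while MAX_ORDER bounds allocations in pages. Assuming 4 KiB pages (PAGE_SHIFT = 12) and a typical MAX_ORDER of 11:

    /*
     * old: order_base_2(size) >= MAX_ORDER (11)
     *      -> rejected any metadata size above 1 KiB, i.e. ring buffers
     *         of only ~100 pages and up, which are perfectly valid
     *
     * new: order_base_2(size) >= PAGE_SHIFT + MAX_ORDER (23)
     *      -> rejects sizes above 4 MiB, matching
     *         KMALLOC_MAX_SIZE = 1 << (PAGE_SHIFT + MAX_ORDER - 1),
     *         the largest allocation the kzalloc() below can satisfy
     */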