diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-17 11:38:13 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-17 11:38:13 -0500 |
| commit | dd6f29da695dbfe5a8ff84ebfdd2110d68e8f511 (patch) | |
| tree | 015ae77d5d6abcb1ec56d79c51541ddf5f07ff46 /kernel | |
| parent | c5f1ac5e9afb199638414be77cbc22eb68e14d97 (diff) | |
| parent | 528871b456026e6127d95b1b2bd8e3a003dc1614 (diff) | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Two fixes on the kernel side: fix an over-eager condition that failed
larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
for a corner case, found by fuzzing"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Fix impossible ring-buffer sizes warning
perf/x86: Add check_period PMU callback
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/core.c | 16 | ||||
| -rw-r--r-- | kernel/events/ring_buffer.c | 2 |
2 files changed, 17 insertions, 1 deletion
diff --git a/kernel/events/core.c b/kernel/events/core.c index e5ede6918050..26d6edab051a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event, | |||
| 4963 | } | 4963 | } |
| 4964 | } | 4964 | } |
| 4965 | 4965 | ||
| 4966 | static int perf_event_check_period(struct perf_event *event, u64 value) | ||
| 4967 | { | ||
| 4968 | return event->pmu->check_period(event, value); | ||
| 4969 | } | ||
| 4970 | |||
| 4966 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 4971 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
| 4967 | { | 4972 | { |
| 4968 | u64 value; | 4973 | u64 value; |
| @@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) | |||
| 4979 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) | 4984 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
| 4980 | return -EINVAL; | 4985 | return -EINVAL; |
| 4981 | 4986 | ||
| 4987 | if (perf_event_check_period(event, value)) | ||
| 4988 | return -EINVAL; | ||
| 4989 | |||
| 4982 | event_function_call(event, __perf_event_period, &value); | 4990 | event_function_call(event, __perf_event_period, &value); |
| 4983 | 4991 | ||
| 4984 | return 0; | 4992 | return 0; |
| @@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) | |||
| 9391 | return 0; | 9399 | return 0; |
| 9392 | } | 9400 | } |
| 9393 | 9401 | ||
| 9402 | static int perf_event_nop_int(struct perf_event *event, u64 value) | ||
| 9403 | { | ||
| 9404 | return 0; | ||
| 9405 | } | ||
| 9406 | |||
| 9394 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); | 9407 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
| 9395 | 9408 | ||
| 9396 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) | 9409 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
| @@ -9691,6 +9704,9 @@ got_cpu_context: | |||
| 9691 | pmu->pmu_disable = perf_pmu_nop_void; | 9704 | pmu->pmu_disable = perf_pmu_nop_void; |
| 9692 | } | 9705 | } |
| 9693 | 9706 | ||
| 9707 | if (!pmu->check_period) | ||
| 9708 | pmu->check_period = perf_event_nop_int; | ||
| 9709 | |||
| 9694 | if (!pmu->event_idx) | 9710 | if (!pmu->event_idx) |
| 9695 | pmu->event_idx = perf_event_idx_default; | 9711 | pmu->event_idx = perf_event_idx_default; |
| 9696 | 9712 | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 309ef5a64af5..5ab4fe3b1dcc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
| @@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | |||
| 734 | size = sizeof(struct ring_buffer); | 734 | size = sizeof(struct ring_buffer); |
| 735 | size += nr_pages * sizeof(void *); | 735 | size += nr_pages * sizeof(void *); |
| 736 | 736 | ||
| 737 | if (order_base_2(size) >= MAX_ORDER) | 737 | if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) |
| 738 | goto fail; | 738 | goto fail; |
| 739 | 739 | ||
| 740 | rb = kzalloc(size, GFP_KERNEL); | 740 | rb = kzalloc(size, GFP_KERNEL); |
