-rw-r--r--  kernel/events/core.c    | 95
-rw-r--r--  kernel/events/uprobes.c |  5
2 files changed, 71 insertions, 29 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1903b8f3a705..5650f5317e0c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -242,18 +242,6 @@ unlock:
 	return ret;
 }
 
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
-	struct event_function_struct efs = {
-		.event = event,
-		.func = func,
-		.data = data,
-	};
-
-	int ret = event_function(&efs);
-	WARN_ON_ONCE(ret);
-}
-
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct task_struct *task = READ_ONCE(ctx->task);
+	struct perf_event_context *task_ctx = NULL;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (task) {
+		if (task == TASK_TOMBSTONE)
+			return;
+
+		task_ctx = ctx;
+	}
+
+	perf_ctx_lock(cpuctx, task_ctx);
+
+	task = ctx->task;
+	if (task == TASK_TOMBSTONE)
+		goto unlock;
+
+	if (task) {
+		/*
+		 * We must be either inactive or active and the right task,
+		 * otherwise we're screwed, since we cannot IPI to somewhere
+		 * else.
+		 */
+		if (ctx->is_active) {
+			if (WARN_ON_ONCE(task != current))
+				goto unlock;
+
+			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+				goto unlock;
+		}
+	} else {
+		WARN_ON_ONCE(&cpuctx->ctx != ctx);
+	}
+
+	func(event, cpuctx, ctx, data);
+unlock:
+	perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 #define PERF_FLAG_ALL	(PERF_FLAG_FD_NO_GROUP |\
 			 PERF_FLAG_FD_OUTPUT  |\
 			 PERF_FLAG_PID_CGROUP |\
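[Note, not part of the patch] event_function_local() is for callers that already run on the event's CPU with interrupts disabled, so no IPI is needed. A minimal sketch of such a caller, under the assumption that __perf_event_disable() exists in this file with the event_f signature (the wrapper name below is hypothetical, purely for illustration):

	/* Sketch only: disable an event from IRQ context on its own CPU. */
	static void perf_event_disable_this_cpu(struct perf_event *event)
	{
		/* Caller guarantees: IRQs off, running on event->ctx's CPU. */
		event_function_local(event, __perf_event_disable, NULL);
	}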
@@ -3513,9 +3549,10 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.group = group,
 			.ret = 0,
 		};
-		smp_call_function_single(event->oncpu,
-					 __perf_event_read, &data, 1);
-		ret = data.ret;
+		ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+		/* The event must have been read from an online CPU: */
+		WARN_ON_ONCE(ret);
+		ret = ret ? : data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
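[Note, not part of the patch] `ret = ret ? : data.ret;` is the GCC conditional-with-omitted-middle-operand extension: a non-zero error from smp_call_function_single() (e.g. the target CPU was not online) takes precedence, otherwise the value that __perf_event_read() stored in data.ret is returned. Spelled out in plain C, for illustration only:

	/* Equivalent of "ret = ret ? : data.ret;" */
	if (ret == 0)
		ret = data.ret;	/* IPI was delivered; use the callback's result */
	/* otherwise keep the smp_call_function_single() error (and warn above) */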
@@ -6584,15 +6621,6 @@ got_name:
 }
 
 /*
- * Whether this @filter depends on a dynamic object which is not loaded
- * yet or its load addresses are not known.
- */
-static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
-{
-	return filter->filter && filter->inode;
-}
-
-/*
  * Check whether inode and address range match filter criteria.
  */
 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
@@ -6653,6 +6681,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	/*
+	 * Data tracing isn't supported yet and as such there is no need
+	 * to keep track of anything that isn't related to executable code:
+	 */
+	if (!(vma->vm_flags & VM_EXEC))
+		return;
+
 	rcu_read_lock();
 	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@ -7805,7 +7840,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	list_for_each_entry(filter, &ifh->list, entry) {
 		event->addr_filters_offs[count] = 0;
 
-		if (perf_addr_filter_needs_mmap(filter))
+		/*
+		 * Adjust base offset if the filter is associated to a binary
+		 * that needs to be mapped:
+		 */
+		if (filter->inode)
 			event->addr_filters_offs[count] =
 				perf_addr_filter_apply(filter, mm);
 
@@ -7936,8 +7975,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			goto fail;
 		}
 
-		if (token == IF_SRC_FILE) {
-			filename = match_strdup(&args[2]);
+		if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+			int fpos = filter->range ? 2 : 1;
+
+			filename = match_strdup(&args[fpos]);
 			if (!filename) {
 				ret = -ENOMEM;
 				goto fail;
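[Note, not part of the patch] Handling IF_SRC_FILEADDR as well means file-backed filters are parsed both with and without a size, and fpos selects where the file name lands among the matched arguments. Schematic forms only (the exact accepted number formats are whatever the if_tokens match table in this file defines):

	filter <start>/<size>@/path/to/object    <- range filter: file name is args[2]
	start <addr>@/path/to/object             <- single address: file name is args[1]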
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index b7a525ab2083..8c50276b60d1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	err = -EAGAIN;
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
-	if (!ptep)
+	if (!ptep) {
+		mem_cgroup_cancel_charge(kpage, memcg, false);
 		goto unlock;
+	}
 
 	get_page(kpage);
 	page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	err = 0;
 unlock:
-	mem_cgroup_cancel_charge(kpage, memcg, false);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	unlock_page(page);
 	return err;
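[Note, not part of the patch] The uprobes change restores the usual memcg try/commit/cancel pairing in __replace_page(): the charge taken up front must be cancelled only on the failure path, because the success path commits it. A rough sketch of the intended flow, assuming the 4.7-era memcg API (mem_cgroup_try_charge()/mem_cgroup_commit_charge(), as used elsewhere in this function):

	err = mem_cgroup_try_charge(kpage, mm, GFP_KERNEL, &memcg, false);
	if (err)
		return err;
	...
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep) {
		/* failure: undo the charge taken above */
		mem_cgroup_cancel_charge(kpage, memcg, false);
		goto unlock;
	}
	...
	/* success: the charge becomes final and must not be cancelled later */
	mem_cgroup_commit_charge(kpage, memcg, false, false);

Before this change the cancel sat on the common unlock path, so a successful replace both committed and cancelled the same charge; the fix cancels only when page_check_address() fails.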