diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-15 17:07:02 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-15 17:07:02 -0400 |
commit | 652df602f8c5e99a3cec7d8e0c114e29d93b342f (patch) | |
tree | e52dd062e3a471760015e40ee5bbe5cedcd963b9 /kernel | |
parent | cc51bf6e6d8b03bd459818492e0bc3bef09dcd74 (diff) | |
parent | 52d857a8784a09576215c71cebf368d61c12a754 (diff) |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
- Fix for a task exit cleanup race caused by a missing preempt
disable
- Cleanup of the event notification functions with a massive reduction
of duplicated code
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Factor out auxiliary events notification
perf: Fix EXIT event notification
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c | 240 |
1 file changed, 89 insertions, 151 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 6b41c1899a8b..9dc297faf7c0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event, | |||
4394 | perf_output_end(&handle); | 4394 | perf_output_end(&handle); |
4395 | } | 4395 | } |
4396 | 4396 | ||
4397 | typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data); | ||
4398 | typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); | ||
4399 | |||
4400 | static void | ||
4401 | perf_event_aux_ctx(struct perf_event_context *ctx, | ||
4402 | perf_event_aux_match_cb match, | ||
4403 | perf_event_aux_output_cb output, | ||
4404 | void *data) | ||
4405 | { | ||
4406 | struct perf_event *event; | ||
4407 | |||
4408 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
4409 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
4410 | continue; | ||
4411 | if (!event_filter_match(event)) | ||
4412 | continue; | ||
4413 | if (match(event, data)) | ||
4414 | output(event, data); | ||
4415 | } | ||
4416 | } | ||
4417 | |||
4418 | static void | ||
4419 | perf_event_aux(perf_event_aux_match_cb match, | ||
4420 | perf_event_aux_output_cb output, | ||
4421 | void *data, | ||
4422 | struct perf_event_context *task_ctx) | ||
4423 | { | ||
4424 | struct perf_cpu_context *cpuctx; | ||
4425 | struct perf_event_context *ctx; | ||
4426 | struct pmu *pmu; | ||
4427 | int ctxn; | ||
4428 | |||
4429 | rcu_read_lock(); | ||
4430 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
4431 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
4432 | if (cpuctx->unique_pmu != pmu) | ||
4433 | goto next; | ||
4434 | perf_event_aux_ctx(&cpuctx->ctx, match, output, data); | ||
4435 | if (task_ctx) | ||
4436 | goto next; | ||
4437 | ctxn = pmu->task_ctx_nr; | ||
4438 | if (ctxn < 0) | ||
4439 | goto next; | ||
4440 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
4441 | if (ctx) | ||
4442 | perf_event_aux_ctx(ctx, match, output, data); | ||
4443 | next: | ||
4444 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
4445 | } | ||
4446 | |||
4447 | if (task_ctx) { | ||
4448 | preempt_disable(); | ||
4449 | perf_event_aux_ctx(task_ctx, match, output, data); | ||
4450 | preempt_enable(); | ||
4451 | } | ||
4452 | rcu_read_unlock(); | ||
4453 | } | ||
4454 | |||
4397 | /* | 4455 | /* |
4398 | * task tracking -- fork/exit | 4456 | * task tracking -- fork/exit |
4399 | * | 4457 | * |
@@ -4416,8 +4474,9 @@ struct perf_task_event { | |||
4416 | }; | 4474 | }; |
4417 | 4475 | ||
4418 | static void perf_event_task_output(struct perf_event *event, | 4476 | static void perf_event_task_output(struct perf_event *event, |
4419 | struct perf_task_event *task_event) | 4477 | void *data) |
4420 | { | 4478 | { |
4479 | struct perf_task_event *task_event = data; | ||
4421 | struct perf_output_handle handle; | 4480 | struct perf_output_handle handle; |
4422 | struct perf_sample_data sample; | 4481 | struct perf_sample_data sample; |
4423 | struct task_struct *task = task_event->task; | 4482 | struct task_struct *task = task_event->task; |
@@ -4445,62 +4504,11 @@ out: | |||
4445 | task_event->event_id.header.size = size; | 4504 | task_event->event_id.header.size = size; |
4446 | } | 4505 | } |
4447 | 4506 | ||
4448 | static int perf_event_task_match(struct perf_event *event) | 4507 | static int perf_event_task_match(struct perf_event *event, |
4449 | { | 4508 | void *data __maybe_unused) |
4450 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
4451 | return 0; | ||
4452 | |||
4453 | if (!event_filter_match(event)) | ||
4454 | return 0; | ||
4455 | |||
4456 | if (event->attr.comm || event->attr.mmap || | ||
4457 | event->attr.mmap_data || event->attr.task) | ||
4458 | return 1; | ||
4459 | |||
4460 | return 0; | ||
4461 | } | ||
4462 | |||
4463 | static void perf_event_task_ctx(struct perf_event_context *ctx, | ||
4464 | struct perf_task_event *task_event) | ||
4465 | { | 4509 | { |
4466 | struct perf_event *event; | 4510 | return event->attr.comm || event->attr.mmap || |
4467 | 4511 | event->attr.mmap_data || event->attr.task; | |
4468 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
4469 | if (perf_event_task_match(event)) | ||
4470 | perf_event_task_output(event, task_event); | ||
4471 | } | ||
4472 | } | ||
4473 | |||
4474 | static void perf_event_task_event(struct perf_task_event *task_event) | ||
4475 | { | ||
4476 | struct perf_cpu_context *cpuctx; | ||
4477 | struct perf_event_context *ctx; | ||
4478 | struct pmu *pmu; | ||
4479 | int ctxn; | ||
4480 | |||
4481 | rcu_read_lock(); | ||
4482 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
4483 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
4484 | if (cpuctx->unique_pmu != pmu) | ||
4485 | goto next; | ||
4486 | perf_event_task_ctx(&cpuctx->ctx, task_event); | ||
4487 | |||
4488 | ctx = task_event->task_ctx; | ||
4489 | if (!ctx) { | ||
4490 | ctxn = pmu->task_ctx_nr; | ||
4491 | if (ctxn < 0) | ||
4492 | goto next; | ||
4493 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
4494 | if (ctx) | ||
4495 | perf_event_task_ctx(ctx, task_event); | ||
4496 | } | ||
4497 | next: | ||
4498 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
4499 | } | ||
4500 | if (task_event->task_ctx) | ||
4501 | perf_event_task_ctx(task_event->task_ctx, task_event); | ||
4502 | |||
4503 | rcu_read_unlock(); | ||
4504 | } | 4512 | } |
4505 | 4513 | ||
4506 | static void perf_event_task(struct task_struct *task, | 4514 | static void perf_event_task(struct task_struct *task, |
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task, | |||
4531 | }, | 4539 | }, |
4532 | }; | 4540 | }; |
4533 | 4541 | ||
4534 | perf_event_task_event(&task_event); | 4542 | perf_event_aux(perf_event_task_match, |
4543 | perf_event_task_output, | ||
4544 | &task_event, | ||
4545 | task_ctx); | ||
4535 | } | 4546 | } |
4536 | 4547 | ||
4537 | void perf_event_fork(struct task_struct *task) | 4548 | void perf_event_fork(struct task_struct *task) |
@@ -4557,8 +4568,9 @@ struct perf_comm_event { | |||
4557 | }; | 4568 | }; |
4558 | 4569 | ||
4559 | static void perf_event_comm_output(struct perf_event *event, | 4570 | static void perf_event_comm_output(struct perf_event *event, |
4560 | struct perf_comm_event *comm_event) | 4571 | void *data) |
4561 | { | 4572 | { |
4573 | struct perf_comm_event *comm_event = data; | ||
4562 | struct perf_output_handle handle; | 4574 | struct perf_output_handle handle; |
4563 | struct perf_sample_data sample; | 4575 | struct perf_sample_data sample; |
4564 | int size = comm_event->event_id.header.size; | 4576 | int size = comm_event->event_id.header.size; |
@@ -4585,39 +4597,16 @@ out: | |||
4585 | comm_event->event_id.header.size = size; | 4597 | comm_event->event_id.header.size = size; |
4586 | } | 4598 | } |
4587 | 4599 | ||
4588 | static int perf_event_comm_match(struct perf_event *event) | 4600 | static int perf_event_comm_match(struct perf_event *event, |
4589 | { | 4601 | void *data __maybe_unused) |
4590 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
4591 | return 0; | ||
4592 | |||
4593 | if (!event_filter_match(event)) | ||
4594 | return 0; | ||
4595 | |||
4596 | if (event->attr.comm) | ||
4597 | return 1; | ||
4598 | |||
4599 | return 0; | ||
4600 | } | ||
4601 | |||
4602 | static void perf_event_comm_ctx(struct perf_event_context *ctx, | ||
4603 | struct perf_comm_event *comm_event) | ||
4604 | { | 4602 | { |
4605 | struct perf_event *event; | 4603 | return event->attr.comm; |
4606 | |||
4607 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
4608 | if (perf_event_comm_match(event)) | ||
4609 | perf_event_comm_output(event, comm_event); | ||
4610 | } | ||
4611 | } | 4604 | } |
4612 | 4605 | ||
4613 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | 4606 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
4614 | { | 4607 | { |
4615 | struct perf_cpu_context *cpuctx; | ||
4616 | struct perf_event_context *ctx; | ||
4617 | char comm[TASK_COMM_LEN]; | 4608 | char comm[TASK_COMM_LEN]; |
4618 | unsigned int size; | 4609 | unsigned int size; |
4619 | struct pmu *pmu; | ||
4620 | int ctxn; | ||
4621 | 4610 | ||
4622 | memset(comm, 0, sizeof(comm)); | 4611 | memset(comm, 0, sizeof(comm)); |
4623 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); | 4612 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
4627 | comm_event->comm_size = size; | 4616 | comm_event->comm_size = size; |
4628 | 4617 | ||
4629 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 4618 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
4630 | rcu_read_lock(); | ||
4631 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
4632 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | ||
4633 | if (cpuctx->unique_pmu != pmu) | ||
4634 | goto next; | ||
4635 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | ||
4636 | 4619 | ||
4637 | ctxn = pmu->task_ctx_nr; | 4620 | perf_event_aux(perf_event_comm_match, |
4638 | if (ctxn < 0) | 4621 | perf_event_comm_output, |
4639 | goto next; | 4622 | comm_event, |
4640 | 4623 | NULL); | |
4641 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
4642 | if (ctx) | ||
4643 | perf_event_comm_ctx(ctx, comm_event); | ||
4644 | next: | ||
4645 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
4646 | } | ||
4647 | rcu_read_unlock(); | ||
4648 | } | 4624 | } |
4649 | 4625 | ||
4650 | void perf_event_comm(struct task_struct *task) | 4626 | void perf_event_comm(struct task_struct *task) |
@@ -4706,8 +4682,9 @@ struct perf_mmap_event { | |||
4706 | }; | 4682 | }; |
4707 | 4683 | ||
4708 | static void perf_event_mmap_output(struct perf_event *event, | 4684 | static void perf_event_mmap_output(struct perf_event *event, |
4709 | struct perf_mmap_event *mmap_event) | 4685 | void *data) |
4710 | { | 4686 | { |
4687 | struct perf_mmap_event *mmap_event = data; | ||
4711 | struct perf_output_handle handle; | 4688 | struct perf_output_handle handle; |
4712 | struct perf_sample_data sample; | 4689 | struct perf_sample_data sample; |
4713 | int size = mmap_event->event_id.header.size; | 4690 | int size = mmap_event->event_id.header.size; |
@@ -4734,46 +4711,24 @@ out: | |||
4734 | } | 4711 | } |
4735 | 4712 | ||
4736 | static int perf_event_mmap_match(struct perf_event *event, | 4713 | static int perf_event_mmap_match(struct perf_event *event, |
4737 | struct perf_mmap_event *mmap_event, | 4714 | void *data) |
4738 | int executable) | ||
4739 | { | ||
4740 | if (event->state < PERF_EVENT_STATE_INACTIVE) | ||
4741 | return 0; | ||
4742 | |||
4743 | if (!event_filter_match(event)) | ||
4744 | return 0; | ||
4745 | |||
4746 | if ((!executable && event->attr.mmap_data) || | ||
4747 | (executable && event->attr.mmap)) | ||
4748 | return 1; | ||
4749 | |||
4750 | return 0; | ||
4751 | } | ||
4752 | |||
4753 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | ||
4754 | struct perf_mmap_event *mmap_event, | ||
4755 | int executable) | ||
4756 | { | 4715 | { |
4757 | struct perf_event *event; | 4716 | struct perf_mmap_event *mmap_event = data; |
4717 | struct vm_area_struct *vma = mmap_event->vma; | ||
4718 | int executable = vma->vm_flags & VM_EXEC; | ||
4758 | 4719 | ||
4759 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 4720 | return (!executable && event->attr.mmap_data) || |
4760 | if (perf_event_mmap_match(event, mmap_event, executable)) | 4721 | (executable && event->attr.mmap); |
4761 | perf_event_mmap_output(event, mmap_event); | ||
4762 | } | ||
4763 | } | 4722 | } |
4764 | 4723 | ||
4765 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | 4724 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
4766 | { | 4725 | { |
4767 | struct perf_cpu_context *cpuctx; | ||
4768 | struct perf_event_context *ctx; | ||
4769 | struct vm_area_struct *vma = mmap_event->vma; | 4726 | struct vm_area_struct *vma = mmap_event->vma; |
4770 | struct file *file = vma->vm_file; | 4727 | struct file *file = vma->vm_file; |
4771 | unsigned int size; | 4728 | unsigned int size; |
4772 | char tmp[16]; | 4729 | char tmp[16]; |
4773 | char *buf = NULL; | 4730 | char *buf = NULL; |
4774 | const char *name; | 4731 | const char *name; |
4775 | struct pmu *pmu; | ||
4776 | int ctxn; | ||
4777 | 4732 | ||
4778 | memset(tmp, 0, sizeof(tmp)); | 4733 | memset(tmp, 0, sizeof(tmp)); |
4779 | 4734 | ||
@@ -4829,27 +4784,10 @@ got_name: | |||
4829 | 4784 | ||
4830 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 4785 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
4831 | 4786 | ||
4832 | rcu_read_lock(); | 4787 | perf_event_aux(perf_event_mmap_match, |
4833 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 4788 | perf_event_mmap_output, |
4834 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 4789 | mmap_event, |
4835 | if (cpuctx->unique_pmu != pmu) | 4790 | NULL); |
4836 | goto next; | ||
4837 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, | ||
4838 | vma->vm_flags & VM_EXEC); | ||
4839 | |||
4840 | ctxn = pmu->task_ctx_nr; | ||
4841 | if (ctxn < 0) | ||
4842 | goto next; | ||
4843 | |||
4844 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
4845 | if (ctx) { | ||
4846 | perf_event_mmap_ctx(ctx, mmap_event, | ||
4847 | vma->vm_flags & VM_EXEC); | ||
4848 | } | ||
4849 | next: | ||
4850 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
4851 | } | ||
4852 | rcu_read_unlock(); | ||
4853 | 4791 | ||
4854 | kfree(buf); | 4792 | kfree(buf); |
4855 | } | 4793 | } |