author    | Ingo Molnar <mingo@elte.hu> | 2010-12-07 01:51:14 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2010-12-07 01:51:14 -0500
commit    | 75b5293a5d176cd9caf6dc590da4f3458c048c3c (patch)
tree      | 40929d6108c662d6eb4c65f900312a37d0d6d566 /kernel
parent    | 10a18d7dc0d9f12483c95ffc234118e9b80edfeb (diff)
parent    | ce47dc56a2241dc035160a85bc5e34283cdd622c (diff)
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux-2.6 into perf/core
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_event.c | 234
1 file changed, 167 insertions(+), 67 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index aede71245e9f..77ad22c00b9d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -133,6 +133,28 @@ static void unclone_ctx(struct perf_event_context *ctx)
 	}
 }
 
+static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
+{
+	/*
+	 * only top level events have the pid namespace they were created in
+	 */
+	if (event->parent)
+		event = event->parent;
+
+	return task_tgid_nr_ns(p, event->ns);
+}
+
+static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
+{
+	/*
+	 * only top level events have the pid namespace they were created in
+	 */
+	if (event->parent)
+		event = event->parent;
+
+	return task_pid_nr_ns(p, event->ns);
+}
+
 /*
  * If we inherit events we want to return the parent event id
  * to userspace.
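The two helpers above are only moved, not changed: later hunks call them from the new id-sample code, which now sits earlier in the file than their old location. They resolve a task's ids in the pid namespace the top-level event was created in, so the values written to the ring buffer match what the monitoring process can see. A minimal kernel-side sketch of the same idea, for illustration only (the function name is ours, not from the patch):

    #include <linux/kernel.h>
    #include <linux/pid_namespace.h>
    #include <linux/sched.h>

    /* Illustration: report a task's ids relative to a given pid namespace. */
    static void report_ids(struct task_struct *p, struct pid_namespace *ns)
    {
            pid_t pid = task_tgid_nr_ns(p, ns);     /* process (thread-group) id */
            pid_t tid = task_pid_nr_ns(p, ns);      /* thread id */

            pr_debug("pid=%d tid=%d\n", pid, tid);
    }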
@@ -351,15 +373,30 @@ static void perf_event__header_size(struct perf_event *event)
 	if (sample_type & PERF_SAMPLE_IP)
 		size += sizeof(data->ip);
 
+	if (sample_type & PERF_SAMPLE_ADDR)
+		size += sizeof(data->addr);
+
+	if (sample_type & PERF_SAMPLE_PERIOD)
+		size += sizeof(data->period);
+
+	if (sample_type & PERF_SAMPLE_READ)
+		size += event->read_size;
+
+	event->header_size = size;
+}
+
+static void perf_event__id_header_size(struct perf_event *event)
+{
+	struct perf_sample_data *data;
+	u64 sample_type = event->attr.sample_type;
+	u16 size = 0;
+
 	if (sample_type & PERF_SAMPLE_TID)
 		size += sizeof(data->tid_entry);
 
 	if (sample_type & PERF_SAMPLE_TIME)
 		size += sizeof(data->time);
 
-	if (sample_type & PERF_SAMPLE_ADDR)
-		size += sizeof(data->addr);
-
 	if (sample_type & PERF_SAMPLE_ID)
 		size += sizeof(data->id);
 
@@ -369,13 +406,7 @@ static void perf_event__header_size(struct perf_event *event)
 	if (sample_type & PERF_SAMPLE_CPU)
 		size += sizeof(data->cpu_entry);
 
-	if (sample_type & PERF_SAMPLE_PERIOD)
-		size += sizeof(data->period);
-
-	if (sample_type & PERF_SAMPLE_READ)
-		size += event->read_size;
-
-	event->header_size = size;
+	event->id_header_size = size;
 }
 
 static void perf_group_attach(struct perf_event *event)
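The two hunks above split the precomputed record size in two: perf_event__header_size() keeps counting the sample-only fields visible here (IP, ADDR, PERIOD, READ), while the new perf_event__id_header_size() counts the identity fields (TID, TIME, ID, STREAM_ID, CPU) that later hunks also append to non-sample records. A small userspace illustration of the arithmetic, assuming an arbitrary sample_type not taken from the patch:

    /* Standalone illustration; only the ABI constants come from perf_event.h. */
    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
                                   PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD;
            unsigned int header_size = 0, id_header_size = 0;

            /* what perf_event__header_size() counts for these bits */
            if (sample_type & PERF_SAMPLE_PERIOD)
                    header_size += sizeof(uint64_t);        /* period */

            /* what perf_event__id_header_size() counts */
            if (sample_type & PERF_SAMPLE_TID)
                    id_header_size += 2 * sizeof(uint32_t); /* pid, tid */
            if (sample_type & PERF_SAMPLE_TIME)
                    id_header_size += sizeof(uint64_t);     /* time */
            if (sample_type & PERF_SAMPLE_CPU)
                    id_header_size += 2 * sizeof(uint32_t); /* cpu, res */

            /* prints: header_size=8 id_header_size=24 */
            printf("header_size=%u id_header_size=%u\n", header_size, id_header_size);
            return 0;
    }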
@@ -3357,6 +3388,73 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
 	} while (len);
 }
 
+static void __perf_event_header__init_id(struct perf_event_header *header,
+					 struct perf_sample_data *data,
+					 struct perf_event *event)
+{
+	u64 sample_type = event->attr.sample_type;
+
+	data->type = sample_type;
+	header->size += event->id_header_size;
+
+	if (sample_type & PERF_SAMPLE_TID) {
+		/* namespace issues */
+		data->tid_entry.pid = perf_event_pid(event, current);
+		data->tid_entry.tid = perf_event_tid(event, current);
+	}
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		data->time = perf_clock();
+
+	if (sample_type & PERF_SAMPLE_ID)
+		data->id = primary_event_id(event);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		data->stream_id = event->id;
+
+	if (sample_type & PERF_SAMPLE_CPU) {
+		data->cpu_entry.cpu = raw_smp_processor_id();
+		data->cpu_entry.reserved = 0;
+	}
+}
+
+static void perf_event_header__init_id(struct perf_event_header *header,
+				       struct perf_sample_data *data,
+				       struct perf_event *event)
+{
+	if (event->attr.sample_id_all)
+		__perf_event_header__init_id(header, data, event);
+}
+
+static void __perf_event__output_id_sample(struct perf_output_handle *handle,
+					   struct perf_sample_data *data)
+{
+	u64 sample_type = data->type;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		perf_output_put(handle, data->tid_entry);
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		perf_output_put(handle, data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		perf_output_put(handle, data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		perf_output_put(handle, data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		perf_output_put(handle, data->cpu_entry);
+}
+
+static void perf_event__output_id_sample(struct perf_event *event,
+					 struct perf_output_handle *handle,
+					 struct perf_sample_data *sample)
+{
+	if (event->attr.sample_id_all)
+		__perf_event__output_id_sample(handle, sample);
+}
+
 int perf_output_begin(struct perf_output_handle *handle,
 		      struct perf_event *event, unsigned int size,
 		      int nmi, int sample)
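Taken together, the four helpers above implement an optional identity trailer: perf_event_header__init_id() grows a record's header->size by the precomputed id_header_size when attr.sample_id_all is set, and perf_event__output_id_sample() then appends the fields in the fixed order TID, TIME, ID, STREAM_ID, CPU. Seen from userspace, the trailer looks roughly like the sketch below; the struct name is ours, and each field is present only if the matching bit is set in attr.sample_type:

    #include <linux/types.h>

    /*
     * Sketch of the identity trailer appended to non-SAMPLE records when
     * attr.sample_id_all is set, in the order __perf_event__output_id_sample()
     * writes the fields. Fields whose PERF_SAMPLE_* bit is clear are simply
     * omitted, so a parser must consult attr.sample_type.
     */
    struct id_sample_trailer {
            __u32 pid, tid;         /* PERF_SAMPLE_TID */
            __u64 time;             /* PERF_SAMPLE_TIME */
            __u64 id;               /* PERF_SAMPLE_ID */
            __u64 stream_id;        /* PERF_SAMPLE_STREAM_ID */
            __u32 cpu, res;         /* PERF_SAMPLE_CPU */
    };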
@@ -3364,6 +3462,7 @@ int perf_output_begin(struct perf_output_handle *handle,
 	struct perf_buffer *buffer;
 	unsigned long tail, offset, head;
 	int have_lost;
+	struct perf_sample_data sample_data;
 	struct {
 		struct perf_event_header header;
 		u64 id;
@@ -3390,8 +3489,12 @@ int perf_output_begin(struct perf_output_handle *handle,
 		goto out;
 
 	have_lost = local_read(&buffer->lost);
-	if (have_lost)
-		size += sizeof(lost_event);
+	if (have_lost) {
+		lost_event.header.size = sizeof(lost_event);
+		perf_event_header__init_id(&lost_event.header, &sample_data,
+					   event);
+		size += lost_event.header.size;
+	}
 
 	perf_output_get_handle(handle);
 
@@ -3422,11 +3525,11 @@ int perf_output_begin(struct perf_output_handle *handle,
 	if (have_lost) {
 		lost_event.header.type = PERF_RECORD_LOST;
 		lost_event.header.misc = 0;
-		lost_event.header.size = sizeof(lost_event);
 		lost_event.id = event->id;
 		lost_event.lost = local_xchg(&buffer->lost, 0);
 
 		perf_output_put(handle, lost_event);
+		perf_event__output_id_sample(event, handle, &sample_data);
 	}
 
 	return 0;
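With the two perf_output_begin() hunks above, a PERF_RECORD_LOST record is sized once up front (the fixed part plus, when sample_id_all is set, the id header), and the identity trailer is written immediately after the fixed fields. The resulting on-buffer layout is roughly the following (struct and field names are ours):

    #include <linux/perf_event.h>
    #include <linux/types.h>

    /* Rough layout of PERF_RECORD_LOST as emitted after this change. */
    struct lost_record {
            struct perf_event_header header;        /* .size covers the trailer too */
            __u64 id;                               /* primary event id */
            __u64 lost;                             /* number of lost records */
            /* if attr.sample_id_all: identity trailer (see sketch above) */
    };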
@@ -3459,28 +3562,6 @@ void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
-static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
-{
-	/*
-	 * only top level events have the pid namespace they were created in
-	 */
-	if (event->parent)
-		event = event->parent;
-
-	return task_tgid_nr_ns(p, event->ns);
-}
-
-static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
-{
-	/*
-	 * only top level events have the pid namespace they were created in
-	 */
-	if (event->parent)
-		event = event->parent;
-
-	return task_pid_nr_ns(p, event->ns);
-}
-
 static void perf_output_read_one(struct perf_output_handle *handle,
 				 struct perf_event *event,
 				 u64 enabled, u64 running)
@@ -3655,37 +3736,17 @@ void perf_prepare_sample(struct perf_event_header *header,
 {
 	u64 sample_type = event->attr.sample_type;
 
-	data->type = sample_type;
-
 	header->type = PERF_RECORD_SAMPLE;
 	header->size = sizeof(*header) + event->header_size;
 
 	header->misc = 0;
 	header->misc |= perf_misc_flags(regs);
 
+	__perf_event_header__init_id(header, data, event);
+
 	if (sample_type & PERF_SAMPLE_IP)
 		data->ip = perf_instruction_pointer(regs);
 
-	if (sample_type & PERF_SAMPLE_TID) {
-		/* namespace issues */
-		data->tid_entry.pid = perf_event_pid(event, current);
-		data->tid_entry.tid = perf_event_tid(event, current);
-	}
-
-	if (sample_type & PERF_SAMPLE_TIME)
-		data->time = perf_clock();
-
-	if (sample_type & PERF_SAMPLE_ID)
-		data->id = primary_event_id(event);
-
-	if (sample_type & PERF_SAMPLE_STREAM_ID)
-		data->stream_id = event->id;
-
-	if (sample_type & PERF_SAMPLE_CPU) {
-		data->cpu_entry.cpu = raw_smp_processor_id();
-		data->cpu_entry.reserved = 0;
-	}
-
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		int size = 1;
 
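For PERF_RECORD_SAMPLE itself nothing changes on the wire: perf_prepare_sample() now fills the identity fields through the shared __perf_event_header__init_id() (unconditionally, since a sample always carried them when requested), and the output path still writes the sample body in the order documented in perf_event.h. Roughly, with a struct name of our choosing and every field conditional on its sample_type bit:

    #include <linux/types.h>

    /* Front of a PERF_RECORD_SAMPLE body; dynamic parts (read values,
     * callchain, raw data) follow when requested. */
    struct sample_front {
            __u64 ip;               /* PERF_SAMPLE_IP */
            __u32 pid, tid;         /* PERF_SAMPLE_TID */
            __u64 time;             /* PERF_SAMPLE_TIME */
            __u64 addr;             /* PERF_SAMPLE_ADDR */
            __u64 id;               /* PERF_SAMPLE_ID */
            __u64 stream_id;        /* PERF_SAMPLE_STREAM_ID */
            __u32 cpu, res;         /* PERF_SAMPLE_CPU */
            __u64 period;           /* PERF_SAMPLE_PERIOD */
    };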
@@ -3749,6 +3810,7 @@ perf_event_read_event(struct perf_event *event,
 			struct task_struct *task)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_RECORD_READ,
@@ -3760,12 +3822,14 @@ perf_event_read_event(struct perf_event *event,
 	};
 	int ret;
 
+	perf_event_header__init_id(&read_event.header, &sample, event);
 	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
 	if (ret)
 		return;
 
 	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, event);
+	perf_event__output_id_sample(event, &handle, &sample);
 
 	perf_output_end(&handle);
 }
@@ -3795,14 +3859,16 @@ static void perf_event_task_output(struct perf_event *event,
 				   struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	struct task_struct *task = task_event->task;
-	int size, ret;
+	int ret, size = task_event->event_id.header.size;
 
-	size = task_event->event_id.header.size;
-	ret = perf_output_begin(&handle, event, size, 0, 0);
+	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
+	ret = perf_output_begin(&handle, event,
+				task_event->event_id.header.size, 0, 0);
 	if (ret)
-		return;
+		goto out;
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3812,7 +3878,11 @@ static void perf_event_task_output(struct perf_event *event,
 
 	perf_output_put(&handle, task_event->event_id);
 
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	task_event->event_id.header.size = size;
 }
 
 static int perf_event_task_match(struct perf_event *event)
@@ -3925,11 +3995,16 @@ static void perf_event_comm_output(struct perf_event *event,
 				   struct perf_comm_event *comm_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int size = comm_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;
+
+	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				comm_event->event_id.header.size, 0, 0);
 
 	if (ret)
-		return;
+		goto out;
 
 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
@@ -3937,7 +4012,12 @@ static void perf_event_comm_output(struct perf_event *event,
 	perf_output_put(&handle, comm_event->event_id);
 	perf_output_copy(&handle, comm_event->comm,
 			 comm_event->comm_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	comm_event->event_id.header.size = size;
 }
 
 static int perf_event_comm_match(struct perf_event *event)
@@ -3982,7 +4062,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	comm_event->comm_size = size;
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
@@ -4061,11 +4140,15 @@ static void perf_event_mmap_output(struct perf_event *event,
 				   struct perf_mmap_event *mmap_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int size = mmap_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;
 
+	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				mmap_event->event_id.header.size, 0, 0);
 	if (ret)
-		return;
+		goto out;
 
 	mmap_event->event_id.pid = perf_event_pid(event, current);
 	mmap_event->event_id.tid = perf_event_tid(event, current);
@@ -4073,7 +4156,12 @@ static void perf_event_mmap_output(struct perf_event *event,
 	perf_output_put(&handle, mmap_event->event_id);
 	perf_output_copy(&handle, mmap_event->file_name,
 			 mmap_event->file_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	mmap_event->event_id.header.size = size;
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
@@ -4226,6 +4314,7 @@ void perf_event_mmap(struct vm_area_struct *vma)
 static void perf_log_throttle(struct perf_event *event, int enable)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int ret;
 
 	struct {
@@ -4247,11 +4336,15 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 	if (enable)
 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
 
-	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
+	perf_event_header__init_id(&throttle_event.header, &sample, event);
+
+	ret = perf_output_begin(&handle, event,
+				throttle_event.header.size, 1, 0);
 	if (ret)
 		return;
 
 	perf_output_put(&handle, throttle_event);
+	perf_event__output_id_sample(event, &handle, &sample);
 	perf_output_end(&handle);
 }
 
@@ -5745,6 +5838,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * Precalculate sample_data sizes
 	 */
 	perf_event__header_size(event);
+	perf_event__id_header_size(event);
 
 	/*
 	 * Drop the reference on the group_event after placing the
@@ -6099,6 +6193,12 @@ inherit_event(struct perf_event *parent_event,
 	child_event->overflow_handler = parent_event->overflow_handler;
 
 	/*
+	 * Precalculate sample_data sizes
+	 */
+	perf_event__header_size(child_event);
+	perf_event__id_header_size(child_event);
+
+	/*
 	 * Link it up in the child's context:
 	 */
 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
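Both call sites above precalculate the two sizes: once for events created via perf_event_open() and once for events inherited into child tasks. The switch that makes the kernel use id_header_size on non-sample records is the sample_id_all bit in struct perf_event_attr, which the new helpers test. A minimal userspace sketch of requesting it (error handling trimmed, event choice arbitrary):

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* glibc provides no wrapper for perf_event_open(2). */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_TASK_CLOCK;
            attr.sample_period = 100000;
            attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;
            attr.sample_id_all = 1; /* also tag MMAP, COMM, FORK, ... records */

            fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            /* ... mmap the ring buffer and consume records as usual ... */
            close(fd);
            return 0;
    }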