Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	573
1 file changed, 428 insertions(+), 145 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2870feee81dd..11847bf1e8cc 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/idr.h>
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
@@ -21,7 +22,9 @@
 #include <linux/dcache.h>
 #include <linux/percpu.h>
 #include <linux/ptrace.h>
+#include <linux/reboot.h>
 #include <linux/vmstat.h>
+#include <linux/device.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
@@ -133,6 +136,28 @@ static void unclone_ctx(struct perf_event_context *ctx)
 	}
 }
 
+static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
+{
+	/*
+	 * only top level events have the pid namespace they were created in
+	 */
+	if (event->parent)
+		event = event->parent;
+
+	return task_tgid_nr_ns(p, event->ns);
+}
+
+static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
+{
+	/*
+	 * only top level events have the pid namespace they were created in
+	 */
+	if (event->parent)
+		event = event->parent;
+
+	return task_pid_nr_ns(p, event->ns);
+}
+
 /*
  * If we inherit events we want to return the parent event id
  * to userspace.
@@ -312,9 +337,84 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		ctx->nr_stat++;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__read_size(struct perf_event *event)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (event->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (event->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += event->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+	event->read_size = size;
+}
+
+static void perf_event__header_size(struct perf_event *event)
+{
+	struct perf_sample_data *data;
+	u64 sample_type = event->attr.sample_type;
+	u16 size = 0;
+
+	perf_event__read_size(event);
+
+	if (sample_type & PERF_SAMPLE_IP)
+		size += sizeof(data->ip);
+
+	if (sample_type & PERF_SAMPLE_ADDR)
+		size += sizeof(data->addr);
+
+	if (sample_type & PERF_SAMPLE_PERIOD)
+		size += sizeof(data->period);
+
+	if (sample_type & PERF_SAMPLE_READ)
+		size += event->read_size;
+
+	event->header_size = size;
+}
+
+static void perf_event__id_header_size(struct perf_event *event)
+{
+	struct perf_sample_data *data;
+	u64 sample_type = event->attr.sample_type;
+	u16 size = 0;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		size += sizeof(data->tid_entry);
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		size += sizeof(data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		size += sizeof(data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		size += sizeof(data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		size += sizeof(data->cpu_entry);
+
+	event->id_header_size = size;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
-	struct perf_event *group_leader = event->group_leader;
+	struct perf_event *group_leader = event->group_leader, *pos;
 
 	/*
 	 * We can have double attach due to group movement in perf_event_open.
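For reference, a worked example of the sizing above, with assumed values that are not part of the patch: read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP on a leader with two siblings gives:

/*
 * Sketch (assumed read_format and group shape, not from the patch):
 *
 *	entry = 8 (value) + 8 (PERF_FORMAT_ID)          = 16
 *	nr    = 1 (leader) + 2 (nr_siblings)            =  3
 *	size  = 8 (time_enabled) + 8 (nr, FORMAT_GROUP) = 16
 *	read_size = 16 + 16 * 3                         = 64 bytes
 *
 * matching the group read layout:
 *	{ u64 nr; u64 time_enabled; { u64 value; u64 id; } cnt[nr]; }
 */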
@@ -333,6 +433,11 @@ static void perf_group_attach(struct perf_event *event)
 
 	list_add_tail(&event->group_entry, &group_leader->sibling_list);
 	group_leader->nr_siblings++;
+
+	perf_event__header_size(group_leader);
+
+	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
+		perf_event__header_size(pos);
 }
 
 /*
@@ -391,7 +496,7 @@ static void perf_group_detach(struct perf_event *event)
 	if (event->group_leader != event) {
 		list_del_init(&event->group_entry);
 		event->group_leader->nr_siblings--;
-		return;
+		goto out;
 	}
 
 	if (!list_empty(&event->group_entry))
@@ -410,6 +515,12 @@ static void perf_group_detach(struct perf_event *event)
 		/* Inherit group flags from the previous leader */
 		sibling->group_flags = event->group_flags;
 	}
+
+out:
+	perf_event__header_size(event->group_leader);
+
+	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
+		perf_event__header_size(tmp);
 }
 
 static inline int
@@ -1073,7 +1184,7 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	/*
 	 * not supported on inherited events
 	 */
-	if (event->attr.inherit)
+	if (event->attr.inherit || !is_sampling_event(event))
 		return -EINVAL;
 
 	atomic_add(refresh, &event->event_limit);
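The is_sampling_event() helper used here (and in several hunks below) lives in include/linux/perf_event.h rather than in this file; presumably it is just a test on the requested sample period, along these lines:

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

Since attr.freq overlays sample_freq on the same union field, a non-zero value covers both period- and frequency-based sampling.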
@@ -2289,31 +2400,6 @@ static int perf_release(struct inode *inode, struct file *file)
 	return perf_event_release_kernel(event);
 }
 
-static int perf_event_read_size(struct perf_event *event)
-{
-	int entry = sizeof(u64); /* value */
-	int size = 0;
-	int nr = 1;
-
-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		size += sizeof(u64);
-
-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		size += sizeof(u64);
-
-	if (event->attr.read_format & PERF_FORMAT_ID)
-		entry += sizeof(u64);
-
-	if (event->attr.read_format & PERF_FORMAT_GROUP) {
-		nr += event->group_leader->nr_siblings;
-		size += sizeof(u64);
-	}
-
-	size += entry * nr;
-
-	return size;
-}
-
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
 	struct perf_event *child;
@@ -2428,7 +2514,7 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
 	if (event->state == PERF_EVENT_STATE_ERROR)
 		return 0;
 
-	if (count < perf_event_read_size(event))
+	if (count < event->read_size)
 		return -ENOSPC;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
@@ -2514,7 +2600,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	int ret = 0;
 	u64 value;
 
-	if (!event->attr.sample_period)
+	if (!is_sampling_event(event))
 		return -EINVAL;
 
 	if (copy_from_user(&value, arg, sizeof(value)))
@@ -3305,6 +3391,73 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
 	} while (len);
 }
 
+static void __perf_event_header__init_id(struct perf_event_header *header,
+					 struct perf_sample_data *data,
+					 struct perf_event *event)
+{
+	u64 sample_type = event->attr.sample_type;
+
+	data->type = sample_type;
+	header->size += event->id_header_size;
+
+	if (sample_type & PERF_SAMPLE_TID) {
+		/* namespace issues */
+		data->tid_entry.pid = perf_event_pid(event, current);
+		data->tid_entry.tid = perf_event_tid(event, current);
+	}
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		data->time = perf_clock();
+
+	if (sample_type & PERF_SAMPLE_ID)
+		data->id = primary_event_id(event);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		data->stream_id = event->id;
+
+	if (sample_type & PERF_SAMPLE_CPU) {
+		data->cpu_entry.cpu = raw_smp_processor_id();
+		data->cpu_entry.reserved = 0;
+	}
+}
+
+static void perf_event_header__init_id(struct perf_event_header *header,
+				       struct perf_sample_data *data,
+				       struct perf_event *event)
+{
+	if (event->attr.sample_id_all)
+		__perf_event_header__init_id(header, data, event);
+}
+
+static void __perf_event__output_id_sample(struct perf_output_handle *handle,
+					   struct perf_sample_data *data)
+{
+	u64 sample_type = data->type;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		perf_output_put(handle, data->tid_entry);
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		perf_output_put(handle, data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		perf_output_put(handle, data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		perf_output_put(handle, data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		perf_output_put(handle, data->cpu_entry);
+}
+
+static void perf_event__output_id_sample(struct perf_event *event,
+					 struct perf_output_handle *handle,
+					 struct perf_sample_data *sample)
+{
+	if (event->attr.sample_id_all)
+		__perf_event__output_id_sample(handle, sample);
+}
+
 int perf_output_begin(struct perf_output_handle *handle,
 		      struct perf_event *event, unsigned int size,
 		      int nmi, int sample)
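Together these helpers implement attr.sample_id_all: non-SAMPLE records (MMAP, COMM, TASK, READ, THROTTLE, LOST) grow an optional trailer so tools can still order and attribute them. Assuming every relevant bit is set, the appended layout looks like this (each field present only if its sample_type bit is set; illustrative struct, not a kernel declaration):

struct sample_id {
	u32	pid, tid;	/* PERF_SAMPLE_TID	 */
	u64	time;		/* PERF_SAMPLE_TIME	 */
	u64	id;		/* PERF_SAMPLE_ID	 */
	u64	stream_id;	/* PERF_SAMPLE_STREAM_ID */
	u32	cpu, res;	/* PERF_SAMPLE_CPU	 */
};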
@@ -3312,6 +3465,7 @@ int perf_output_begin(struct perf_output_handle *handle,
 	struct perf_buffer *buffer;
 	unsigned long tail, offset, head;
 	int have_lost;
+	struct perf_sample_data sample_data;
 	struct {
 		struct perf_event_header header;
 		u64 id;
@@ -3338,8 +3492,12 @@ int perf_output_begin(struct perf_output_handle *handle,
 		goto out;
 
 	have_lost = local_read(&buffer->lost);
-	if (have_lost)
-		size += sizeof(lost_event);
+	if (have_lost) {
+		lost_event.header.size = sizeof(lost_event);
+		perf_event_header__init_id(&lost_event.header, &sample_data,
+					   event);
+		size += lost_event.header.size;
+	}
 
 	perf_output_get_handle(handle);
 
@@ -3370,11 +3528,11 @@ int perf_output_begin(struct perf_output_handle *handle,
 	if (have_lost) {
 		lost_event.header.type = PERF_RECORD_LOST;
 		lost_event.header.misc = 0;
-		lost_event.header.size = sizeof(lost_event);
 		lost_event.id          = event->id;
 		lost_event.lost        = local_xchg(&buffer->lost, 0);
 
 		perf_output_put(handle, lost_event);
+		perf_event__output_id_sample(event, handle, &sample_data);
 	}
 
 	return 0;
@@ -3407,28 +3565,6 @@ void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
-static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
-{
-	/*
-	 * only top level events have the pid namespace they were created in
-	 */
-	if (event->parent)
-		event = event->parent;
-
-	return task_tgid_nr_ns(p, event->ns);
-}
-
-static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
-{
-	/*
-	 * only top level events have the pid namespace they were created in
-	 */
-	if (event->parent)
-		event = event->parent;
-
-	return task_pid_nr_ns(p, event->ns);
-}
-
 static void perf_output_read_one(struct perf_output_handle *handle,
 				 struct perf_event *event,
 				 u64 enabled, u64 running)
@@ -3603,61 +3739,16 @@ void perf_prepare_sample(struct perf_event_header *header,
 {
 	u64 sample_type = event->attr.sample_type;
 
-	data->type = sample_type;
-
 	header->type = PERF_RECORD_SAMPLE;
-	header->size = sizeof(*header);
+	header->size = sizeof(*header) + event->header_size;
 
 	header->misc = 0;
 	header->misc |= perf_misc_flags(regs);
 
-	if (sample_type & PERF_SAMPLE_IP) {
-		data->ip = perf_instruction_pointer(regs);
-
-		header->size += sizeof(data->ip);
-	}
-
-	if (sample_type & PERF_SAMPLE_TID) {
-		/* namespace issues */
-		data->tid_entry.pid = perf_event_pid(event, current);
-		data->tid_entry.tid = perf_event_tid(event, current);
-
-		header->size += sizeof(data->tid_entry);
-	}
-
-	if (sample_type & PERF_SAMPLE_TIME) {
-		data->time = perf_clock();
-
-		header->size += sizeof(data->time);
-	}
-
-	if (sample_type & PERF_SAMPLE_ADDR)
-		header->size += sizeof(data->addr);
-
-	if (sample_type & PERF_SAMPLE_ID) {
-		data->id = primary_event_id(event);
-
-		header->size += sizeof(data->id);
-	}
-
-	if (sample_type & PERF_SAMPLE_STREAM_ID) {
-		data->stream_id = event->id;
-
-		header->size += sizeof(data->stream_id);
-	}
-
-	if (sample_type & PERF_SAMPLE_CPU) {
-		data->cpu_entry.cpu = raw_smp_processor_id();
-		data->cpu_entry.reserved = 0;
-
-		header->size += sizeof(data->cpu_entry);
-	}
-
-	if (sample_type & PERF_SAMPLE_PERIOD)
-		header->size += sizeof(data->period);
+	__perf_event_header__init_id(header, data, event);
 
-	if (sample_type & PERF_SAMPLE_READ)
-		header->size += perf_event_read_size(event);
+	if (sample_type & PERF_SAMPLE_IP)
+		data->ip = perf_instruction_pointer(regs);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		int size = 1;
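With the precalculated sizes, perf_prepare_sample() no longer walks every fixed-size PERF_SAMPLE_* bit in the interrupt path; only genuinely variable-length data (callchain, raw) is measured per sample. A worked example under an assumed configuration:

/*
 * Sketch (assumed sample_type, not from the patch):
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME
 *
 *	event->header_size    =  8	(ip)
 *	event->id_header_size = 16	(tid_entry 8 + time 8)
 *
 *	header->size = 8 (header) + 8 + 16 = 32 bytes,
 * computed with two additions instead of a chain of bit tests.
 */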
@@ -3722,23 +3813,26 @@ perf_event_read_event(struct perf_event *event,
 			struct task_struct *task)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_RECORD_READ,
 			.misc = 0,
-			.size = sizeof(read_event) + perf_event_read_size(event),
+			.size = sizeof(read_event) + event->read_size,
 		},
 		.pid = perf_event_pid(event, task),
 		.tid = perf_event_tid(event, task),
 	};
 	int ret;
 
+	perf_event_header__init_id(&read_event.header, &sample, event);
 	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
 	if (ret)
 		return;
 
 	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, event);
+	perf_event__output_id_sample(event, &handle, &sample);
 
 	perf_output_end(&handle);
 }
@@ -3768,14 +3862,16 @@ static void perf_event_task_output(struct perf_event *event,
 				   struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	struct task_struct *task = task_event->task;
-	int size, ret;
+	int ret, size = task_event->event_id.header.size;
 
-	size = task_event->event_id.header.size;
-	ret = perf_output_begin(&handle, event, size, 0, 0);
+	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
+	ret = perf_output_begin(&handle, event,
+				task_event->event_id.header.size, 0, 0);
 	if (ret)
-		return;
+		goto out;
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3785,7 +3881,11 @@ static void perf_event_task_output(struct perf_event *event,
 
 	perf_output_put(&handle, task_event->event_id);
 
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	task_event->event_id.header.size = size;
 }
 
 static int perf_event_task_match(struct perf_event *event)
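Note the save/restore dance: perf_event_header__init_id() adds this event's id_header_size to the shared task_event->event_id.header.size, but the same task_event is then offered to other events whose sample_id_all trailer may have a different size (or be absent), so the original size is put back at out:. The comm and mmap output paths below repeat the pattern; in outline (names from the hunks above):

/*
 *	size = task_event->event_id.header.size;	// save shared size
 *	perf_event_header__init_id(...);		// += id_header_size
 *	...emit the record for this event...
 * out:
 *	task_event->event_id.header.size = size;	// undo for next event
 */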
@@ -3900,11 +4000,16 @@ static void perf_event_comm_output(struct perf_event *event,
 				   struct perf_comm_event *comm_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int size = comm_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;
+
+	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				comm_event->event_id.header.size, 0, 0);
 
 	if (ret)
-		return;
+		goto out;
 
 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
@@ -3912,7 +4017,12 @@ static void perf_event_comm_output(struct perf_event *event,
 	perf_output_put(&handle, comm_event->event_id);
 	perf_output_copy(&handle, comm_event->comm,
 			 comm_event->comm_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	comm_event->event_id.header.size = size;
 }
 
 static int perf_event_comm_match(struct perf_event *event)
@@ -3957,7 +4067,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	comm_event->comm_size = size;
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
@@ -4038,11 +4147,15 @@ static void perf_event_mmap_output(struct perf_event *event,
 				   struct perf_mmap_event *mmap_event)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int size = mmap_event->event_id.header.size;
-	int ret = perf_output_begin(&handle, event, size, 0, 0);
+	int ret;
 
+	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
+	ret = perf_output_begin(&handle, event,
+				mmap_event->event_id.header.size, 0, 0);
 	if (ret)
-		return;
+		goto out;
 
 	mmap_event->event_id.pid = perf_event_pid(event, current);
 	mmap_event->event_id.tid = perf_event_tid(event, current);
@@ -4050,7 +4163,12 @@ static void perf_event_mmap_output(struct perf_event *event,
 	perf_output_put(&handle, mmap_event->event_id);
 	perf_output_copy(&handle, mmap_event->file_name,
 			 mmap_event->file_size);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
 	perf_output_end(&handle);
+out:
+	mmap_event->event_id.header.size = size;
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
@@ -4205,6 +4323,7 @@ void perf_event_mmap(struct vm_area_struct *vma)
 static void perf_log_throttle(struct perf_event *event, int enable)
 {
 	struct perf_output_handle handle;
+	struct perf_sample_data sample;
 	int ret;
 
 	struct {
@@ -4226,11 +4345,15 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 	if (enable)
 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
 
-	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
+	perf_event_header__init_id(&throttle_event.header, &sample, event);
+
+	ret = perf_output_begin(&handle, event,
+				throttle_event.header.size, 1, 0);
 	if (ret)
 		return;
 
 	perf_output_put(&handle, throttle_event);
+	perf_event__output_id_sample(event, &handle, &sample);
 	perf_output_end(&handle);
 }
 
@@ -4246,6 +4369,13 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 	struct hw_perf_event *hwc = &event->hw;
 	int ret = 0;
 
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
 	if (!throttle) {
 		hwc->interrupts++;
 	} else {
@@ -4391,7 +4521,7 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
 	if (!regs)
 		return;
 
-	if (!hwc->sample_period)
+	if (!is_sampling_event(event))
 		return;
 
 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
@@ -4554,7 +4684,7 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	struct hw_perf_event *hwc = &event->hw;
 	struct hlist_head *head;
 
-	if (hwc->sample_period) {
+	if (is_sampling_event(event)) {
 		hwc->last_period = hwc->sample_period;
 		perf_swevent_set_period(event);
 	}
@@ -4811,15 +4941,6 @@ static int perf_tp_event_init(struct perf_event *event)
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
 		return -ENOENT;
 
-	/*
-	 * Raw tracepoint data is a severe data leak, only allow root to
-	 * have these.
-	 */
-	if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
-	    perf_paranoid_tracepoint_raw() &&
-	    !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
 	err = perf_trace_init(event);
 	if (err)
 		return err;
@@ -4842,7 +4963,7 @@ static struct pmu perf_tracepoint = {
 
 static inline void perf_tp_register(void)
 {
-	perf_pmu_register(&perf_tracepoint);
+	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4932,31 +5053,33 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 static void perf_swevent_start_hrtimer(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	s64 period;
+
+	if (!is_sampling_event(event))
+		return;
 
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		s64 period = local64_read(&hwc->period_left);
 
-		if (period) {
-			if (period < 0)
-				period = 10000;
+	period = local64_read(&hwc->period_left);
+	if (period) {
+		if (period < 0)
+			period = 10000;
 
-			local64_set(&hwc->period_left, 0);
-		} else {
-			period = max_t(u64, 10000, hwc->sample_period);
-		}
-		__hrtimer_start_range_ns(&hwc->hrtimer,
+		local64_set(&hwc->period_left, 0);
+	} else {
+		period = max_t(u64, 10000, hwc->sample_period);
+	}
+	__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL_PINNED, 0);
-	}
 }
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (hwc->sample_period) {
+	if (is_sampling_event(event)) {
 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
 
@@ -5184,8 +5307,61 @@ static void free_pmu_context(struct pmu *pmu)
 out:
 	mutex_unlock(&pmus_lock);
 }
+static struct idr pmu_idr;
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *page)
+{
+	struct pmu *pmu = dev_get_drvdata(dev);
+
+	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
+}
+
+static struct device_attribute pmu_dev_attrs[] = {
+	__ATTR_RO(type),
+	__ATTR_NULL,
+};
+
+static int pmu_bus_running;
+static struct bus_type pmu_bus = {
+	.name		= "event_source",
+	.dev_attrs	= pmu_dev_attrs,
+};
+
+static void pmu_dev_release(struct device *dev)
+{
+	kfree(dev);
+}
+
+static int pmu_dev_alloc(struct pmu *pmu)
+{
+	int ret = -ENOMEM;
+
+	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!pmu->dev)
+		goto out;
+
+	device_initialize(pmu->dev);
+	ret = dev_set_name(pmu->dev, "%s", pmu->name);
+	if (ret)
+		goto free_dev;
+
+	dev_set_drvdata(pmu->dev, pmu);
+	pmu->dev->bus = &pmu_bus;
+	pmu->dev->release = pmu_dev_release;
+	ret = device_add(pmu->dev);
+	if (ret)
+		goto free_dev;
+
+out:
+	return ret;
+
+free_dev:
+	put_device(pmu->dev);
+	goto out;
+}
 
-int perf_pmu_register(struct pmu *pmu)
+int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
 	int cpu, ret;
 
@@ -5195,13 +5371,38 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
 	if (!pmu->pmu_disable_count)
 		goto unlock;
 
+	pmu->type = -1;
+	if (!name)
+		goto skip_type;
+	pmu->name = name;
+
+	if (type < 0) {
+		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
+		if (!err)
+			goto free_pdc;
+
+		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
+		if (err) {
+			ret = err;
+			goto free_pdc;
+		}
+	}
+	pmu->type = type;
+
+	if (pmu_bus_running) {
+		ret = pmu_dev_alloc(pmu);
+		if (ret)
+			goto free_idr;
+	}
+
+skip_type:
 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
 	if (pmu->pmu_cpu_context)
 		goto got_cpu_context;
 
 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
 	if (!pmu->pmu_cpu_context)
-		goto free_pdc;
+		goto free_dev;
 
 	for_each_possible_cpu(cpu) {
 		struct perf_cpu_context *cpuctx;
@@ -5245,6 +5446,14 @@ unlock:
 
 	return ret;
 
+free_dev:
+	device_del(pmu->dev);
+	put_device(pmu->dev);
+
+free_idr:
+	if (pmu->type >= PERF_TYPE_MAX)
+		idr_remove(&pmu_idr, pmu->type);
+
 free_pdc:
 	free_percpu(pmu->pmu_disable_count);
 	goto unlock;
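The dynamic-type branch above uses the two-step idr API of this era: idr_pre_get() preloads memory and returns 0 on failure, then idr_get_new_above() installs the pointer at an id at or above PERF_TYPE_MAX, so dynamically registered PMUs can never collide with the fixed PERF_TYPE_* values. A minimal sketch of that pattern (hypothetical wrapper, not from the patch; later kernels replace both calls with idr_alloc()):

static int sketch_alloc_pmu_type(struct idr *idr, struct pmu *pmu, int *type)
{
	/* preload; returns 0 when no memory could be reserved */
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	/* install pmu at the first free id >= PERF_TYPE_MAX */
	return idr_get_new_above(idr, pmu, PERF_TYPE_MAX, type);
}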
@@ -5264,6 +5473,10 @@ void perf_pmu_unregister(struct pmu *pmu)
 	synchronize_rcu();
 
 	free_percpu(pmu->pmu_disable_count);
+	if (pmu->type >= PERF_TYPE_MAX)
+		idr_remove(&pmu_idr, pmu->type);
+	device_del(pmu->dev);
+	put_device(pmu->dev);
 	free_pmu_context(pmu);
 }
 
@@ -5273,6 +5486,13 @@ struct pmu *perf_init_event(struct perf_event *event)
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
+
+	rcu_read_lock();
+	pmu = idr_find(&pmu_idr, event->attr.type);
+	rcu_read_unlock();
+	if (pmu)
+		goto unlock;
+
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		int ret = pmu->event_init(event);
 		if (!ret)
@@ -5738,6 +5958,12 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&current->perf_event_mutex);
 
 	/*
+	 * Precalculate sample_data sizes
+	 */
+	perf_event__header_size(event);
+	perf_event__id_header_size(event);
+
+	/*
 	 * Drop the reference on the group_event after placing the
 	 * new event on the sibling_list. This ensures destruction
 	 * of the group leader will find the pointer to itself in
@@ -6090,6 +6316,12 @@ inherit_event(struct perf_event *parent_event,
 	child_event->overflow_handler = parent_event->overflow_handler;
 
 	/*
+	 * Precalculate sample_data sizes
+	 */
+	perf_event__header_size(child_event);
+	perf_event__id_header_size(child_event);
+
+	/*
 	 * Link it up in the child's context:
 	 */
 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
@@ -6320,7 +6552,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	mutex_unlock(&swhash->hlist_mutex);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
 static void perf_pmu_rotate_stop(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -6374,6 +6606,26 @@ static void perf_event_exit_cpu(int cpu)
 static inline void perf_event_exit_cpu(int cpu) { }
 #endif
 
+static int
+perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		perf_event_exit_cpu(cpu);
+
+	return NOTIFY_OK;
+}
+
+/*
+ * Run the perf reboot notifier at the very last possible moment so that
+ * the generic watchdog code runs as long as possible.
+ */
+static struct notifier_block perf_reboot_notifier = {
+	.notifier_call = perf_reboot,
+	.priority = INT_MIN,
+};
+
 static int __cpuinit
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
@@ -6402,14 +6654,45 @@ void __init perf_event_init(void)
 {
 	int ret;
 
+	idr_init(&pmu_idr);
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
-	perf_pmu_register(&perf_swevent);
-	perf_pmu_register(&perf_cpu_clock);
-	perf_pmu_register(&perf_task_clock);
+	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
+	perf_pmu_register(&perf_cpu_clock, NULL, -1);
+	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
+
+static int __init perf_event_sysfs_init(void)
+{
+	struct pmu *pmu;
+	int ret;
+
+	mutex_lock(&pmus_lock);
+
+	ret = bus_register(&pmu_bus);
+	if (ret)
+		goto unlock;
+
+	list_for_each_entry(pmu, &pmus, entry) {
+		if (!pmu->name || pmu->type < 0)
+			continue;
+
+		ret = pmu_dev_alloc(pmu);
+		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
+	}
+	pmu_bus_running = 1;
+	ret = 0;
+
+unlock:
+	mutex_unlock(&pmus_lock);
+
+	return ret;
+}
+device_initcall(perf_event_sysfs_init);
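With the event_source bus in place, each named PMU exposes its (possibly idr-allocated) type at /sys/bus/event_source/devices/<name>/type, which is exactly what perf_event_attr.type expects. A hypothetical userspace sketch of consuming it (illustrative only, error handling kept minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Look up a PMU's type id in sysfs and open an event on it. */
static int open_pmu_event(const char *pmu, unsigned long long config)
{
	struct perf_event_attr attr;
	char path[256];
	int type;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = type;	/* fixed (e.g. "software") or dynamic */
	attr.config = config;

	/* monitor the calling thread on any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}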