diff options
Diffstat (limited to 'kernel/perf_event.c')
| -rw-r--r-- | kernel/perf_event.c | 32 |
1 files changed, 25 insertions, 7 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 403d1804b198..db5b56064687 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event) | |||
| 402 | } | 402 | } |
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | static inline int | ||
| 406 | event_filter_match(struct perf_event *event) | ||
| 407 | { | ||
| 408 | return event->cpu == -1 || event->cpu == smp_processor_id(); | ||
| 409 | } | ||
| 410 | |||
| 405 | static void | 411 | static void |
| 406 | event_sched_out(struct perf_event *event, | 412 | event_sched_out(struct perf_event *event, |
| 407 | struct perf_cpu_context *cpuctx, | 413 | struct perf_cpu_context *cpuctx, |
| 408 | struct perf_event_context *ctx) | 414 | struct perf_event_context *ctx) |
| 409 | { | 415 | { |
| 416 | u64 delta; | ||
| 417 | /* | ||
| 418 | * An event which could not be activated because of | ||
| 419 | * filter mismatch still needs to have its timings | ||
| 420 | * maintained, otherwise bogus information is returned | ||
| 421 | * via read() for time_enabled, time_running: | ||
| 422 | */ | ||
| 423 | if (event->state == PERF_EVENT_STATE_INACTIVE | ||
| 424 | && !event_filter_match(event)) { | ||
| 425 | delta = ctx->time - event->tstamp_stopped; | ||
| 426 | event->tstamp_running += delta; | ||
| 427 | event->tstamp_stopped = ctx->time; | ||
| 428 | } | ||
| 429 | |||
| 410 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 430 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 411 | return; | 431 | return; |
| 412 | 432 | ||
| @@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 432 | struct perf_event_context *ctx) | 452 | struct perf_event_context *ctx) |
| 433 | { | 453 | { |
| 434 | struct perf_event *event; | 454 | struct perf_event *event; |
| 435 | 455 | int state = group_event->state; | |
| 436 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | ||
| 437 | return; | ||
| 438 | 456 | ||
| 439 | event_sched_out(group_event, cpuctx, ctx); | 457 | event_sched_out(group_event, cpuctx, ctx); |
| 440 | 458 | ||
| @@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event, | |||
| 444 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | 462 | list_for_each_entry(event, &group_event->sibling_list, group_entry) |
| 445 | event_sched_out(event, cpuctx, ctx); | 463 | event_sched_out(event, cpuctx, ctx); |
| 446 | 464 | ||
| 447 | if (group_event->attr.exclusive) | 465 | if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) |
| 448 | cpuctx->exclusive = 0; | 466 | cpuctx->exclusive = 0; |
| 449 | } | 467 | } |
| 450 | 468 | ||
| @@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
| 5743 | { | 5761 | { |
| 5744 | unsigned int cpu = (long)hcpu; | 5762 | unsigned int cpu = (long)hcpu; |
| 5745 | 5763 | ||
| 5746 | switch (action) { | 5764 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5747 | 5765 | ||
| 5748 | case CPU_UP_PREPARE: | 5766 | case CPU_UP_PREPARE: |
| 5749 | case CPU_UP_PREPARE_FROZEN: | 5767 | case CPU_DOWN_FAILED: |
| 5750 | perf_event_init_cpu(cpu); | 5768 | perf_event_init_cpu(cpu); |
| 5751 | break; | 5769 | break; |
| 5752 | 5770 | ||
| 5771 | case CPU_UP_CANCELED: | ||
| 5753 | case CPU_DOWN_PREPARE: | 5772 | case CPU_DOWN_PREPARE: |
| 5754 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 5755 | perf_event_exit_cpu(cpu); | 5773 | perf_event_exit_cpu(cpu); |
| 5756 | break; | 5774 | break; |
| 5757 | 5775 | ||
