author     Robert Richter <robert.richter@amd.com>   2010-10-11 13:26:50 -0400
committer  Robert Richter <robert.richter@amd.com>   2010-10-11 13:26:50 -0400
commit     ad0f7cfaa85fc033523a09ab1f3dd6b8ded3dff5
tree       2565121e4b9945d953e02c77a2e53065b3789aa4   /kernel/perf_event.c
parent     86c8c04792f152c5469023885510140dd34817bc
parent     c7a27aa4652c63172489a73f3961455650a79a7f
Merge branch 'oprofile/urgent' (early part) into oprofile/perf
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e2534691db0d..fc512684423f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -407,11 +407,31 @@ static void perf_group_detach(struct perf_event *event)
 	}
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+	return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 delta;
+	/*
+	 * An event which could not be activated because of
+	 * a filter mismatch still needs to have its timings
+	 * maintained, otherwise bogus information is returned
+	 * via read() for time_enabled, time_running:
+	 */
+	if (event->state == PERF_EVENT_STATE_INACTIVE
+	    && !event_filter_match(event)) {
+		delta = ctx->time - event->tstamp_stopped;
+		event->tstamp_running += delta;
+		event->tstamp_stopped = ctx->time;
+	}
+
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
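The hunk above is the heart of the timing fix: an INACTIVE event that fails the CPU filter still has elapsed context time folded into its timestamps, so a later read() computes sane time_enabled/time_running values. A minimal user-space sketch of the same bookkeeping (toy types and names, not the kernel API):

    /* Toy model, not kernel code: mirrors the delta arithmetic in the
     * hunk above so the effect can be run and inspected in isolation. */
    #include <stdio.h>

    typedef unsigned long long u64;

    struct toy_event {
            u64 tstamp_running;     /* accumulated running time */
            u64 tstamp_stopped;     /* context time when last stopped */
    };

    static void filtered_sched_out(struct toy_event *e, u64 ctx_time)
    {
            /* Same update as the patch: credit the elapsed context
             * time so read() does not report a stale window. */
            u64 delta = ctx_time - e->tstamp_stopped;
            e->tstamp_running += delta;
            e->tstamp_stopped = ctx_time;
    }

    int main(void)
    {
            struct toy_event e = { .tstamp_running = 0, .tstamp_stopped = 100 };

            filtered_sched_out(&e, 250);    /* 150 units pass while filtered out */
            printf("running=%llu stopped=%llu\n",
                   e.tstamp_running, e.tstamp_stopped);   /* running=150 stopped=250 */
            return 0;
    }

Here 150 time units elapse while the event is filtered out; both timestamps advance together, so no interval is lost or double-counted by subsequent updates.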
@@ -437,9 +457,7 @@ group_sched_out(struct perf_event *group_event,
 		   struct perf_event_context *ctx)
 {
 	struct perf_event *event;
-
-	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+	int state = group_event->state;
 
 	event_sched_out(group_event, cpuctx, ctx);
 
@@ -449,7 +467,7 @@ group_sched_out(struct perf_event *group_event,
 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
 		event_sched_out(event, cpuctx, ctx);
 
-	if (group_event->attr.exclusive)
+	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
 		cpuctx->exclusive = 0;
 }
 
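The early return is dropped so that event_sched_out() (and its new timing fixup) also runs for groups that never became ACTIVE; the old state is snapshotted first because event_sched_out() demotes an ACTIVE event, and the exclusive-slot release at the end must test the state as it was before that call. A self-contained sketch of the capture-before-mutate pattern (toy types, not the kernel's):

    #include <stdio.h>

    enum toy_state { TOY_ACTIVE, TOY_INACTIVE };

    struct toy_group { enum toy_state state; int exclusive; };
    struct toy_ctx   { int exclusive; };

    static void toy_sched_out(struct toy_group *g)
    {
            if (g->state == TOY_ACTIVE)
                    g->state = TOY_INACTIVE;   /* mutates the state we must test */
    }

    static void toy_group_sched_out(struct toy_group *g, struct toy_ctx *c)
    {
            enum toy_state state = g->state;   /* snapshot before mutation */

            toy_sched_out(g);                  /* may flip ACTIVE -> INACTIVE */

            /* Testing g->state here would always see INACTIVE; the snapshot
             * preserves whether the group actually held the exclusive slot. */
            if (state == TOY_ACTIVE && g->exclusive)
                    c->exclusive = 0;
    }

    int main(void)
    {
            struct toy_group g = { TOY_ACTIVE, 1 };
            struct toy_ctx c = { 1 };

            toy_group_sched_out(&g, &c);
            printf("ctx exclusive = %d\n", c.exclusive);   /* 0: slot released */
            return 0;
    }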
@@ -5748,15 +5766,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 
 	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
 		perf_event_init_cpu(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
 
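In the old hotplug-notifier API each *_FROZEN action is just the base action with the CPU_TASKS_FROZEN bit ORed in, so masking that bit off lets one case label cover both the normal and the suspend/resume path; the handler also gains CPU_DOWN_FAILED and CPU_UP_CANCELED so a failed transition restores the per-CPU state. A hedged sketch of the mask trick (constant values quoted from memory from that era's include/linux/cpu.h, so treat them as illustrative):

    #include <stdio.h>

    /* Assumed values, matching the pre-4.10 notifier constants. */
    #define CPU_UP_PREPARE          0x0003
    #define CPU_TASKS_FROZEN        0x0010
    #define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE | CPU_TASKS_FROZEN)

    int main(void)
    {
            unsigned long action = CPU_UP_PREPARE_FROZEN;

            /* 0x13 & ~0x10 == 0x03: the frozen variant collapses
             * back to the base CPU_UP_PREPARE case label. */
            printf("%#lx -> %#lx\n", action, action & ~CPU_TASKS_FROZEN);
            return 0;
    }

With the mask in place, the explicit *_FROZEN case labels become dead weight and can be deleted, which is exactly what the hunk above does.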