author	Ingo Molnar <mingo@elte.hu>	2008-12-17 08:10:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-23 06:45:14 -0500
commit	aa9c4c0f967fdb482ea95e8473ec3d201e6e0781 (patch)
tree	8223d34630b7d3130825e8a2197e9bb51c34b7fa /kernel/perf_counter.c
parent	7671581f1666ef4b54a1c1e598c51ac44c060a9b (diff)
perfcounters: fix task clock counter
Impact: fix per task clock counter precision
Signed-off-by: Ingo Molnar <mingo@elte.hu>
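
The gist of the fix, condensed from the diff below: the task clock counter used to read current->se.sum_exec_runtime directly, but the scheduler only brings that field up to date on ticks and context switches, so reads in between returned stale values. The counter value is now derived as the accounted runtime plus the delta still pending inside the scheduler:

	/*
	 * Value calculation as introduced by this patch (taken from the
	 * diff below). __task_delta_exec() is a scheduler helper added
	 * alongside this change in kernel/sched.c, which this view -
	 * limited to kernel/perf_counter.c - does not show.
	 */
	static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
	{
		struct task_struct *curr = counter->task;
		u64 delta;

		/* only valid for the current task, under the runqueue lock */
		WARN_ON_ONCE(counter->task != current);

		/* runtime already accounted + the slice the scheduler still holds */
		delta = __task_delta_exec(curr, update);

		return curr->se.sum_exec_runtime + delta;
	}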
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	70
1 file changed, 54 insertions(+), 16 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 961d651aa574..f1110ac1267b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -18,6 +18,7 @@
 #include <linux/uaccess.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
+#include <linux/kernel_stat.h>
 #include <linux/perf_counter.h>
 
 /*
@@ -106,7 +107,8 @@ static void __perf_counter_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	curr_rq_lock_irq_save(&flags);
+	spin_lock(&ctx->lock);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		counter->hw_ops->disable(counter);
@@ -135,7 +137,8 @@ static void __perf_counter_remove_from_context(void *info)
 				perf_max_counters - perf_reserved_percpu);
 	}
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock(&ctx->lock);
+	curr_rq_unlock_irq_restore(&flags);
 }
 
 
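Note the lock-ordering change in the two hunks above: instead of spin_lock_irqsave() on the context lock alone, the code now takes the current runqueue's lock (with IRQs disabled) first and only then the context lock, since computing the task clock value requires consistent runqueue state. The helper pair is defined in kernel/sched.c, outside this diff; a plausible shape, inferred from the call sites here (the names are real, the body is an assumption):

	void curr_rq_lock_irq_save(unsigned long *flags)
	{
		struct rq *rq;

		/* IRQs off first, so the runqueue lock nests safely below it */
		local_irq_save(*flags);
		rq = cpu_rq(smp_processor_id());
		spin_lock(&rq->lock);
	}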
@@ -209,7 +212,8 @@ static void __perf_install_in_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	curr_rq_lock_irq_save(&flags);
+	spin_lock(&ctx->lock);
 
 	/*
 	 * Protect the list operation against NMI by disabling the
@@ -232,7 +236,8 @@ static void __perf_install_in_context(void *info)
 	if (!ctx->task && cpuctx->max_pertask)
 		cpuctx->max_pertask--;
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock(&ctx->lock);
+	curr_rq_unlock_irq_restore(&flags);
 }
 
 /*
@@ -438,15 +443,19 @@ int perf_counter_task_disable(void)
 	struct task_struct *curr = current;
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 	struct perf_counter *counter;
+	unsigned long flags;
 	u64 perf_flags;
 	int cpu;
 
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	local_irq_disable();
+	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
+	/* force the update of the task clock: */
+	__task_delta_exec(curr, 1);
+
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
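The new "force the update of the task clock" call deserves a word: __task_delta_exec(curr, 1) is invoked here for its side effect - with update == 1 it advances the runqueue clock before the counters are scheduled out, so they snapshot an up-to-date value. The helper itself lives in kernel/sched.c; a sketch of what it plausibly does, inferred from its uses in this file (an assumption, not the verbatim implementation):

	u64 __task_delta_exec(struct task_struct *p, int update)
	{
		struct rq *rq = task_rq(p);	/* caller holds this runqueue lock */
		s64 delta_exec;

		if (update)
			update_rq_clock(rq);	/* refresh rq->clock before sampling */

		/* runtime accrued since the task last charged its clock */
		delta_exec = rq->clock - p->se.exec_start;

		return delta_exec > 0 ? delta_exec : 0;
	}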
@@ -463,7 +472,7 @@ int perf_counter_task_disable(void)
 
 	spin_unlock(&ctx->lock);
 
-	local_irq_enable();
+	curr_rq_unlock_irq_restore(&flags);
 
 	return 0;
 }
@@ -473,15 +482,19 @@ int perf_counter_task_enable(void)
 	struct task_struct *curr = current;
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 	struct perf_counter *counter;
+	unsigned long flags;
 	u64 perf_flags;
 	int cpu;
 
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	local_irq_disable();
+	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
+	/* force the update of the task clock: */
+	__task_delta_exec(curr, 1);
+
 	spin_lock(&ctx->lock);
 
 	/*
@@ -493,6 +506,7 @@ int perf_counter_task_enable(void)
 		if (counter->state != PERF_COUNTER_STATE_OFF)
 			continue;
 		counter->state = PERF_COUNTER_STATE_INACTIVE;
+		counter->hw_event.disabled = 0;
 	}
 	hw_perf_restore(perf_flags);
 
@@ -500,7 +514,7 @@ int perf_counter_task_enable(void)
 
 	perf_counter_task_sched_in(curr, cpu);
 
-	local_irq_enable();
+	curr_rq_unlock_irq_restore(&flags);
 
 	return 0;
 }
@@ -540,8 +554,11 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 static void __read(void *info)
 {
 	struct perf_counter *counter = info;
+	unsigned long flags;
 
+	curr_rq_lock_irq_save(&flags);
 	counter->hw_ops->read(counter);
+	curr_rq_unlock_irq_restore(&flags);
 }
 
 static u64 perf_counter_read(struct perf_counter *counter)
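__read() is the cross-CPU half of the read path; bracketing the ->read() call with the runqueue lock is what lets a task clock counter call __task_delta_exec() safely from here. For context, the caller just past this hunk looks roughly like this (a reconstruction, not part of the diff shown):

	static u64 perf_counter_read(struct perf_counter *counter)
	{
		/*
		 * If the counter is currently active on a CPU, run __read()
		 * there so counter->count is brought up to date first:
		 */
		if (counter->state == PERF_COUNTER_STATE_ACTIVE)
			smp_call_function_single(counter->oncpu, __read, counter, 1);

		return atomic64_read(&counter->count);
	}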
@@ -860,13 +877,27 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
 	.read		= cpu_clock_perf_counter_read,
 };
 
-static void task_clock_perf_counter_update(struct perf_counter *counter)
+/*
+ * Called from within the scheduler:
+ */
+static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
 {
-	u64 prev, now;
+	struct task_struct *curr = counter->task;
+	u64 delta;
+
+	WARN_ON_ONCE(counter->task != current);
+
+	delta = __task_delta_exec(curr, update);
+
+	return curr->se.sum_exec_runtime + delta;
+}
+
+static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+{
+	u64 prev;
 	s64 delta;
 
 	prev = atomic64_read(&counter->hw.prev_count);
-	now = current->se.sum_exec_runtime;
 
 	atomic64_set(&counter->hw.prev_count, now);
 
@@ -877,17 +908,23 @@ static void task_clock_perf_counter_update(struct perf_counter *counter)
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	task_clock_perf_counter_update(counter);
+	u64 now = task_clock_perf_counter_val(counter, 1);
+
+	task_clock_perf_counter_update(counter, now);
 }
 
 static void task_clock_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, current->se.sum_exec_runtime);
+	u64 now = task_clock_perf_counter_val(counter, 0);
+
+	atomic64_set(&counter->hw.prev_count, now);
 }
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	task_clock_perf_counter_update(counter);
+	u64 now = task_clock_perf_counter_val(counter, 0);
+
+	task_clock_perf_counter_update(counter, now);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
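Note the update argument: the read path passes 1 (force the task clock forward before sampling), while enable and disable pass 0 - those run from scheduler context, where the clock is already current (the sched-out path even forces it explicitly; see perf_counter_task_disable() above). The hunk at -860 cuts task_clock_perf_counter_update() off mid-body; the remainder follows the usual snapshot-and-accumulate pattern (a reconstruction of the truncated tail, not shown verbatim in this diff):

	static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
	{
		u64 prev;
		s64 delta;

		/* snapshot: remember 'now' as the baseline for the next update */
		prev = atomic64_read(&counter->hw.prev_count);
		atomic64_set(&counter->hw.prev_count, now);

		/* accumulate the task time elapsed since the last snapshot */
		delta = now - prev;
		atomic64_add(delta, &counter->count);
	}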
@@ -1267,6 +1304,7 @@ __perf_counter_exit_task(struct task_struct *child,
 {
 	struct perf_counter *parent_counter;
 	u64 parent_val, child_val;
+	unsigned long flags;
 	u64 perf_flags;
 
 	/*
@@ -1275,7 +1313,7 @@ __perf_counter_exit_task(struct task_struct *child,
 	 * Be careful about zapping the list - IRQ/NMI context
 	 * could still be processing it:
 	 */
-	local_irq_disable();
+	curr_rq_lock_irq_save(&flags);
 	perf_flags = hw_perf_save_disable();
 
 	if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) {
@@ -1294,7 +1332,7 @@ __perf_counter_exit_task(struct task_struct *child,
 	list_del_init(&child_counter->list_entry);
 
 	hw_perf_restore(perf_flags);
-	local_irq_enable();
+	curr_rq_unlock_irq_restore(&flags);
 
 	parent_counter = child_counter->parent;
 	/*