author		Ingo Molnar <mingo@elte.hu>	2009-01-10 20:44:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-10 20:44:08 -0500
commit		c0d362a832ee70435fc4555a64f820893b1da0bd (patch)
tree		162497df64451936dfdb70f972dd7849d3e3e520 /kernel/perf_counter.c
parent		506c10f26c481b7f8ef27c1c79290f68989b2e9e (diff)
parent		f78628374a13bc150db77c6e02d4f2c0a7f932ef (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	55
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 37f771691f93..4c0dccb756ad 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -41,12 +41,20 @@ static DEFINE_MUTEX(perf_resource_mutex);
 extern __weak const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
-	return ERR_PTR(-EINVAL);
+	return NULL;
 }
 
 u64 __weak hw_perf_save_disable(void) { return 0; }
 void __weak hw_perf_restore(u64 ctrl) { barrier(); }
 void __weak hw_perf_counter_setup(void) { barrier(); }
+int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu)
+{
+	return 0;
+}
+
+void __weak perf_counter_print_debug(void) { }
 
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -341,6 +349,9 @@ group_sched_out(struct perf_counter *group_counter,
 {
 	struct perf_counter *counter;
 
+	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return;
+
 	counter_sched_out(group_counter, cpuctx, ctx);
 
 	/*
@@ -354,15 +365,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	if (ctx->nr_active) {
 		list_for_each_entry(counter, &ctx->counter_list, list_entry)
 			group_sched_out(counter, cpuctx, ctx);
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
@@ -402,7 +416,14 @@ group_sched_in(struct perf_counter *group_counter,
 	       int cpu)
 {
 	struct perf_counter *counter, *partial_group;
-	int ret = 0;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
 
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
@@ -415,10 +436,9 @@ group_sched_in(struct perf_counter *group_counter,
 			partial_group = counter;
 			goto group_error;
 		}
-		ret = -EAGAIN;
 	}
 
-	return ret;
+	return 0;
 
 group_error:
 	/*
@@ -440,11 +460,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
@@ -454,12 +476,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			continue;
 
 		/*
-		 * If we scheduled in a group atomically and
-		 * exclusively, break out:
+		 * If we scheduled in a group atomically and exclusively,
+		 * or if this group can't go on, break out:
 		 */
 		if (group_sched_in(counter, cpuctx, ctx, cpu))
 			break;
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
@@ -928,18 +951,32 @@ static const struct file_operations perf_fops = {
 
 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+	int cpu = raw_smp_processor_id();
+
+	atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
 	return 0;
 }
 
+static void cpu_clock_perf_counter_update(struct perf_counter *counter)
+{
+	int cpu = raw_smp_processor_id();
+	s64 prev;
+	u64 now;
+
+	now = cpu_clock(cpu);
+	prev = atomic64_read(&counter->hw.prev_count);
+	atomic64_set(&counter->hw.prev_count, now);
+	atomic64_add(now - prev, &counter->count);
+}
+
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
+	cpu_clock_perf_counter_update(counter);
 }
 
 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 {
-	int cpu = raw_smp_processor_id();
-
-	atomic64_set(&counter->count, cpu_clock(cpu));
+	cpu_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {