author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-04-06 05:45:11 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-07 04:49:00 -0400
commit     a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8 (patch)
tree       8c2ca4d3042bc90beb8d733584c7427476ae92f7 /kernel
parent     4af4998b8aa35600f4c4a4f3c3a23baca6081d02 (diff)
perf_counter: rework the task clock software counter
Rework the task clock software counter to use the context time instead
of the task runtime clock; this removes the last such user.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.445450972@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 42 ++++++++++++------------------------------
1 file changed, 12 insertions(+), 30 deletions(-)
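The heart of the change is easiest to see outside the diff: instead of deriving the counter value from curr->se.sum_exec_runtime plus a forced __task_delta_exec(), the counter now snapshots the context time and accumulates deltas against it. What follows is a minimal userspace sketch of that scheme, not kernel code: it assumes C11 atomics, and context_time() is a hypothetical stand-in for the ctx->time value that update_context_time() maintains in the patch.

/*
 * Userspace sketch (not kernel code) of the reworked counting scheme,
 * using C11 atomics.  context_time() is a hypothetical stand-in for
 * the ctx->time value that update_context_time() maintains.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static _Atomic uint64_t prev_count; /* last context time we accounted */
static _Atomic uint64_t count;      /* accumulated counter value      */

static uint64_t context_time(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Mirrors task_clock_perf_counter_update(): swap the new timestamp in
 * and accumulate the elapsed delta. */
static void task_clock_update(void)
{
        uint64_t now  = context_time();
        uint64_t prev = atomic_exchange(&prev_count, now);

        atomic_fetch_add(&count, now - prev);
}

/* Mirrors the enable path: prime prev_count so the first delta only
 * covers time after enabling. */
static void task_clock_enable(void)
{
        atomic_store(&prev_count, context_time());
}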
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 84d85ab4e161..56b7eb53d673 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -974,9 +974,6 @@ int perf_counter_task_disable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -1017,9 +1014,6 @@ int perf_counter_task_enable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -2347,38 +2341,28 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
  * Software counter: task time clock
  */
 
-/*
- * Called from within the scheduler:
- */
-static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
-{
-	struct task_struct *curr = counter->task;
-	u64 delta;
-
-	delta = __task_delta_exec(curr, update);
-
-	return curr->se.sum_exec_runtime + delta;
-}
-
-static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+static void task_clock_perf_counter_update(struct perf_counter *counter)
 {
-	u64 prev;
+	u64 prev, now;
 	s64 delta;
 
-	prev = atomic64_read(&counter->hw.prev_count);
-
-	atomic64_set(&counter->hw.prev_count, now);
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
 
+	prev = atomic64_xchg(&counter->hw.prev_count, now);
 	delta = now - prev;
-
 	atomic64_add(delta, &counter->count);
 }
 
 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 now;
+
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
 
-	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
 	if (hwc->irq_period) {
@@ -2393,14 +2377,12 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
 	hrtimer_cancel(&counter->hw.hrtimer);
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 0));
+	task_clock_perf_counter_update(counter);
 }
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 1));
+	task_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
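A side note on the update path: the old atomic64_read()/atomic64_set() pair on prev_count becomes a single atomic64_xchg(). The commit message does not call this out, but the shape matters: a combined read-modify-write means each interval of time is claimed by exactly one updater. Below is a userspace C11 analogue of the two shapes; again a sketch, with atomic_load/atomic_store/atomic_exchange standing in for the kernel's atomic64_* primitives.

/*
 * Sketch of why the read/set pair becomes one xchg (C11 userspace
 * analogue of the kernel's atomic64_* primitives).
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t count;

/* Old shape: read and set are separate, so two concurrent updaters can
 * observe the same prev and both add the same delta (double count). */
static void update_racy(uint64_t now)
{
        uint64_t prev = atomic_load(&prev_count);

        atomic_store(&prev_count, now);
        atomic_fetch_add(&count, now - prev);
}

/* New shape: the exchange claims the interval atomically, so each span
 * of time is accounted exactly once. */
static void update_atomic(uint64_t now)
{
        uint64_t prev = atomic_exchange(&prev_count, now);

        atomic_fetch_add(&count, now - prev);
}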