about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2009-06-01 03:53:16 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-02 07:10:55 -0400
commitbf4e0ed3d027ce581be18496036862131b5f32aa (patch)
treef55e5c85b286b3ace8b81f3ffc7e48590f4a7020 /kernel
parent3f731ca60afc29f5bcdb5fd2a04391466313a9ac (diff)
perf_counter: Remove unused prev_state field
This removes the prev_state field of struct perf_counter since it is
now unused.  It was only used by the cpu migration counter, which
doesn't use it any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35052.915728.626374@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/perf_counter.c | 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index cd94cf3bf9e2..fbed4d28ad7d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -572,7 +572,6 @@ group_sched_in(struct perf_counter *group_counter,
 	if (ret)
 		return ret < 0 ? ret : 0;
 
-	group_counter->prev_state = group_counter->state;
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
 
@@ -580,7 +579,6 @@ group_sched_in(struct perf_counter *group_counter,
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter->prev_state = counter->state;
 		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
 			partial_group = counter;
 			goto group_error;
@@ -657,7 +655,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 			       struct perf_counter_context *ctx)
 {
 	list_add_counter(counter, ctx);
-	counter->prev_state = PERF_COUNTER_STATE_OFF;
 	counter->tstamp_enabled = ctx->time;
 	counter->tstamp_running = ctx->time;
 	counter->tstamp_stopped = ctx->time;
@@ -820,7 +817,6 @@ static void __perf_counter_enable(void *info)
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
-	counter->prev_state = counter->state;
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
 	counter->state = PERF_COUNTER_STATE_INACTIVE;