author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-05-23 12:29:01 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-05-24 02:24:30 -0400
commit     475c55797323b67435083f6e2eb8ee670f6410ec
tree       2aad9762396b01ccf92581489e70bd3689246f19
parent     082ff5a2767a0679ee543f14883adbafb631ffbe
perf_counter: Remove perf_counter_context::nr_enabled
Now that prctl() no longer disables other people's counters,
remove the PMU cache code that deals with that.
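
For reference, this is what context_equiv() reduces to once the
nr_enabled comparison is dropped; a minimal sketch lifted from the
resulting hunk below, showing that two contexts are now considered
equivalent purely by parentage and generation:

	static int context_equiv(struct perf_counter_context *ctx1,
				 struct perf_counter_context *ctx2)
	{
		/*
		 * Equivalent iff both contexts were cloned from the same
		 * parent context, at the same generation of that parent.
		 */
		return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
			&& ctx1->parent_gen == ctx2->parent_gen;
	}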
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163013.032998331@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/perf_counter.h |  1 -
-rw-r--r--	kernel/perf_counter.c        | 11 +----------
2 files changed, 1 insertion(+), 11 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4159ee5940f8..2ddf5e3c5518 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -516,7 +516,6 @@ struct perf_counter_context {
 	struct list_head	event_list;
 	int			nr_counters;
 	int			nr_active;
-	int			nr_enabled;
 	int			is_active;
 	atomic_t		refcount;
 	struct task_struct	*task;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c86a6369764..cb4062559b47 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -134,8 +134,6 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
 	list_add_rcu(&counter->event_entry, &ctx->event_list);
 	ctx->nr_counters++;
-	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-		ctx->nr_enabled++;
 }
 
 /*
@@ -150,8 +148,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	if (list_empty(&counter->list_entry))
 		return;
 	ctx->nr_counters--;
-	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-		ctx->nr_enabled--;
 
 	list_del_init(&counter->list_entry);
 	list_del_rcu(&counter->event_entry);
@@ -406,7 +402,6 @@ static void __perf_counter_disable(void *info)
 		else
 			counter_sched_out(counter, cpuctx, ctx);
 		counter->state = PERF_COUNTER_STATE_OFF;
-		ctx->nr_enabled--;
 	}
 
 	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -448,7 +443,6 @@ static void perf_counter_disable(struct perf_counter *counter)
 	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
 		update_counter_times(counter);
 		counter->state = PERF_COUNTER_STATE_OFF;
-		ctx->nr_enabled--;
 	}
 
 	spin_unlock_irq(&ctx->lock);
@@ -759,7 +753,6 @@ static void __perf_counter_enable(void *info)
 		goto unlock;
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-	ctx->nr_enabled++;
 
 	/*
 	 * If the counter is in a group and isn't the group leader,
@@ -850,7 +843,6 @@ static void perf_counter_enable(struct perf_counter *counter)
 		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		counter->tstamp_enabled =
 			ctx->time - counter->total_time_enabled;
-		ctx->nr_enabled++;
 	}
 out:
 	spin_unlock_irq(&ctx->lock);
@@ -910,8 +902,7 @@ static int context_equiv(struct perf_counter_context *ctx1,
 			 struct perf_counter_context *ctx2)
 {
 	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
-		&& ctx1->parent_gen == ctx2->parent_gen
-		&& ctx1->nr_enabled == ctx2->nr_enabled;
+		&& ctx1->parent_gen == ctx2->parent_gen;
 }
 
 /*