diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-05-23 12:29:00 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-24 02:24:08 -0400 |
commit | 082ff5a2767a0679ee543f14883adbafb631ffbe (patch) | |
tree | 5ddf792ed3f80b17bc427edea1dc1d4b4303b4f6 /kernel/perf_counter.c | |
parent | aa9c67f53d1969cf1db4c9c2db3a78c4ceb96469 (diff) |
perf_counter: Change pctrl() behaviour
Instead of en/dis-abling all counters acting on a particular
task, en/dis-able all counters we created.
[ v2: fix crash on first counter enable ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.916937244@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 87 |
1 files changed, 24 insertions, 63 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 0e97f8961333..4c86a6369764 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -1076,79 +1076,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | |||
1076 | __perf_counter_sched_in(ctx, cpuctx, cpu); | 1076 | __perf_counter_sched_in(ctx, cpuctx, cpu); |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | int perf_counter_task_disable(void) | 1079 | int perf_counter_task_enable(void) |
1080 | { | 1080 | { |
1081 | struct task_struct *curr = current; | ||
1082 | struct perf_counter_context *ctx = curr->perf_counter_ctxp; | ||
1083 | struct perf_counter *counter; | 1081 | struct perf_counter *counter; |
1084 | unsigned long flags; | ||
1085 | |||
1086 | if (!ctx || !ctx->nr_counters) | ||
1087 | return 0; | ||
1088 | |||
1089 | local_irq_save(flags); | ||
1090 | 1082 | ||
1091 | __perf_counter_task_sched_out(ctx); | 1083 | mutex_lock(&current->perf_counter_mutex); |
1092 | 1084 | list_for_each_entry(counter, &current->perf_counter_list, owner_entry) | |
1093 | spin_lock(&ctx->lock); | 1085 | perf_counter_enable(counter); |
1094 | 1086 | mutex_unlock(&current->perf_counter_mutex); | |
1095 | /* | ||
1096 | * Disable all the counters: | ||
1097 | */ | ||
1098 | perf_disable(); | ||
1099 | |||
1100 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1101 | if (counter->state != PERF_COUNTER_STATE_ERROR) { | ||
1102 | update_group_times(counter); | ||
1103 | counter->state = PERF_COUNTER_STATE_OFF; | ||
1104 | } | ||
1105 | } | ||
1106 | |||
1107 | perf_enable(); | ||
1108 | |||
1109 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
1110 | 1087 | ||
1111 | return 0; | 1088 | return 0; |
1112 | } | 1089 | } |
1113 | 1090 | ||
1114 | int perf_counter_task_enable(void) | 1091 | int perf_counter_task_disable(void) |
1115 | { | 1092 | { |
1116 | struct task_struct *curr = current; | ||
1117 | struct perf_counter_context *ctx = curr->perf_counter_ctxp; | ||
1118 | struct perf_counter *counter; | 1093 | struct perf_counter *counter; |
1119 | unsigned long flags; | ||
1120 | int cpu; | ||
1121 | |||
1122 | if (!ctx || !ctx->nr_counters) | ||
1123 | return 0; | ||
1124 | |||
1125 | local_irq_save(flags); | ||
1126 | cpu = smp_processor_id(); | ||
1127 | |||
1128 | __perf_counter_task_sched_out(ctx); | ||
1129 | |||
1130 | spin_lock(&ctx->lock); | ||
1131 | 1094 | ||
1132 | /* | 1095 | mutex_lock(&current->perf_counter_mutex); |
1133 | * Disable all the counters: | 1096 | list_for_each_entry(counter, &current->perf_counter_list, owner_entry) |
1134 | */ | 1097 | perf_counter_disable(counter); |
1135 | perf_disable(); | 1098 | mutex_unlock(&current->perf_counter_mutex); |
1136 | |||
1137 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1138 | if (counter->state > PERF_COUNTER_STATE_OFF) | ||
1139 | continue; | ||
1140 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
1141 | counter->tstamp_enabled = | ||
1142 | ctx->time - counter->total_time_enabled; | ||
1143 | counter->hw_event.disabled = 0; | ||
1144 | } | ||
1145 | perf_enable(); | ||
1146 | |||
1147 | spin_unlock(&ctx->lock); | ||
1148 | |||
1149 | perf_counter_task_sched_in(curr, cpu); | ||
1150 | |||
1151 | local_irq_restore(flags); | ||
1152 | 1099 | ||
1153 | return 0; | 1100 | return 0; |
1154 | } | 1101 | } |
@@ -1416,6 +1363,11 @@ static int perf_release(struct inode *inode, struct file *file) | |||
1416 | perf_counter_remove_from_context(counter); | 1363 | perf_counter_remove_from_context(counter); |
1417 | mutex_unlock(&ctx->mutex); | 1364 | mutex_unlock(&ctx->mutex); |
1418 | 1365 | ||
1366 | mutex_lock(&counter->owner->perf_counter_mutex); | ||
1367 | list_del_init(&counter->owner_entry); | ||
1368 | mutex_unlock(&counter->owner->perf_counter_mutex); | ||
1369 | put_task_struct(counter->owner); | ||
1370 | |||
1419 | free_counter(counter); | 1371 | free_counter(counter); |
1420 | put_context(ctx); | 1372 | put_context(ctx); |
1421 | 1373 | ||
@@ -3272,6 +3224,12 @@ SYSCALL_DEFINE5(perf_counter_open, | |||
3272 | perf_install_in_context(ctx, counter, cpu); | 3224 | perf_install_in_context(ctx, counter, cpu); |
3273 | mutex_unlock(&ctx->mutex); | 3225 | mutex_unlock(&ctx->mutex); |
3274 | 3226 | ||
3227 | counter->owner = current; | ||
3228 | get_task_struct(current); | ||
3229 | mutex_lock(&current->perf_counter_mutex); | ||
3230 | list_add_tail(&counter->owner_entry, &current->perf_counter_list); | ||
3231 | mutex_unlock(&current->perf_counter_mutex); | ||
3232 | |||
3275 | fput_light(counter_file, fput_needed2); | 3233 | fput_light(counter_file, fput_needed2); |
3276 | 3234 | ||
3277 | out_fput: | 3235 | out_fput: |
@@ -3488,6 +3446,9 @@ void perf_counter_init_task(struct task_struct *child) | |||
3488 | 3446 | ||
3489 | child->perf_counter_ctxp = NULL; | 3447 | child->perf_counter_ctxp = NULL; |
3490 | 3448 | ||
3449 | mutex_init(&child->perf_counter_mutex); | ||
3450 | INIT_LIST_HEAD(&child->perf_counter_list); | ||
3451 | |||
3491 | /* | 3452 | /* |
3492 | * This is executed from the parent task context, so inherit | 3453 | * This is executed from the parent task context, so inherit |
3493 | * counters that have been marked for cloning. | 3454 | * counters that have been marked for cloning. |