author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-23 12:29:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-24 02:24:08 -0400
commit		082ff5a2767a0679ee543f14883adbafb631ffbe (patch)
tree		5ddf792ed3f80b17bc427edea1dc1d4b4303b4f6
parent		aa9c67f53d1969cf1db4c9c2db3a78c4ceb96469 (diff)
perf_counter: Change prctl() behaviour

Instead of en/dis-abling all counters acting on a particular task,
en/dis-able all counters we created.

[ v2: fix crash on first counter enable ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.916937244@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/init_task.h	10
-rw-r--r--	include/linux/perf_counter.h	3
-rw-r--r--	include/linux/sched.h	2
-rw-r--r--	kernel/perf_counter.c	87
4 files changed, 39 insertions(+), 63 deletions(-)
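For illustration only (not part of the commit): a minimal userspace sketch of what the changed prctl() behaviour means for a caller. The PR_TASK_PERF_COUNTERS_DISABLE/ENABLE command names and values are assumptions based on the perf_counter ABI of this period; after this patch the pair toggles only the counters the calling task created itself, rather than every counter attached to the task.

	#include <stdio.h>
	#include <sys/prctl.h>

	/* Assumed prctl commands from the 2009 perf_counter ABI. */
	#ifndef PR_TASK_PERF_COUNTERS_DISABLE
	# define PR_TASK_PERF_COUNTERS_DISABLE	31
	#endif
	#ifndef PR_TASK_PERF_COUNTERS_ENABLE
	# define PR_TASK_PERF_COUNTERS_ENABLE	32
	#endif

	int main(void)
	{
		/* ... counters created here via the perf_counter_open syscall ... */

		/* Stop only the counters this task created (new behaviour). */
		if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
			perror("prctl(PR_TASK_PERF_COUNTERS_DISABLE)");

		/* ... region we do not want measured ... */

		/* Re-enable the same set of counters. */
		if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
			perror("prctl(PR_TASK_PERF_COUNTERS_ENABLE)");

		return 0;
	}

Counters attached to the task by another process (e.g. an external profiler) are no longer affected by this prctl pair; that is the point of the per-task owner list introduced below.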
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d87247d2641f..353c0ac7723a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -108,6 +108,15 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+#ifdef CONFIG_PERF_COUNTERS
+# define INIT_PERF_COUNTERS(tsk)					\
+	.perf_counter_mutex =						\
+		 __MUTEX_INITIALIZER(tsk.perf_counter_mutex),		\
+	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#else
+# define INIT_PERF_COUNTERS(tsk)
+#endif
+
 /*
  * INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -171,6 +180,7 @@ extern struct cred init_cred;
 	},							\
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),		\
 	INIT_IDS						\
+	INIT_PERF_COUNTERS(tsk)					\
 	INIT_TRACE_IRQFLAGS					\
 	INIT_LOCKDEP						\
 	INIT_FTRACE_GRAPH					\
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4ab8050eb9e8..4159ee5940f8 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -469,6 +469,9 @@ struct perf_counter {
 	int				oncpu;
 	int				cpu;
 
+	struct list_head		owner_entry;
+	struct task_struct		*owner;
+
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9714d450f417..bc9326dcdde1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1389,6 +1389,8 @@ struct task_struct {
 #endif
 #ifdef CONFIG_PERF_COUNTERS
 	struct perf_counter_context *perf_counter_ctxp;
+	struct mutex perf_counter_mutex;
+	struct list_head perf_counter_list;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0e97f8961333..4c86a6369764 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1076,79 +1076,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 	__perf_counter_sched_in(ctx, cpuctx, cpu);
 }
 
-int perf_counter_task_disable(void)
+int perf_counter_task_enable(void)
 {
-	struct task_struct *curr = current;
-	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
 	struct perf_counter *counter;
-	unsigned long flags;
-
-	if (!ctx || !ctx->nr_counters)
-		return 0;
-
-	local_irq_save(flags);
 
-	__perf_counter_task_sched_out(ctx);
-
-	spin_lock(&ctx->lock);
-
-	/*
-	 * Disable all the counters:
-	 */
-	perf_disable();
-
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->state != PERF_COUNTER_STATE_ERROR) {
-			update_group_times(counter);
-			counter->state = PERF_COUNTER_STATE_OFF;
-		}
-	}
-
-	perf_enable();
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_enable(counter);
+	mutex_unlock(&current->perf_counter_mutex);
 
 	return 0;
 }
 
-int perf_counter_task_enable(void)
+int perf_counter_task_disable(void)
 {
-	struct task_struct *curr = current;
-	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
 	struct perf_counter *counter;
-	unsigned long flags;
-	int cpu;
-
-	if (!ctx || !ctx->nr_counters)
-		return 0;
-
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-
-	__perf_counter_task_sched_out(ctx);
-
-	spin_lock(&ctx->lock);
 
-	/*
-	 * Disable all the counters:
-	 */
-	perf_disable();
-
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->state > PERF_COUNTER_STATE_OFF)
-			continue;
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
-		counter->hw_event.disabled = 0;
-	}
-	perf_enable();
-
-	spin_unlock(&ctx->lock);
-
-	perf_counter_task_sched_in(curr, cpu);
-
-	local_irq_restore(flags);
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_disable(counter);
+	mutex_unlock(&current->perf_counter_mutex);
 
 	return 0;
 }
@@ -1416,6 +1363,11 @@ static int perf_release(struct inode *inode, struct file *file)
 	perf_counter_remove_from_context(counter);
 	mutex_unlock(&ctx->mutex);
 
+	mutex_lock(&counter->owner->perf_counter_mutex);
+	list_del_init(&counter->owner_entry);
+	mutex_unlock(&counter->owner->perf_counter_mutex);
+	put_task_struct(counter->owner);
+
 	free_counter(counter);
 	put_context(ctx);
 
@@ -3272,6 +3224,12 @@ SYSCALL_DEFINE5(perf_counter_open,
 	perf_install_in_context(ctx, counter, cpu);
 	mutex_unlock(&ctx->mutex);
 
+	counter->owner = current;
+	get_task_struct(current);
+	mutex_lock(&current->perf_counter_mutex);
+	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+	mutex_unlock(&current->perf_counter_mutex);
+
 	fput_light(counter_file, fput_needed2);
 
 out_fput:
@@ -3488,6 +3446,9 @@ void perf_counter_init_task(struct task_struct *child)
 
 	child->perf_counter_ctxp = NULL;
 
+	mutex_init(&child->perf_counter_mutex);
+	INIT_LIST_HEAD(&child->perf_counter_list);
+
 	/*
 	 * This is executed from the parent task context, so inherit
 	 * counters that have been marked for cloning.