author     Ingo Molnar <mingo@elte.hu>   2008-12-21 07:50:42 -0500
committer  Ingo Molnar <mingo@elte.hu>   2008-12-23 06:45:19 -0500
commit     95cdd2e7851cce79ab839cb0b3cbe68d7911d0f1
tree       fad7723f344027dd64a1ca44d0117c3da61b75ca
parent     78b6084c907cea15bb40a564b974e072f5163781
perfcounters: enable lowlevel pmc code to schedule counters
Allow the low-level ->enable() op to return an error if a counter cannot be added. This can be used to handle counter constraints.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
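For reference, under the new signature a hardware backend's ->enable() reports a scheduling failure through its return value instead of silently dropping the counter, mirroring the pmc_generic_enable() change in the x86 hunk below; the callback would be wired up as the .enable member of struct hw_perf_counter_ops. The following is only an illustrative sketch, not part of this patch, and every hw_example_* name is invented:

/*
 * Hypothetical backend ->enable() under the new int-returning signature
 * (all hw_example_* identifiers are invented for illustration).
 * Returning -EAGAIN tells the core code that no suitable counter slot is
 * free, so the scheduling attempt can be backed out instead of silently
 * failing.
 */
static int hw_example_enable(struct perf_counter *counter)
{
        int idx = hw_example_find_free_slot(counter);   /* invented helper */

        if (idx < 0)
                return -EAGAIN;         /* slot / constraint conflict */

        hw_example_program_slot(counter, idx);          /* invented helper */
        return 0;                       /* counter is now live on the PMU */
}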
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c |  6
-rw-r--r--  include/linux/perf_counter.h       |  2
-rw-r--r--  kernel/perf_counter.c              | 62
3 files changed, 51 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index b67557121425..74090a393a7c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -244,7 +244,7 @@ static int fixed_mode_idx(struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static void pmc_generic_enable(struct perf_counter *counter)
+static int pmc_generic_enable(struct perf_counter *counter)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
         struct hw_perf_counter *hwc = &counter->hw;
@@ -253,6 +253,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
         /* Try to get the previous counter again */
         if (test_and_set_bit(idx, cpuc->used)) {
                 idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+                if (idx == nr_counters_generic)
+                        return -EAGAIN;
                 set_bit(idx, cpuc->used);
                 hwc->idx = idx;
         }
@@ -265,6 +267,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 
         __hw_perf_counter_set_period(counter, hwc, idx);
         __pmc_generic_enable(counter, hwc, idx);
+
+        return 0;
 }
 
 void perf_counter_print_debug(void)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 48f76d2e54c2..53af11d3767b 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -128,7 +128,7 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-        void (*enable) (struct perf_counter *counter);
+        int (*enable) (struct perf_counter *counter);
         void (*disable) (struct perf_counter *counter);
         void (*read) (struct perf_counter *counter);
 };
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f1110ac1267b..2e73929a6959 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -355,21 +355,25 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
         cpuctx->task_ctx = NULL;
 }
 
-static void
+static int
 counter_sched_in(struct perf_counter *counter,
                  struct perf_cpu_context *cpuctx,
                  struct perf_counter_context *ctx,
                  int cpu)
 {
         if (counter->state == PERF_COUNTER_STATE_OFF)
-                return;
+                return 0;
+
+        if (counter->hw_ops->enable(counter))
+                return -EAGAIN;
 
-        counter->hw_ops->enable(counter);
         counter->state = PERF_COUNTER_STATE_ACTIVE;
         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
 
         cpuctx->active_oncpu++;
         ctx->nr_active++;
+
+        return 0;
 }
 
 static int
@@ -378,20 +382,38 @@ group_sched_in(struct perf_counter *group_counter,
                struct perf_counter_context *ctx,
                int cpu)
 {
-        struct perf_counter *counter;
-        int was_group = 0;
+        struct perf_counter *counter, *partial_group;
+        int ret = 0;
 
-        counter_sched_in(group_counter, cpuctx, ctx, cpu);
+        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+                return -EAGAIN;
 
         /*
          * Schedule in siblings as one group (if any):
          */
         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-                counter_sched_in(counter, cpuctx, ctx, cpu);
-                was_group = 1;
+                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+                        partial_group = counter;
+                        goto group_error;
+                }
+                ret = -EAGAIN;
         }
 
-        return was_group;
+        return ret;
+
+group_error:
+        /*
+         * Groups can be scheduled in as one unit only, so undo any
+         * partial group before returning:
+         */
+        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+                if (counter == partial_group)
+                        break;
+                counter_sched_out(counter, cpuctx, ctx);
+        }
+        counter_sched_out(group_counter, cpuctx, ctx);
+
+        return -EAGAIN;
 }
 
 /*
@@ -416,9 +438,6 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 
         spin_lock(&ctx->lock);
         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-                if (ctx->nr_active == cpuctx->max_pertask)
-                        break;
-
                 /*
                  * Listen to the 'cpu' scheduling filter constraint
                  * of counters:
@@ -856,8 +875,9 @@ static const struct file_operations perf_fops = {
         .poll           = perf_poll,
 };
 
-static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+        return 0;
 }
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
@@ -913,11 +933,13 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
         task_clock_perf_counter_update(counter, now);
 }
 
-static void task_clock_perf_counter_enable(struct perf_counter *counter)
+static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
         u64 now = task_clock_perf_counter_val(counter, 0);
 
         atomic64_set(&counter->hw.prev_count, now);
+
+        return 0;
 }
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
@@ -960,12 +982,14 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)
         page_faults_perf_counter_update(counter);
 }
 
-static void page_faults_perf_counter_enable(struct perf_counter *counter)
+static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
         /*
          * page-faults is a per-task value already,
          * so we dont have to clear it on switch-in.
          */
+
+        return 0;
 }
 
 static void page_faults_perf_counter_disable(struct perf_counter *counter)
@@ -1006,12 +1030,14 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)
         context_switches_perf_counter_update(counter);
 }
 
-static void context_switches_perf_counter_enable(struct perf_counter *counter)
+static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
         /*
          * ->nvcsw + curr->nivcsw is a per-task value already,
          * so we dont have to clear it on switch-in.
          */
+
+        return 0;
 }
 
 static void context_switches_perf_counter_disable(struct perf_counter *counter)
@@ -1050,12 +1076,14 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
         cpu_migrations_perf_counter_update(counter);
 }
 
-static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
         /*
          * se.nr_migrations is a per-task value already,
          * so we dont have to clear it on switch-in.
          */
+
+        return 0;
 }
 
 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
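The group_sched_in() hunk above gives counter groups all-or-nothing semantics: if any sibling's ->enable() fails, every counter scheduled in so far, including the group leader, is torn down again via counter_sched_out() and -EAGAIN is returned. The standalone sketch below only illustrates that rollback pattern; all demo_* names are invented, it is not kernel code, and it deliberately ignores the patch's exact return-value convention for fully scheduled groups.

/* Standalone illustration of all-or-nothing group scheduling with rollback. */
#include <errno.h>
#include <stdio.h>

struct demo_counter {
        const char *name;
        int can_schedule;       /* 0 => the enable step would fail */
        int active;
};

static int demo_enable(struct demo_counter *c)
{
        if (!c->can_schedule)
                return -EAGAIN;
        c->active = 1;
        return 0;
}

static void demo_disable(struct demo_counter *c)
{
        c->active = 0;
}

static int demo_group_sched_in(struct demo_counter *leader,
                               struct demo_counter *siblings, int n)
{
        int i, fail = -1;

        if (demo_enable(leader))
                return -EAGAIN;

        for (i = 0; i < n; i++) {
                if (demo_enable(&siblings[i])) {
                        fail = i;
                        break;
                }
        }
        if (fail < 0)
                return 0;

        /* Undo the partial group, mirroring the group_error: path above. */
        for (i = 0; i < fail; i++)
                demo_disable(&siblings[i]);
        demo_disable(leader);

        return -EAGAIN;
}

int main(void)
{
        struct demo_counter leader = { "cycles", 1, 0 };
        struct demo_counter sibs[] = {
                { "instructions", 1, 0 },
                { "cache-misses", 0, 0 },       /* forces the rollback */
        };
        int ret = demo_group_sched_in(&leader, sibs, 2);

        printf("group_sched_in -> %d, leader active=%d\n", ret, leader.active);
        return 0;
}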