author		Ingo Molnar <mingo@elte.hu>	2008-12-11 09:17:03 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-11 09:45:56 -0500
commit		6a930700c8b655a9e25e42fc4adc0b225ebbcefc (patch)
tree		5b6aa07d2734f43d0c5bc7d97aebe256e6d86fa8
parent		1d1c7ddbfab358445a542715551301b7fc363e28 (diff)
perf counters: clean up state transitions
Impact: cleanup

Introduce a proper enum for the 3 states of a counter:

	PERF_COUNTER_STATE_OFF		= -1
	PERF_COUNTER_STATE_INACTIVE	=  0
	PERF_COUNTER_STATE_ACTIVE	=  1

and rename counter->active to counter->state and propagate the
changes everywhere.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
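As a quick illustration of the cleanup (not part of the patch), here is a minimal, self-contained C sketch of the before/after pattern. The enum names and values are taken from the diff below; struct counter, state_name() and main() are purely hypothetical stand-ins for the kernel structures.

#include <stdio.h>

/* The three states the patch introduces (names/values from the diff below). */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

/* Hypothetical stand-in for struct perf_counter; not the kernel layout. */
struct counter {
	enum perf_counter_active_state state;
};

static const char *state_name(enum perf_counter_active_state s)
{
	switch (s) {
	case PERF_COUNTER_STATE_OFF:		return "off";
	case PERF_COUNTER_STATE_INACTIVE:	return "inactive";
	case PERF_COUNTER_STATE_ACTIVE:		return "active";
	}
	return "unknown";
}

int main(void)
{
	struct counter c = { .state = PERF_COUNTER_STATE_INACTIVE };

	/* Old style: if (counter->active == -1) ... the magic -1/0/1 values
	 * are now spelled out, so the intent of each check is obvious. */
	if (c.state == PERF_COUNTER_STATE_OFF)
		return 0;

	/* Old style: counter->active = 1; */
	c.state = PERF_COUNTER_STATE_ACTIVE;

	printf("counter is %s\n", state_name(c.state));
	return 0;
}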
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	2
-rw-r--r--	include/linux/perf_counter.h	11
-rw-r--r--	kernel/perf_counter.c	29
3 files changed, 25 insertions, 17 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3e1dbebe22b9..4854cca7fffd 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 	 * Then store sibling timestamps (if any):
 	 */
 	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-		if (!counter->active) {
+		if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 			/*
 			 * When counter was not in the overflow mask, we have to
 			 * read it from hardware. We read it as well, when it
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 97d86c293ee8..8cb095fa442c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -128,6 +128,15 @@ struct hw_perf_counter_ops {
 };
 
 /**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+	PERF_COUNTER_STATE_OFF		= -1,
+	PERF_COUNTER_STATE_INACTIVE	=  0,
+	PERF_COUNTER_STATE_ACTIVE	=  1,
+};
+
+/**
  * struct perf_counter - performance counter kernel representation:
  */
 struct perf_counter {
@@ -136,7 +145,7 @@ struct perf_counter {
 	struct perf_counter		*group_leader;
 	const struct hw_perf_counter_ops *hw_ops;
 
-	int				active;
+	enum perf_counter_active_state	state;
 #if BITS_PER_LONG == 64
 	atomic64_t			count;
 #else
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4e679b91d8bb..559130b8774d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info)
 
 	spin_lock(&ctx->lock);
 
-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		counter->hw_ops->hw_perf_counter_disable(counter);
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		ctx->nr_active--;
 		cpuctx->active_oncpu--;
 		counter->task = NULL;
@@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info)
 
 	if (cpuctx->active_oncpu < perf_max_counters) {
 		counter->hw_ops->hw_perf_counter_enable(counter);
-		counter->active = 1;
+		counter->state = PERF_COUNTER_STATE_ACTIVE;
 		counter->oncpu = cpu;
 		ctx->nr_active++;
 		cpuctx->active_oncpu++;
@@ -328,7 +328,6 @@ retry:
 
 	spin_lock_irq(&ctx->lock);
 	/*
-	 * If the context is active and the counter has not been added
 	 * we need to retry the smp call.
 	 */
 	if (ctx->nr_active && list_empty(&counter->list_entry)) {
@@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_counter_context *ctx)
 {
-	if (!counter->active)
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 		return;
 
 	counter->hw_ops->hw_perf_counter_disable(counter);
-	counter->active = 0;
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->oncpu = -1;
 
 	cpuctx->active_oncpu--;
 	ctx->nr_active--;
@@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
-	if (counter->active == -1)
+	if (counter->state == PERF_COUNTER_STATE_OFF)
 		return;
 
 	counter->hw_ops->hw_perf_counter_enable(counter);
-	counter->active = 1;
+	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
 
 	cpuctx->active_oncpu++;
@@ -506,8 +505,8 @@ int perf_counter_task_disable(void)
 	perf_flags = hw_perf_save_disable();
 
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		WARN_ON_ONCE(counter->active == 1);
-		counter->active = -1;
+		WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 	hw_perf_restore(perf_flags);
 
@@ -540,9 +539,9 @@ int perf_counter_task_enable(void)
 	perf_flags = hw_perf_save_disable();
 
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->active != -1)
+		if (counter->state != PERF_COUNTER_STATE_OFF)
 			continue;
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 	}
 	hw_perf_restore(perf_flags);
 
@@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
 	 * If counter is enabled and currently active on a CPU, update the
 	 * value in the counter structure:
 	 */
-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		smp_call_function_single(counter->oncpu,
 					 __hw_perf_counter_read, counter, 1);
 	}
@@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
 
 retry:
 	spin_lock_irq(&ctx->lock);
-	if (!counter->active) {
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 		counter->irqdata = counter->usrdata;
 		counter->usrdata = oldirqdata;
 		spin_unlock_irq(&ctx->lock);