 include/linux/perf_counter.h |  1 +
 kernel/perf_counter.c        | 21 +++++++++++++++------
 2 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index c83f51d6e359..32cd1acb7386 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -173,6 +173,7 @@ struct perf_counter {
 	const struct hw_perf_counter_ops *hw_ops;
 
 	enum perf_counter_active_state	state;
+	enum perf_counter_active_state	prev_state;
 	atomic64_t			count;
 
 	struct perf_counter_hw_event	hw_event;
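
The comparisons this patch relies on (state >= PERF_COUNTER_STATE_INACTIVE, prev_state <= PERF_COUNTER_STATE_OFF) assume a total ordering of the counter states. A sketch of that ordering, as defined elsewhere in perf_counter.h in this era (the exact values are an assumption, not shown in this diff; only the relative order matters here):

	enum perf_counter_active_state {
		PERF_COUNTER_STATE_OFF		= -1,	/* disabled, not counting */
		PERF_COUNTER_STATE_INACTIVE	=  0,	/* enabled, not scheduled in */
		PERF_COUNTER_STATE_ACTIVE	=  1,	/* scheduled in and counting */
	};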
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index fcefb0a726f3..ad62965828d3 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -444,6 +444,7 @@ static void __perf_install_in_context(void *info)
 
 	list_add_counter(counter, ctx);
 	ctx->nr_counters++;
+	counter->prev_state = PERF_COUNTER_STATE_OFF;
 
 	/*
 	 * Don't put the counter on if it is disabled or if
@@ -562,6 +563,7 @@ static void __perf_counter_enable(void *info)
 	curr_rq_lock_irq_save(&flags);
 	spin_lock(&ctx->lock);
 
+	counter->prev_state = counter->state;
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
@@ -733,6 +735,7 @@ group_sched_in(struct perf_counter *group_counter,
 	if (ret)
 		return ret < 0 ? ret : 0;
 
+	group_counter->prev_state = group_counter->state;
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
 
@@ -740,6 +743,7 @@ group_sched_in(struct perf_counter *group_counter,
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		counter->prev_state = counter->state;
 		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
 			partial_group = counter;
 			goto group_error;
@@ -1398,9 +1402,9 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
 
 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
-	u64 now = task_clock_perf_counter_val(counter, 0);
-
-	atomic64_set(&counter->hw.prev_count, now);
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     task_clock_perf_counter_val(counter, 0));
 
 	return 0;
 }
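
A software counter's enable hook runs every time the counter is scheduled in, not only when it is switched from OFF to enabled. The prev_state check above keeps the existing prev_count baseline for a counter that was merely INACTIVE, and re-baselines only when the counter is coming out of OFF, so events that occurred while it was disabled are discarded. A minimal userspace model of that behavior (all names here are illustrative, not kernel API):

	#include <stdio.h>

	enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

	struct counter {
		enum state	state, prev_state;
		long long	prev_count;	/* baseline for delta computation */
		long long	count;		/* accumulated events */
	};

	/* Models the patched enable hook: re-baseline only when the
	 * counter is coming out of OFF. */
	static void enable_hook(struct counter *c, long long value_now)
	{
		if (c->prev_state <= STATE_OFF)
			c->prev_count = value_now;
	}

	static void sched_in(struct counter *c, long long value_now)
	{
		c->prev_state = c->state;
		c->state = STATE_ACTIVE;
		enable_hook(c, value_now);
	}

	/* Models the read side: fold the delta since the baseline
	 * into count and move the baseline forward. */
	static void update(struct counter *c, long long value_now)
	{
		c->count += value_now - c->prev_count;
		c->prev_count = value_now;
	}

	int main(void)
	{
		struct counter c = { .state = STATE_OFF, .prev_state = STATE_OFF };

		/* 120 events happened while OFF: enable re-baselines,
		 * so they are not counted. */
		sched_in(&c, 120);
		update(&c, 150);	/* counts 150 - 120 = 30 */

		/* Sched-out/sched-in while still enabled: the baseline
		 * survives because prev_state is INACTIVE, not OFF. */
		c.state = STATE_INACTIVE;
		sched_in(&c, 150);
		update(&c, 160);	/* counts another 10 */

		printf("count = %lld\n", c.count);	/* prints 40 */
		return 0;
	}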
@@ -1455,7 +1459,8 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)
 
 static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
 	return 0;
 }
 
@@ -1501,7 +1506,9 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)
 
 static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_context_switches(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     get_context_switches(counter));
 	return 0;
 }
 
@@ -1547,7 +1554,9 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
 
 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_cpu_migrations(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     get_cpu_migrations(counter));
 	return 0;
 }
 
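
The same prev_state guard appears verbatim in all four software-counter enable hooks. A hypothetical consolidation, not part of this patch, would factor it into one helper (sw_perf_counter_rebase is an invented name):

	/* Hypothetical helper, not in the patch: re-baseline prev_count
	 * only when the counter is coming out of the OFF state. */
	static void sw_perf_counter_rebase(struct perf_counter *counter, u64 now)
	{
		if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
			atomic64_set(&counter->hw.prev_count, now);
	}

	static int page_faults_perf_counter_enable(struct perf_counter *counter)
	{
		sw_perf_counter_rebase(counter, get_page_faults(counter));
		return 0;
	}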