diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-03-13 07:21:33 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:29:37 -0400 |
commit | 7dd1fcc258b65da718f01e4684a7b9244501a9fb (patch) | |
tree | 4fb864f493b9f1d47bedbc3c97b9de7df572d6ec /kernel | |
parent | 15dbf27cc18559a14e99609f78678aa86b9c6ff1 (diff) |
perf_counter: provide pagefault software events
We use the generic software counter infrastructure to provide
page fault events.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 53 |
1 file changed, 3 insertions(+), 50 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index eeb1b46cf707..1773c5d7427d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -1607,57 +1607,10 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = { | |||
1607 | * Software counter: page faults | 1607 | * Software counter: page faults |
1608 | */ | 1608 | */ |
1609 | 1609 | ||
1610 | #ifdef CONFIG_VM_EVENT_COUNTERS | ||
1611 | #define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT] | ||
1612 | #else | ||
1613 | #define cpu_page_faults() 0 | ||
1614 | #endif | ||
1615 | |||
1616 | static u64 get_page_faults(struct perf_counter *counter) | ||
1617 | { | ||
1618 | struct task_struct *curr = counter->ctx->task; | ||
1619 | |||
1620 | if (curr) | ||
1621 | return curr->maj_flt + curr->min_flt; | ||
1622 | return cpu_page_faults(); | ||
1623 | } | ||
1624 | |||
1625 | static void page_faults_perf_counter_update(struct perf_counter *counter) | ||
1626 | { | ||
1627 | u64 prev, now; | ||
1628 | s64 delta; | ||
1629 | |||
1630 | prev = atomic64_read(&counter->hw.prev_count); | ||
1631 | now = get_page_faults(counter); | ||
1632 | |||
1633 | atomic64_set(&counter->hw.prev_count, now); | ||
1634 | |||
1635 | delta = now - prev; | ||
1636 | |||
1637 | atomic64_add(delta, &counter->count); | ||
1638 | } | ||
1639 | |||
1640 | static void page_faults_perf_counter_read(struct perf_counter *counter) | ||
1641 | { | ||
1642 | page_faults_perf_counter_update(counter); | ||
1643 | } | ||
1644 | |||
1645 | static int page_faults_perf_counter_enable(struct perf_counter *counter) | ||
1646 | { | ||
1647 | if (counter->prev_state <= PERF_COUNTER_STATE_OFF) | ||
1648 | atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); | ||
1649 | return 0; | ||
1650 | } | ||
1651 | |||
1652 | static void page_faults_perf_counter_disable(struct perf_counter *counter) | ||
1653 | { | ||
1654 | page_faults_perf_counter_update(counter); | ||
1655 | } | ||
1656 | |||
1657 | static const struct hw_perf_counter_ops perf_ops_page_faults = { | 1610 | static const struct hw_perf_counter_ops perf_ops_page_faults = { |
1658 | .enable = page_faults_perf_counter_enable, | 1611 | .enable = perf_swcounter_enable, |
1659 | .disable = page_faults_perf_counter_disable, | 1612 | .disable = perf_swcounter_disable, |
1660 | .read = page_faults_perf_counter_read, | 1613 | .read = perf_swcounter_read, |
1661 | }; | 1614 | }; |
1662 | 1615 | ||
1663 | /* | 1616 | /* |