author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-19 15:26:12 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-06 03:30:15 -0400
commit     4a0deca657f3dbb8a707b5dc8f173beec01e7ed2 (patch)
tree       b52fb2d5fa3391f5eac37ca599413b4e8d187c0c /kernel/perf_counter.c
parent     01ef09d9ffb5ce9f8d62d1e5206da3d5ca612acc (diff)
perf_counter: generic context switch event
Impact: cleanup

Use the generic software events for context switches.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.283522645@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
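For orientation, the first hunk below leaves perf_counter_task_sched_out() looking roughly as follows. This is a sketch reconstructed from that hunk alone, not the full upstream function, so surrounding code and any later changes are omitted:

void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	/* Record the switch through the generic software counter path. */
	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);

	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

Once the event is emitted here, the dedicated perf_ops_context_switches counter and its helpers become redundant, which is why the second hunk removes them and the third routes PERF_COUNT_CONTEXT_SWITCHES to perf_ops_generic.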
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 60
 1 file changed, 4 insertions(+), 56 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c4e9eb37ab0..99d5930f0a52 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -710,10 +710,13 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &task->perf_counter_ctx;
+	struct pt_regs *regs;
 
 	if (likely(!cpuctx->task_ctx))
 		return;
 
+	regs = task_pt_regs(task);
+	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
 	__perf_counter_sched_out(ctx, cpuctx);
 
 	cpuctx->task_ctx = NULL;
@@ -1668,58 +1671,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
 };
 
 /*
- * Software counter: context switches
- */
-
-static u64 get_context_switches(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->nvcsw + curr->nivcsw;
-	return cpu_nr_switches(smp_processor_id());
-}
-
-static void context_switches_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_context_switches(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void context_switches_perf_counter_read(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static int context_switches_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_context_switches(counter));
-	return 0;
-}
-
-static void context_switches_perf_counter_disable(struct perf_counter *counter)
-{
-	context_switches_perf_counter_update(counter);
-}
-
-static const struct hw_perf_counter_ops perf_ops_context_switches = {
-	.enable		= context_switches_perf_counter_enable,
-	.disable	= context_switches_perf_counter_disable,
-	.read		= context_switches_perf_counter_read,
-};
-
-/*
  * Software counter: cpu migrations
  */
 
@@ -1808,11 +1759,8 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS:
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
-		hw_ops = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		if (!counter->hw_event.exclude_kernel)
-			hw_ops = &perf_ops_context_switches;
+		hw_ops = &perf_ops_generic;
 		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
 		if (!counter->hw_event.exclude_kernel)