author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-19 15:26:12 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-06 03:30:15 -0400
commit    4a0deca657f3dbb8a707b5dc8f173beec01e7ed2 (patch)
tree      b52fb2d5fa3391f5eac37ca599413b4e8d187c0c
parent    01ef09d9ffb5ce9f8d62d1e5206da3d5ca612acc (diff)
perf_counter: generic context switch event
Impact: cleanup

Use the generic software events for context switches.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.283522645@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/sched.h |  1 -
 kernel/perf_counter.c | 60 ++++---------------------------------------
 kernel/sched.c        |  6 ------
 3 files changed, 4 insertions(+), 63 deletions(-)
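In effect the patch converts context-switch counting from pull to push: instead of a dedicated hw_perf_counter_ops that re-derives the count from task statistics on every read, the scheduler's sched-out path now fires one generic software event per switch (the perf_swcounter_event() call added below) and the generic counter code simply accumulates it. A minimal stand-alone model of the push style follows, in plain C with C11 atomics standing in for the kernel's atomic64_t; apart from the idea, everything in it is illustrative, not kernel code:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* One generic software counter: it only accumulates; it knows
 * nothing about where the events come from. */
static atomic_uint_fast64_t ctx_switch_count;

/* Push model: the event source reports each occurrence itself,
 * the way perf_counter_task_sched_out() now calls
 * perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, ...). */
static void sw_event(atomic_uint_fast64_t *counter, uint64_t nr)
{
        atomic_fetch_add(counter, nr);
}

static void context_switch(void)
{
        /* ... switch tasks ..., then report the event once */
        sw_event(&ctx_switch_count, 1);
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                context_switch();
        printf("switches: %" PRIu64 "\n",
               (uint64_t)atomic_load(&ctx_switch_count));
        return 0;
}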
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75b2fc5306d8..7ed41f7c5ace 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -138,7 +138,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern u64 cpu_nr_switches(int cpu);
 extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c4e9eb37ab0..99d5930f0a52 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -710,10 +710,13 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 {
         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
         struct perf_counter_context *ctx = &task->perf_counter_ctx;
+        struct pt_regs *regs;
 
         if (likely(!cpuctx->task_ctx))
                 return;
 
+        regs = task_pt_regs(task);
+        perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
         __perf_counter_sched_out(ctx, cpuctx);
 
         cpuctx->task_ctx = NULL;
@@ -1668,58 +1671,6 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
 };
 
 /*
- * Software counter: context switches
- */
-
-static u64 get_context_switches(struct perf_counter *counter)
-{
-        struct task_struct *curr = counter->ctx->task;
-
-        if (curr)
-                return curr->nvcsw + curr->nivcsw;
-        return cpu_nr_switches(smp_processor_id());
-}
-
-static void context_switches_perf_counter_update(struct perf_counter *counter)
-{
-        u64 prev, now;
-        s64 delta;
-
-        prev = atomic64_read(&counter->hw.prev_count);
-        now = get_context_switches(counter);
-
-        atomic64_set(&counter->hw.prev_count, now);
-
-        delta = now - prev;
-
-        atomic64_add(delta, &counter->count);
-}
-
-static void context_switches_perf_counter_read(struct perf_counter *counter)
-{
-        context_switches_perf_counter_update(counter);
-}
-
-static int context_switches_perf_counter_enable(struct perf_counter *counter)
-{
-        if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-                atomic64_set(&counter->hw.prev_count,
-                             get_context_switches(counter));
-        return 0;
-}
-
-static void context_switches_perf_counter_disable(struct perf_counter *counter)
-{
-        context_switches_perf_counter_update(counter);
-}
-
-static const struct hw_perf_counter_ops perf_ops_context_switches = {
-        .enable         = context_switches_perf_counter_enable,
-        .disable        = context_switches_perf_counter_disable,
-        .read           = context_switches_perf_counter_read,
-};
-
-/*
  * Software counter: cpu migrations
  */
 
@@ -1808,11 +1759,8 @@ sw_perf_counter_init(struct perf_counter *counter)
         case PERF_COUNT_PAGE_FAULTS:
         case PERF_COUNT_PAGE_FAULTS_MIN:
         case PERF_COUNT_PAGE_FAULTS_MAJ:
-                hw_ops = &perf_ops_generic;
-                break;
         case PERF_COUNT_CONTEXT_SWITCHES:
-                if (!counter->hw_event.exclude_kernel)
-                        hw_ops = &perf_ops_context_switches;
+                hw_ops = &perf_ops_generic;
                 break;
         case PERF_COUNT_CPU_MIGRATIONS:
                 if (!counter->hw_event.exclude_kernel)
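For contrast, the functions deleted above implemented the pull-style idiom for sampling a free-running monotonic count: remember prev_count, and on each read or disable fold (now - prev) into the counter. A minimal stand-alone model of that idiom, again with C11 atomics standing in for atomic64_t (the atomic_exchange makes the snapshot-and-update a single step, which the removed kernel code approximated with separate atomic64_read/atomic64_set calls):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t prev_count;  /* last snapshot */
static atomic_uint_fast64_t count;       /* accumulated delta */

/* Stand-in for curr->nvcsw + curr->nivcsw: a free-running,
 * monotonically increasing source we can only read. */
static uint64_t switches_now(void)
{
        static uint64_t fake;
        return fake += 3;        /* pretend 3 switches per call */
}

static void counter_update(void)
{
        uint64_t now  = switches_now();
        uint64_t prev = atomic_exchange(&prev_count, now);

        atomic_fetch_add(&count, now - prev);
}

int main(void)
{
        /* enable: snapshot a baseline so earlier switches don't count */
        atomic_store(&prev_count, switches_now());

        counter_update();        /* read */
        counter_update();        /* read */

        printf("switches while enabled: %" PRIu64 "\n",
               (uint64_t)atomic_load(&count));
        return 0;
}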
diff --git a/kernel/sched.c b/kernel/sched.c
index 39e708602169..f76e3c0188a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2900,14 +2900,8 @@ unsigned long nr_active(void)
 
 /*
  * Externally visible per-cpu scheduler statistics:
- * cpu_nr_switches(cpu) - number of context switches on that cpu
  * cpu_nr_migrations(cpu) - number of migrations into that cpu
  */
-u64 cpu_nr_switches(int cpu)
-{
-        return cpu_rq(cpu)->nr_switches;
-}
-
 u64 cpu_nr_migrations(int cpu)
 {
         return cpu_rq(cpu)->nr_migrations_in;
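With the bespoke counter gone, get_context_switches() was the last caller of cpu_nr_switches(), so the helper and its sched.h declaration go away too; the underlying per-runqueue nr_switches field itself is untouched by this patch.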