author      Paul Mackerras <paulus@samba.org>    2009-02-09 06:42:47 -0500
committer   Ingo Molnar <mingo@elte.hu>          2009-02-09 06:47:16 -0500
commit      23a185ca8abbeef64b6ffc33059b1d630e43ec10
tree        c5eb9454ff969377adb40532119240f6fc893fcb /kernel/sched.c
parent      82aa9a1829199233f9bdaf26e2ee271114f4701e
perf_counters: make software counters work as per-cpu counters
Impact: kernel crash fix
Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software
counter as a per-cpu counter would reliably crash the system, because
it calls __task_delta_exec with a null pointer. The page fault,
context switch and cpu migration counters also won't function
correctly as per-cpu counters since they reference the current task.
This fixes the problem by redirecting the task_clock counter to the
cpu_clock counter when used as a per-cpu counter, and by implementing
per-cpu page fault, context switch and cpu migration counters.
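In the perf_counter code (kernel/perf_counter.c, which is outside the diffstat shown below), that redirection boils down to choosing the cpu_clock hw_ops when the counter has no owning task. A minimal sketch of the selection, assuming hw_ops structures named perf_ops_task_clock and perf_ops_cpu_clock as in the perf_counter code of this era:

static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	const struct hw_perf_counter_ops *hw_ops = NULL;

	switch (counter->hw_event.type) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;
		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * A per-cpu counter has no task (counter->ctx->task is
		 * NULL), so fall back to the cpu_clock ops rather than
		 * calling __task_delta_exec with a NULL task.
		 */
		if (counter->ctx->task)
			hw_ops = &perf_ops_task_clock;
		else
			hw_ops = &perf_ops_cpu_clock;
		break;
	default:
		break;
	}
	return hw_ops;
}

This is also why counter->ctx needs to be initialized before sw_perf_counter_init runs (see the first item in the list below).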
Along the way, this:
- Initializes counter->ctx earlier, in perf_counter_alloc, so that
sw_perf_counter_init can use it
- Adds code to kernel/sched.c to count task migrations into each
cpu, in rq->nr_migrations_in
- Exports the per-cpu context switch and task migration counts
via new functions added to kernel/sched.c (see the sketch after this list)
- Makes sure that if sw_perf_counter_init fails, we don't try to
initialize the counter as a hardware counter. Since the user has
passed a negative, non-raw event type, they clearly don't intend
for it to be interpreted as a hardware event.
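The exported counts are consumed on the software counter read side roughly as follows; this is an illustrative helper (its name and placement are assumptions), with only cpu_nr_switches() and cpu_nr_migrations() coming from this patch:

static u64 get_cpu_migrations(struct perf_counter *counter)
{
	struct task_struct *curr = counter->ctx->task;

	/*
	 * Per-task counter: use the task's own migration count.
	 * Per-cpu counter (no task): use the per-cpu total that
	 * kernel/sched.c now exports.
	 */
	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
}

A per-cpu context switch counter would read cpu_nr_switches(smp_processor_id()) in the same way.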
Reported-by: "Zhang Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    17
1 files changed, 17 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8db1a4cf2082..173768f142ad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -558,6 +558,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -1908,6 +1909,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
@@ -2811,6 +2813,21 @@ unsigned long nr_active(void)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_switches(cpu) - number of context switches on that cpu
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_switches(int cpu)
+{
+	return cpu_rq(cpu)->nr_switches;
+}
+
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */