diff options
author:    Ingo Molnar <mingo@elte.hu>  2009-05-04 13:13:30 -0400
committer: Ingo Molnar <mingo@elte.hu>  2009-05-04 13:30:32 -0400
commit:    0d905bca23aca5c86a10ee101bcd3b1abbd40b25 (patch)
tree:      5a4e6b956d1923ac1d28ae8b8f3034c1c90df5a5 /kernel
parent:    ba77813a2a22d631fe5bc0bf1ec0d11350544b70 (diff)
perf_counter: initialize the per-cpu context earlier
percpu scheduling for perfcounters wants to take the context lock,
but that lock first needs to be initialized. Currently it is an
early_initcall() - but that is too late, the task tick runs much
sooner than that.
Call it explicitly from the scheduler init sequence instead.
[ Impact: fix access-before-init crash ]
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 5 +----
 kernel/sched.c        | 5 ++++-
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b9679c36bcc2..fcdafa234a5d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3265,15 +3265,12 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 	.notifier_call		= perf_cpu_notify,
 };
 
-static int __init perf_counter_init(void)
+void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
-
-	return 0;
 }
-early_initcall(perf_counter_init);
 
 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index 2f600e30dcf0..a728976a3a6c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -8996,7 +8997,7 @@ void __init sched_init(void)
 	 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 	 * then A0's share of the cpu resource is:
 	 *
-	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+	 *  A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 	 *
 	 * We achieve this by letting init_task_group's tasks sit
 	 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9097,6 +9098,8 @@ void __init sched_init(void)
 	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 #endif /* SMP */
 
+	perf_counter_init();
+
 	scheduler_running = 1;
 }
 