author		Eric Dumazet <dada1@cosmosbay.com>	2006-03-23 06:01:07 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:17 -0500
commit		b73b459f72f746a031d1ef4cc7659b20a1f1acb9
tree		6e8d58fb0bd01e0a2c24c4debdffd9eb6719a384 /init
parent		394e3902c55e667945f6f1c2bdbc59842cce70f7
[PATCH] __GENERIC_PER_CPU changes
Now that CONFIG_DEBUG_INITDATA is in, the initial percpu data
[__per_cpu_start, __per_cpu_end] can be declared as a redzone, and invalid
accesses after boot can be detected, at least on i386.

We can let the percpu data of non-possible CPUs point to this 'redzone'
instead of NULL.

NULL was not a good choice because part of the [0..32768] memory range may
be readable, so invalid accesses could go unnoticed.

If CONFIG_DEBUG_INITDATA is not defined, each non-possible CPU points to
the initial percpu data (__per_cpu_offset[cpu] == 0), so invalid accesses
won't be detected or crash.
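For readers unfamiliar with the generic percpu layout, here is a small
stand-alone sketch of the mechanism described above. It is illustrative
only: the array sizes, poison value and helper names are invented for the
example, but the idea mirrors the kernel scheme, where each possible CPU's
entry in __per_cpu_offset[] translates an address in the initial section
into that CPU's private copy, while a CPU left at offset 0 keeps pointing
at the original (redzoned) section.

/*
 * Stand-alone illustration of the offset-based percpu scheme.
 * Names and sizes are made up; only the idea (base section plus a
 * per-CPU offset, with a poisoned "redzone" left behind for CPUs
 * that never got a copy) mirrors the kernel code.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define NR_CPUS        4
#define POSSIBLE_CPUS  2          /* CPUs 0 and 1 exist, 2 and 3 do not */
#define SECTION_SIZE   64

/* Stand-in for the [__per_cpu_start, __per_cpu_end] init section. */
static char per_cpu_start[SECTION_SIZE];

static long per_cpu_offset[NR_CPUS];   /* like __per_cpu_offset[] */

/* Like per_cpu(var, cpu): base address plus that CPU's offset. */
static char *per_cpu_ptr(char *base, int cpu)
{
	return base + per_cpu_offset[cpu];
}

int main(void)
{
	char *copies = malloc((size_t)SECTION_SIZE * POSSIBLE_CPUS);
	int cpu;

	memset(per_cpu_start, 'I', SECTION_SIZE);      /* "initial" data */

	/* Copy the section once per possible CPU and record the offset. */
	for (cpu = 0; cpu < POSSIBLE_CPUS; cpu++) {
		char *dst = copies + (size_t)cpu * SECTION_SIZE;

		memcpy(dst, per_cpu_start, SECTION_SIZE);
		per_cpu_offset[cpu] = dst - per_cpu_start;
	}

	/*
	 * Non-possible CPUs keep offset 0, i.e. they still point at the
	 * original section.  Once that section is poisoned (the
	 * CONFIG_DEBUG_INITDATA "redzone"), a stray access is visible.
	 */
	memset(per_cpu_start, 0x55, SECTION_SIZE);     /* poison / redzone */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> first byte 0x%02x\n",
		       cpu, (unsigned char)*per_cpu_ptr(per_cpu_start, cpu));

	free(copies);
	return 0;
}

Built with a plain C compiler, the possible CPUs print the copied 'I'
byte while the non-possible ones read the poison value, which is exactly
the property the redzone provides after boot.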
This patch also moves __per_cpu_offset[] into the read-mostly area to
avoid false sharing.
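False sharing here means that __per_cpu_offset[], which is written only
once at boot but read on every percpu access, could share a cache line
with a frequently written variable and bounce between CPUs with it. Below
is a minimal sketch of how a __read_mostly-style annotation is commonly
implemented with GCC; the macro and section names are illustrative, not
copied from the kernel headers.

#include <stdio.h>

/*
 * Illustrative only: place rarely written, hot-read data in its own
 * section so the linker can group it away from write-heavy variables.
 */
#define my_read_mostly __attribute__((__section__(".data.read_mostly")))

static unsigned long lookup_table[4] my_read_mostly; /* written once, read often */

int main(void)
{
	lookup_table[0] = 42;               /* one-time initialisation */
	printf("%lu\n", lookup_table[0]);   /* hot read path */
	return 0;
}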
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'init')
-rw-r--r--	init/main.c	8
1 file changed, 2 insertions, 6 deletions
diff --git a/init/main.c b/init/main.c
index 9cf6b307bfd7..2714e0e7cfec 100644
--- a/init/main.c
+++ b/init/main.c
@@ -325,7 +325,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 #else
 
 #ifdef __GENERIC_PER_CPU
-unsigned long __per_cpu_offset[NR_CPUS];
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 
 EXPORT_SYMBOL(__per_cpu_offset);
 
@@ -343,11 +343,7 @@ static void __init setup_per_cpu_areas(void)
 #endif
 	ptr = alloc_bootmem(size * nr_possible_cpus);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i)) {
-			__per_cpu_offset[i] = (char*)0 - __per_cpu_start;
-			continue;
-		}
+	for_each_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 		ptr += size;
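A note on the loop change visible in the hunk above: the for_each_cpu()
iterator of this era walks only the CPUs present in the possible-CPU mask,
so the explicit cpu_possible() test and the NULL-based offset for the
remaining CPUs can simply be dropped; their __per_cpu_offset[cpu] stays 0
and therefore points at the initial (redzone) section. A toy user-space
model of such mask-based iteration, with invented names, might look like
this:

#include <stdio.h>

#define NR_CPUS 8

/* Illustrative stand-in for the kernel's possible-CPU map. */
static const unsigned long possible_mask = 0x03;   /* CPUs 0 and 1 */

/* Visit only the CPUs whose bit is set in the mask. */
#define for_each_possible(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (possible_mask & (1UL << (cpu)))

int main(void)
{
	int cpu;

	for_each_possible(cpu)
		printf("setting up percpu area for CPU %d\n", cpu);
	return 0;
}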