author     Rusty Russell <rusty@rustcorp.com.au>      2005-08-02 00:11:47 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>      2005-08-02 00:38:01 -0400
commit     842bbaaa7394820c8f1fe0629cd15478653caf86
tree       5934040b40357f479b16d638ffd2fe435f4837e8   /kernel/module.c
parent     561fb765b97f287211a2c73a844c5edb12f44f1d
[PATCH] Module per-cpu alignment cannot always be met
The module code assumes no one will ever ask for a per-cpu area more than
SMP_CACHE_BYTES aligned. However, as these cases show, gcc sometimes asks
for 32-byte alignment for a module's per-cpu section, and if
CONFIG_X86_L1_CACHE_SHIFT is 4, we hit that BUG_ON(). This is obviously an
unusual combination, as there have been few reports, but better to warn
than die.
See:
http://www.ussg.iu.edu/hypermail/linux/kernel/0409.0/0768.html
And more recently:
http://bugs.gentoo.org/show_bug.cgi?id=97006
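As a rough illustration of the arithmetic (a minimal userspace sketch, not
part of the patch; the config value and the 32-byte request are assumptions
taken from the reports above): with CONFIG_X86_L1_CACHE_SHIFT at 4,
SMP_CACHE_BYTES works out to 1 << 4 = 16, so a per-cpu section that gcc
aligns to 32 bytes trips the old BUG_ON().

/* Sketch only -- not kernel code; values assumed from the reports above. */
#include <stdio.h>

#define CONFIG_X86_L1_CACHE_SHIFT 4                       /* assumed .config value */
#define SMP_CACHE_BYTES (1 << CONFIG_X86_L1_CACHE_SHIFT)  /* = 16 */

int main(void)
{
	/* sh_addralign of a module's per-cpu section, as gcc may emit it */
	unsigned long align = 32;

	if (align > SMP_CACHE_BYTES)
		printf("per-cpu alignment %lu > %d: the old BUG_ON() would fire here\n",
		       align, SMP_CACHE_BYTES);
	return 0;
}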
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/module.c')
-rw-r--r--	kernel/module.c	15
1 files changed, 11 insertions, 4 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 068e271ab3a..c32995fbd8f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -250,13 +250,18 @@ static inline unsigned int block_size(int val)
 /* Created by linker magic */
 extern char __per_cpu_start[], __per_cpu_end[];
 
-static void *percpu_modalloc(unsigned long size, unsigned long align)
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+			     const char *name)
 {
 	unsigned long extra;
 	unsigned int i;
 	void *ptr;
 
-	BUG_ON(align > SMP_CACHE_BYTES);
+	if (align > SMP_CACHE_BYTES) {
+		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
+		       name, align, SMP_CACHE_BYTES);
+		align = SMP_CACHE_BYTES;
+	}
 
 	ptr = __per_cpu_start;
 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -348,7 +353,8 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 #else /* ... !CONFIG_SMP */
-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
+				    const char *name)
 {
 	return NULL;
 }
@@ -1644,7 +1650,8 @@ static struct module *load_module(void __user *umod,
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-					 sechdrs[pcpuindex].sh_addralign);
+					 sechdrs[pcpuindex].sh_addralign,
+					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
 			goto free_mod;
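For reference, a minimal userspace sketch of the new fallback behaviour (the
helper clamp_percpu_align() and the module name "examplemod" are made up for
illustration; in the kernel the check sits directly inside percpu_modalloc()):
an over-aligned request is clamped to SMP_CACHE_BYTES with a warning rather
than killing the box.

/* Sketch only -- userspace approximation of the warn-and-clamp fallback. */
#include <stdio.h>

#define SMP_CACHE_BYTES 16  /* assumed: CONFIG_X86_L1_CACHE_SHIFT == 4 */

static unsigned long clamp_percpu_align(const char *name, unsigned long align)
{
	if (align > SMP_CACHE_BYTES) {
		fprintf(stderr, "%s: per-cpu alignment %lu > %d\n",
			name, align, SMP_CACHE_BYTES);
		align = SMP_CACHE_BYTES;
	}
	return align;
}

int main(void)
{
	/* A module whose per-cpu section asks for 32-byte alignment now gets a
	 * warning and a 16-byte-aligned area instead of a BUG_ON(). */
	printf("effective alignment: %lu\n", clamp_percpu_align("examplemod", 32));
	return 0;
}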