diff options
author | Pekka Enberg <penberg@cs.helsinki.fi> | 2009-06-12 07:03:06 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2009-06-12 11:53:33 -0400 |
commit | 7e85ee0c1d15ca5f8bff0f514f158eba1742dd87 (patch) | |
tree | 8f9c21f0df6bea88740d7dd48834ac9ffc238e93 | |
parent | eb91f1d0a531289e18f5587dc197d12a251c66a3 (diff) |
slab,slub: don't enable interrupts during early boot
As explained by Benjamin Herrenschmidt:
Oh and btw, your patch alone doesn't fix powerpc, because it's missing
a whole bunch of GFP_KERNEL's in the arch code... You would have to
grep the entire kernel for things that check slab_is_available() and
even then you'll be missing some.
For example, slab_is_available() didn't always exist, and so in the
early days on powerpc, we used a mem_init_done global that is set from
mem_init() (not perfect but works in practice). And we still have code
using that to do the test.
Therefore, mask out __GFP_WAIT, __GFP_IO, and __GFP_FS in the slab allocators
in early boot code to avoid enabling interrupts.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r-- | include/linux/gfp.h | 3 | ||||
-rw-r--r-- | include/linux/slab.h | 2 | ||||
-rw-r--r-- | include/linux/slob_def.h | 5 | ||||
-rw-r--r-- | include/linux/slub_def.h | 2 | ||||
-rw-r--r-- | init/main.c | 1 | ||||
-rw-r--r-- | mm/slab.c | 18 | ||||
-rw-r--r-- | mm/slub.c | 16 |
7 files changed, 47 insertions, 0 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0bbc15f54536..3760e7c5de02 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -85,6 +85,9 @@ struct vm_area_struct; | |||
85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
86 | __GFP_NORETRY|__GFP_NOMEMALLOC) | 86 | __GFP_NORETRY|__GFP_NOMEMALLOC) |
87 | 87 | ||
88 | /* Control slab gfp mask during early boot */ | ||
89 | #define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) | ||
90 | |||
88 | /* Control allocation constraints */ | 91 | /* Control allocation constraints */ |
89 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | 92 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
90 | 93 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 48803064cedf..219b8fb4651d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -319,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | |||
319 | return kmalloc_node(size, flags | __GFP_ZERO, node); | 319 | return kmalloc_node(size, flags | __GFP_ZERO, node); |
320 | } | 320 | } |
321 | 321 | ||
322 | void __init kmem_cache_init_late(void); | ||
323 | |||
322 | #endif /* _LINUX_SLAB_H */ | 324 | #endif /* _LINUX_SLAB_H */ |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 0ec00b39d006..bb5368df4be8 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags) | |||
34 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline void kmem_cache_init_late(void) | ||
38 | { | ||
39 | /* Nothing to do */ | ||
40 | } | ||
41 | |||
37 | #endif /* __LINUX_SLOB_DEF_H */ | 42 | #endif /* __LINUX_SLOB_DEF_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index be5d40c43bd2..4dcbc2c71491 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
302 | } | 302 | } |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | void __init kmem_cache_init_late(void); | ||
306 | |||
305 | #endif /* _LINUX_SLUB_DEF_H */ | 307 | #endif /* _LINUX_SLUB_DEF_H */ |
diff --git a/init/main.c b/init/main.c index b3e8f14c568a..f6204f712e7c 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -640,6 +640,7 @@ asmlinkage void __init start_kernel(void) | |||
640 | "enabled early\n"); | 640 | "enabled early\n"); |
641 | early_boot_irqs_on(); | 641 | early_boot_irqs_on(); |
642 | local_irq_enable(); | 642 | local_irq_enable(); |
643 | kmem_cache_init_late(); | ||
643 | 644 | ||
644 | /* | 645 | /* |
645 | * HACK ALERT! This is early. We're enabling the console before | 646 | * HACK ALERT! This is early. We're enabling the console before |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -304,6 +304,12 @@ struct kmem_list3 { | |||
304 | }; | 304 | }; |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * The slab allocator is initialized with interrupts disabled. Therefore, make | ||
308 | * sure early boot allocations don't accidentally enable interrupts. | ||
309 | */ | ||
310 | static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | ||
311 | |||
312 | /* | ||
307 | * Need this for bootstrapping a per node allocator. | 313 | * Need this for bootstrapping a per node allocator. |
308 | */ | 314 | */ |
309 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) | 315 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) |
@@ -1654,6 +1660,14 @@ void __init kmem_cache_init(void) | |||
1654 | */ | 1660 | */ |
1655 | } | 1661 | } |
1656 | 1662 | ||
1663 | void __init kmem_cache_init_late(void) | ||
1664 | { | ||
1665 | /* | ||
1666 | * Interrupts are enabled now so all GFP allocations are safe. | ||
1667 | */ | ||
1668 | slab_gfp_mask = __GFP_BITS_MASK; | ||
1669 | } | ||
1670 | |||
1657 | static int __init cpucache_init(void) | 1671 | static int __init cpucache_init(void) |
1658 | { | 1672 | { |
1659 | int cpu; | 1673 | int cpu; |
@@ -3354,6 +3368,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
3354 | unsigned long save_flags; | 3368 | unsigned long save_flags; |
3355 | void *ptr; | 3369 | void *ptr; |
3356 | 3370 | ||
3371 | flags &= slab_gfp_mask; | ||
3372 | |||
3357 | lockdep_trace_alloc(flags); | 3373 | lockdep_trace_alloc(flags); |
3358 | 3374 | ||
3359 | if (slab_should_failslab(cachep, flags)) | 3375 | if (slab_should_failslab(cachep, flags)) |
@@ -3434,6 +3450,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | |||
3434 | unsigned long save_flags; | 3450 | unsigned long save_flags; |
3435 | void *objp; | 3451 | void *objp; |
3436 | 3452 | ||
3453 | flags &= slab_gfp_mask; | ||
3454 | |||
3437 | lockdep_trace_alloc(flags); | 3455 | lockdep_trace_alloc(flags); |
3438 | 3456 | ||
3439 | if (slab_should_failslab(cachep, flags)) | 3457 | if (slab_should_failslab(cachep, flags)) |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -178,6 +178,12 @@ static enum { | |||
178 | SYSFS /* Sysfs up */ | 178 | SYSFS /* Sysfs up */ |
179 | } slab_state = DOWN; | 179 | } slab_state = DOWN; |
180 | 180 | ||
181 | /* | ||
182 | * The slab allocator is initialized with interrupts disabled. Therefore, make | ||
183 | * sure early boot allocations don't accidentally enable interrupts. | ||
184 | */ | ||
185 | static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | ||
186 | |||
181 | /* A list of all slab caches on the system */ | 187 | /* A list of all slab caches on the system */ |
182 | static DECLARE_RWSEM(slub_lock); | 188 | static DECLARE_RWSEM(slub_lock); |
183 | static LIST_HEAD(slab_caches); | 189 | static LIST_HEAD(slab_caches); |
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1595 | unsigned long flags; | 1601 | unsigned long flags; |
1596 | unsigned int objsize; | 1602 | unsigned int objsize; |
1597 | 1603 | ||
1604 | gfpflags &= slab_gfp_mask; | ||
1605 | |||
1598 | lockdep_trace_alloc(gfpflags); | 1606 | lockdep_trace_alloc(gfpflags); |
1599 | might_sleep_if(gfpflags & __GFP_WAIT); | 1607 | might_sleep_if(gfpflags & __GFP_WAIT); |
1600 | 1608 | ||
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void) | |||
3104 | nr_cpu_ids, nr_node_ids); | 3112 | nr_cpu_ids, nr_node_ids); |
3105 | } | 3113 | } |
3106 | 3114 | ||
3115 | void __init kmem_cache_init_late(void) | ||
3116 | { | ||
3117 | /* | ||
3118 | * Interrupts are enabled now so all GFP allocations are safe. | ||
3119 | */ | ||
3120 | slab_gfp_mask = __GFP_BITS_MASK; | ||
3121 | } | ||
3122 | |||
3107 | /* | 3123 | /* |
3108 | * Find a mergeable slab cache | 3124 | * Find a mergeable slab cache |
3109 | */ | 3125 | */ |