aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorPekka Enberg <penberg@cs.helsinki.fi>2009-06-12 07:03:06 -0400
committerPekka Enberg <penberg@cs.helsinki.fi>2009-06-12 11:53:33 -0400
commit7e85ee0c1d15ca5f8bff0f514f158eba1742dd87 (patch)
tree8f9c21f0df6bea88740d7dd48834ac9ffc238e93 /mm/slab.c
parenteb91f1d0a531289e18f5587dc197d12a251c66a3 (diff)
slab,slub: don't enable interrupts during early boot
As explained by Benjamin Herrenschmidt: Oh and btw, your patch alone doesn't fix powerpc, because it's missing a whole bunch of GFP_KERNEL's in the arch code... You would have to grep the entire kernel for things that check slab_is_available() and even then you'll be missing some. For example, slab_is_available() didn't always exist, and so in the early days on powerpc, we used a mem_init_done global that is set from mem_init() (not perfect but works in practice). And we still have code using that to do the test. Therefore, mask out __GFP_WAIT, __GFP_IO, and __GFP_FS in the slab allocators in early boot code to avoid enabling interrupts. Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--mm/slab.c18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index cd76964b53bc..453efcb1c980 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1654,6 +1660,14 @@ void __init kmem_cache_init(void)
 	 */
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 static int __init cpucache_init(void)
 {
 	int cpu;
@@ -3354,6 +3368,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3450,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))