diff options
author | Pekka Enberg <penberg@cs.helsinki.fi> | 2009-06-12 07:03:06 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2009-06-12 11:53:33 -0400 |
commit | 7e85ee0c1d15ca5f8bff0f514f158eba1742dd87 (patch) | |
tree | 8f9c21f0df6bea88740d7dd48834ac9ffc238e93 /mm/slub.c | |
parent | eb91f1d0a531289e18f5587dc197d12a251c66a3 (diff) |
slab,slub: don't enable interrupts during early boot
As explained by Benjamin Herrenschmidt:
Oh and btw, your patch alone doesn't fix powerpc, because it's missing
a whole bunch of GFP_KERNEL's in the arch code... You would have to
grep the entire kernel for things that check slab_is_available() and
even then you'll be missing some.
For example, slab_is_available() didn't always exist, and so in the
early days on powerpc, we used a mem_init_done global that is set from
mem_init() (not perfect but works in practice). And we still have code
using that to do the test.
Therefore, mask out __GFP_WAIT, __GFP_IO, and __GFP_FS in the slab allocators
in early boot code to avoid enabling interrupts.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 16 |
1 files changed, 16 insertions, 0 deletions
@@ -178,6 +178,12 @@ static enum { | |||
178 | SYSFS /* Sysfs up */ | 178 | SYSFS /* Sysfs up */ |
179 | } slab_state = DOWN; | 179 | } slab_state = DOWN; |
180 | 180 | ||
181 | /* | ||
182 | * The slab allocator is initialized with interrupts disabled. Therefore, make | ||
183 | * sure early boot allocations don't accidentally enable interrupts. | ||
184 | */ | ||
185 | static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | ||
186 | |||
181 | /* A list of all slab caches on the system */ | 187 | /* A list of all slab caches on the system */ |
182 | static DECLARE_RWSEM(slub_lock); | 188 | static DECLARE_RWSEM(slub_lock); |
183 | static LIST_HEAD(slab_caches); | 189 | static LIST_HEAD(slab_caches); |
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1595 | unsigned long flags; | 1601 | unsigned long flags; |
1596 | unsigned int objsize; | 1602 | unsigned int objsize; |
1597 | 1603 | ||
1604 | gfpflags &= slab_gfp_mask; | ||
1605 | |||
1598 | lockdep_trace_alloc(gfpflags); | 1606 | lockdep_trace_alloc(gfpflags); |
1599 | might_sleep_if(gfpflags & __GFP_WAIT); | 1607 | might_sleep_if(gfpflags & __GFP_WAIT); |
1600 | 1608 | ||
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void) | |||
3104 | nr_cpu_ids, nr_node_ids); | 3112 | nr_cpu_ids, nr_node_ids); |
3105 | } | 3113 | } |
3106 | 3114 | ||
3115 | void __init kmem_cache_init_late(void) | ||
3116 | { | ||
3117 | /* | ||
3118 | * Interrupts are enabled now so all GFP allocations are safe. | ||
3119 | */ | ||
3120 | slab_gfp_mask = __GFP_BITS_MASK; | ||
3121 | } | ||
3122 | |||
3107 | /* | 3123 | /* |
3108 | * Find a mergeable slab cache | 3124 | * Find a mergeable slab cache |
3109 | */ | 3125 | */ |