author:    Vegard Nossum <vegard.nossum@gmail.com>	2009-06-15 09:50:49 -0400
committer: Vegard Nossum <vegard.nossum@gmail.com>	2009-06-15 09:50:49 -0400
commit:    722f2a6c87f34ee0fd0130a8cf45f81e0705594a (patch)
tree:      50b054df34d2731eb0ba0cf1a6c27e43e7eed428 /mm/slab.c
parent:    7a0aeb14e18ad59394bd9bbc6e57fb345819e748 (diff)
parent:    45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge commit 'linus/master' into HEAD

Conflicts:
	MAINTAINERS

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	41
1 file changed, 30 insertions(+), 11 deletions(-)
@@ -305,6 +305,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
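The new mask is consulted on every allocation (see the __cache_alloc hunks below), so a GFP_KERNEL request issued before interrupts are enabled is silently downgraded. A standalone userspace sketch of the mechanism follows; the flag bits and the shape of SLAB_GFP_BOOT_MASK are invented stand-ins here, the real definitions live in include/linux/gfp.h:

/*
 * Standalone sketch (not kernel code): masking allocation flags against
 * a boot-time mask strips the bits that could sleep or start I/O.
 * All bit values below are hypothetical stand-ins for the real GFP bits.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	0x10u	/* hypothetical: allocation may sleep */
#define __GFP_IO	0x40u	/* hypothetical: may start I/O */
#define __GFP_FS	0x80u	/* hypothetical: may recurse into the FS */
#define __GFP_BITS_MASK	0xffu	/* hypothetical: all valid flag bits */

/* Assumed shape of the boot mask; check include/linux/gfp.h for the real one. */
#define SLAB_GFP_BOOT_MASK \
	(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

static gfp_t slab_gfp_mask = SLAB_GFP_BOOT_MASK;

int main(void)
{
	gfp_t gfp_kernel = __GFP_WAIT | __GFP_IO | __GFP_FS;

	/* Early boot: the sleeping/I/O bits are silently dropped. */
	printf("early: %#x -> %#x\n", gfp_kernel, gfp_kernel & slab_gfp_mask);

	/* After kmem_cache_init_late() lifts the mask, nothing is dropped. */
	slab_gfp_mask = __GFP_BITS_MASK;
	printf("late:  %#x -> %#x\n", gfp_kernel, gfp_kernel & slab_gfp_mask);
	return 0;
}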
@@ -673,6 +679,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -681,7 +688,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
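Note that the comparison depends purely on declaration order: EARLY is added between PARTIAL_L3 and FULL, so >= EARLY is true for both the early and the fully initialized state. A minimal standalone check of that property:

#include <assert.h>

/* Mirrors the enum from mm/slab.c after this patch. */
enum cpucache_up { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL };

/* Same test as the patched slab_is_available(). */
static int slab_is_available_sketch(enum cpucache_up up)
{
	return up >= EARLY;
}

int main(void)
{
	assert(!slab_is_available_sketch(PARTIAL_L3));	/* still too early */
	assert(slab_is_available_sketch(EARLY));	/* new: available */
	assert(slab_is_available_sketch(FULL));		/* as before */
	return 0;
}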
@@ -1545,19 +1552,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
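The head-array resize (step 6) moves out of kmem_cache_init(), which still runs with interrupts disabled, into the new kmem_cache_init_late(), which the boot code is expected to call once interrupts are on. The call sites are not part of this diff (presumably init/main.c:start_kernel()), so the ordering in this standalone sketch is an assumption:

/*
 * Standalone sketch of the boot ordering this split assumes; the real
 * call sites live outside this diff, so their placement is assumed.
 */
#include <stdio.h>

static void kmem_cache_init(void)
{
	/* IRQs still off: leaves g_cpucache_up = EARLY and keeps
	 * slab_gfp_mask restricted to SLAB_GFP_BOOT_MASK. */
	puts("kmem_cache_init: state EARLY, boot gfp mask");
}

static void kmem_cache_init_late(void)
{
	/* IRQs on: lifts slab_gfp_mask to __GFP_BITS_MASK, resizes the
	 * head arrays, then sets g_cpucache_up = FULL. */
	puts("kmem_cache_init_late: state FULL, full gfp mask");
}

int main(void)
{
	/* interrupts are disabled in the real kernel here ... */
	kmem_cache_init();
	/* ... local_irq_enable() happens somewhere in between ... */
	kmem_cache_init_late();
	return 0;
}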
@@ -2034,7 +2049,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 				kmalloc_node(sizeof(struct kmem_list3),
-						GFP_KERNEL, node);
+						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
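setup_cpu_cache() takes a gfp_t parameter but was still hard-coding GFP_KERNEL here; threading the caller's flags through lets the bootstrap path pass GFP_NOWAIT. The general pattern, as a hypothetical standalone example (all names invented):

/*
 * Hypothetical standalone example of the pattern: helpers must pass the
 * caller's allocation flags through instead of hard-coding GFP_KERNEL.
 */
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL	0x10u	/* hypothetical: may sleep */
#define GFP_NOWAIT	0x00u	/* hypothetical: never sleeps */

/* Stand-in for kmalloc_node(size, flags, node). */
static void *alloc_on_node(size_t size, gfp_t flags, int node)
{
	(void)flags;
	(void)node;
	return malloc(size);
}

/* After the fix: the caller decides, so early boot can pass GFP_NOWAIT. */
static void *setup_node_list(size_t size, gfp_t gfp, int node)
{
	return alloc_on_node(size, gfp, node);
}

int main(void)
{
	void *p = setup_node_list(64, GFP_NOWAIT, 0);
	free(p);
	return 0;
}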
@@ -3286,6 +3301,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3369,6 +3386,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
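Both hot paths clamp the flags before lockdep_trace_alloc() and the failslab check, so everything downstream only ever sees the masked value. A quick standalone model of that placement (flag values invented):

/*
 * Standalone model of the clamp at the top of __cache_alloc() and
 * __cache_alloc_node(): the mask is applied before any other check,
 * so lockdep and failslab only ever see the clamped flags.
 */
#include <assert.h>

typedef unsigned int gfp_t;
#define __GFP_WAIT	 0x1u
#define __GFP_BITS_MASK	 0xfu

static gfp_t slab_gfp_mask = __GFP_BITS_MASK & ~__GFP_WAIT; /* boot mask */

static gfp_t seen_by_lockdep;	/* what lockdep_trace_alloc() would get */

static void cache_alloc_sketch(gfp_t flags)
{
	flags &= slab_gfp_mask;		/* the clamp added by this patch */
	seen_by_lockdep = flags;	/* ... then tracing, failslab, ... */
}

int main(void)
{
	cache_alloc_sketch(__GFP_WAIT);
	assert(!(seen_by_lockdep & __GFP_WAIT));	/* stripped at boot */

	slab_gfp_mask = __GFP_BITS_MASK;	/* as in kmem_cache_init_late() */
	cache_alloc_sketch(__GFP_WAIT);
	assert(seen_by_lockdep & __GFP_WAIT);		/* allowed later */
	return 0;
}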