author     Pekka Enberg <penberg@cs.helsinki.fi>    2009-06-10 12:40:04 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>    2009-06-11 12:15:56 -0400
commit     83b519e8b9572c319c8e0c615ee5dd7272856090 (patch)
tree       ecf130629f6228d509bb52c533ca2287f966f972
parent     c91c4773b334d4d3a6d44626dc2a558ad97b86f3 (diff)
slab: setup allocators earlier in the boot sequence
This patch makes kmalloc() available earlier in the boot sequence so we can get
rid of some bootmem allocations. The bulk of the changes are due to
kmem_cache_init() being called with interrupts disabled, which requires some
changes to the allocator bootstrap code.
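
The pattern used throughout is to pick the GFP mask based on how far boot has
progressed: GFP_KERNEL may sleep, which is forbidden while interrupts are still
disabled, so early callers fall back to the non-sleeping GFP_NOWAIT until
slab_is_available() reports that the allocator is fully up. A minimal sketch of
the idea (boot_kmalloc() is a hypothetical helper, not part of this patch):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Hypothetical helper: choose a GFP mask that is safe both before
     * and after the slab allocator has been fully set up. */
    static void *boot_kmalloc(size_t size)
    {
            gfp_t gfp = slab_is_available() ? GFP_KERNEL : GFP_NOWAIT;

            return kmalloc(size, gfp);
    }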
Note: 32-bit x86 does the WP protect test in mem_init(), so we must set up
traps before calling mem_init() during boot, as reported by Ingo Molnar:
We have a hard crash in the WP-protect code:
[ 0.000000] Checking if this processor honours the WP bit even in supervisor mode...BUG: Int 14: CR2 ffcff000
[ 0.000000] EDI 00000188 ESI 00000ac7 EBP c17eaf9c ESP c17eaf8c
[ 0.000000] EBX 000014e0 EDX 0000000e ECX 01856067 EAX 00000001
[ 0.000000] err 00000003 EIP c10135b1 CS 00000060 flg 00010002
[ 0.000000] Stack: c17eafa8 c17fd410 c16747bc c17eafc4 c17fd7e5 000011fd f8616000 c18237cc
[ 0.000000] 00099800 c17bb000 c17eafec c17f1668 000001c5 c17f1322 c166e039 c1822bf0
[ 0.000000] c166e033 c153a014 c18237cc 00020800 c17eaff8 c17f106a 00020800 01ba5003
[ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.30-tip-02161-g7a74539-dirty #52203
[ 0.000000] Call Trace:
[ 0.000000] [<c15357c2>] ? printk+0x14/0x16
[ 0.000000] [<c10135b1>] ? do_test_wp_bit+0x19/0x23
[ 0.000000] [<c17fd410>] ? test_wp_bit+0x26/0x64
[ 0.000000] [<c17fd7e5>] ? mem_init+0x1ba/0x1d8
[ 0.000000] [<c17f1668>] ? start_kernel+0x164/0x2f7
[ 0.000000] [<c17f1322>] ? unknown_bootoption+0x0/0x19c
[ 0.000000] [<c17f106a>] ? __init_begin+0x6a/0x6f
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  init/main.c |  36
-rw-r--r--  mm/slab.c   |  85
-rw-r--r--  mm/slub.c   |  17
3 files changed, 77 insertions, 61 deletions
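
With these changes kmem_cache_create() can also run this early: while
slab_is_available() is false it selects GFP_NOWAIT internally and skips the
CPU-hotplug and cache_chain_mutex locking, since only the boot CPU is running
(see the mm/slab.c hunks below). A sketch of such an early caller, where
struct early_obj and early_cache_setup() are made up for illustration:

    #include <linux/init.h>
    #include <linux/slab.h>

    struct early_obj {
            unsigned long id;
    };

    static struct kmem_cache *early_cache;

    /* Hypothetical __init hook: with this patch it could be called right
     * after kmem_cache_init(), while interrupts are still disabled. */
    void __init early_cache_setup(void)
    {
            early_cache = kmem_cache_create("early_obj",
                                            sizeof(struct early_obj),
                                            0, SLAB_PANIC, NULL);
    }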
diff --git a/init/main.c b/init/main.c
index bb7dc57eee36..0ab82a453de5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -574,6 +574,28 @@ asmlinkage void __init start_kernel(void)
         setup_nr_cpu_ids();
         smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
 
+        build_all_zonelists();
+        page_alloc_init();
+
+        printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+        parse_early_param();
+        parse_args("Booting kernel", static_command_line, __start___param,
+                   __stop___param - __start___param,
+                   &unknown_bootoption);
+        /*
+         * These use large bootmem allocations and must precede
+         * kmem_cache_init()
+         */
+        pidhash_init();
+        vmalloc_init();
+        vfs_caches_init_early();
+        sort_main_extable();
+        trap_init();
+        /*
+         * Set up kernel memory allocators
+         */
+        mem_init();
+        kmem_cache_init();
         /*
          * Set up the scheduler prior starting any interrupts (such as the
          * timer interrupt). Full topology setup happens at smp_init()
@@ -585,25 +607,15 @@ asmlinkage void __init start_kernel(void)
          * fragile until we cpu_idle() for the first time.
          */
         preempt_disable();
-        build_all_zonelists();
-        page_alloc_init();
-        printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
-        parse_early_param();
-        parse_args("Booting kernel", static_command_line, __start___param,
-                   __stop___param - __start___param,
-                   &unknown_bootoption);
         if (!irqs_disabled()) {
                 printk(KERN_WARNING "start_kernel(): bug: interrupts were "
                        "enabled *very* early, fixing it\n");
                 local_irq_disable();
         }
-        sort_main_extable();
-        trap_init();
         rcu_init();
         /* init some links before init_ISA_irqs() */
         early_irq_init();
         init_IRQ();
-        pidhash_init();
         init_timers();
         hrtimers_init();
         softirq_init();
@@ -645,14 +657,10 @@ asmlinkage void __init start_kernel(void)
                 initrd_start = 0;
         }
 #endif
-        vmalloc_init();
-        vfs_caches_init_early();
         cpuset_init_early();
         page_cgroup_init();
-        mem_init();
         enable_debug_pagealloc();
         cpu_hotplug_init();
-        kmem_cache_init();
         kmemtrace_init();
         debug_objects_mem_init();
         idr_init_cache();
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -315,7 +315,7 @@ static int drain_freelist(struct kmem_cache *cache,
                         struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
                         int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +958,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-                                            int batchcount)
+                                            int batchcount, gfp_t gfp)
 {
         int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
         struct array_cache *nc = NULL;
 
-        nc = kmalloc_node(memsize, GFP_KERNEL, node);
+        nc = kmalloc_node(memsize, gfp, node);
         if (nc) {
                 nc->avail = 0;
                 nc->limit = entries;
@@ -1003,7 +1003,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
         return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1034,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
         struct array_cache **ac_ptr;
         int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1042,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
         if (limit > 1)
                 limit = 12;
-        ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+        ac_ptr = kmalloc_node(memsize, gfp, node);
         if (ac_ptr) {
                 for_each_node(i) {
                         if (i == node || !node_online(i)) {
                                 ac_ptr[i] = NULL;
                                 continue;
                         }
-                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
                         if (!ac_ptr[i]) {
                                 for (i--; i >= 0; i--)
                                         kfree(ac_ptr[i]);
@@ -1282,20 +1282,20 @@ static int __cpuinit cpuup_prepare(long cpu)
                 struct array_cache **alien = NULL;
 
                 nc = alloc_arraycache(node, cachep->limit,
-                                        cachep->batchcount);
+                                        cachep->batchcount, GFP_KERNEL);
                 if (!nc)
                         goto bad;
                 if (cachep->shared) {
                         shared = alloc_arraycache(node,
                                 cachep->shared * cachep->batchcount,
-                                0xbaadf00d);
+                                0xbaadf00d, GFP_KERNEL);
                         if (!shared) {
                                 kfree(nc);
                                 goto bad;
                         }
                 }
                 if (use_alien_caches) {
-                        alien = alloc_alien_cache(node, cachep->limit);
+                        alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
                         if (!alien) {
                                 kfree(shared);
                                 kfree(nc);
@@ -1399,10 +1399,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
         struct kmem_list3 *ptr;
 
-        ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+        ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
         BUG_ON(!ptr);
 
-        local_irq_disable();
         memcpy(ptr, list, sizeof(struct kmem_list3));
         /*
          * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1410,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
         MAKE_ALL_LISTS(cachep, ptr, nodeid);
         cachep->nodelists[nodeid] = ptr;
-        local_irq_enable();
 }
 
 /*
@@ -1575,9 +1573,8 @@ void __init kmem_cache_init(void)
         {
                 struct array_cache *ptr;
 
-                ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+                ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-                local_irq_disable();
                 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
                 memcpy(ptr, cpu_cache_get(&cache_cache),
                        sizeof(struct arraycache_init));
@@ -1587,11 +1584,9 @@ void __init kmem_cache_init(void)
                 spin_lock_init(&ptr->lock);
 
                 cache_cache.array[smp_processor_id()] = ptr;
-                local_irq_enable();
 
-                ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+                ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-                local_irq_disable();
                 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
                        != &initarray_generic.cache);
                 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1598,6 @@ void __init kmem_cache_init(void)
 
                 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
                     ptr;
-                local_irq_enable();
         }
         /* 5) Replace the bootstrap kmem_list3's */
         {
@@ -1627,7 +1621,7 @@ void __init kmem_cache_init(void)
                 struct kmem_cache *cachep;
                 mutex_lock(&cache_chain_mutex);
                 list_for_each_entry(cachep, &cache_chain, next)
-                        if (enable_cpucache(cachep))
+                        if (enable_cpucache(cachep, GFP_NOWAIT))
                                 BUG();
                 mutex_unlock(&cache_chain_mutex);
         }
@@ -2064,10 +2058,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
         return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
         if (g_cpucache_up == FULL)
-                return enable_cpucache(cachep);
+                return enable_cpucache(cachep, gfp);
 
         if (g_cpucache_up == NONE) {
                 /*
@@ -2089,7 +2083,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
                 g_cpucache_up = PARTIAL_AC;
         } else {
                 cachep->array[smp_processor_id()] =
-                        kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+                        kmalloc(sizeof(struct arraycache_init), gfp);
 
                 if (g_cpucache_up == PARTIAL_AC) {
                         set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
         size_t left_over, slab_size, ralign;
         struct kmem_cache *cachep = NULL, *pc;
+        gfp_t gfp;
 
         /*
          * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2163,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * We use cache_chain_mutex to ensure a consistent view of
          * cpu_online_mask as well.  Please see cpuup_callback
          */
-        get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        if (slab_is_available()) {
+                get_online_cpus();
+                mutex_lock(&cache_chain_mutex);
+        }
 
         list_for_each_entry(pc, &cache_chain, next) {
                 char tmp;
@@ -2278,8 +2275,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          */
         align = ralign;
 
+        if (slab_is_available())
+                gfp = GFP_KERNEL;
+        else
+                gfp = GFP_NOWAIT;
+
         /* Get cache's description obj. */
-        cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+        cachep = kmem_cache_zalloc(&cache_cache, gfp);
         if (!cachep)
                 goto oops;
 
@@ -2382,7 +2384,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         cachep->ctor = ctor;
         cachep->name = name;
 
-        if (setup_cpu_cache(cachep)) {
+        if (setup_cpu_cache(cachep, gfp)) {
                 __kmem_cache_destroy(cachep);
                 cachep = NULL;
                 goto oops;
@@ -2394,8 +2396,10 @@ oops:
         if (!cachep && (flags & SLAB_PANIC))
                 panic("kmem_cache_create(): failed to create slab `%s'\n",
                       name);
-        mutex_unlock(&cache_chain_mutex);
-        put_online_cpus();
+        if (slab_is_available()) {
+                mutex_unlock(&cache_chain_mutex);
+                put_online_cpus();
+        }
         return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3802,7 +3806,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
         int node;
         struct kmem_list3 *l3;
@@ -3812,7 +3816,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
         for_each_online_node(node) {
 
                 if (use_alien_caches) {
-                        new_alien = alloc_alien_cache(node, cachep->limit);
+                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
                         if (!new_alien)
                                 goto fail;
                 }
@@ -3821,7 +3825,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                 if (cachep->shared) {
                         new_shared = alloc_arraycache(node,
                                 cachep->shared*cachep->batchcount,
-                                0xbaadf00d);
+                                0xbaadf00d, gfp);
                         if (!new_shared) {
                                 free_alien_cache(new_alien);
                                 goto fail;
@@ -3850,7 +3854,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                         free_alien_cache(new_alien);
                         continue;
                 }
-                l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+                l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
                 if (!l3) {
                         free_alien_cache(new_alien);
                         kfree(new_shared);
@@ -3906,18 +3910,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-                            int batchcount, int shared)
+                            int batchcount, int shared, gfp_t gfp)
 {
         struct ccupdate_struct *new;
         int i;
 
-        new = kzalloc(sizeof(*new), GFP_KERNEL);
+        new = kzalloc(sizeof(*new), gfp);
         if (!new)
                 return -ENOMEM;
 
         for_each_online_cpu(i) {
                 new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-                                                batchcount);
+                                                batchcount, gfp);
                 if (!new->new[i]) {
                         for (i--; i >= 0; i--)
                                 kfree(new->new[i]);
@@ -3944,11 +3948,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                 kfree(ccold);
         }
         kfree(new);
-        return alloc_kmemlist(cachep);
+        return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
         int err;
         int limit, shared;
@@ -3994,7 +3998,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
         if (limit > 32)
                 limit = 32;
 #endif
-        err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+        err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
         if (err)
                 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
                        cachep->name, -err);
@@ -4300,7 +4304,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                         res = 0;
                 } else {
                         res = do_tune_cpucache(cachep, limit,
-                                               batchcount, shared);
+                                               batchcount, shared,
+                                               GFP_KERNEL);
                 }
                 break;
         }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2557,13 +2557,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
         if (gfp_flags & SLUB_DMA)
                 flags = SLAB_CACHE_DMA;
 
-        down_write(&slub_lock);
+        /*
+         * This function is called with IRQs disabled during early-boot on
+         * single CPU so there's no need to take slub_lock here.
+         */
         if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
                                                                 flags, NULL))
                 goto panic;
 
         list_add(&s->list, &slab_caches);
-        up_write(&slub_lock);
+
         if (sysfs_slab_add(s))
                 goto panic;
         return s;
@@ -3021,7 +3024,7 @@ void __init kmem_cache_init(void)
          * kmem_cache_open for slab_state == DOWN.
          */
         create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-                sizeof(struct kmem_cache_node), GFP_KERNEL);
+                sizeof(struct kmem_cache_node), GFP_NOWAIT);
         kmalloc_caches[0].refcount = -1;
         caches++;
 
@@ -3034,16 +3037,16 @@ void __init kmem_cache_init(void)
         /* Caches that are not of the two-to-the-power-of size */
         if (KMALLOC_MIN_SIZE <= 64) {
                 create_kmalloc_cache(&kmalloc_caches[1],
-                                "kmalloc-96", 96, GFP_KERNEL);
+                                "kmalloc-96", 96, GFP_NOWAIT);
                 caches++;
                 create_kmalloc_cache(&kmalloc_caches[2],
-                                "kmalloc-192", 192, GFP_KERNEL);
+                                "kmalloc-192", 192, GFP_NOWAIT);
                 caches++;
         }
 
         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
-                                "kmalloc", 1 << i, GFP_KERNEL);
+                                "kmalloc", 1 << i, GFP_NOWAIT);
                 caches++;
         }
 
@@ -3080,7 +3083,7 @@ void __init kmem_cache_init(void)
         /* Provide the correct kmalloc names now that the caches are up */
         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                 kmalloc_caches[i].name =
-                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+                        kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);