author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 15:25:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 15:25:06 -0400
commit	b640f042faa2a2fad6464f259a8afec06e2f6386 (patch)
tree	44a2943f91859422a207612229031a767c0accd5
parent	871fa90791a6f83dd8e2e489feb9534a8c02088d (diff)
parent	b8ec757390282e21d349bf6b602a8cb182da0429 (diff)
Merge branch 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  vgacon: use slab allocator instead of the bootmem allocator
  irq: use kcalloc() instead of the bootmem allocator
  sched: use slab in cpupri_init()
  sched: use alloc_cpumask_var() instead of alloc_bootmem_cpumask_var()
  memcg: don't use bootmem allocator in setup code
  irq/cpumask: make memoryless node zero happy
  x86: remove some alloc_bootmem_cpumask_var calling
  vt: use kzalloc() instead of the bootmem allocator
  sched: use kzalloc() instead of the bootmem allocator
  init: introduce mm_init()
  vmalloc: use kzalloc() instead of alloc_bootmem()
  slab: setup allocators earlier in the boot sequence
  bootmem: fix slab fallback on numa
  bootmem: use slab if bootmem is no longer available
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  |  6
-rw-r--r--  drivers/char/vt.c               |  8
-rw-r--r--  drivers/video/console/vgacon.c  |  5
-rw-r--r--  include/linux/irq.h             | 18
-rw-r--r--  init/main.c                     | 41
-rw-r--r--  kernel/cpuset.c                 |  2
-rw-r--r--  kernel/irq/handle.c             | 11
-rw-r--r--  kernel/profile.c                |  6
-rw-r--r--  kernel/sched.c                  | 30
-rw-r--r--  kernel/sched_cpupri.c           |  8
-rw-r--r--  lib/cpumask.c                   | 11
-rw-r--r--  mm/bootmem.c                    | 12
-rw-r--r--  mm/page_cgroup.c                | 12
-rw-r--r--  mm/slab.c                       | 85
-rw-r--r--  mm/slub.c                       | 17
-rw-r--r--  mm/vmalloc.c                    |  3
16 files changed, 145 insertions(+), 130 deletions(-)
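
The recurring conversion in this series replaces bootmem-era allocations with ordinary slab calls using GFP_NOWAIT, which never sleeps and is therefore safe as soon as mm_init()/kmem_cache_init() has run, even while interrupts are still disabled. A minimal sketch of the pattern follows; example_table, EXAMPLE_ENTRIES, and example_early_init() are illustrative names, not from this patch:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	#define EXAMPLE_ENTRIES	16	/* hypothetical table size */

	struct example_entry {
		unsigned long val;
	};

	static struct example_entry *example_table;

	static void __init example_early_init(void)
	{
		/* before: example_table = alloc_bootmem(EXAMPLE_ENTRIES * sizeof(*example_table)); */
		example_table = kcalloc(EXAMPLE_ENTRIES, sizeof(*example_table),
					GFP_NOWAIT);
		if (!example_table)
			panic("example_early_init: allocation failed\n");
	}

alloc_bootmem() returned zeroed memory and panicked on failure, so the zeroing kzalloc()/kcalloc() variants plus an explicit failure check preserve the observable behaviour; callers that can tolerate failure return -ENOMEM instead.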
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1946fac42ab3..94605e7f6a54 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -177,16 +177,18 @@ int __init arch_early_irq_init(void)
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
 	int count;
+	int node;
 	int i;
 
 	cfg = irq_cfgx;
 	count = ARRAY_SIZE(irq_cfgx);
+	node = cpu_to_node(boot_cpu_id);
 
 	for (i = 0; i < count; i++) {
 		desc = irq_to_desc(i);
 		desc->chip_data = &cfg[i];
-		alloc_bootmem_cpumask_var(&cfg[i].domain);
-		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+		alloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
+		alloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
 		if (i < NR_IRQS_LEGACY)
 			cpumask_setall(cfg[i].domain);
 	}
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 08151d4de489..c796a86ab7f3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -95,7 +95,6 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/pm.h>
 #include <linux/font.h>
 #include <linux/bitops.h>
@@ -2875,14 +2874,11 @@ static int __init con_init(void)
 		mod_timer(&console_timer, jiffies + blankinterval);
 	}
 
-	/*
-	 * kmalloc is not running yet - we use the bootmem allocator.
-	 */
 	for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
-		vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data));
+		vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
 		INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
 		visual_init(vc, currcons, 1);
-		vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size);
+		vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
 		vc->vc_kmalloced = 0;
 		vc_init(vc, vc->vc_rows, vc->vc_cols,
 			currcons || !vc->vc_sw->con_save_screen);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 38e86b84dce0..59d7d5ec17a4 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -180,7 +180,7 @@ static inline void vga_set_mem_top(struct vc_data *c)
 }
 
 #ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-#include <linux/bootmem.h>
+#include <linux/slab.h>
 /* software scrollback */
 static void *vgacon_scrollback;
 static int vgacon_scrollback_tail;
@@ -210,8 +210,7 @@ static void vgacon_scrollback_init(int pitch)
  */
 static void __init_refok vgacon_scrollback_startup(void)
 {
-	vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
-					  * 1024);
+	vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
 	vgacon_scrollback_init(vga_video_num_columns * 2);
 }
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index eedbb8e5e0cc..1e50c34f0062 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -430,23 +430,19 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
  * Returns true if successful (or not required).
  */
 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 							bool boot)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (boot) {
-		alloc_bootmem_cpumask_var(&desc->affinity);
+	gfp_t gfp = GFP_ATOMIC;
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-		alloc_bootmem_cpumask_var(&desc->pending_mask);
-#endif
-		return true;
-	}
+	if (boot)
+		gfp = GFP_NOWAIT;
 
-	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
 		return false;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
 		free_cpumask_var(desc->affinity);
 		return false;
 	}
diff --git a/init/main.c b/init/main.c
index bb7dc57eee36..7917695bf71e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -533,6 +533,16 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	mem_init();
+	kmem_cache_init();
+	vmalloc_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
@@ -574,6 +584,23 @@ asmlinkage void __init start_kernel(void)
 	setup_nr_cpu_ids();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
+	build_all_zonelists();
+	page_alloc_init();
+
+	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+	parse_early_param();
+	parse_args("Booting kernel", static_command_line, __start___param,
+		   __stop___param - __start___param,
+		   &unknown_bootoption);
+	/*
+	 * These use large bootmem allocations and must precede
+	 * kmem_cache_init()
+	 */
+	pidhash_init();
+	vfs_caches_init_early();
+	sort_main_extable();
+	trap_init();
+	mm_init();
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the
 	 * timer interrupt). Full topology setup happens at smp_init()
@@ -585,25 +612,15 @@ asmlinkage void __init start_kernel(void)
 	 * fragile until we cpu_idle() for the first time.
 	 */
 	preempt_disable();
-	build_all_zonelists();
-	page_alloc_init();
-	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
-	parse_early_param();
-	parse_args("Booting kernel", static_command_line, __start___param,
-		   __stop___param - __start___param,
-		   &unknown_bootoption);
 	if (!irqs_disabled()) {
 		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
 				"enabled *very* early, fixing it\n");
 		local_irq_disable();
 	}
-	sort_main_extable();
-	trap_init();
 	rcu_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
-	pidhash_init();
 	init_timers();
 	hrtimers_init();
 	softirq_init();
@@ -645,14 +662,10 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	vmalloc_init();
-	vfs_caches_init_early();
 	cpuset_init_early();
 	page_cgroup_init();
-	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
-	kmem_cache_init();
 	kmemtrace_init();
 	debug_objects_mem_init();
 	idr_init_cache();
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 026faccca869..d5a7e17474ee 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1857,7 +1857,7 @@ struct cgroup_subsys cpuset_subsys = {
 
 int __init cpuset_init_early(void)
 {
-	alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+	alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT);
 
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	return 0;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a60018402f42..104578541230 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -150,6 +150,7 @@ int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
 	int legacy_count;
+	int node;
 	int i;
 
 	init_irq_default_affinity();
@@ -160,20 +161,20 @@ int __init early_irq_init(void)
 
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+	node = first_online_node;
 
 	/* allocate irq_desc_ptrs array based on nr_irqs */
-	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
 
 	/* allocate based on nr_cpu_ids */
-	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
-	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
-					  sizeof(int));
+	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int), GFP_NOWAIT, node);
 
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
 		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		alloc_desc_masks(&desc[i], 0, true);
+		alloc_desc_masks(&desc[i], node, true);
 		init_desc_masks(&desc[i]);
 		irq_desc_ptrs[i] = desc + i;
 	}
diff --git a/kernel/profile.c b/kernel/profile.c
index 7724e0409bae..28cf26ad2d24 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -111,12 +111,6 @@ int __ref profile_init(void)
 	/* only text is profiled */
 	prof_len = (_etext - _stext) >> prof_shift;
 	buffer_bytes = prof_len*sizeof(atomic_t);
-	if (!slab_is_available()) {
-		prof_buffer = alloc_bootmem(buffer_bytes);
-		alloc_bootmem_cpumask_var(&prof_cpu_mask);
-		cpumask_copy(prof_cpu_mask, cpu_possible_mask);
-		return 0;
-	}
 
 	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
diff --git a/kernel/sched.c b/kernel/sched.c
index 14c447ae5d53..dcf2dc28931a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -68,7 +68,6 @@
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
-#include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
@@ -7782,24 +7781,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
 {
+	gfp_t gfp = GFP_KERNEL;
+
 	memset(rd, 0, sizeof(*rd));
 
-	if (bootmem) {
-		alloc_bootmem_cpumask_var(&def_root_domain.span);
-		alloc_bootmem_cpumask_var(&def_root_domain.online);
-		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
-		cpupri_init(&rd->cpupri, true);
-		return 0;
-	}
+	if (bootmem)
+		gfp = GFP_NOWAIT;
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->span, gfp))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->online, gfp))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->rto_mask, gfp))
 		goto free_online;
 
-	if (cpupri_init(&rd->cpupri, false) != 0)
+	if (cpupri_init(&rd->cpupri, bootmem) != 0)
 		goto free_rto_mask;
 	return 0;
 
@@ -9123,7 +9119,7 @@ void __init sched_init(void)
 	 * we use alloc_bootmem().
 	 */
 	if (alloc_size) {
-		ptr = (unsigned long)alloc_bootmem(alloc_size);
+		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		init_task_group.se = (struct sched_entity **)ptr;
@@ -9314,13 +9310,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
-	alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask);
+	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_bootmem_cpumask_var(&cpu_isolated_map);
+	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	scheduler_running = 1;
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 344712a5e3ed..7deffc9f0e5f 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -154,8 +154,12 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  */
 int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
 {
+	gfp_t gfp = GFP_KERNEL;
 	int i;
 
+	if (bootmem)
+		gfp = GFP_NOWAIT;
+
 	memset(cp, 0, sizeof(*cp));
 
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
@@ -163,9 +167,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
 
 		spin_lock_init(&vec->lock);
 		vec->count = 0;
-		if (bootmem)
-			alloc_bootmem_cpumask_var(&vec->mask);
-		else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
+		if (!zalloc_cpumask_var(&vec->mask, gfp))
 			goto cleanup;
 	}
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index eb23aaa0c7b8..7bb4142a502f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  */
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
-	if (likely(slab_is_available()))
-		*mask = kmalloc_node(cpumask_size(), flags, node);
-	else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-		printk(KERN_ERR
-			"=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
-		*mask = NULL;
-	}
+	*mask = kmalloc_node(cpumask_size(), flags, node);
+
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	if (!*mask) {
 		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
diff --git a/mm/bootmem.c b/mm/bootmem.c
index daf92713f7de..282df0a09e6f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -532,6 +532,9 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
 				unsigned long size, unsigned long align,
 				unsigned long goal, unsigned long limit)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc(size, GFP_NOWAIT);
+
 #ifdef CONFIG_HAVE_ARCH_BOOTMEM
 	bootmem_data_t *p_bdata;
 
@@ -662,6 +665,9 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 }
 
@@ -693,6 +699,9 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
 	void *ptr;
 
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
 	if (ptr)
 		return ptr;
@@ -745,6 +754,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align,
 				goal, ARCH_LOW_ADDRESS_LIMIT);
 }
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 791905c991df..3dd4a909a1de 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,6 +47,8 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
+	struct page *page;
+	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -55,11 +57,13 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-
-	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!base)
+	order = get_order(table_size);
+	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
+		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
+	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
diff --git a/mm/slab.c b/mm/slab.c
index f85831da9080..2bd611fa87bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -315,7 +315,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +958,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -1003,7 +1003,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1034,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1042,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
@@ -1282,20 +1282,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1399,10 +1399,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1410,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
 /*
@@ -1575,9 +1573,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1587,11 +1584,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1598,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1627,7 +1621,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -2064,10 +2058,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2089,7 +2083,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2163,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
@@ -2278,8 +2275,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
@@ -2382,7 +2384,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2394,8 +2396,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3802,7 +3806,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3812,7 +3816,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3821,7 +3825,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3850,7 +3854,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3906,18 +3910,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-			    int batchcount, int shared)
+			    int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3944,11 +3948,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
@@ -3994,7 +3998,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
@@ -4300,7 +4304,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-					       batchcount, shared);
+					       batchcount, shared,
+					       GFP_KERNEL);
 		}
 		break;
 	}
diff --git a/mm/slub.c b/mm/slub.c
index 5e805a6fe36c..c1815a63807a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2557,13 +2557,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 	if (gfp_flags & SLUB_DMA)
 		flags = SLAB_CACHE_DMA;
 
-	down_write(&slub_lock);
+	/*
+	 * This function is called with IRQs disabled during early-boot on
+	 * single CPU so there's no need to take slub_lock here.
+	 */
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
-	up_write(&slub_lock);
+
 	if (sysfs_slab_add(s))
 		goto panic;
 	return s;
@@ -3021,7 +3024,7 @@ void __init kmem_cache_init(void)
 	 * kmem_cache_open for slab_state == DOWN.
 	 */
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_KERNEL);
+		sizeof(struct kmem_cache_node), GFP_NOWAIT);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
 
@@ -3034,16 +3037,16 @@ void __init kmem_cache_init(void)
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[1],
-				"kmalloc-96", 96, GFP_KERNEL);
+				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
 		create_kmalloc_cache(&kmalloc_caches[2],
-				"kmalloc-192", 192, GFP_KERNEL);
+				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
 	}
 
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
-			"kmalloc", 1 << i, GFP_KERNEL);
+			"kmalloc", 1 << i, GFP_NOWAIT);
 		caches++;
 	}
 
@@ -3080,7 +3083,7 @@ void __init kmem_cache_init(void)
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
-			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 083716ea38c9..323513858c20 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,7 +23,6 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
-#include <linux/bootmem.h>
 #include <linux/pfn.h>
 
 #include <asm/atomic.h>
@@ -1032,7 +1031,7 @@ void __init vmalloc_init(void)
 
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = alloc_bootmem(sizeof(struct vmap_area));
+		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
 		va->flags = tmp->flags | VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;