diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 15:25:06 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 15:25:06 -0400 |
| commit | b640f042faa2a2fad6464f259a8afec06e2f6386 (patch) | |
| tree | 44a2943f91859422a207612229031a767c0accd5 /kernel | |
| parent | 871fa90791a6f83dd8e2e489feb9534a8c02088d (diff) | |
| parent | b8ec757390282e21d349bf6b602a8cb182da0429 (diff) | |
Merge branch 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
vgacon: use slab allocator instead of the bootmem allocator
irq: use kcalloc() instead of the bootmem allocator
sched: use slab in cpupri_init()
sched: use alloc_cpumask_var() instead of alloc_bootmem_cpumask_var()
memcg: don't use bootmem allocator in setup code
irq/cpumask: make memoryless node zero happy
x86: remove some alloc_bootmem_cpumask_var calling
vt: use kzalloc() instead of the bootmem allocator
sched: use kzalloc() instead of the bootmem allocator
init: introduce mm_init()
vmalloc: use kzalloc() instead of alloc_bootmem()
slab: setup allocators earlier in the boot sequence
bootmem: fix slab fallback on numa
bootmem: use slab if bootmem is no longer available
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cpuset.c | 2 | ||||
| -rw-r--r-- | kernel/irq/handle.c | 11 | ||||
| -rw-r--r-- | kernel/profile.c | 6 | ||||
| -rw-r--r-- | kernel/sched.c | 30 | ||||
| -rw-r--r-- | kernel/sched_cpupri.c | 8 |
5 files changed, 25 insertions(+), 32 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 026faccca869..d5a7e17474ee 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -1857,7 +1857,7 @@ struct cgroup_subsys cpuset_subsys = { | |||
| 1857 | 1857 | ||
| 1858 | int __init cpuset_init_early(void) | 1858 | int __init cpuset_init_early(void) |
| 1859 | { | 1859 | { |
| 1860 | alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed); | 1860 | alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT); |
| 1861 | 1861 | ||
| 1862 | top_cpuset.mems_generation = cpuset_mems_generation++; | 1862 | top_cpuset.mems_generation = cpuset_mems_generation++; |
| 1863 | return 0; | 1863 | return 0; |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index a60018402f42..104578541230 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -150,6 +150,7 @@ int __init early_irq_init(void) | |||
| 150 | { | 150 | { |
| 151 | struct irq_desc *desc; | 151 | struct irq_desc *desc; |
| 152 | int legacy_count; | 152 | int legacy_count; |
| 153 | int node; | ||
| 153 | int i; | 154 | int i; |
| 154 | 155 | ||
| 155 | init_irq_default_affinity(); | 156 | init_irq_default_affinity(); |
| @@ -160,20 +161,20 @@ int __init early_irq_init(void) | |||
| 160 | 161 | ||
| 161 | desc = irq_desc_legacy; | 162 | desc = irq_desc_legacy; |
| 162 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | 163 | legacy_count = ARRAY_SIZE(irq_desc_legacy); |
| 164 | node = first_online_node; | ||
| 163 | 165 | ||
| 164 | /* allocate irq_desc_ptrs array based on nr_irqs */ | 166 | /* allocate irq_desc_ptrs array based on nr_irqs */ |
| 165 | irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *)); | 167 | irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT); |
| 166 | 168 | ||
| 167 | /* allocate based on nr_cpu_ids */ | 169 | /* allocate based on nr_cpu_ids */ |
| 168 | /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */ | 170 | kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * |
| 169 | kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids * | 171 | sizeof(int), GFP_NOWAIT, node); |
| 170 | sizeof(int)); | ||
| 171 | 172 | ||
| 172 | for (i = 0; i < legacy_count; i++) { | 173 | for (i = 0; i < legacy_count; i++) { |
| 173 | desc[i].irq = i; | 174 | desc[i].irq = i; |
| 174 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; | 175 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
| 175 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 176 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| 176 | alloc_desc_masks(&desc[i], 0, true); | 177 | alloc_desc_masks(&desc[i], node, true); |
| 177 | init_desc_masks(&desc[i]); | 178 | init_desc_masks(&desc[i]); |
| 178 | irq_desc_ptrs[i] = desc + i; | 179 | irq_desc_ptrs[i] = desc + i; |
| 179 | } | 180 | } |
diff --git a/kernel/profile.c b/kernel/profile.c index 7724e0409bae..28cf26ad2d24 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -111,12 +111,6 @@ int __ref profile_init(void) | |||
| 111 | /* only text is profiled */ | 111 | /* only text is profiled */ |
| 112 | prof_len = (_etext - _stext) >> prof_shift; | 112 | prof_len = (_etext - _stext) >> prof_shift; |
| 113 | buffer_bytes = prof_len*sizeof(atomic_t); | 113 | buffer_bytes = prof_len*sizeof(atomic_t); |
| 114 | if (!slab_is_available()) { | ||
| 115 | prof_buffer = alloc_bootmem(buffer_bytes); | ||
| 116 | alloc_bootmem_cpumask_var(&prof_cpu_mask); | ||
| 117 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); | ||
| 118 | return 0; | ||
| 119 | } | ||
| 120 | 114 | ||
| 121 | if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) | 115 | if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) |
| 122 | return -ENOMEM; | 116 | return -ENOMEM; |
diff --git a/kernel/sched.c b/kernel/sched.c index 14c447ae5d53..dcf2dc28931a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -68,7 +68,6 @@ | |||
| 68 | #include <linux/pagemap.h> | 68 | #include <linux/pagemap.h> |
| 69 | #include <linux/hrtimer.h> | 69 | #include <linux/hrtimer.h> |
| 70 | #include <linux/tick.h> | 70 | #include <linux/tick.h> |
| 71 | #include <linux/bootmem.h> | ||
| 72 | #include <linux/debugfs.h> | 71 | #include <linux/debugfs.h> |
| 73 | #include <linux/ctype.h> | 72 | #include <linux/ctype.h> |
| 74 | #include <linux/ftrace.h> | 73 | #include <linux/ftrace.h> |
| @@ -7782,24 +7781,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
| 7782 | 7781 | ||
| 7783 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) | 7782 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) |
| 7784 | { | 7783 | { |
| 7784 | gfp_t gfp = GFP_KERNEL; | ||
| 7785 | |||
| 7785 | memset(rd, 0, sizeof(*rd)); | 7786 | memset(rd, 0, sizeof(*rd)); |
| 7786 | 7787 | ||
| 7787 | if (bootmem) { | 7788 | if (bootmem) |
| 7788 | alloc_bootmem_cpumask_var(&def_root_domain.span); | 7789 | gfp = GFP_NOWAIT; |
| 7789 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
| 7790 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
| 7791 | cpupri_init(&rd->cpupri, true); | ||
| 7792 | return 0; | ||
| 7793 | } | ||
| 7794 | 7790 | ||
| 7795 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | 7791 | if (!alloc_cpumask_var(&rd->span, gfp)) |
| 7796 | goto out; | 7792 | goto out; |
| 7797 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | 7793 | if (!alloc_cpumask_var(&rd->online, gfp)) |
| 7798 | goto free_span; | 7794 | goto free_span; |
| 7799 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | 7795 | if (!alloc_cpumask_var(&rd->rto_mask, gfp)) |
| 7800 | goto free_online; | 7796 | goto free_online; |
| 7801 | 7797 | ||
| 7802 | if (cpupri_init(&rd->cpupri, false) != 0) | 7798 | if (cpupri_init(&rd->cpupri, bootmem) != 0) |
| 7803 | goto free_rto_mask; | 7799 | goto free_rto_mask; |
| 7804 | return 0; | 7800 | return 0; |
| 7805 | 7801 | ||
| @@ -9123,7 +9119,7 @@ void __init sched_init(void) | |||
| 9123 | * we use alloc_bootmem(). | 9119 | * we use alloc_bootmem(). |
| 9124 | */ | 9120 | */ |
| 9125 | if (alloc_size) { | 9121 | if (alloc_size) { |
| 9126 | ptr = (unsigned long)alloc_bootmem(alloc_size); | 9122 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
| 9127 | 9123 | ||
| 9128 | #ifdef CONFIG_FAIR_GROUP_SCHED | 9124 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 9129 | init_task_group.se = (struct sched_entity **)ptr; | 9125 | init_task_group.se = (struct sched_entity **)ptr; |
| @@ -9314,13 +9310,13 @@ void __init sched_init(void) | |||
| 9314 | current->sched_class = &fair_sched_class; | 9310 | current->sched_class = &fair_sched_class; |
| 9315 | 9311 | ||
| 9316 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 9312 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
| 9317 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | 9313 | alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
| 9318 | #ifdef CONFIG_SMP | 9314 | #ifdef CONFIG_SMP |
| 9319 | #ifdef CONFIG_NO_HZ | 9315 | #ifdef CONFIG_NO_HZ |
| 9320 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | 9316 | alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); |
| 9321 | alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask); | 9317 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); |
| 9322 | #endif | 9318 | #endif |
| 9323 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | 9319 | alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
| 9324 | #endif /* SMP */ | 9320 | #endif /* SMP */ |
| 9325 | 9321 | ||
| 9326 | scheduler_running = 1; | 9322 | scheduler_running = 1; |
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 344712a5e3ed..7deffc9f0e5f 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
| @@ -154,8 +154,12 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
| 154 | */ | 154 | */ |
| 155 | int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) | 155 | int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) |
| 156 | { | 156 | { |
| 157 | gfp_t gfp = GFP_KERNEL; | ||
| 157 | int i; | 158 | int i; |
| 158 | 159 | ||
| 160 | if (bootmem) | ||
| 161 | gfp = GFP_NOWAIT; | ||
| 162 | |||
| 159 | memset(cp, 0, sizeof(*cp)); | 163 | memset(cp, 0, sizeof(*cp)); |
| 160 | 164 | ||
| 161 | for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { | 165 | for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { |
| @@ -163,9 +167,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) | |||
| 163 | 167 | ||
| 164 | spin_lock_init(&vec->lock); | 168 | spin_lock_init(&vec->lock); |
| 165 | vec->count = 0; | 169 | vec->count = 0; |
| 166 | if (bootmem) | 170 | if (!zalloc_cpumask_var(&vec->mask, gfp)) |
| 167 | alloc_bootmem_cpumask_var(&vec->mask); | ||
| 168 | else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) | ||
| 169 | goto cleanup; | 171 | goto cleanup; |
| 170 | } | 172 | } |
| 171 | 173 | ||
