author		Christoph Lameter <cl@linux.com>	2013-01-10 14:12:17 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:08 -0500
commit		f97d5f634d3b5133951424fae751db1f339548bd (patch)
tree		cb4db9a78c8eb1bd0522679d90f553d40d15f3e9
parent		9425c58e5445277699ff3c2a87bac1cfebc1b48d (diff)
slab: Common function to create the kmalloc array

The kmalloc array is created in similar ways in both SLAB and SLUB.
Create a common function and have both allocators call that function.

V1->V2: Whitespace cleanup

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--	mm/slab.c		48
-rw-r--r--	mm/slab.h		 6
-rw-r--r--	mm/slab_common.c	54
-rw-r--r--	mm/slub.c		55
4 files changed, 64 insertions(+), 99 deletions(-)
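Before reading the diff, it may help to see the array layout the new common function relies on: slot i of kmalloc_caches[] normally holds the cache for objects of size 1 << i, with slots 1 and 2 repurposed for the non-power-of-two 96- and 192-byte caches. Below is a minimal user-space sketch of that mapping, not kernel code; the KMALLOC_SHIFT_LOW/HIGH values are illustrative assumptions (the real bounds depend on the architecture and allocator), and kmalloc_size() is re-implemented here only to mirror the kernel's convention.

	#include <stdio.h>

	#define KMALLOC_SHIFT_LOW  5	/* assumption: smallest cache is 32 bytes */
	#define KMALLOC_SHIFT_HIGH 13	/* assumption: largest cache is 8192 bytes */

	/*
	 * Same convention as the kernel's kmalloc_size(): slots 1 and 2 are
	 * the odd-sized 96- and 192-byte caches, any other slot i is 1 << i.
	 */
	static size_t kmalloc_size(int i)
	{
		if (i == 1)
			return 96;
		if (i == 2)
			return 192;
		return (size_t)1 << i;
	}

	int main(void)
	{
		int i;

		/* Walk the occupied slots exactly as one loop could. */
		for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++)
			if (i <= 2 || i >= KMALLOC_SHIFT_LOW)
				printf("kmalloc_caches[%2d] -> kmalloc-%zu\n",
				       i, kmalloc_size(i));
		return 0;
	}

This layout is what lets create_kmalloc_caches() fill every empty slot in one pass and derive each cache's name from its index, instead of SLAB and SLUB each open-coding the same walk.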
diff --git a/mm/slab.c b/mm/slab.c
index 357f0bdc5e43..08ba44f81a28 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1625,30 +1625,6 @@ void __init kmem_cache_init(void)
 
 	slab_early_init = 0;
 
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
-		size_t cs_size = kmalloc_size(i);
-
-		if (cs_size < KMALLOC_MIN_SIZE)
-			continue;
-
-		if (!kmalloc_caches[i]) {
-			/*
-			 * For performance, all the general caches are L1 aligned.
-			 * This should be particularly beneficial on SMP boxes, as it
-			 * eliminates "false sharing".
-			 * Note for systems short on memory removing the alignment will
-			 * allow tighter packing of the smaller caches.
-			 */
-			kmalloc_caches[i] = create_kmalloc_cache("kmalloc",
-						cs_size, ARCH_KMALLOC_FLAGS);
-		}
-
-#ifdef CONFIG_ZONE_DMA
-		kmalloc_dma_caches[i] = create_kmalloc_cache(
-			"kmalloc-dma", cs_size,
-			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
-#endif
-	}
 	/* 4) Replace the bootstrap head arrays */
 	{
 		struct array_cache *ptr;
@@ -1694,29 +1670,7 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	slab_state = UP;
-
-	/* Create the proper names */
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
-		char *s;
-		struct kmem_cache *c = kmalloc_caches[i];
-
-		if (!c)
-			continue;
-
-		s = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
-
-		BUG_ON(!s);
-		c->name = s;
-
-#ifdef CONFIG_ZONE_DMA
-		c = kmalloc_dma_caches[i];
-		BUG_ON(!c);
-		s = kasprintf(GFP_NOWAIT, "dma-kmalloc-%d", kmalloc_size(i));
-		BUG_ON(!s);
-		c->name = s;
-#endif
-	}
+	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
 }
 
 void __init kmem_cache_init_late(void)
diff --git a/mm/slab.h b/mm/slab.h
index 34a98d642196..44c0bd6dc19e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -35,6 +35,12 @@ extern struct kmem_cache *kmem_cache;
 unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size);
 
+#ifndef CONFIG_SLOB
+/* Kmalloc array related functions */
+void create_kmalloc_caches(unsigned long);
+#endif
+
+
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0437b8189b8a..2b0ebb6d071d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -327,6 +327,60 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif
 
+/*
+ * Create the kmalloc array. Some of the regular kmalloc arrays
+ * may already have been created because they were needed to
+ * enable allocations for slab creation.
+ */
+void __init create_kmalloc_caches(unsigned long flags)
+{
+	int i;
+
+	/* Caches that are not of the two-to-the-power-of size */
+	if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1])
+		kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+
+	if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2])
+		kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
+
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+		if (!kmalloc_caches[i])
+			kmalloc_caches[i] = create_kmalloc_cache(NULL,
+							1 << i, flags);
+
+	/* Kmalloc array is now usable */
+	slab_state = UP;
+
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+		char *n;
+
+		if (s) {
+			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
+
+			BUG_ON(!n);
+			s->name = n;
+		}
+	}
+
+#ifdef CONFIG_ZONE_DMA
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+
+		if (s) {
+			int size = kmalloc_size(i);
+			char *n = kasprintf(GFP_NOWAIT,
+					"dma-kmalloc-%d", size);
+
+			BUG_ON(!n);
+			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
+				size, SLAB_CACHE_DMA | flags);
+		}
+	}
+#endif
+}
+
+
 #endif /* !CONFIG_SLOB */
 
 
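A detail worth noting in the function above is the ordering: the caches are created with NULL names first, slab_state is set to UP, and only then are the names allocated with kasprintf(). The naming pass must come second because allocating the name strings already requires a working kmalloc array. A minimal user-space model of that two-pass bootstrap follows; it is illustrative only, with asprintf() standing in for kasprintf() and the index bounds 5..13 assumed rather than taken from any real configuration.

	#define _GNU_SOURCE		/* for asprintf() */
	#include <stdio.h>
	#include <stdlib.h>

	struct cache {
		char *name;		/* NULL until the allocator can serve us */
		size_t size;
	};

	static struct cache caches[14];

	int main(void)
	{
		int i;

		/*
		 * Pass 1: create the caches anonymously; in the kernel the
		 * equivalent of asprintf() could not run yet at this point.
		 */
		for (i = 5; i <= 13; i++)
			caches[i].size = (size_t)1 << i;

		/* ... the array is now usable (models slab_state = UP) ... */

		/* Pass 2: allocations work, so the names can be filled in. */
		for (i = 5; i <= 13; i++) {
			if (asprintf(&caches[i].name, "kmalloc-%zu",
				     caches[i].size) < 0)
				abort();	/* models BUG_ON(!n) */
			printf("%s\n", caches[i].name);
		}
		return 0;
	}

The same ordering explains why the SLUB version below could delete its kstrdup()/kasprintf() renaming loops: the common function now performs that second pass for both allocators.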
diff --git a/mm/slub.c b/mm/slub.c
index 527cbfb5c49b..e813c2d30fe0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3633,7 +3633,6 @@ void __init kmem_cache_init(void)
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
 	int i;
-	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3703,64 +3702,16 @@ void __init kmem_cache_init(void)
 		size_index[size_index_elem(i)] = 8;
 	}
 
-	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 32) {
-		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
-		caches++;
-	}
-
-	if (KMALLOC_MIN_SIZE <= 64) {
-		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
-		caches++;
-	}
-
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
-		caches++;
-	}
-
-	slab_state = UP;
-
-	/* Provide the correct kmalloc names now that the caches are up */
-	if (KMALLOC_MIN_SIZE <= 32) {
-		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
-		BUG_ON(!kmalloc_caches[1]->name);
-	}
-
-	if (KMALLOC_MIN_SIZE <= 64) {
-		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
-		BUG_ON(!kmalloc_caches[2]->name);
-	}
-
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
-
-		BUG_ON(!s);
-		kmalloc_caches[i]->name = s;
-	}
+	create_kmalloc_caches(0);
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
 #endif
 
-#ifdef CONFIG_ZONE_DMA
-	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
-		struct kmem_cache *s = kmalloc_caches[i];
-
-		if (s && s->size) {
-			char *name = kasprintf(GFP_NOWAIT,
-				"dma-kmalloc-%d", s->object_size);
-
-			BUG_ON(!name);
-			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-				s->object_size, SLAB_CACHE_DMA);
-		}
-	}
-#endif
 	printk(KERN_INFO
-		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+		"SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" CPUs=%d, Nodes=%d\n",
-		caches, cache_line_size(),
+		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }