path: root/mm/slub.c
author		Christoph Lameter <cl@linux-foundation.org>	2009-12-18 17:26:21 -0500
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2009-12-20 02:57:00 -0500
commit		756dee75872a2a764b478e18076360b8a4ec9045 (patch)
tree		c4a09707be2f926631815dec98b0e0f3f4b9ae20 /mm/slub.c
parent		9dfc6e68bfe6ee452efb1a4e9ca26a9007f2b864 (diff)
SLUB: Get rid of dynamic DMA kmalloc cache allocation
Dynamic DMA kmalloc cache allocation is troublesome since the new percpu
allocator does not support allocations in atomic contexts. Reserve some
statically allocated kmalloc_cpu structures instead.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
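As a rough illustration of the approach (a standalone C sketch, not kernel code; the names fake_kmem_cache, claim_static_slot, release_static_slot and FAKE_KMALLOC_CACHES are made up for this example): the cache array is sized with a few spare entries, a caller that must not allocate dynamically claims the first entry whose size is still zero, and a failed setup hands the slot back by zeroing size again.

/*
 * Illustrative sketch only -- hypothetical names, not part of the patch.
 * It mimics the pattern the patch adopts: rather than kmalloc()ing a new
 * kmem_cache in a context where dynamic allocation is not allowed, claim
 * one of the spare statically allocated slots, treating a zero 'size'
 * field as the "free" marker. In the kernel most slots are already taken
 * by the boot-time kmalloc caches; the spares come from enlarging the
 * array to KMALLOC_CACHES entries.
 */
#include <assert.h>
#include <stdio.h>

#define FAKE_KMALLOC_CACHES 26		/* stand-in for KMALLOC_CACHES */

struct fake_kmem_cache {		/* stand-in for struct kmem_cache */
	unsigned long size;		/* 0 means "slot unused" */
	const char *name;
};

static struct fake_kmem_cache fake_caches[FAKE_KMALLOC_CACHES];

/* Claim the first unused static slot; mirrors the loop the patch adds. */
static struct fake_kmem_cache *claim_static_slot(const char *name,
						 unsigned long size)
{
	int i;

	for (i = 0; i < FAKE_KMALLOC_CACHES; i++)
		if (!fake_caches[i].size)
			break;

	assert(i < FAKE_KMALLOC_CACHES);	/* the patch uses BUG_ON() */
	fake_caches[i].size = size;	/* in SLUB, kmem_cache_open() fills this in */
	fake_caches[i].name = name;
	return &fake_caches[i];
}

/* On setup failure the patch resets size to 0 instead of kfree()ing 's'. */
static void release_static_slot(struct fake_kmem_cache *s)
{
	s->size = 0;
	s->name = NULL;
}

int main(void)
{
	struct fake_kmem_cache *s = claim_static_slot("kmalloc_dma-512", 512);

	printf("claimed slot %td for %s\n", s - fake_caches, s->name);
	release_static_slot(s);
	return 0;
}

The release path corresponds to the last hunk of the diff below, where the error case changes from kfree(s) to s->size = 0: a statically reserved slot cannot be freed, only marked available again.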
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	24
1 files changed, 10 insertions, 14 deletions
diff --git a/mm/slub.c b/mm/slub.c
index d6c9ecf629d5..cdb7f0214af0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches)
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
 		 * since the per cpu allocator is not available yet.
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	char *text;
 	size_t realsize;
 	unsigned long slabflags;
+	int i;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
 
-	if (flags & __GFP_WAIT)
-		s = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	else {
-		int i;
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
 
-		s = NULL;
-		for (i = 0; i < SLUB_PAGE_SHIFT; i++)
-			if (kmalloc_caches[i].size) {
-				s = kmalloc_caches + i;
-				break;
-			}
-	}
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
 
 	/*
 	 * Must defer sysfs creation to a workqueue because we don't know
@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
 		kfree(text);
 		goto unlock_out;
 	}