-rw-r--r--	include/linux/slub_def.h	19
-rw-r--r--	mm/slub.c	24
2 files changed, 21 insertions, 22 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 17ebe0f89bf3..a78fb4ac2015 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -131,11 +131,21 @@ struct kmem_cache {
 
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
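For orientation, a minimal userspace sketch of the arithmetic behind the new KMALLOC_CACHES sizing, assuming 4 KiB pages (PAGE_SHIFT == 12); the concrete numbers are illustrative, not part of the patch:

/*
 * Illustrative only -- not part of the patch.  With PAGE_SHIFT == 12,
 * SLUB_PAGE_SHIFT is 14, so under CONFIG_ZONE_DMA the array grows to
 * 2 * 14 - 6 = 22 entries: 14 regular kmalloc caches plus 8 spare slots
 * that dma_kmalloc_cache() can claim for DMA caches at runtime.
 */
#include <stdio.h>

int main(void)
{
	const int page_shift = 12;                  /* assumed 4 KiB pages */
	const int slub_page_shift = page_shift + 2; /* 14 */
	const int kmalloc_caches = 2 * slub_page_shift - 6;

	printf("regular kmalloc slots: %d\n", slub_page_shift);
	printf("spare slots reserved for DMA caches: %d\n",
	       kmalloc_caches - slub_page_shift);
	return 0;
}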
diff --git a/mm/slub.c b/mm/slub.c
index d6c9ecf629d5..cdb7f0214af0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches)
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
 		 * since the per cpu allocator is not available yet.
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	char *text;
 	size_t realsize;
 	unsigned long slabflags;
+	int i;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
 
-	if (flags & __GFP_WAIT)
-		s = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	else {
-		int i;
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
 
-		s = NULL;
-		for (i = 0; i < SLUB_PAGE_SHIFT; i++)
-			if (kmalloc_caches[i].size) {
-				s = kmalloc_caches + i;
-				break;
-			}
-	}
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
 
 	/*
 	 * Must defer sysfs creation to a workqueue because we don't know
@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
 		kfree(text);
 		goto unlock_out;
 	}
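As a rough standalone sketch (plain C, with struct kmem_cache pared down to the one field the loop tests), the slot handling the mm/slub.c hunks introduce amounts to: scan the static kmalloc_caches array for an entry whose size is still zero, hand that slot to the new DMA cache, and on setup failure return it to the pool by zeroing size instead of kfree()ing a statically allocated struct. The KMALLOC_CACHES value and helper names below are assumptions for illustration only:

/* Condensed sketch of the new slot logic -- not the kernel code. */
#include <stdio.h>

#define KMALLOC_CACHES 22		/* assumed: 2 * SLUB_PAGE_SHIFT - 6 with 4 KiB pages */

struct kmem_cache {
	unsigned long size;		/* 0 means "slot free" in this sketch */
};

static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/* Mirrors the loop in dma_kmalloc_cache(): find the first unused slot. */
static struct kmem_cache *claim_free_slot(void)
{
	int i;

	for (i = 0; i < KMALLOC_CACHES; i++)
		if (!kmalloc_caches[i].size)
			return &kmalloc_caches[i];
	return NULL;			/* the patch BUG()s instead of returning NULL */
}

int main(void)
{
	struct kmem_cache *s = claim_free_slot();

	if (!s)
		return 1;
	s->size = 4096;			/* stand-in for kmem_cache_open() succeeding */
	s->size = 0;			/* on failure: release the static slot, no kfree() */
	printf("slot index %td claimed and released\n", s - kmalloc_caches);
	return 0;
}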