path: root/mm/slub.c
author	Christoph Lameter <cl@linux.com>	2013-01-10 14:14:19 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:07 -0500
commit	95a05b428cc675694321c8f762591984f3fd2b1e (patch)
tree	3a74205955201dd5e1abb0a85104d95cafa49df6 /mm/slub.c
parent	6a67368c36e2c0c2578ba62f6264ab739af08cce (diff)
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest object
kept in the kmalloc arrays for SLAB and SLUB. Differentiate between the
maximum size for which a slab cache is used (KMALLOC_MAX_CACHE_SIZE)
and the maximum allocatable size (KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER).

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
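For orientation, a sketch of how the standardized constants relate on the
SLUB side (simplified from include/linux/slab.h as introduced by this
series; the exact values of KMALLOC_SHIFT_HIGH and KMALLOC_SHIFT_MAX are
an assumption here and depend on PAGE_SHIFT and MAX_ORDER):

	/* Smallest kmalloc cache: 2^KMALLOC_SHIFT_LOW bytes. */
	#define KMALLOC_MIN_SIZE	(1 << KMALLOC_SHIFT_LOW)

	/* Largest size still served from a kmalloc slab cache; for SLUB
	 * this is an order-1 page (KMALLOC_SHIFT_HIGH == PAGE_SHIFT + 1),
	 * which is why kmalloc_caches[] needs KMALLOC_SHIFT_HIGH + 1 slots. */
	#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)

	/* Largest size kmalloc() can return at all, taken straight from
	 * the page allocator via kmalloc_large(). */
	#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
	#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

The diff below is then a straight rename on the SLUB side: the private
SLUB_MAX_SIZE becomes KMALLOC_MAX_CACHE_SIZE, and the SLUB_PAGE_SHIFT
array bound becomes KMALLOC_SHIFT_HIGH + 1.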
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ba2ca53f6c3a..d0f72ee06310 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
 		caches++;
 	}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
 		BUG_ON(!kmalloc_caches[2]->name);
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!s);
@@ -3751,7 +3751,7 @@ void __init kmem_cache_init(void)
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 
 		if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");