Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	108
1 file changed, 5 insertions(+), 103 deletions(-)
@@ -2982,7 +2982,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		s->allocflags |= __GFP_COMP;
 
 	if (s->flags & SLAB_CACHE_DMA)
-		s->allocflags |= SLUB_DMA;
+		s->allocflags |= GFP_DMA;
 
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		s->allocflags |= __GFP_RECLAIMABLE;
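For context on the one-line change in this hunk: SLUB_DMA was a SLUB-private alias for the DMA allocation flag that compiled away on kernels without CONFIG_ZONE_DMA. Its pre-patch shape in mm/slub.c was roughly the following sketch (an assumption reconstructed from the pre-patch source, not part of this diff):

/* Assumed pre-patch definition: the alias folds to zero when no DMA
 * zone is configured, so the SLAB_CACHE_DMA branch costs nothing there. */
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA	__GFP_DMA
#else
#define SLUB_DMA	((gfp_t)0)
#endif

With kmalloc cache setup moving into code shared by the slab allocators, the private alias goes away and the generic GFP_DMA flag is used directly.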
@@ -3210,64 +3210,6 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-/*
- * Conversion table for small slabs sizes / 8 to the index in the
- * kmalloc array. This is necessary for slabs < 192 since we have non power
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
-static s8 size_index[24] = {
-	3,	/* 8 */
-	4,	/* 16 */
-	5,	/* 24 */
-	5,	/* 32 */
-	6,	/* 40 */
-	6,	/* 48 */
-	6,	/* 56 */
-	6,	/* 64 */
-	1,	/* 72 */
-	1,	/* 80 */
-	1,	/* 88 */
-	1,	/* 96 */
-	7,	/* 104 */
-	7,	/* 112 */
-	7,	/* 120 */
-	7,	/* 128 */
-	2,	/* 136 */
-	2,	/* 144 */
-	2,	/* 152 */
-	2,	/* 160 */
-	2,	/* 168 */
-	2,	/* 176 */
-	2,	/* 184 */
-	2	/* 192 */
-};
-
-static inline int size_index_elem(size_t bytes)
-{
-	return (bytes - 1) / 8;
-}
-
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
-{
-	int index;
-
-	if (size <= 192) {
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		index = size_index[size_index_elem(size)];
-	} else
-		index = fls(size - 1);
-
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely((flags & SLUB_DMA)))
-		return kmalloc_dma_caches[index];
-
-#endif
-	return kmalloc_caches[index];
-}
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
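The block removed by this hunk is SLUB's private size-to-cache lookup; the series replaces it with a kmalloc_slab() helper in the common slab code shared by the allocators. Below is a minimal userspace sketch of the removed lookup logic, useful for seeing how a request size maps to a kmalloc array index. The names kmalloc_index_sketch and fls_sketch are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <stddef.h>

/* Same table as the removed size_index[]: maps (size - 1) / 8 for
 * sizes <= 192 to a kmalloc array index. A table is needed because
 * the 96-byte (index 1) and 192-byte (index 2) caches are not powers
 * of two. */
static const signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/*   8 ..  64 bytes */
	1, 1, 1, 1, 7, 7, 7, 7,		/*  72 .. 128 bytes */
	2, 2, 2, 2, 2, 2, 2, 2,		/* 136 .. 192 bytes */
};

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * most significant set bit; fls(0) == 0. */
static int fls_sketch(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Mirrors the removed get_slab() index computation: table lookup for
 * small sizes, next power of two via fls(size - 1) for the rest. */
static int kmalloc_index_sketch(size_t size)
{
	if (size <= 192) {
		if (!size)
			return -1;	/* the kernel returns ZERO_SIZE_PTR here */
		return size_index[(size - 1) / 8];
	}
	return fls_sketch(size - 1);
}

int main(void)
{
	size_t probes[] = { 8, 33, 96, 100, 192, 193, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("kmalloc(%4zu) -> cache index %d\n",
		       probes[i], kmalloc_index_sketch(probes[i]));
	return 0;
}

For example, kmalloc(100) lands in slot (100 - 1) / 8 = 12, giving index 7 (the 128-byte cache), while kmalloc(193) takes the fls path: fls(192) = 8, so index 8 (the 256-byte cache).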
@@ -3276,7 +3218,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
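The ZERO_OR_NULL_PTR() check retained here relies on the kmalloc zero-size convention from include/linux/slab.h: a zero-byte allocation returns a distinct non-NULL token rather than NULL, so the result can still be passed to kfree() safely, and a single unsigned comparison covers both the NULL and the zero-size case:

/* From include/linux/slab.h: ZERO_SIZE_PTR is the non-NULL sentinel
 * returned for zero-byte allocations. Any pointer value <= 16 is
 * either NULL or this sentinel, so one comparison catches both. */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)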
@@ -3319,7 +3261,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 		return ret;
 	}
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3632,7 +3574,6 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
-	int i;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3663,45 +3604,6 @@ void __init kmem_cache_init(void)
 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
-
-	/*
-	 * Patch up the size_index table if we have strange large alignment
-	 * requirements for the kmalloc array. This is only the case for
-	 * MIPS it seems. The standard arches will not generate any code here.
-	 *
-	 * Largest permitted alignment is 256 bytes due to the way we
-	 * handle the index determination for the smaller caches.
-	 *
-	 * Make sure that nothing crazy happens if someone starts tinkering
-	 * around with ARCH_KMALLOC_MINALIGN
-	 */
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
-		int elem = size_index_elem(i);
-		if (elem >= ARRAY_SIZE(size_index))
-			break;
-		size_index[elem] = KMALLOC_SHIFT_LOW;
-	}
-
-	if (KMALLOC_MIN_SIZE == 64) {
-		/*
-		 * The 96 byte size cache is not used if the alignment
-		 * is 64 byte.
-		 */
-		for (i = 64 + 8; i <= 96; i += 8)
-			size_index[size_index_elem(i)] = 7;
-	} else if (KMALLOC_MIN_SIZE == 128) {
-		/*
-		 * The 192 byte sized cache is not used if the alignment
-		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
-		 * instead.
-		 */
-		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[size_index_elem(i)] = 8;
-	}
-
 	create_kmalloc_caches(0);
 
 #ifdef CONFIG_SMP
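The patch-up loop removed by this hunk handled architectures whose ARCH_KMALLOC_MINALIGN makes the smallest caches (and the odd 96/192-byte ones) unusable; an equivalent fixup now runs once in the common slab code for all allocators. A condensed sketch of what the loop did, assuming KMALLOC_SHIFT_LOW of 6 (a 64-byte minimum cache) purely for illustration:

#define KMALLOC_SHIFT_LOW	6		/* assumed: 2^6 = 64 bytes */
#define KMALLOC_MIN_SIZE	(1 << KMALLOC_SHIFT_LOW)

static signed char size_index[24];		/* the table removed above */

static void patch_size_index(void)
{
	int i;

	/* Requests below the minimum size all map to the minimum cache. */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	/*
	 * With 64-byte alignment the 96-byte cache cannot exist, so
	 * redirect 72..96-byte requests to the 128-byte cache (index 7).
	 * The 128-byte-alignment case redirected 136..192-byte requests
	 * to index 8 (256 bytes) in the same way.
	 */
	if (KMALLOC_MIN_SIZE == 64)
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[(i - 1) / 8] = 7;
}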
@@ -3877,7 +3779,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3907,7 +3809,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 		return ret;
 	}
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;