Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 71
 1 file changed, 64 insertions(+), 7 deletions(-)
@@ -2313,20 +2313,59 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 }
 #endif
 
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
-	int index = kmalloc_index(size);
+	int index;
 
-	if (!index)
-		return ZERO_SIZE_PTR;
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
 
-	/* Allocation too large? */
-	if (index < 0)
-		return NULL;
+		index = size_index[(size - 1) / 8];
+	} else {
+		if (size > KMALLOC_MAX_SIZE)
+			return NULL;
+
+		index = fls(size - 1);
+	}
 
 #ifdef CONFIG_ZONE_DMA
-	if ((flags & SLUB_DMA))
+	if (unlikely((flags & SLUB_DMA)))
 		return dma_kmalloc_cache(index, flags);
+
 #endif
 	return &kmalloc_caches[index];
 }
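
To illustrate the effect of the hunk above, here is a minimal userspace sketch of the index computation that the new get_slab() performs: a size_index[] table lookup for requests up to 192 bytes, and fls(size - 1) for larger ones. This is illustration only, not kernel code; the fls_model() helper, the KMALLOC_MAX_SIZE value, and the 0 / -1 return conventions for the zero-size and too-large cases are assumptions made for the sketch.

/* Standalone sketch: userspace model of the lookup done by get_slab().
 * Assumes the cache layout of this patch: caches 1 and 2 are the 96- and
 * 192-byte slabs, 3..N are the power-of-two sizes. The KMALLOC_MAX_SIZE
 * value and the return conventions are illustrative, not the kernel's. */
#include <stdio.h>

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/* 8..64 bytes */
	1, 1, 1, 1,			/* 72..96 -> 96-byte cache */
	7, 7, 7, 7,			/* 104..128 -> 128-byte cache */
	2, 2, 2, 2, 2, 2, 2, 2		/* 136..192 -> 192-byte cache */
};

/* fls() equivalent: position of the most significant set bit, 1-based. */
static int fls_model(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Mirror of the new index computation: table lookup below 192 bytes,
 * fls(size - 1) above. Returns 0 for size 0 and -1 for "too large". */
static int kmalloc_index_model(size_t size)
{
	if (size <= 192) {
		if (!size)
			return 0;		/* ZERO_SIZE_PTR case */
		return size_index[(size - 1) / 8];
	}
	if (size > (1UL << 20))			/* assumed KMALLOC_MAX_SIZE */
		return -1;
	return fls_model(size - 1);
}

int main(void)
{
	size_t sizes[] = { 1, 8, 9, 64, 65, 96, 97, 128, 192, 193, 4096 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4zu -> kmalloc_caches[%d]\n",
		       sizes[i], kmalloc_index_model(sizes[i]));
	return 0;
}

Note how requests of 72..96 bytes resolve to index 1 (the 96-byte cache) and 136..192 bytes to index 2 (the 192-byte cache); these non-power-of-two caches are why a plain fls() cannot be used below 192 bytes.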
@@ -2532,6 +2571,24 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
+
+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This is only the case for
+	 * mips it seems. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE; i++)
+		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
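
For the kmem_cache_init() patch-up above, the following standalone sketch shows what the loop does to size_index[] on an architecture whose ARCH_KMALLOC_MINALIGN forces a large minimum object size. The KMALLOC_MIN_SIZE of 64 and KMALLOC_SHIFT_LOW of 6 are assumed example values, chosen so that 2^KMALLOC_SHIFT_LOW equals the minimum object size; the real values depend on the architecture and kernel config.

/* Standalone sketch of the size_index patch-up done in kmem_cache_init().
 * KMALLOC_MIN_SIZE and KMALLOC_SHIFT_LOW below are illustrative values,
 * not the kernel's; on most arches this loop generates no code at all. */
#include <stdio.h>

#define KMALLOC_MIN_SIZE	64	/* assumed, arch-dependent */
#define KMALLOC_SHIFT_LOW	6	/* assumed: 2^6 == KMALLOC_MIN_SIZE */

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

int main(void)
{
	int i;

	/* Same loop as the patch: every size below the minimum object size
	 * is redirected to the smallest usable power-of-two cache. */
	for (i = 8; i < KMALLOC_MIN_SIZE; i++)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	for (i = 0; i < 24; i++)
		printf("size %3d -> kmalloc_caches[%d]\n",
		       (i + 1) * 8, size_index[i]);
	return 0;
}

With these assumed values, every entry for sizes below 64 bytes ends up pointing at the 64-byte cache (index 6), while the larger entries are left untouched.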