Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 29
1 files changed, 16 insertions, 13 deletions
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
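
get_slab() now distinguishes the two failure-like cases: a zero-byte request returns the ZERO_SIZE_PTR sentinel instead of NULL, while an over-sized request returns NULL instead of hitting BUG_ON(). For reference, the two helpers the patch leans on are sketched below as they are assumed to appear in include/linux/slab.h of the same era; they are not part of this diff.

/*
 * Assumed definitions from include/linux/slab.h (quoted for reference
 * only, not part of this diff).  The sentinel is a small non-NULL value
 * that can never be a valid slab object, so both it and NULL sort below
 * every real pointer.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
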
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
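
In kfree() the open-coded comparison is replaced by the same macro, so the check reads identically in every path. The caller-visible effect is that a zero-length kmalloc() no longer looks like an allocation failure, and kfree() silently accepts NULL and the sentinel alike. A hypothetical caller, sketched under those assumptions (the function and parameter names are made up for illustration):

/*
 * Hypothetical caller, assuming the post-patch semantics: kmalloc(0)
 * returns ZERO_SIZE_PTR rather than NULL, only a genuine failure
 * returns NULL, and kfree() is a no-op for NULL and ZERO_SIZE_PTR.
 */
static int fill_buffer(size_t count)
{
	void *buf = kmalloc(count, GFP_KERNEL);	/* count may be 0 */

	if (!buf)
		return -ENOMEM;		/* only real out-of-memory failures */

	/* ... use buf only when count > 0 ... */

	kfree(buf);			/* safe even when buf == ZERO_SIZE_PTR */
	return 0;
}

Returning a distinct non-NULL sentinel lets zero-size allocations "succeed" without reserving memory, while any attempt to dereference the result still faults.
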
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
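
The _track_caller variants follow the same pattern as __kmalloc()/__kmalloc_node(): whatever get_slab() hands back (NULL or ZERO_SIZE_PTR) is propagated unchanged. For completeness, a minimal userspace-only sketch of the predicate itself, with the macro bodies copied from the assumed definitions above; it is not kernel code, and it assumes pointers fit in an unsigned long and that malloc() never returns an address at or below 16, which holds on any realistic platform.

#include <assert.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	void *p = malloc(32);		/* stand-in for a real slab object */

	assert(ZERO_OR_NULL_PTR(NULL));		/* NULL is caught */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* the sentinel is caught */
	assert(!ZERO_OR_NULL_PTR(p));		/* ordinary pointers are not */

	free(p);
	return 0;
}
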