Diffstat (limited to 'include')

 include/linux/slab.h     | 13 +++++++++++++
 include/linux/slab_def.h | 12 ++++++++++++
 include/linux/slub_def.h | 12 ------------
 3 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b79..0289ec89300a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
 #define SLAB_TRACE	0x00200000UL	/* Trace allocations and frees */
 
 /*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+				(unsigned long)ZERO_SIZE_PTR)
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
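The new macros are plain preprocessor definitions, so their contract is easiest to see from the caller's side. The snippet below is a minimal userspace sketch, not kernel code: demo_kmalloc() and demo_kfree() are hypothetical stand-ins for the real allocator, kept only to show that a zero-sized request yields ZERO_SIZE_PTR, that ZERO_OR_NULL_PTR() treats it exactly like NULL, and that freeing it is a no-op.

/* Hypothetical userspace sketch; demo_kmalloc()/demo_kfree() stand in for
 * the kernel's kmalloc()/kfree() purely to illustrate the contract above. */
#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

static void *demo_kmalloc(size_t size)
{
	if (!size)
		return ZERO_SIZE_PTR;	/* non-NULL, but faults if dereferenced */
	return malloc(size);
}

static void demo_kfree(void *p)
{
	if (ZERO_OR_NULL_PTR(p))
		return;			/* NULL and ZERO_SIZE_PTR are both no-ops */
	free(p);
}

int main(void)
{
	void *p = demo_kmalloc(0);

	printf("zero-size alloc: %p, empty-or-NULL: %d\n", p, ZERO_OR_NULL_PTR(p));
	demo_kfree(p);			/* safe: falls into the no-op branch */
	return 0;
}

Because ZERO_SIZE_PTR is the small constant 16, it points into the first page, which is normally left unmapped, so an accidental dereference faults immediately instead of quietly touching a real object.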
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454a..16e814ffab8d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -58,6 +62,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -88,6 +96,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
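All three inline paths above guard a compile-time cache-size dispatch, so the added check costs nothing for non-zero constant sizes: the compiler folds the `if (!size)` branch away. Without it, a constant size of 0 would satisfy the first `size <= x` test and be served a real object from the smallest cache. The sketch below is a rough, hypothetical rendering of that dispatch pattern; the size thresholds and alloc_from_cache() helper are invented for illustration and are not the kernel's kmalloc_sizes.h machinery.

/* Hypothetical sketch of the constant-size fast path; alloc_from_cache()
 * and the size thresholds are invented for illustration only. */
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

static void *alloc_from_cache(size_t cache_size)
{
	return malloc(cache_size);	/* stand-in for a per-size slab cache */
}

static inline void *demo_kmalloc(size_t size)
{
	if (__builtin_constant_p(size)) {
		if (!size)			/* the check this patch adds */
			return ZERO_SIZE_PTR;
		if (size <= 32)
			return alloc_from_cache(32);
		if (size <= 64)
			return alloc_from_cache(64);
		if (size <= 128)
			return alloc_from_cache(128);
	}
	return malloc(size);		/* stand-in for the out-of-line __kmalloc() */
}

int main(void)
{
	void *p = demo_kmalloc(0);	/* resolves to ZERO_SIZE_PTR at compile time */

	return p == ZERO_SIZE_PTR ? 0 : 1;
}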
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f6771525..579b0a22858e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -159,18 +159,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 