diff options
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r-- | include/linux/slab.h | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h index 508bd827e6dc..aeb3e6d00a66 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size) | |||
315 | } | 315 | } |
316 | #endif /* !CONFIG_SLOB */ | 316 | #endif /* !CONFIG_SLOB */ |
317 | 317 | ||
318 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; | 318 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; |
319 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; | 319 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; |
320 | void kmem_cache_free(struct kmem_cache *, void *); | 320 | void kmem_cache_free(struct kmem_cache *, void *); |
321 | 321 | ||
322 | /* | 322 | /* |
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p) | |||
339 | } | 339 | } |
340 | 340 | ||
341 | #ifdef CONFIG_NUMA | 341 | #ifdef CONFIG_NUMA |
342 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; | 342 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; |
343 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; | 343 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; |
344 | #else | 344 | #else |
345 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | 345 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) |
346 | { | 346 | { |
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | |||
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | #ifdef CONFIG_TRACING | 356 | #ifdef CONFIG_TRACING |
357 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; | 357 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; |
358 | 358 | ||
359 | #ifdef CONFIG_NUMA | 359 | #ifdef CONFIG_NUMA |
360 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | 360 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
361 | gfp_t gfpflags, | 361 | gfp_t gfpflags, |
362 | int node, size_t size) __assume_slab_alignment; | 362 | int node, size_t size) __assume_slab_alignment __malloc; |
363 | #else | 363 | #else |
364 | static __always_inline void * | 364 | static __always_inline void * |
365 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | 365 | kmem_cache_alloc_node_trace(struct kmem_cache *s, |
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
392 | } | 392 | } |
393 | #endif /* CONFIG_TRACING */ | 393 | #endif /* CONFIG_TRACING */ |
394 | 394 | ||
395 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; | 395 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; |
396 | 396 | ||
397 | #ifdef CONFIG_TRACING | 397 | #ifdef CONFIG_TRACING |
398 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; | 398 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; |
399 | #else | 399 | #else |
400 | static __always_inline void * | 400 | static __always_inline void * |
401 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | 401 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |