Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 508bd827e6dc..aeb3e6d00a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					gfp_t gfpflags,
-					int node, size_t size) __assume_slab_alignment;
+					int node, size_t size) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
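
For readers skimming the patch: every changed declaration simply gains the kernel's __malloc annotation, which on GCC expands to __attribute__((__malloc__)) and tells the optimizer that the returned pointer does not alias any other live pointer, while the pre-existing __assume_*_alignment macros wrap GCC's assume_aligned attribute. A minimal userspace sketch of the same pattern follows; toy_kmalloc and TOY_MINALIGN are hypothetical stand-ins for illustration, not kernel code, and the macro definitions only approximate the real ones in include/linux/compiler-gcc.h.

/* Sketch only: mirrors the kernel's attribute macros in userspace.
 * assume_aligned needs GCC >= 4.9; __malloc works on older GCCs too. */
#include <stdlib.h>

#define __malloc		__attribute__((__malloc__))
#define __assume_aligned(a)	__attribute__((__assume_aligned__(a)))

#define TOY_MINALIGN 8	/* hypothetical stand-in for ARCH_KMALLOC_MINALIGN */

/* Declared in the style this patch produces: the compiler may assume the
 * result is TOY_MINALIGN-aligned and aliases no existing pointer. */
void *toy_kmalloc(size_t size) __assume_aligned(TOY_MINALIGN) __malloc;

void *toy_kmalloc(size_t size)
{
	return aligned_alloc(TOY_MINALIGN, size);	/* C11; size is a multiple of the alignment here */
}

int main(void)
{
	char *p = toy_kmalloc(32);

	if (p)
		p[0] = 0;
	free(p);
	return 0;
}

Attaching the attributes to the declaration alone is enough; the definition inherits them. That is exactly why a header-only change like this one can improve alias analysis at every call site without touching the allocator implementations.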