Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	47
1 file changed, 28 insertions(+), 19 deletions(-)
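
For readers unfamiliar with the attribute machinery this patch relies on: __assume_aligned is presumably a thin wrapper around GCC's assume_aligned function attribute (its definition lives elsewhere in the tree, so that is an assumption here, not something this diff shows). A minimal user-space sketch of the idea, with hypothetical names throughout rather than kernel API:

#include <stdint.h>
#include <stdlib.h>

/*
 * Hypothetical stand-in for the kernel's __assume_aligned wrapper:
 * promise the compiler that every pointer this function returns is
 * at least 16-byte aligned.
 */
#define __assume_16b_alignment __attribute__((assume_aligned(16)))

void *alloc16(size_t size) __assume_16b_alignment;

void *alloc16(size_t size)
{
	/* C11 aligned_alloc() actually satisfies the promised alignment */
	return aligned_alloc(16, size);
}

void zero_words(size_t n)
{
	/* round the size up: aligned_alloc() wants a multiple of 16 */
	uint64_t *buf = alloc16((n * sizeof(*buf) + 15) & ~(size_t)15);
	size_t i;

	if (!buf)
		return;
	/*
	 * Thanks to the attribute on the prototype, the compiler may
	 * assume buf is 16-byte aligned here and vectorize this loop
	 * with aligned loads/stores instead of emitting a runtime
	 * alignment check first.
	 */
	for (i = 0; i < n; i++)
		buf[i] = 0;
	free(buf);
}

The placement mirrors the hunks below: the attribute sits on the prototype, so every caller that only sees the header still gets the alignment guarantee (ARCH_KMALLOC_MINALIGN for kmalloc and friends, ARCH_SLAB_MINALIGN for kmem_cache_alloc and friends, PAGE_SIZE for the page-order allocators).
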
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7e37d448ed91..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -111,7 +111,7 @@ struct mem_cgroup;
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
-int slab_is_available(void);
+bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
@@ -158,6 +158,24 @@ size_t ksize(const void *);
 #endif
 
 /*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
+/*
  * Kmalloc array related definitions
  */
 
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					gfp_t gfpflags,
-					int node, size_t size);
+					int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
 	struct rcu_head rcu;
 	struct kmem_cache *entries[0];
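
One hunk above also changes kmem_cache_alloc_bulk() to return int rather than bool. Below is a hedged user-space sketch of the calling convention this suggests; mock_alloc_bulk() is hypothetical, and the count-or-zero, all-or-nothing semantics are an assumption read off the prototype, not a statement of the in-tree behaviour:

#include <stdlib.h>

/*
 * Hypothetical analogue of an int-returning bulk allocator: the return
 * value is the number of objects allocated, 0 meaning failure.  An int
 * count keeps the prototype stable if partial success is ever allowed,
 * which a bool could not express.
 */
static int mock_alloc_bulk(size_t size, size_t nr, void **objs)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		objs[i] = malloc(size);
		if (!objs[i]) {
			while (i--)	/* unwind: all or nothing */
				free(objs[i]);
			return 0;
		}
	}
	return (int)nr;
}

int main(void)
{
	void *objs[8];
	int allocated = mock_alloc_bulk(64, 8, objs);

	if (allocated == 0)	/* 0 still reads naturally as failure */
		return 1;
	while (allocated--)
		free(objs[allocated]);
	return 0;
}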