Diffstat (limited to 'include/linux/slab.h'):

 include/linux/slab.h | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 50697a1d6621..231abc8976c5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -135,9 +135,15 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			slab_flags_t,
-			void (*)(void *));
+extern bool usercopy_fallback;
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+			size_t align, slab_flags_t flags,
+			void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create_usercopy(const char *name,
+			size_t size, size_t align, slab_flags_t flags,
+			size_t useroffset, size_t usersize,
+			void (*ctor)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 
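The new kmem_cache_create_usercopy() takes the whitelisted region as an
offset/size pair; copies to or from any part of an object outside
[useroffset, useroffset + usersize) can then be rejected by hardened
usercopy. A minimal sketch of a caller, with struct, field, and cache
names invented for illustration (not part of this commit):

	/* Hypothetical example -- all names are made up. */
	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/stddef.h>	/* offsetof(), sizeof_field() */

	struct example_buf {
		unsigned long refcount;	/* kernel-internal; never user-copied */
		char data[128];		/* the only region exposed to usercopy */
	};

	static struct kmem_cache *example_cachep;

	static int __init example_cache_init(void)
	{
		example_cachep = kmem_cache_create_usercopy("example_buf",
				sizeof(struct example_buf),
				__alignof__(struct example_buf),
				SLAB_HWCACHE_ALIGN,
				offsetof(struct example_buf, data),	/* useroffset */
				sizeof_field(struct example_buf, data),	/* usersize */
				NULL);					/* no ctor */
		return example_cachep ? 0 : -ENOMEM;
	}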
@@ -153,9 +159,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  * then the objects will be properly aligned in SMP configurations.
  */
-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
-		sizeof(struct __struct), __alignof__(struct __struct),\
-		(__flags), NULL)
+#define KMEM_CACHE(__struct, __flags)					\
+		kmem_cache_create(#__struct, sizeof(struct __struct),	\
+			__alignof__(struct __struct), (__flags), NULL)
+
+/*
+ * To whitelist a single field for copying to/from usercopy, use this
+ * macro instead for KMEM_CACHE() above.
+ */
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
+		kmem_cache_create_usercopy(#__struct,			\
+			sizeof(struct __struct),			\
+			__alignof__(struct __struct), (__flags),	\
+			offsetof(struct __struct, __field),		\
+			sizeof_field(struct __struct, __field), NULL)
 
 /*
  * Common kmalloc functions provided by all allocators
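For the common case of whitelisting exactly one field, the new
KMEM_CACHE_USERCOPY() macro derives the offset/size pair itself from the
field name. A sketch of its use, again with made-up names:

	/* Hypothetical example -- struct and field names are invented. */
	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct msg_record {
		spinlock_t lock;	/* never exposed to user space */
		char payload[64];	/* the one whitelisted field */
	};

	static struct kmem_cache *msg_cachep;

	static int __init msg_cache_init(void)
	{
		/*
		 * Expands to kmem_cache_create_usercopy("msg_record", ...,
		 * offsetof(struct msg_record, payload),
		 * sizeof_field(struct msg_record, payload), NULL).
		 */
		msg_cachep = KMEM_CACHE_USERCOPY(msg_record, SLAB_PANIC, payload);
		return 0;	/* SLAB_PANIC: allocation failure is fatal */
	}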
@@ -167,15 +184,11 @@ void kzfree(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page);
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user);
 #else
-static inline const char *__check_heap_object(const void *ptr,
-					      unsigned long n,
-					      struct page *page)
-{
-	return NULL;
-}
+static inline void __check_heap_object(const void *ptr, unsigned long n,
+				       struct page *page, bool to_user) { }
 #endif
 
 /*
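With this signature change, __check_heap_object() no longer returns an
error string for the caller to report: it now receives the copy direction
(to_user) and the allocator-specific hook handles rejection itself. A
hedged sketch of how a caller on the hardened-usercopy path might invoke
it, loosely modeled on mm/usercopy.c and not part of this diff:

	/* Hypothetical caller sketch; names mirror mm/usercopy.c style. */
	#include <linux/mm.h>
	#include <linux/slab.h>

	static void check_heap_object(const void *ptr, unsigned long n,
				      bool to_user)
	{
		struct page *page;

		if (!virt_addr_valid(ptr))
			return;

		page = virt_to_head_page(ptr);
		if (PageSlab(page))
			/* Void return: SLAB/SLUB reject a bad copy internally. */
			__check_heap_object(ptr, n, page, to_user);
	}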