Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--  include/linux/slab_def.h  106 ----------------------------------------------
1 file changed, 0 insertions(+), 106 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..e9346b4f1ef4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
  */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *cachep;
-	void *ret;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-		return ret;
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid,
-					 size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid,
-			    size_t size)
-{
-	return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	struct kmem_cache *cachep;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
 #endif /* _LINUX_SLAB_DEF_H */
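
For context, the inlines removed above implement a classic pattern: when kmalloc() is called with a size the compiler can prove constant, __builtin_constant_p() lets the cache lookup (kmalloc_index() plus the kmalloc_caches[] indexing) fold away at compile time, and only run-time sizes fall through to the out-of-line __kmalloc() path. Below is a minimal, standalone userspace C sketch of that technique; it is an illustration under stated assumptions, not kernel code, and every name in it (tiny_alloc, pool_index, generic_alloc) is a hypothetical stand-in for kmalloc(), kmalloc_index(), and __kmalloc() respectively.

/*
 * Standalone sketch (userspace C, not kernel code) of the constant-size
 * dispatch performed by the removed kmalloc() inline.  All names here
 * are hypothetical stand-ins for the kernel functions.
 */
#include <stdio.h>
#include <stdlib.h>

/* Out-of-line fallback, akin to __kmalloc(): size known only at run time. */
static void *generic_alloc(size_t size)
{
	printf("runtime path: %zu bytes\n", size);
	return malloc(size);
}

/* Map a size to a power-of-two pool, akin to kmalloc_index(). */
static inline int pool_index(size_t size)
{
	int i = 0;
	size_t pool = 32;

	while (pool < size) {
		pool <<= 1;
		i++;
	}
	return i;
}

static inline void *tiny_alloc(size_t size)
{
	/*
	 * With optimization enabled, once this function is inlined,
	 * __builtin_constant_p() is true for a literal argument, so
	 * pool_index() folds to a constant and the pool selection
	 * costs nothing at run time.
	 */
	if (__builtin_constant_p(size)) {
		printf("compile-time path: pool %d\n", pool_index(size));
		return malloc(size);
	}
	return generic_alloc(size);
}

int main(void)
{
	size_t n = (size_t)(rand() % 200) + 1;

	free(tiny_alloc(64));	/* literal size: compile-time branch */
	free(tiny_alloc(n));	/* runtime size: generic fallback */
	return 0;
}

The removed kernel version also handled two edge cases the sketch omits: it returned ZERO_SIZE_PTR for zero-sized requests and NULL (with a one-time warning) for sizes above KMALLOC_MAX_SIZE, and it picked from kmalloc_dma_caches[] instead when GFP_DMA was set under CONFIG_ZONE_DMA. Note that this page shows only the slab_def.h side of the change; the diffstat is limited to that file, so where any replacement logic lives is not visible here.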