Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h | 110 ----------
1 file changed, 0 insertions(+), 110 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..cc0b67eada42 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
-	return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-					   gfp_t gfpflags,
-					   int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-			      gfp_t gfpflags,
-			      int node, size_t size)
-{
-	return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-					flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */
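
The bulk of this removal is SLUB's inline kmalloc() fast path, presumably being consolidated out of slub_def.h into the allocator-independent <linux/slab.h>. The pattern it implemented is the interesting part: when the requested size is a compile-time constant, __builtin_constant_p() lets the compiler fold the kmalloc_index() lookup and dispatch directly to the matching kmalloc cache (or to kmalloc_large() above KMALLOC_MAX_CACHE_SIZE), falling back to the out-of-line __kmalloc() only for runtime-variable sizes. The following is a minimal userspace sketch of that dispatch, not kernel code: demo_kmalloc(), alloc_from_cache(), alloc_generic() and DEMO_MAX_CACHE_SIZE are hypothetical stand-ins for kmalloc(), kmem_cache_alloc_trace(), __kmalloc() and KMALLOC_MAX_CACHE_SIZE.

/*
 * Sketch of the constant-size dispatch above. Build with: cc -O2 demo.c
 * (optimization is needed so the inline body, and therefore the
 * __builtin_constant_p() test, is folded at the call site).
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_CACHE_SIZE 8192	/* stand-in for KMALLOC_MAX_CACHE_SIZE */

static void *alloc_from_cache(size_t size)	/* stand-in fast path */
{
	printf("fast path: %zu bytes from a size cache\n", size);
	return malloc(size);
}

static void *alloc_generic(size_t size)		/* stand-in slow path */
{
	printf("slow path: %zu bytes via generic allocator\n", size);
	return malloc(size);
}

static inline void *demo_kmalloc(size_t size)
{
	/*
	 * __builtin_constant_p() (a GCC/Clang builtin) evaluates to 1 only
	 * when the compiler can prove `size` is a constant expression, so
	 * constant-size callers compile straight to the fast path with no
	 * runtime test left over.
	 */
	if (__builtin_constant_p(size) && size <= DEMO_MAX_CACHE_SIZE)
		return alloc_from_cache(size);
	return alloc_generic(size);
}

int main(void)
{
	void *a = demo_kmalloc(64);		/* constant size: fast path */
	size_t n = (size_t)(rand() % 100) + 1;
	void *b = demo_kmalloc(n);		/* runtime size: slow path */

	free(a);
	free(b);
	return 0;
}

The removed kernel helpers are marked __always_inline for exactly this reason: __builtin_constant_p() only sees the caller's constant once the body is inlined into the call site, so inlining must never be left to the optimizer's discretion.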