author	Christoph Lameter <cl@linux.com>	2013-09-04 12:35:34 -0400
committer	Pekka Enberg <penberg@kernel.org>	2013-09-04 13:51:33 -0400
commit	f1b6eb6e6be149b40ebb013f5bfe2ac86b6f1c1b
tree	245897276adc30bc17a23ba1b5364a065e2ecb74 /include/linux
parent	9de1bc875261411bf0a900e90cfe0c7a31c4917b
mm/sl[aou]b: Move kmallocXXX functions to common code
The kmalloc* functions of all slab allocators are similar now, so let's move them into slab.h. This requires some function naming changes in SLOB.

As a result of this patch there is a common set of functions for all allocators. It also means that kmalloc_large() is now generally available to perform large order allocations that go directly via the page allocator. kmalloc_large() can be substituted if kmalloc() throws warnings because of too-large allocations. kmalloc_large() has exactly the same semantics as kmalloc() but can only be used for allocations > PAGE_SIZE.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
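For illustration, a minimal sketch of a caller (the helper name and buffer size are hypothetical, not part of the patch): with a compile-time size well above PAGE_SIZE, kmalloc_large() can now be called directly instead of going through kmalloc():

#include <linux/slab.h>

/* Hypothetical example: 64 KiB exceeds PAGE_SIZE on common configurations.
 * kmalloc_large() allocates directly from the page allocator; per the
 * commit message it has the same semantics as kmalloc(), so the result
 * is released with kfree() as usual. */
static void *alloc_big_buffer(void)
{
	return kmalloc_large(64 * 1024, GFP_KERNEL);
}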
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/slab.h	| 156
-rw-r--r--	include/linux/slab_def.h	| 106
-rw-r--r--	include/linux/slob_def.h	| 31
-rw-r--r--	include/linux/slub_def.h	| 97
4 files changed, 124 insertions(+), 266 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..386af639dcaa 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  * Cleaned up and restructured to ease the addition of alternative
  * implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					 gfp_t gfpflags,
+					 int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
 #endif
+	}
+	return __kmalloc(size, flags);
+}
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
 	return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) &&
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLAB_CACHE_DMA)) {
+		int i = kmalloc_index(size);
+
+		if (!i)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+						flags, node, size);
+	}
+#endif
+	return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..e9346b4f1ef4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
  */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *cachep;
-	void *ret;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-		return ret;
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid,
-					 size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid,
-			    size_t size)
-{
-	return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	struct kmem_cache *cachep;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4a8516..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
-					      gfp_t flags)
-{
-	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
-	return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..901fb6eb7467 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -104,20 +98,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
 /**
  * Calling this on allocated memory will check that the memory
  * is expected to be in use, and print warnings if not.
@@ -131,81 +111,4 @@ static inline bool verify_mem_not_deleted(const void *x)
 }
 #endif
 
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-					 gfp_t gfpflags,
-					 int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-			    gfp_t gfpflags,
-			    int node, size_t size)
-{
-	return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-					flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */