about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/linux/kmemtrace.h86
-rw-r--r--include/linux/slab.h8
-rw-r--r--include/linux/slab_def.h68
-rw-r--r--include/linux/slob_def.h9
-rw-r--r--include/linux/slub_def.h53
5 files changed, 204 insertions, 20 deletions
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644
index 000000000000..5bea8ead6a6b
--- /dev/null
+++ b/include/linux/kmemtrace.h
@@ -0,0 +1,86 @@
/*
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 *
 * This file is released under GPL version 2.
 *
 * kmemtrace: inline tracing hooks for the kernel's allocators.  When
 * CONFIG_KMEMTRACE is enabled, the helpers below emit allocation and
 * free events through the marker infrastructure (<linux/marker.h>);
 * when it is disabled they are empty static inlines that compile away.
 */

#ifndef _LINUX_KMEMTRACE_H
#define _LINUX_KMEMTRACE_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/marker.h>

/* Which allocator family an event originates from. */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

#ifdef CONFIG_KMEMTRACE

extern void kmemtrace_init(void);

/*
 * Record an allocation event.
 *
 * @type_id:     allocator family (see enum kmemtrace_type_id)
 * @call_site:   address of the allocation call site
 * @ptr:         the object the allocator returned
 * @bytes_req:   size the caller requested
 * @bytes_alloc: size the allocator actually handed out
 * @gfp_flags:   allocation flags
 * @node:        requested node id, or -1 when unspecified (see the
 *               kmemtrace_mark_alloc() wrapper below)
 *
 * Pointer and size arguments are widened to unsigned long to match the
 * %lu conversions in the marker format string.
 */
static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
					     const void *ptr,
					     size_t bytes_req,
					     size_t bytes_alloc,
					     gfp_t gfp_flags,
					     int node)
{
	trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
		   "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
		   type_id, call_site, (unsigned long) ptr,
		   (unsigned long) bytes_req, (unsigned long) bytes_alloc,
		   (unsigned long) gfp_flags, node);
}

/* Record that @ptr was freed from @call_site. */
static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr)
{
	trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
		   type_id, call_site, (unsigned long) ptr);
}

#else /* CONFIG_KMEMTRACE */

/* Tracing disabled: all hooks are no-ops and vanish after inlining. */

static inline void kmemtrace_init(void)
{
}

static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
					     const void *ptr,
					     size_t bytes_req,
					     size_t bytes_alloc,
					     gfp_t gfp_flags,
					     int node)
{
}

static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr)
{
}

#endif /* CONFIG_KMEMTRACE */

/*
 * Node-agnostic allocation event: forwards to the _node variant with
 * node == -1 ("no node specified").  Defined outside the #ifdef so it
 * works for both the tracing and the stub configuration.
 */
static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
					unsigned long call_site,
					const void *ptr,
					size_t bytes_req,
					size_t bytes_alloc,
					gfp_t gfp_flags)
{
	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
				  bytes_req, bytes_alloc, gfp_flags, -1);
}

#endif /* __KERNEL__ */

#endif /* _LINUX_KMEMTRACE_H */

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 000da12b5cf0..c97ed28559ec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
253 * request comes from. 253 * request comes from.
254 */ 254 */
255#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) 255#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
256extern void *__kmalloc_track_caller(size_t, gfp_t, void*); 256extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
257#define kmalloc_track_caller(size, flags) \ 257#define kmalloc_track_caller(size, flags) \
258 __kmalloc_track_caller(size, flags, __builtin_return_address(0)) 258 __kmalloc_track_caller(size, flags, _RET_IP_)
259#else 259#else
260#define kmalloc_track_caller(size, flags) \ 260#define kmalloc_track_caller(size, flags) \
261 __kmalloc(size, flags) 261 __kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
271 * allocation request comes from. 271 * allocation request comes from.
272 */ 272 */
273#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) 273#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
274extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); 274extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
275#define kmalloc_node_track_caller(size, flags, node) \ 275#define kmalloc_node_track_caller(size, flags, node) \
276 __kmalloc_node_track_caller(size, flags, node, \ 276 __kmalloc_node_track_caller(size, flags, node, \
277 __builtin_return_address(0)) 277 _RET_IP_)
278#else 278#else
279#define kmalloc_node_track_caller(size, flags, node) \ 279#define kmalloc_node_track_caller(size, flags, node) \
280 __kmalloc_node(size, flags, node) 280 __kmalloc_node(size, flags, node)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8ebe..7555ce99f6d2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
50found: 69found:
51#ifdef CONFIG_ZONE_DMA 70#ifdef CONFIG_ZONE_DMA
52 if (flags & GFP_DMA) 71 if (flags & GFP_DMA)
53 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 72 cachep = malloc_sizes[i].cs_dmacachep;
54 flags); 73 else
55#endif 74#endif
56 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 75 cachep = malloc_sizes[i].cs_cachep;
76
77 ret = kmem_cache_alloc_notrace(cachep, flags);
78
79 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
80 size, slab_buffer_size(cachep), flags);
81
82 return ret;
57 } 83 }
58 return __kmalloc(size, flags); 84 return __kmalloc(size, flags);
59} 85}
@@ -62,8 +88,25 @@ found:
62extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 88extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 89extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64 90
65static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 91#ifdef CONFIG_KMEMTRACE
92extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
93 gfp_t flags,
94 int nodeid);
95#else
96static __always_inline void *
97kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
98 gfp_t flags,
99 int nodeid)
100{
101 return kmem_cache_alloc_node(cachep, flags, nodeid);
102}
103#endif
104
105static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
66{ 106{
107 struct kmem_cache *cachep;
108 void *ret;
109
67 if (__builtin_constant_p(size)) { 110 if (__builtin_constant_p(size)) {
68 int i = 0; 111 int i = 0;
69 112
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
84found: 127found:
85#ifdef CONFIG_ZONE_DMA 128#ifdef CONFIG_ZONE_DMA
86 if (flags & GFP_DMA) 129 if (flags & GFP_DMA)
87 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 130 cachep = malloc_sizes[i].cs_dmacachep;
88 flags, node); 131 else
89#endif 132#endif
90 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 133 cachep = malloc_sizes[i].cs_cachep;
91 flags, node); 134
135 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
136
137 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
138 ret, size, slab_buffer_size(cachep),
139 flags, node);
140
141 return ret;
92 } 142 }
93 return __kmalloc_node(size, flags, node); 143 return __kmalloc_node(size, flags, node);
94} 144}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..dc28432b5b9a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <linux/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
204void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 205void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
205void *__kmalloc(size_t size, gfp_t flags); 206void *__kmalloc(size_t size, gfp_t flags);
206 207
208#ifdef CONFIG_KMEMTRACE
209extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
210#else
211static __always_inline void *
212kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
213{
214 return kmem_cache_alloc(s, gfpflags);
215}
216#endif
217
207static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 218static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
208{ 219{
209 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 220 unsigned int order = get_order(size);
221 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
222
223 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
224 size, PAGE_SIZE << order, flags);
225
226 return ret;
210} 227}
211 228
212static __always_inline void *kmalloc(size_t size, gfp_t flags) 229static __always_inline void *kmalloc(size_t size, gfp_t flags)
213{ 230{
231 void *ret;
232
214 if (__builtin_constant_p(size)) { 233 if (__builtin_constant_p(size)) {
215 if (size > PAGE_SIZE) 234 if (size > PAGE_SIZE)
216 return kmalloc_large(size, flags); 235 return kmalloc_large(size, flags);
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
221 if (!s) 240 if (!s)
222 return ZERO_SIZE_PTR; 241 return ZERO_SIZE_PTR;
223 242
224 return kmem_cache_alloc(s, flags); 243 ret = kmem_cache_alloc_notrace(s, flags);
244
245 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
246 _THIS_IP_, ret,
247 size, s->size, flags);
248
249 return ret;
225 } 250 }
226 } 251 }
227 return __kmalloc(size, flags); 252 return __kmalloc(size, flags);
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
231void *__kmalloc_node(size_t size, gfp_t flags, int node); 256void *__kmalloc_node(size_t size, gfp_t flags, int node);
232void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 257void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
233 258
259#ifdef CONFIG_KMEMTRACE
260extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
261 gfp_t gfpflags,
262 int node);
263#else
264static __always_inline void *
265kmem_cache_alloc_node_notrace(struct kmem_cache *s,
266 gfp_t gfpflags,
267 int node)
268{
269 return kmem_cache_alloc_node(s, gfpflags, node);
270}
271#endif
272
234static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 273static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
235{ 274{
275 void *ret;
276
236 if (__builtin_constant_p(size) && 277 if (__builtin_constant_p(size) &&
237 size <= PAGE_SIZE && !(flags & SLUB_DMA)) { 278 size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
238 struct kmem_cache *s = kmalloc_slab(size); 279 struct kmem_cache *s = kmalloc_slab(size);
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
240 if (!s) 281 if (!s)
241 return ZERO_SIZE_PTR; 282 return ZERO_SIZE_PTR;
242 283
243 return kmem_cache_alloc_node(s, flags, node); 284 ret = kmem_cache_alloc_node_notrace(s, flags, node);
285
286 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
287 _THIS_IP_, ret,
288 size, s->size, flags, node);
289
290 return ret;
244 } 291 }
245 return __kmalloc_node(size, flags, node); 292 return __kmalloc_node(size, flags, node);
246} 293}