author     Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>   2009-03-23 09:12:24 -0400
committer  Ingo Molnar <mingo@elte.hu>                               2009-04-03 06:23:06 -0400
commit     ca2b84cb3c4a0d4d2143b46ec072cdff5d1b3b87 (patch)
tree       7163bac040f11c444b24cab53c4a784df73fa4f3
parent     ac44021fccd8f1f2b267b004f23a2e8d7ef05f7b (diff)
kmemtrace: use tracepoints
kmemtrace now uses tracepoints instead of markers. We no longer need to
use format specifiers to pass arguments.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
[ folded: Use the new TP_PROTO and TP_ARGS to fix the build. ]
[ folded: fix build when CONFIG_KMEMTRACE is disabled. ]
[ folded: define tracepoints when CONFIG_TRACEPOINTS is enabled. ]
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
LKML-Reference: <ae61c0f37156db8ec8dc0d5778018edde60a92e3.1237813499.git.eduard.munteanu@linux360.ro>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/slab_def.h   |  10
-rw-r--r--  include/linux/slub_def.h   |  12
-rw-r--r--  include/trace/kmemtrace.h  |  92
-rw-r--r--  kernel/trace/kmemtrace.c   | 206
-rw-r--r--  kernel/trace/trace.h       |   6
-rw-r--r--  mm/slab.c                  |  24
-rw-r--r--  mm/slob.c                  |  28
-rw-r--r--  mm/slub.c                  |  30
-rw-r--r--  mm/util.c                  |  16
9 files changed, 251 insertions, 173 deletions
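
The change in miniature: a marker passed its payload through a runtime format string, while a tracepoint declared with DECLARE_TRACE() is a typed hook, so every argument is checked by the compiler and no format specifiers are needed. A hedged sketch of the pattern the diffs below follow (the tracepoint "foo", its probe and the tracer_* helpers are hypothetical; DECLARE_TRACE, DEFINE_TRACE, trace_*() and register_trace_*()/unregister_trace_*() are the tracepoint API this patch relies on):

#include <linux/tracepoint.h>

/* In a shared header: declare the typed tracepoint. */
DECLARE_TRACE(foo,
              TP_PROTO(unsigned long call_site, const void *ptr),
              TP_ARGS(call_site, ptr));

/* In exactly one .c file: instantiate it. */
DEFINE_TRACE(foo);

/* At the instrumentation site: a plain typed call, nearly free
 * until a probe is attached. */
trace_foo(_RET_IP_, ptr);

/* In the tracer: a probe with a matching prototype, attached and
 * detached at init/reset time. */
static void probe_foo(unsigned long call_site, const void *ptr)
{
        /* record the event */
}

static int tracer_init(void)
{
        return register_trace_foo(probe_foo);
}

static void tracer_reset(void)
{
        unregister_trace_foo(probe_foo);
}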
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index f4523651fa42..5ac9b0bcaf9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -73,8 +73,8 @@ found:
 
 	ret = kmem_cache_alloc_notrace(cachep, flags);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-			     size, slab_buffer_size(cachep), flags);
+	trace_kmalloc(_THIS_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
 
 	return ret;
 }
@@ -128,9 +128,9 @@ found:
 
 	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
-				  ret, size, slab_buffer_size(cachep),
-				  flags, node);
+	trace_kmalloc_node(_THIS_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, node);
 
 	return ret;
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a1f90528e70b..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -233,8 +233,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 	unsigned int order = get_order(size);
 	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-			     size, PAGE_SIZE << order, flags);
+	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
 	return ret;
 }
@@ -255,9 +254,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 
 			ret = kmem_cache_alloc_notrace(s, flags);
 
-			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-					     _THIS_IP_, ret,
-					     size, s->size, flags);
+			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
 
 			return ret;
 		}
@@ -296,9 +293,8 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 
 		ret = kmem_cache_alloc_node_notrace(s, flags, node);
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _THIS_IP_, ret,
-					  size, s->size, flags, node);
+		trace_kmalloc_node(_THIS_IP_, ret,
+				   size, s->size, flags, node);
 
 		return ret;
 	}
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
index ad8b7857855a..28ee69f9cd46 100644
--- a/include/trace/kmemtrace.h
+++ b/include/trace/kmemtrace.h
@@ -9,65 +9,53 @@
 
 #ifdef __KERNEL__
 
+#include <linux/tracepoint.h>
 #include <linux/types.h>
-#include <linux/marker.h>
-
-enum kmemtrace_type_id {
-	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
-	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
-	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
-};
 
 #ifdef CONFIG_KMEMTRACE
-
 extern void kmemtrace_init(void);
-
-extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-				      unsigned long call_site,
-				      const void *ptr,
-				      size_t bytes_req,
-				      size_t bytes_alloc,
-				      gfp_t gfp_flags,
-				      int node);
-
-extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-				unsigned long call_site,
-				const void *ptr);
-
-#else /* CONFIG_KMEMTRACE */
-
+#else
 static inline void kmemtrace_init(void)
 {
 }
-
-static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-					     unsigned long call_site,
-					     const void *ptr,
-					     size_t bytes_req,
-					     size_t bytes_alloc,
-					     gfp_t gfp_flags,
-					     int node)
-{
-}
-
-static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-				       unsigned long call_site,
-				       const void *ptr)
-{
-}
-
-#endif /* CONFIG_KMEMTRACE */
-
-static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
-					unsigned long call_site,
-					const void *ptr,
-					size_t bytes_req,
-					size_t bytes_alloc,
-					gfp_t gfp_flags)
-{
-	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
-				  bytes_req, bytes_alloc, gfp_flags, -1);
-}
+#endif
+
+DECLARE_TRACE(kmalloc,
+	      TP_PROTO(unsigned long call_site,
+		       const void *ptr,
+		       size_t bytes_req,
+		       size_t bytes_alloc,
+		       gfp_t gfp_flags),
+	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
+DECLARE_TRACE(kmem_cache_alloc,
+	      TP_PROTO(unsigned long call_site,
+		       const void *ptr,
+		       size_t bytes_req,
+		       size_t bytes_alloc,
+		       gfp_t gfp_flags),
+	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
+DECLARE_TRACE(kmalloc_node,
+	      TP_PROTO(unsigned long call_site,
+		       const void *ptr,
+		       size_t bytes_req,
+		       size_t bytes_alloc,
+		       gfp_t gfp_flags,
+		       int node),
+	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
+DECLARE_TRACE(kmem_cache_alloc_node,
+	      TP_PROTO(unsigned long call_site,
+		       const void *ptr,
+		       size_t bytes_req,
+		       size_t bytes_alloc,
+		       gfp_t gfp_flags,
+		       int node),
+	      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
+DECLARE_TRACE(kfree,
+	      TP_PROTO(unsigned long call_site, const void *ptr),
+	      TP_ARGS(call_site, ptr));
+DECLARE_TRACE(kmem_cache_free,
+	      TP_PROTO(unsigned long call_site, const void *ptr),
+	      TP_ARGS(call_site, ptr));
 
 #endif /* __KERNEL__ */
 
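Each DECLARE_TRACE() above generates a typed inline trace_<name>() plus registration helpers. Roughly, and only as a simplified sketch of the era's tracepoint machinery (not the literal macro expansion):

/* Approximately what DECLARE_TRACE(kmalloc, ...) provides: */
extern struct tracepoint __tracepoint_kmalloc;

static inline void trace_kmalloc(unsigned long call_site, const void *ptr,
                                 size_t bytes_req, size_t bytes_alloc,
                                 gfp_t gfp_flags)
{
        /* One predicted-not-taken branch when no probe is registered;
         * otherwise each attached probe is called with the typed args. */
        if (unlikely(__tracepoint_kmalloc.state))
                __DO_TRACE(&__tracepoint_kmalloc,
                           TP_PROTO(unsigned long call_site, const void *ptr,
                                    size_t bytes_req, size_t bytes_alloc,
                                    gfp_t gfp_flags),
                           TP_ARGS(call_site, ptr, bytes_req, bytes_alloc,
                                   gfp_flags));
}

extern int register_trace_kmalloc(void (*probe)(unsigned long, const void *,
                                                size_t, size_t, gfp_t));
extern int unregister_trace_kmalloc(void (*probe)(unsigned long, const void *,
                                                  size_t, size_t, gfp_t));
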
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index ae201b3eda89..4f7b5db5dd06 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/tracepoint.h>
 #include <trace/kmemtrace.h>
 
 #include "trace.h"
@@ -29,10 +30,150 @@ static struct tracer_flags kmem_tracer_flags = {
 	.opts = kmem_opts
 };
 
-
-static bool kmem_tracing_enabled __read_mostly;
 static struct trace_array *kmemtrace_array;
 
+/* Trace allocations */
+static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
+				   unsigned long call_site,
+				   const void *ptr,
+				   size_t bytes_req,
+				   size_t bytes_alloc,
+				   gfp_t gfp_flags,
+				   int node)
+{
+	struct ring_buffer_event *event;
+	struct kmemtrace_alloc_entry *entry;
+	struct trace_array *tr = kmemtrace_array;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_ALLOC;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+	entry->bytes_req = bytes_req;
+	entry->bytes_alloc = bytes_alloc;
+	entry->gfp_flags = gfp_flags;
+	entry->node = node;
+
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	trace_wake_up();
+}
+
+static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
+				  unsigned long call_site,
+				  const void *ptr)
+{
+	struct ring_buffer_event *event;
+	struct kmemtrace_free_entry *entry;
+	struct trace_array *tr = kmemtrace_array;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_FREE;
+	entry->type_id = type_id;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	trace_wake_up();
+}
+
+static void kmemtrace_kmalloc(unsigned long call_site,
+			      const void *ptr,
+			      size_t bytes_req,
+			      size_t bytes_alloc,
+			      gfp_t gfp_flags)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
+				       const void *ptr,
+				       size_t bytes_req,
+				       size_t bytes_alloc,
+				       gfp_t gfp_flags)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmalloc_node(unsigned long call_site,
+				   const void *ptr,
+				   size_t bytes_req,
+				   size_t bytes_alloc,
+				   gfp_t gfp_flags,
+				   int node)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
+					    const void *ptr,
+					    size_t bytes_req,
+					    size_t bytes_alloc,
+					    gfp_t gfp_flags,
+					    int node)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
+{
+	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
+}
+
+static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
+{
+	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
+}
+
+static int kmemtrace_start_probes(void)
+{
+	int err;
+
+	err = register_trace_kmalloc(kmemtrace_kmalloc);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+	if (err)
+		return err;
+	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+	if (err)
+		return err;
+	err = register_trace_kfree(kmemtrace_kfree);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+
+	return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+	unregister_trace_kmalloc(kmemtrace_kmalloc);
+	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
+	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+	unregister_trace_kfree(kmemtrace_kfree);
+	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+}
+
 static int kmem_trace_init(struct trace_array *tr)
 {
 	int cpu;
@@ -41,14 +182,14 @@ static int kmem_trace_init(struct trace_array *tr)
 	for_each_cpu_mask(cpu, cpu_possible_map)
 		tracing_reset(tr, cpu);
 
-	kmem_tracing_enabled = true;
+	kmemtrace_start_probes();
 
 	return 0;
 }
 
 static void kmem_trace_reset(struct trace_array *tr)
 {
-	kmem_tracing_enabled = false;
+	kmemtrace_stop_probes();
 }
 
 static void kmemtrace_headers(struct seq_file *s)
@@ -260,63 +401,6 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
 	}
 }
 
-/* Trace allocations */
-void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-			       unsigned long call_site,
-			       const void *ptr,
-			       size_t bytes_req,
-			       size_t bytes_alloc,
-			       gfp_t gfp_flags,
-			       int node)
-{
-	struct ring_buffer_event *event;
-	struct kmemtrace_alloc_entry *entry;
-	struct trace_array *tr = kmemtrace_array;
-
-	if (!kmem_tracing_enabled)
-		return;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-
-	entry->call_site = call_site;
-	entry->ptr = ptr;
-	entry->bytes_req = bytes_req;
-	entry->bytes_alloc = bytes_alloc;
-	entry->gfp_flags = gfp_flags;
-	entry->node = node;
-
-	trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
-
-void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-			 unsigned long call_site,
-			 const void *ptr)
-{
-	struct ring_buffer_event *event;
-	struct kmemtrace_free_entry *entry;
-	struct trace_array *tr = kmemtrace_array;
-
-	if (!kmem_tracing_enabled)
-		return;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->type_id = type_id;
-	entry->call_site = call_site;
-	entry->ptr = ptr;
-
-	trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_free);
-
 static struct tracer kmem_tracer __read_mostly = {
 	.name		= "kmemtrace",
 	.init		= kmem_trace_init,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cb0ce3fc36d3..cbc168f1e43d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -182,6 +182,12 @@ struct trace_power {
 	struct power_trace	state_data;
 };
 
+enum kmemtrace_type_id {
+	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
+	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
+	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
+};
+
 struct kmemtrace_alloc_entry {
 	struct trace_entry	ent;
 	enum kmemtrace_type_id	type_id;
diff --git a/mm/slab.c b/mm/slab.c
index 9ec66c3e6ee0..fa00fd6a644d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3565,8 +3565,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-			     obj_size(cachep), cachep->buffer_size, flags);
+	trace_kmem_cache_alloc(_RET_IP_, ret,
+			       obj_size(cachep), cachep->buffer_size, flags);
 
 	return ret;
 }
@@ -3627,9 +3627,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	void *ret = __cache_alloc_node(cachep, flags, nodeid,
 				       __builtin_return_address(0));
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-				  obj_size(cachep), cachep->buffer_size,
-				  flags, nodeid);
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    obj_size(cachep), cachep->buffer_size,
+				    flags, nodeid);
 
 	return ret;
 }
@@ -3657,9 +3657,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 		return cachep;
 	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-				  (unsigned long) caller, ret,
-				  size, cachep->buffer_size, flags, node);
+	trace_kmalloc_node((unsigned long) caller, ret,
+			   size, cachep->buffer_size, flags, node);
 
 	return ret;
 }
@@ -3709,9 +3708,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = __cache_alloc(cachep, flags, caller);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-			     (unsigned long) caller, ret,
-			     size, cachep->buffer_size, flags);
+	trace_kmalloc((unsigned long) caller, ret,
+		      size, cachep->buffer_size, flags);
 
 	return ret;
 }
@@ -3757,7 +3755,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+	trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3785,7 +3783,7 @@ void kfree(const void *objp)
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
+	trace_kfree(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slob.c b/mm/slob.c
index 4dd6516447f2..00003587ebfa 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -490,9 +490,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, size + align, gfp, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -503,9 +502,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, PAGE_SIZE << order, gfp, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	return ret;
@@ -527,7 +525,7 @@ void kfree(const void *block)
 	} else
 		put_page(&sp->page);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
+	trace_kfree(_RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -599,16 +597,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-					  _RET_IP_, b, c->size,
-					  SLOB_UNITS(c->size) * SLOB_UNIT,
-					  flags, node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    SLOB_UNITS(c->size) * SLOB_UNIT,
+					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-					  _RET_IP_, b, c->size,
-					  PAGE_SIZE << get_order(c->size),
-					  flags, node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    PAGE_SIZE << get_order(c->size),
+					    flags, node);
 	}
 
 	if (c->ctor)
@@ -646,7 +642,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 		__kmem_cache_free(b, c->size);
 	}
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
+	trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index 7aaa121d0ea9..a98078bf738b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1621,8 +1621,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-			     s->objsize, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
 	return ret;
 }
@@ -1641,8 +1640,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-				  s->objsize, s->size, gfpflags, node);
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    s->objsize, s->size, gfpflags, node);
 
 	return ret;
 }
@@ -1767,7 +1766,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	slab_free(s, page, x, _RET_IP_);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
+	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2702,8 +2701,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	ret = slab_alloc(s, flags, -1, _RET_IP_);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-			     size, s->size, flags);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
 	return ret;
 }
@@ -2729,10 +2727,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, PAGE_SIZE << get_order(size),
-					  flags, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   flags, node);
 
 		return ret;
 	}
@@ -2744,8 +2741,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	ret = slab_alloc(s, flags, node, _RET_IP_);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-				  size, s->size, flags, node);
+	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
 	return ret;
 }
@@ -2807,7 +2803,7 @@ void kfree(const void *x)
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
+	trace_kfree(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3290,8 +3286,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	ret = slab_alloc(s, gfpflags, -1, caller);
 
 	/* Honor the call site pointer we received. */
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
-			     s->size, gfpflags);
+	trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
 	return ret;
 }
@@ -3313,8 +3308,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	ret = slab_alloc(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we received. */
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
-				  size, s->size, gfpflags, node);
+	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
 	return ret;
 }
diff --git a/mm/util.c b/mm/util.c
index 7c122e49f769..2599e83eea17 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/tracepoint.h>
 #include <asm/uaccess.h>
 
 /**
@@ -236,3 +237,18 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/* Tracepoints definitions. */
+DEFINE_TRACE(kmalloc);
+DEFINE_TRACE(kmem_cache_alloc);
+DEFINE_TRACE(kmalloc_node);
+DEFINE_TRACE(kmem_cache_alloc_node);
+DEFINE_TRACE(kfree);
+DEFINE_TRACE(kmem_cache_free);
+
+EXPORT_TRACEPOINT_SYMBOL(kmalloc);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
+EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kfree);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
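
DEFINE_TRACE() must appear in exactly one compilation unit per tracepoint, and mm/util.c is built into every configuration, which makes it a convenient home for all six definitions. The EXPORT_TRACEPOINT_SYMBOL() lines additionally let modular code attach probes. A hypothetical out-of-tree consumer, for illustration only (logging every kmalloc this way would be very noisy in practice):

#include <linux/module.h>
#include <trace/kmemtrace.h>

/* Probe prototype must match the DECLARE_TRACE() TP_PROTO exactly. */
static void my_kmalloc_probe(unsigned long call_site, const void *ptr,
                             size_t bytes_req, size_t bytes_alloc,
                             gfp_t gfp_flags)
{
        printk(KERN_DEBUG "kmalloc: %zu/%zu bytes at %p from %pS\n",
               bytes_req, bytes_alloc, ptr, (void *)call_site);
}

static int __init my_init(void)
{
        return register_trace_kmalloc(my_kmalloc_probe);
}

static void __exit my_exit(void)
{
        unregister_trace_kmalloc(my_kmalloc_probe);
        /* Wait until no tracer can still be executing the probe. */
        tracepoint_synchronize_unregister();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");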