author     Ingo Molnar <mingo@elte.hu>  2008-12-29 09:16:24 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-12-29 09:16:24 -0500
commit     2ff9f9d9629bf9530fe2ab8d803d612761ffc059 (patch)
tree       b22e3fddffbc0f58b1e1974f4819896d58b7bdaf /mm
parent     0f01f07fad4ee11d98fe6faa442afbeb0328a378 (diff)
parent     a4900437f3d76761a1646cd90254ccb01714a9ed (diff)
Merge branch 'topic/kmemtrace' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6 into tracing/kmemtrace
Diffstat (limited to 'mm')
 -rw-r--r--  mm/Makefile     |   1
 -rw-r--r--  mm/kmemtrace.c  | 333
 -rw-r--r--  mm/slab.c       |  79
 -rw-r--r--  mm/slob.c       |  37
 -rw-r--r--  mm/slub.c       | 123
5 files changed, 528 insertions, 45 deletions
diff --git a/mm/Makefile b/mm/Makefile
index c06b45a1ff5f..3782eb66d4b3 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
new file mode 100644
index 000000000000..2a70a805027c
--- /dev/null
+++ b/mm/kmemtrace.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/gfp.h>
+#include <linux/kmemtrace.h>
+
+#define KMEMTRACE_SUBBUF_SIZE 524288
+#define KMEMTRACE_DEF_N_SUBBUFS 20
+
+static struct rchan *kmemtrace_chan;
+static u32 kmemtrace_buf_overruns;
+
+static unsigned int kmemtrace_n_subbufs;
+
+/* disabled by default */
+static unsigned int kmemtrace_enabled;
+
+/*
+ * The sequence number is used for reordering kmemtrace packets
+ * in userspace, since they are logged as per-CPU data.
+ *
+ * atomic_t should always be a 32-bit signed integer. Wraparound is not
+ * likely to occur, but userspace can deal with it by expecting a certain
+ * sequence number in the next packet that will be read.
+ */
+static atomic_t kmemtrace_seq_num;
+
+#define KMEMTRACE_ABI_VERSION 1
+
+static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION;
+
+enum kmemtrace_event_id {
+	KMEMTRACE_EVENT_ALLOC = 0,
+	KMEMTRACE_EVENT_FREE,
+};
+
+struct kmemtrace_event {
+	u8 event_id;
+	u8 type_id;
+	u16 event_size;
+	s32 seq_num;
+	u64 call_site;
+	u64 ptr;
+} __attribute__ ((__packed__));
+
+struct kmemtrace_stats_alloc {
+	u64 bytes_req;
+	u64 bytes_alloc;
+	u32 gfp_flags;
+	s32 numa_node;
+} __attribute__ ((__packed__));
+
+static void kmemtrace_probe_alloc(void *probe_data, void *call_data,
+				  const char *format, va_list *args)
+{
+	unsigned long flags;
+	struct kmemtrace_event *ev;
+	struct kmemtrace_stats_alloc *stats;
+	void *buf;
+
+	local_irq_save(flags);
+
+	buf = relay_reserve(kmemtrace_chan,
+			    sizeof(struct kmemtrace_event) +
+			    sizeof(struct kmemtrace_stats_alloc));
+	if (!buf)
+		goto failed;
+
+	/*
+	 * Don't convert this to use structure initializers,
+	 * C99 does not guarantee the rvalues evaluation order.
+	 */
+
+	ev = buf;
+	ev->event_id = KMEMTRACE_EVENT_ALLOC;
+	ev->type_id = va_arg(*args, int);
+	ev->event_size = sizeof(struct kmemtrace_event) +
+			 sizeof(struct kmemtrace_stats_alloc);
+	ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+	ev->call_site = va_arg(*args, unsigned long);
+	ev->ptr = va_arg(*args, unsigned long);
+
+	stats = buf + sizeof(struct kmemtrace_event);
+	stats->bytes_req = va_arg(*args, unsigned long);
+	stats->bytes_alloc = va_arg(*args, unsigned long);
+	stats->gfp_flags = va_arg(*args, unsigned long);
+	stats->numa_node = va_arg(*args, int);
+
+failed:
+	local_irq_restore(flags);
+}
+
+static void kmemtrace_probe_free(void *probe_data, void *call_data,
+				 const char *format, va_list *args)
+{
+	unsigned long flags;
+	struct kmemtrace_event *ev;
+
+	local_irq_save(flags);
+
+	ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event));
+	if (!ev)
+		goto failed;
+
+	/*
+	 * Don't convert this to use structure initializers,
+	 * C99 does not guarantee the rvalues evaluation order.
+	 */
+	ev->event_id = KMEMTRACE_EVENT_FREE;
+	ev->type_id = va_arg(*args, int);
+	ev->event_size = sizeof(struct kmemtrace_event);
+	ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+	ev->call_site = va_arg(*args, unsigned long);
+	ev->ptr = va_arg(*args, unsigned long);
+
+failed:
+	local_irq_restore(flags);
+}
+
+static struct dentry *
+kmemtrace_create_buf_file(const char *filename, struct dentry *parent,
+			  int mode, struct rchan_buf *buf, int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+				   &relay_file_operations);
+}
+
+static int kmemtrace_remove_buf_file(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+
+	return 0;
+}
+
+static int kmemtrace_subbuf_start(struct rchan_buf *buf,
+				  void *subbuf,
+				  void *prev_subbuf,
+				  size_t prev_padding)
+{
+	if (relay_buf_full(buf)) {
+		/*
+		 * We know it's not SMP-safe, but neither
+		 * debugfs_create_u32() is.
+		 */
+		kmemtrace_buf_overruns++;
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+	.create_buf_file = kmemtrace_create_buf_file,
+	.remove_buf_file = kmemtrace_remove_buf_file,
+	.subbuf_start = kmemtrace_subbuf_start,
+};
+
+static struct dentry *kmemtrace_dir;
+static struct dentry *kmemtrace_overruns_dentry;
+static struct dentry *kmemtrace_abi_version_dentry;
+
+static struct dentry *kmemtrace_enabled_dentry;
+
+static int kmemtrace_start_probes(void)
+{
+	int err;
+
+	err = marker_probe_register("kmemtrace_alloc", "type_id %d "
+				    "call_site %lu ptr %lu "
+				    "bytes_req %lu bytes_alloc %lu "
+				    "gfp_flags %lu node %d",
+				    kmemtrace_probe_alloc, NULL);
+	if (err)
+		return err;
+	err = marker_probe_register("kmemtrace_free", "type_id %d "
+				    "call_site %lu ptr %lu",
+				    kmemtrace_probe_free, NULL);
+
+	return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+	marker_probe_unregister("kmemtrace_alloc",
+				kmemtrace_probe_alloc, NULL);
+	marker_probe_unregister("kmemtrace_free",
+				kmemtrace_probe_free, NULL);
+}
+
+static int kmemtrace_enabled_get(void *data, u64 *val)
+{
+	*val = *((int *) data);
+
+	return 0;
+}
+
+static int kmemtrace_enabled_set(void *data, u64 val)
+{
+	u64 old_val = kmemtrace_enabled;
+
+	*((int *) data) = !!val;
+
+	if (old_val == val)
+		return 0;
+	if (val)
+		kmemtrace_start_probes();
+	else
+		kmemtrace_stop_probes();
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops,
+			kmemtrace_enabled_get,
+			kmemtrace_enabled_set, "%llu\n");
+
+static void kmemtrace_cleanup(void)
+{
+	if (kmemtrace_enabled_dentry)
+		debugfs_remove(kmemtrace_enabled_dentry);
+
+	kmemtrace_stop_probes();
+
+	if (kmemtrace_abi_version_dentry)
+		debugfs_remove(kmemtrace_abi_version_dentry);
+	if (kmemtrace_overruns_dentry)
+		debugfs_remove(kmemtrace_overruns_dentry);
+
+	relay_close(kmemtrace_chan);
+	kmemtrace_chan = NULL;
+
+	if (kmemtrace_dir)
+		debugfs_remove(kmemtrace_dir);
+}
+
+static int __init kmemtrace_setup_late(void)
+{
+	if (!kmemtrace_chan)
+		goto failed;
+
+	kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL);
+	if (!kmemtrace_dir)
+		goto cleanup;
+
+	kmemtrace_abi_version_dentry =
+		debugfs_create_u32("abi_version", S_IRUSR,
+				   kmemtrace_dir, &kmemtrace_abi_version);
+	kmemtrace_overruns_dentry =
+		debugfs_create_u32("total_overruns", S_IRUSR,
+				   kmemtrace_dir, &kmemtrace_buf_overruns);
+	if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry)
+		goto cleanup;
+
+	kmemtrace_enabled_dentry =
+		debugfs_create_file("enabled", S_IRUSR | S_IWUSR,
+				    kmemtrace_dir, &kmemtrace_enabled,
+				    &kmemtrace_enabled_fops);
+	if (!kmemtrace_enabled_dentry)
+		goto cleanup;
+
+	if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir))
+		goto cleanup;
+
+	printk(KERN_INFO "kmemtrace: fully up.\n");
+
+	return 0;
+
+cleanup:
+	kmemtrace_cleanup();
+failed:
+	return 1;
+}
+late_initcall(kmemtrace_setup_late);
+
+static int __init kmemtrace_set_boot_enabled(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "yes"))
+		kmemtrace_enabled = 1;
+	else if (!strcmp(str, "no"))
+		kmemtrace_enabled = 0;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("kmemtrace.enable", kmemtrace_set_boot_enabled);
+
+static int __init kmemtrace_set_subbufs(char *str)
+{
+	get_option(&str, &kmemtrace_n_subbufs);
+	return 0;
+}
+early_param("kmemtrace.subbufs", kmemtrace_set_subbufs);
+
+void kmemtrace_init(void)
+{
+	if (!kmemtrace_n_subbufs)
+		kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS;
+
+	kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE,
+				    kmemtrace_n_subbufs, &relay_callbacks,
+				    NULL);
+	if (!kmemtrace_chan) {
+		printk(KERN_ERR "kmemtrace: could not open relay channel.\n");
+		return;
+	}
+
+	if (!kmemtrace_enabled) {
+		printk(KERN_INFO "kmemtrace: disabled. Pass "
+			"kemtrace.enable=yes as kernel parameter for "
+			"boot-time tracing.\n");
+		return;
+	}
+	if (kmemtrace_start_probes()) {
+		printk(KERN_ERR "kmemtrace: could not register marker probes!\n");
+		kmemtrace_cleanup();
+		return;
+	}
+
+	printk(KERN_INFO "kmemtrace: enabled.\n");
+}
+
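Note: the kmemtrace_mark_alloc(), kmemtrace_mark_alloc_node() and kmemtrace_mark_free() helpers used by the SLAB/SLOB/SLUB hooks below are declared in include/linux/kmemtrace.h, which falls outside this 'mm'-limited diffstat. The following is only a sketch of what that header side presumably looks like, inferred from the marker format strings registered in kmemtrace_start_probes() above; treat the exact signatures and the KMEMTRACE_TYPE_* values as assumptions rather than the merged code.

/* Sketch only (assumed, not part of this diff): trace_mark() wrappers behind
 * the kmemtrace_mark_*() calls used by the allocator hooks below. The marker
 * names and format strings must match the ones passed to
 * marker_probe_register() in kmemtrace_start_probes() above. */
#include <linux/marker.h>
#include <linux/types.h>

enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc()/kfree() */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_alloc()/kmem_cache_free() */
	KMEMTRACE_TYPE_PAGES,		/* page allocations (not used in this diff) */
};

static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
					     const void *ptr,
					     size_t bytes_req,
					     size_t bytes_alloc,
					     gfp_t gfp_flags,
					     int node)
{
	trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
		   "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
		   type_id, call_site, (unsigned long)ptr,
		   (unsigned long)bytes_req, (unsigned long)bytes_alloc,
		   (unsigned long)gfp_flags, node);
}

static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
					unsigned long call_site,
					const void *ptr,
					size_t bytes_req,
					size_t bytes_alloc,
					gfp_t gfp_flags)
{
	/* Non-NUMA variant: report "no node" as -1, matching the probe. */
	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
				  bytes_req, bytes_alloc, gfp_flags, -1);
}

static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr)
{
	trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
		   type_id, call_site, (unsigned long)ptr);
}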
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -112,6 +112,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemtrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3661,23 +3683,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3686,9 +3732,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3719,20 +3766,26 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3788,6 +3843,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
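The kmem_cache_alloc_notrace()/kmem_cache_alloc_node_notrace() exports and the new slab_buffer_size() helper exist for the inline kmalloc() fast path in include/linux/slab_def.h, which is outside this 'mm'-only diff: when kmalloc() resolves a constant-size request to a general cache at compile time, it wants to emit a single KMEMTRACE_TYPE_KMALLOC event carrying the requested size and the real caller, rather than the KMEMTRACE_TYPE_CACHE event kmem_cache_alloc() would log. A rough sketch of how the header side presumably uses them (kmalloc_slab_lookup() is a hypothetical stand-in for the header's compile-time size-to-cache mapping):

/* Sketch only (assumed, not in this diff): inline kmalloc() fast path that
 * pairs with the _notrace export so the recorded call_site is the kmalloc()
 * caller and bytes_req is the requested size. kmalloc_slab_lookup() is a
 * hypothetical placeholder for the header's size-to-cache resolution. */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		struct kmem_cache *cachep;
		void *ret;

		cachep = kmalloc_slab_lookup(size, flags);	/* hypothetical */
		if (ZERO_OR_NULL_PTR(cachep))
			return cachep;

		/* Allocate without tracing, then report one kmalloc-typed
		 * event crediting this call site and the true object size. */
		ret = kmem_cache_alloc_notrace(cachep, flags);
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
				     size, slab_buffer_size(cachep), flags);
		return ret;
	}
	return __kmalloc(size, flags);
}

The same reasoning explains why __do_kmalloc_node() above now calls kmem_cache_alloc_node_notrace(): it emits its own KMEMTRACE_TYPE_KMALLOC event, and calling the traced kmem_cache_alloc_node() would have produced a duplicate, cache-typed record for the same allocation.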
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
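In the SLOB hooks above, bytes_alloc reports the real footprint rather than the requested size: small objects are rounded up to whole SLOB units, large ones occupy a full power-of-two page block. A tiny userspace arithmetic sketch of the two cases (SLOB_UNIT is assumed to be 2 bytes, i.e. sizeof(struct slob_block) with 4 KiB pages, and get_order() is re-implemented here only for illustration; neither is part of this merge):

/* Sketch: how the SLOB hooks compute the bytes_alloc argument passed to
 * kmemtrace_mark_alloc_node(). Assumptions: 4 KiB pages, SLOB_UNIT == 2. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define SLOB_UNIT 2UL				/* assumed, see lead-in */
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)

static unsigned int get_order(unsigned long size)	/* illustration only */
{
	unsigned int order = 0;

	size = (size - 1) >> 12;		/* PAGE_SHIFT == 12 assumed */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long small = 100, large = 10000;

	/* small cache object: rounded up to a whole number of SLOB units */
	printf("bytes_alloc(small) = %lu\n", SLOB_UNITS(small) * SLOB_UNIT);
	/* large object: a whole page-order allocation (here 4 pages) */
	printf("bytes_alloc(large) = %lu\n", PAGE_SIZE << get_order(large));
	return 0;
}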
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/kmemtrace.h>
 
 /*
  * Lock order:
@@ -178,7 +179,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -367,7 +368,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -391,8 +392,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +402,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -866,7 +867,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +907,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1030,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1500,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,7 +1585,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1634,7 +1663,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1704,7 +1733,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1731,7 +1760,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
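A recurring pattern in the SLUB hunks above and below: call-site addresses are now carried as unsigned long instead of void *, and the open-coded __builtin_return_address(0) arguments become _RET_IP_. That macro already exists in include/linux/kernel.h and is simply the same value with the new type, so the conversion changes no behaviour:

/* From include/linux/kernel.h (pre-existing; shown here for reference only): */
#define _RET_IP_	(unsigned long)__builtin_return_address(0)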
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > PAGE_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2795,9 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3202,9 +3255,10 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3214,13 +3268,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3230,7 +3291,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -3429,7 +3496,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3477,7 +3544,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
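Taken together, the SLAB, SLOB and SLUB hooks feed the packed records defined in mm/kmemtrace.c through relay into per-CPU debugfs files ("cpu0", "cpu1", ... under the kmemtrace directory created above). The following is only a rough illustration, not part of this merge, of how a userspace consumer might decode one such file: the /sys/kernel/debug mount point is an assumption, the struct layouts simply restate the ones in mm/kmemtrace.c, and the naive sequential read ignores relay sub-buffer padding that a real tool would have to skip using event_size and the sub-buffer boundaries.

/* Userspace sketch (assumption): decode records from one per-CPU kmemtrace
 * relay file. Layouts mirror the packed structs in mm/kmemtrace.c. */
#include <stdint.h>
#include <stdio.h>

struct kmemtrace_event {
	uint8_t  event_id;	/* 0 = alloc, 1 = free */
	uint8_t  type_id;	/* kmalloc vs. kmem_cache */
	uint16_t event_size;	/* header plus optional alloc payload */
	int32_t  seq_num;	/* for reordering the per-CPU streams */
	uint64_t call_site;
	uint64_t ptr;
} __attribute__((__packed__));

struct kmemtrace_stats_alloc {
	uint64_t bytes_req;
	uint64_t bytes_alloc;
	uint32_t gfp_flags;
	int32_t  numa_node;
} __attribute__((__packed__));

int main(void)
{
	/* Assumed debugfs mount point; file name comes from the "cpu" base
	 * passed to relay_late_setup_files() in kmemtrace_setup_late(). */
	FILE *f = fopen("/sys/kernel/debug/kmemtrace/cpu0", "rb");
	struct kmemtrace_event ev;

	if (!f)
		return 1;

	while (fread(&ev, sizeof(ev), 1, f) == 1) {
		printf("seq=%d %s site=%#llx ptr=%#llx",
		       ev.seq_num, ev.event_id == 0 ? "alloc" : "free",
		       (unsigned long long)ev.call_site,
		       (unsigned long long)ev.ptr);

		if (ev.event_id == 0) {	/* KMEMTRACE_EVENT_ALLOC payload */
			struct kmemtrace_stats_alloc st;

			if (fread(&st, sizeof(st), 1, f) != 1)
				break;
			printf(" req=%llu alloc=%llu node=%d",
			       (unsigned long long)st.bytes_req,
			       (unsigned long long)st.bytes_alloc,
			       st.numa_node);
		}
		putchar('\n');
	}
	fclose(f);
	return 0;
}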