-rw-r--r-- | Documentation/ABI/testing/debugfs-kmemtrace | 71
-rw-r--r-- | Documentation/kernel-parameters.txt | 10
-rw-r--r-- | Documentation/vm/kmemtrace.txt | 126
-rw-r--r-- | MAINTAINERS | 6
-rw-r--r-- | include/linux/kmemtrace.h | 86
-rw-r--r-- | include/linux/slab.h | 8
-rw-r--r-- | include/linux/slab_def.h | 68
-rw-r--r-- | include/linux/slob_def.h | 9
-rw-r--r-- | include/linux/slub_def.h | 53
-rw-r--r-- | init/main.c | 2
-rw-r--r-- | lib/Kconfig.debug | 20
-rw-r--r-- | mm/Makefile | 1
-rw-r--r-- | mm/kmemtrace.c | 333
-rw-r--r-- | mm/slab.c | 79
-rw-r--r-- | mm/slob.c | 37
-rw-r--r-- | mm/slub.c | 123
16 files changed, 967 insertions, 65 deletions
diff --git a/Documentation/ABI/testing/debugfs-kmemtrace b/Documentation/ABI/testing/debugfs-kmemtrace
new file mode 100644
index 000000000000..5e6a92a02d85
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-kmemtrace
@@ -0,0 +1,71 @@ | |||
1 | What: /sys/kernel/debug/kmemtrace/ | ||
2 | Date: July 2008 | ||
3 | Contact: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro> | ||
4 | Description: | ||
5 | |||
6 | In kmemtrace-enabled kernels, the following files are created: | ||
7 | |||
8 | /sys/kernel/debug/kmemtrace/ | ||
9 | cpu<n> (0400) Per-CPU tracing data, see below. (binary) | ||
10 | total_overruns (0400) Total number of bytes dropped from the | ||
11 | cpu<n> files because of a full-buffer | ||
12 | condition. (text) | ||
13 | abi_version (0400) Kernel's kmemtrace ABI version. (text) | ||
14 | |||
15 | Each per-CPU file should be read according to the relay interface. That is, | ||
16 | the reader should set affinity to that specific CPU and, as currently done by | ||
17 | the userspace application (though there are other methods), use poll() with | ||
18 | an infinite timeout before every read(). Otherwise, erroneous data may be | ||
19 | read. The binary data has the following _core_ format: | ||
20 | |||
21 | Event ID (1 byte) Unsigned integer, one of: | ||
22 | 0 - represents an allocation (KMEMTRACE_EVENT_ALLOC) | ||
23 | 1 - represents a freeing of previously allocated memory | ||
24 | (KMEMTRACE_EVENT_FREE) | ||
25 | Type ID (1 byte) Unsigned integer, one of: | ||
26 | 0 - this is a kmalloc() / kfree() | ||
27 | 1 - this is a kmem_cache_alloc() / kmem_cache_free() | ||
28 | 2 - this is a __get_free_pages() et al. | ||
29 | Event size (2 bytes) Unsigned integer representing the | ||
30 | size of this event. Used to extend | ||
31 | kmemtrace. Discard the bytes you | ||
32 | don't know about. | ||
33 | Sequence number (4 bytes) Signed integer used to reorder data | ||
34 | logged on SMP machines. Wraparound | ||
35 | must be taken into account, although | ||
36 | it is unlikely. | ||
37 | Caller address (8 bytes) Return address to the caller. | ||
38 | Pointer to mem (8 bytes) Pointer to target memory area. Can be | ||
39 | NULL, but not all such calls might be | ||
40 | recorded. | ||
41 | |||
42 | In case of KMEMTRACE_EVENT_ALLOC events, the next fields follow: | ||
43 | |||
44 | Requested bytes (8 bytes) Total number of requested bytes, | ||
45 | unsigned, must not be zero. | ||
46 | Allocated bytes (8 bytes) Total number of actually allocated | ||
47 | bytes, unsigned, must not be lower | ||
48 | than requested bytes. | ||
49 | Requested flags (4 bytes) GFP flags supplied by the caller. | ||
50 | Target CPU (4 bytes) Signed integer, valid for event id 0. | ||
51 | If equal to -1, target CPU is the same | ||
52 | as origin CPU, but the reverse might | ||
53 | not be true. | ||
54 | |||
55 | The data is made available in the machine's native endianness. | ||
56 | |||
57 | Other event ids and type ids may be defined and added. Other fields may be | ||
58 | added by increasing event size, but see below for details. | ||
59 | Every modification to the ABI, including new id definitions, is accompanied | ||
60 | by bumping the ABI version by one. | ||
61 | |||
62 | Adding new data to the packet (features) is done at the end of the mandatory | ||
63 | data: | ||
64 | Feature size (2 bytes) | ||
65 | Feature ID (1 byte) | ||
66 | Feature data (Feature size - 3 bytes) | ||
67 | |||
68 | |||
69 | Users: | ||
70 | kmemtrace-user - git://repo.or.cz/kmemtrace-user.git | ||
71 | |||
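Editor's note: the core format above maps directly onto two packed structures (they reappear as struct kmemtrace_event and struct kmemtrace_stats_alloc in mm/kmemtrace.c below). A minimal userspace reader is sketched here, assuming the process has already been pinned to the CPU matching the file and omitting error handling; the file name and variable names are illustrative, not part of the ABI:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Mirrors the core packet layout described above (native endianness). */
    struct kmemtrace_event {
            uint8_t  event_id;      /* 0 = ALLOC, 1 = FREE */
            uint8_t  type_id;       /* 0 = kmalloc, 1 = kmem_cache, 2 = pages */
            uint16_t event_size;    /* total event size, allows extensions */
            int32_t  seq_num;       /* for cross-CPU reordering */
            uint64_t call_site;
            uint64_t ptr;
    } __attribute__ ((__packed__));

    struct kmemtrace_stats_alloc {
            uint64_t bytes_req;
            uint64_t bytes_alloc;
            uint32_t gfp_flags;
            int32_t  numa_node;
    } __attribute__ ((__packed__));

    int main(void)
    {
            int fd = open("/sys/kernel/debug/kmemtrace/cpu0", O_RDONLY);
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            struct kmemtrace_event ev;
            struct kmemtrace_stats_alloc stats;
            static uint8_t skip[65536];

            /* As recommended above: poll() with an infinite timeout
             * before every read(). */
            while (fd >= 0 && poll(&pfd, 1, -1) > 0) {
                    uint16_t got = sizeof(ev);

                    if (read(fd, &ev, sizeof(ev)) != (ssize_t) sizeof(ev))
                            break;
                    if (ev.event_id == 0) { /* KMEMTRACE_EVENT_ALLOC */
                            if (read(fd, &stats, sizeof(stats)) !=
                                (ssize_t) sizeof(stats))
                                    break;
                            got += sizeof(stats);
                            printf("seq=%d alloc req=%llu got=%llu\n",
                                   ev.seq_num,
                                   (unsigned long long) stats.bytes_req,
                                   (unsigned long long) stats.bytes_alloc);
                    } else {
                            printf("seq=%d free ptr=0x%llx\n", ev.seq_num,
                                   (unsigned long long) ev.ptr);
                    }
                    /* Forward compatibility, per the event-size rule above:
                     * discard trailing bytes we don't know about. */
                    if (ev.event_size > got)
                            read(fd, skip, ev.event_size - got);
            }
            return 0;
    }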
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a2d8805c03d5..af600c0fe0ec 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -49,6 +49,7 @@ parameter is applicable: | |||
49 | ISAPNP ISA PnP code is enabled. | 49 | ISAPNP ISA PnP code is enabled. |
50 | ISDN Appropriate ISDN support is enabled. | 50 | ISDN Appropriate ISDN support is enabled. |
51 | JOY Appropriate joystick support is enabled. | 51 | JOY Appropriate joystick support is enabled. |
52 | KMEMTRACE kmemtrace is enabled. | ||
52 | LIBATA Libata driver is enabled | 53 | LIBATA Libata driver is enabled |
53 | LP Printer support is enabled. | 54 | LP Printer support is enabled. |
54 | LOOP Loopback device support is enabled. | 55 | LOOP Loopback device support is enabled. |
@@ -1033,6 +1034,15 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1033 | use the HighMem zone if it exists, and the Normal | 1034 | use the HighMem zone if it exists, and the Normal |
1034 | zone if it does not. | 1035 | zone if it does not. |
1035 | 1036 | ||
1037 | kmemtrace.enable= [KNL,KMEMTRACE] Format: { yes | no } | ||
1038 | Controls whether kmemtrace is enabled | ||
1039 | at boot-time. | ||
1040 | |||
1041 | kmemtrace.subbufs=n [KNL,KMEMTRACE] Overrides the number of | ||
1042 | subbufs kmemtrace's relay channel has. Set this | ||
1043 | higher than the default (KMEMTRACE_DEF_N_SUBBUFS in code) if | ||
1044 | you experience buffer overruns. | ||
1045 | |||
1036 | movablecore=nn[KMG] [KNL,X86-32,IA-64,PPC,X86-64] This parameter | 1046 | movablecore=nn[KMG] [KNL,X86-32,IA-64,PPC,X86-64] This parameter |
1037 | is similar to kernelcore except it specifies the | 1047 | is similar to kernelcore except it specifies the |
1038 | amount of memory used for migratable allocations. | 1048 | amount of memory used for migratable allocations. |
diff --git a/Documentation/vm/kmemtrace.txt b/Documentation/vm/kmemtrace.txt
new file mode 100644
index 000000000000..a956d9b7f943
--- /dev/null
+++ b/Documentation/vm/kmemtrace.txt
@@ -0,0 +1,126 @@ | |||
1 | kmemtrace - Kernel Memory Tracer | ||
2 | |||
3 | by Eduard - Gabriel Munteanu | ||
4 | <eduard.munteanu@linux360.ro> | ||
5 | |||
6 | I. Introduction | ||
7 | =============== | ||
8 | |||
9 | kmemtrace helps kernel developers figure out two things: | ||
10 | 1) how different allocators (SLAB, SLUB etc.) perform | ||
11 | 2) how kernel code allocates memory and how much | ||
12 | |||
13 | To do this, we trace every allocation and export information to the userspace | ||
14 | through the relay interface. We export things such as the number of requested | ||
15 | bytes, the number of bytes actually allocated (i.e. including internal | ||
16 | fragmentation), whether this is a slab allocation or a plain kmalloc() and so | ||
17 | on. | ||
18 | |||
19 | The actual analysis is performed by a userspace tool (see section III for | ||
20 | details on where to get it from). It logs the data exported by the kernel, | ||
21 | processes it and (as of writing this) can provide the following information: | ||
22 | - the total amount of memory allocated and fragmentation per call-site | ||
23 | - the amount of memory allocated and fragmentation per allocation | ||
24 | - total memory allocated and fragmentation in the collected dataset | ||
25 | - number of cross-CPU allocations and frees (makes sense in NUMA environments) | ||
26 | |||
27 | Moreover, it can potentially find inconsistent and erroneous behavior in | ||
28 | kernel code, such as using slab free functions on kmalloc'ed memory or | ||
29 | allocating less memory than requested (but not truly failed allocations). | ||
30 | |||
31 | kmemtrace also makes provisions for tracing on one arch and analysing the | ||
32 | data on another. | ||
33 | |||
34 | II. Design and goals | ||
35 | ==================== | ||
36 | |||
37 | kmemtrace was designed to handle rather large amounts of data. Thus, it uses | ||
38 | the relay interface to export whatever is logged to userspace, which then | ||
39 | stores it. Analysis and reporting are done asynchronously, that is, after the | ||
40 | data is collected and stored. By design, it allows one to log and analyse | ||
41 | on different machines and different arches. | ||
42 | |||
43 | As of writing this, the ABI is not considered stable, though it might not | ||
44 | change much. However, no guarantees are made about compatibility yet. When | ||
45 | deemed stable, the ABI should still allow easy extension while maintaining | ||
46 | backward compatibility. This is described further in Documentation/ABI. | ||
47 | |||
48 | Summary of design goals: | ||
49 | - allow logging and analysis to be done across different machines | ||
50 | - be fast and anticipate usage in high-load environments (*) | ||
51 | - be reasonably extensible | ||
52 | - make it possible for GNU/Linux distributions to have kmemtrace | ||
53 | included in their repositories | ||
54 | |||
55 | (*) - this is one of the reasons Pekka Enberg's original userspace data | ||
56 | analysis tool was rewritten from Perl to C (although it is more than a | ||
57 | simple conversion) | ||
58 | |||
59 | |||
60 | III. Quick usage guide | ||
61 | ====================== | ||
62 | |||
63 | 1) Get a kernel that supports kmemtrace and build it accordingly (i.e. enable | ||
64 | CONFIG_KMEMTRACE). | ||
65 | |||
66 | 2) Get the userspace tool and build it: | ||
67 | $ git-clone git://repo.or.cz/kmemtrace-user.git # current repository | ||
68 | $ cd kmemtrace-user/ | ||
69 | $ ./autogen.sh | ||
70 | $ ./configure | ||
71 | $ make | ||
72 | |||
73 | 3) Boot the kmemtrace-enabled kernel if you haven't, preferably in the | ||
74 | 'single' runlevel (so that relay buffers don't fill up easily), and run | ||
75 | kmemtrace: | ||
76 | # Note: the '$' prompt below denotes root, not a regular user. | ||
77 | $ mount -t debugfs none /sys/kernel/debug | ||
78 | $ mount -t proc none /proc | ||
79 | $ cd path/to/kmemtrace-user/ | ||
80 | $ ./kmemtraced | ||
81 | Wait a bit, then stop it with CTRL+C. | ||
82 | $ cat /sys/kernel/debug/kmemtrace/total_overruns # Check if we didn't | ||
83 | # overrun, should | ||
84 | # be zero. | ||
85 | (Optionally) run kmemtrace_check separately on each cpu[0-9]*.out file | ||
86 | to check its correctness. | ||
87 | $ ./kmemtrace-report | ||
88 | |||
89 | Now you should have a nice and short summary of how the allocator performs. | ||
90 | |||
91 | IV. FAQ and known issues | ||
92 | ======================== | ||
93 | |||
94 | Q: 'cat /sys/kernel/debug/kmemtrace/total_overruns' is non-zero, how do I fix | ||
95 | this? Should I worry? | ||
96 | A: If it's non-zero, this affects kmemtrace's accuracy, depending on how | ||
97 | large the number is. You can fix it by passing a higher value via the | ||
98 | 'kmemtrace.subbufs=N' kernel parameter. | ||
99 | --- | ||
100 | |||
101 | Q: kmemtrace_check reports errors, how do I fix this? Should I worry? | ||
102 | A: This is a bug and should be reported. It can occur for a variety of | ||
103 | reasons: | ||
104 | - possible bugs in relay code | ||
105 | - possible misuse of relay by kmemtrace | ||
106 | - timestamps being collected out of order | ||
107 | Or you may fix it yourself and send us a patch. | ||
108 | --- | ||
109 | |||
110 | Q: kmemtrace_report shows many errors, how do I fix this? Should I worry? | ||
111 | A: This is a known issue and I'm working on it. These might be true errors | ||
112 | in kernel code, which may have inconsistent behavior (e.g. allocating memory | ||
113 | with kmem_cache_alloc() and freeing it with kfree()). Pekka Enberg pointed | ||
114 | out this behavior may work with SLAB, but may fail with other allocators. | ||
115 | |||
116 | It may also be due to lack of tracing in some unusual allocator functions. | ||
117 | |||
118 | We don't want bug reports regarding this issue yet. | ||
119 | --- | ||
120 | |||
121 | V. See also | ||
122 | =========== | ||
123 | |||
124 | Documentation/kernel-parameters.txt | ||
125 | Documentation/ABI/testing/debugfs-kmemtrace | ||
126 | |||
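Editor's note: to make the "fragmentation per call-site" figures from section I concrete, a toy aggregator over already-decoded ALLOC events could accumulate requested versus allocated bytes per call site as below. This is a sketch only; the real kmemtrace-user tool is considerably more involved, and all names here are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SITES 1024

    struct site_stats {
            uint64_t call_site;
            uint64_t bytes_req;
            uint64_t bytes_alloc;
    };

    static struct site_stats sites[MAX_SITES];
    static unsigned int nr_sites;

    /* Accumulate one decoded ALLOC event into its call site's bucket. */
    static void account_alloc(uint64_t call_site, uint64_t req, uint64_t alloc)
    {
            unsigned int i;

            for (i = 0; i < nr_sites; i++)
                    if (sites[i].call_site == call_site)
                            goto found;
            if (nr_sites == MAX_SITES)
                    return;
            sites[i].call_site = call_site;
            nr_sites++;
    found:
            sites[i].bytes_req += req;
            sites[i].bytes_alloc += alloc;
    }

    static void report(void)
    {
            unsigned int i;

            /* Internal fragmentation = allocated minus requested bytes. */
            for (i = 0; i < nr_sites; i++)
                    printf("site 0x%llx: req %llu, alloc %llu, frag %llu\n",
                           (unsigned long long) sites[i].call_site,
                           (unsigned long long) sites[i].bytes_req,
                           (unsigned long long) sites[i].bytes_alloc,
                           (unsigned long long)
                           (sites[i].bytes_alloc - sites[i].bytes_req));
    }

    int main(void)
    {
            /* Hand-fed events; in reality these come from the cpu<n> files. */
            account_alloc(0xc01234f0ULL, 100, 128);
            account_alloc(0xc01234f0ULL, 60, 64);
            report();
            return 0;
    }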
diff --git a/MAINTAINERS b/MAINTAINERS
index 08d0ab7fa161..857c877eee20 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2566,6 +2566,12 @@ M: jason.wessel@windriver.com | |||
2566 | L: kgdb-bugreport@lists.sourceforge.net | 2566 | L: kgdb-bugreport@lists.sourceforge.net |
2567 | S: Maintained | 2567 | S: Maintained |
2568 | 2568 | ||
2569 | KMEMTRACE | ||
2570 | P: Eduard - Gabriel Munteanu | ||
2571 | M: eduard.munteanu@linux360.ro | ||
2572 | L: linux-kernel@vger.kernel.org | ||
2573 | S: Maintained | ||
2574 | |||
2569 | KPROBES | 2575 | KPROBES |
2570 | P: Ananth N Mavinakayanahalli | 2576 | P: Ananth N Mavinakayanahalli |
2571 | M: ananth@in.ibm.com | 2577 | M: ananth@in.ibm.com |
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644
index 000000000000..5bea8ead6a6b
--- /dev/null
+++ b/include/linux/kmemtrace.h
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/marker.h> | ||
14 | |||
15 | enum kmemtrace_type_id { | ||
16 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | ||
17 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | ||
18 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | ||
19 | }; | ||
20 | |||
21 | #ifdef CONFIG_KMEMTRACE | ||
22 | |||
23 | extern void kmemtrace_init(void); | ||
24 | |||
25 | static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
26 | unsigned long call_site, | ||
27 | const void *ptr, | ||
28 | size_t bytes_req, | ||
29 | size_t bytes_alloc, | ||
30 | gfp_t gfp_flags, | ||
31 | int node) | ||
32 | { | ||
33 | trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu " | ||
34 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d", | ||
35 | type_id, call_site, (unsigned long) ptr, | ||
36 | (unsigned long) bytes_req, (unsigned long) bytes_alloc, | ||
37 | (unsigned long) gfp_flags, node); | ||
38 | } | ||
39 | |||
40 | static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
41 | unsigned long call_site, | ||
42 | const void *ptr) | ||
43 | { | ||
44 | trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu", | ||
45 | type_id, call_site, (unsigned long) ptr); | ||
46 | } | ||
47 | |||
48 | #else /* CONFIG_KMEMTRACE */ | ||
49 | |||
50 | static inline void kmemtrace_init(void) | ||
51 | { | ||
52 | } | ||
53 | |||
54 | static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
55 | unsigned long call_site, | ||
56 | const void *ptr, | ||
57 | size_t bytes_req, | ||
58 | size_t bytes_alloc, | ||
59 | gfp_t gfp_flags, | ||
60 | int node) | ||
61 | { | ||
62 | } | ||
63 | |||
64 | static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
65 | unsigned long call_site, | ||
66 | const void *ptr) | ||
67 | { | ||
68 | } | ||
69 | |||
70 | #endif /* CONFIG_KMEMTRACE */ | ||
71 | |||
72 | static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id, | ||
73 | unsigned long call_site, | ||
74 | const void *ptr, | ||
75 | size_t bytes_req, | ||
76 | size_t bytes_alloc, | ||
77 | gfp_t gfp_flags) | ||
78 | { | ||
79 | kmemtrace_mark_alloc_node(type_id, call_site, ptr, | ||
80 | bytes_req, bytes_alloc, gfp_flags, -1); | ||
81 | } | ||
82 | |||
83 | #endif /* __KERNEL__ */ | ||
84 | |||
85 | #endif /* _LINUX_KMEMTRACE_H */ | ||
86 | |||
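Editor's note: this header is the entire surface an allocator needs in order to be traced, as the mm/slab.c, mm/slob.c and mm/slub.c hunks below demonstrate. In condensed form, instrumenting a hypothetical allocator looks like the following sketch; my_alloc_impl(), my_actual_size() and my_free_impl() are stand-ins rather than kernel APIs, and _RET_IP_ comes from linux/kernel.h:

    #include <linux/kernel.h>       /* _RET_IP_ */
    #include <linux/kmemtrace.h>

    /* Hypothetical allocator internals, for illustration only. */
    extern void *my_alloc_impl(size_t size, gfp_t flags);
    extern size_t my_actual_size(size_t size);
    extern void my_free_impl(void *ptr);

    void *my_alloc(size_t size, gfp_t flags)
    {
            void *ret = my_alloc_impl(size, flags);

            /* One event per call: requested vs. actually allocated bytes. */
            kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
                                 size, my_actual_size(size), flags);
            return ret;
    }

    void my_free(void *ptr)
    {
            my_free_impl(ptr);
            kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
    }

With CONFIG_KMEMTRACE disabled, both marks compile to empty inlines, so the hooks cost nothing.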
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 000da12b5cf0..c97ed28559ec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |||
253 | * request comes from. | 253 | * request comes from. |
254 | */ | 254 | */ |
255 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 255 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) |
256 | extern void *__kmalloc_track_caller(size_t, gfp_t, void*); | 256 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); |
257 | #define kmalloc_track_caller(size, flags) \ | 257 | #define kmalloc_track_caller(size, flags) \ |
258 | __kmalloc_track_caller(size, flags, __builtin_return_address(0)) | 258 | __kmalloc_track_caller(size, flags, _RET_IP_) |
259 | #else | 259 | #else |
260 | #define kmalloc_track_caller(size, flags) \ | 260 | #define kmalloc_track_caller(size, flags) \ |
261 | __kmalloc(size, flags) | 261 | __kmalloc(size, flags) |
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); | |||
271 | * allocation request comes from. | 271 | * allocation request comes from. |
272 | */ | 272 | */ |
273 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 273 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) |
274 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | 274 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); |
275 | #define kmalloc_node_track_caller(size, flags, node) \ | 275 | #define kmalloc_node_track_caller(size, flags, node) \ |
276 | __kmalloc_node_track_caller(size, flags, node, \ | 276 | __kmalloc_node_track_caller(size, flags, node, \ |
277 | __builtin_return_address(0)) | 277 | _RET_IP_) |
278 | #else | 278 | #else |
279 | #define kmalloc_node_track_caller(size, flags, node) \ | 279 | #define kmalloc_node_track_caller(size, flags, node) \ |
280 | __kmalloc_node(size, flags, node) | 280 | __kmalloc_node(size, flags, node) |
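Editor's note: the switch from void * to unsigned long here relies on two existing helpers from include/linux/kernel.h, which the new inline tracing hooks below also use; they are roughly defined as:

    #define _RET_IP_   (unsigned long)__builtin_return_address(0)
    #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

_RET_IP_ records the caller's return address, while _THIS_IP_ records the current instruction address, which is the right call site for code inlined into the caller.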
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8ebe..7555ce99f6d2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/kmemtrace.h> | ||
17 | 18 | ||
18 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
19 | struct cache_sizes { | 20 | struct cache_sizes { |
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[]; | |||
28 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 29 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
29 | void *__kmalloc(size_t size, gfp_t flags); | 30 | void *__kmalloc(size_t size, gfp_t flags); |
30 | 31 | ||
31 | static inline void *kmalloc(size_t size, gfp_t flags) | 32 | #ifdef CONFIG_KMEMTRACE |
33 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); | ||
34 | extern size_t slab_buffer_size(struct kmem_cache *cachep); | ||
35 | #else | ||
36 | static __always_inline void * | ||
37 | kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) | ||
32 | { | 38 | { |
39 | return kmem_cache_alloc(cachep, flags); | ||
40 | } | ||
41 | static inline size_t slab_buffer_size(struct kmem_cache *cachep) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
48 | { | ||
49 | struct kmem_cache *cachep; | ||
50 | void *ret; | ||
51 | |||
33 | if (__builtin_constant_p(size)) { | 52 | if (__builtin_constant_p(size)) { |
34 | int i = 0; | 53 | int i = 0; |
35 | 54 | ||
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
50 | found: | 69 | found: |
51 | #ifdef CONFIG_ZONE_DMA | 70 | #ifdef CONFIG_ZONE_DMA |
52 | if (flags & GFP_DMA) | 71 | if (flags & GFP_DMA) |
53 | return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, | 72 | cachep = malloc_sizes[i].cs_dmacachep; |
54 | flags); | 73 | else |
55 | #endif | 74 | #endif |
56 | return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); | 75 | cachep = malloc_sizes[i].cs_cachep; |
76 | |||
77 | ret = kmem_cache_alloc_notrace(cachep, flags); | ||
78 | |||
79 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret, | ||
80 | size, slab_buffer_size(cachep), flags); | ||
81 | |||
82 | return ret; | ||
57 | } | 83 | } |
58 | return __kmalloc(size, flags); | 84 | return __kmalloc(size, flags); |
59 | } | 85 | } |
@@ -62,8 +88,25 @@ found: | |||
62 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | 88 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
63 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 89 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
64 | 90 | ||
65 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 91 | #ifdef CONFIG_KMEMTRACE |
92 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
93 | gfp_t flags, | ||
94 | int nodeid); | ||
95 | #else | ||
96 | static __always_inline void * | ||
97 | kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
98 | gfp_t flags, | ||
99 | int nodeid) | ||
100 | { | ||
101 | return kmem_cache_alloc_node(cachep, flags, nodeid); | ||
102 | } | ||
103 | #endif | ||
104 | |||
105 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
66 | { | 106 | { |
107 | struct kmem_cache *cachep; | ||
108 | void *ret; | ||
109 | |||
67 | if (__builtin_constant_p(size)) { | 110 | if (__builtin_constant_p(size)) { |
68 | int i = 0; | 111 | int i = 0; |
69 | 112 | ||
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
84 | found: | 127 | found: |
85 | #ifdef CONFIG_ZONE_DMA | 128 | #ifdef CONFIG_ZONE_DMA |
86 | if (flags & GFP_DMA) | 129 | if (flags & GFP_DMA) |
87 | return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, | 130 | cachep = malloc_sizes[i].cs_dmacachep; |
88 | flags, node); | 131 | else |
89 | #endif | 132 | #endif |
90 | return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, | 133 | cachep = malloc_sizes[i].cs_cachep; |
91 | flags, node); | 134 | |
135 | ret = kmem_cache_alloc_node_notrace(cachep, flags, node); | ||
136 | |||
137 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, | ||
138 | ret, size, slab_buffer_size(cachep), | ||
139 | flags, node); | ||
140 | |||
141 | return ret; | ||
92 | } | 142 | } |
93 | return __kmalloc_node(size, flags, node); | 143 | return __kmalloc_node(size, flags, node); |
94 | } | 144 | } |
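Editor's note: the pattern above is worth spelling out. The inline kmalloc() and kmalloc_node() now emit the KMEMTRACE_TYPE_KMALLOC event themselves, so they must call the kmem_cache_alloc_notrace() variants; otherwise the inner allocation would be logged a second time as a KMEMTRACE_TYPE_CACHE event. With CONFIG_KMEMTRACE off, the notrace variants collapse back into the plain allocators. Schematically (a sketch with illustrative names, not kernel code):

    /* Sketch of the wrapper/notrace split used above. */
    extern struct kmem_cache *cache_for(size_t size);
    extern void *cache_alloc_notrace(struct kmem_cache *c, gfp_t flags);
    extern void emit_alloc_event(void *ptr, size_t size, gfp_t flags);

    static inline void *traced_kmalloc(size_t size, gfp_t flags)
    {
            void *ret = cache_alloc_notrace(cache_for(size), flags); /* silent */

            emit_alloc_event(ret, size, flags);  /* exactly one event, here */
            return ret;
    }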
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@ | |||
3 | 3 | ||
4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
5 | 5 | ||
6 | static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 6 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, |
7 | gfp_t flags) | ||
7 | { | 8 | { |
8 | return kmem_cache_alloc_node(cachep, flags, -1); | 9 | return kmem_cache_alloc_node(cachep, flags, -1); |
9 | } | 10 | } |
10 | 11 | ||
11 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 12 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
12 | 13 | ||
13 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 14 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
14 | { | 15 | { |
15 | return __kmalloc_node(size, flags, node); | 16 | return __kmalloc_node(size, flags, node); |
16 | } | 17 | } |
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
23 | * kmalloc is the normal method of allocating memory | 24 | * kmalloc is the normal method of allocating memory |
24 | * in the kernel. | 25 | * in the kernel. |
25 | */ | 26 | */ |
26 | static inline void *kmalloc(size_t size, gfp_t flags) | 27 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
27 | { | 28 | { |
28 | return __kmalloc_node(size, flags, -1); | 29 | return __kmalloc_node(size, flags, -1); |
29 | } | 30 | } |
30 | 31 | ||
31 | static inline void *__kmalloc(size_t size, gfp_t flags) | 32 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) |
32 | { | 33 | { |
33 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
34 | } | 35 | } |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..dc28432b5b9a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <linux/kmemtrace.h> | ||
13 | 14 | ||
14 | enum stat_item { | 15 | enum stat_item { |
15 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
204 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 205 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
205 | void *__kmalloc(size_t size, gfp_t flags); | 206 | void *__kmalloc(size_t size, gfp_t flags); |
206 | 207 | ||
208 | #ifdef CONFIG_KMEMTRACE | ||
209 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); | ||
210 | #else | ||
211 | static __always_inline void * | ||
212 | kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
213 | { | ||
214 | return kmem_cache_alloc(s, gfpflags); | ||
215 | } | ||
216 | #endif | ||
217 | |||
207 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | 218 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) |
208 | { | 219 | { |
209 | return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); | 220 | unsigned int order = get_order(size); |
221 | void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); | ||
222 | |||
223 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret, | ||
224 | size, PAGE_SIZE << order, flags); | ||
225 | |||
226 | return ret; | ||
210 | } | 227 | } |
211 | 228 | ||
212 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 229 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
213 | { | 230 | { |
231 | void *ret; | ||
232 | |||
214 | if (__builtin_constant_p(size)) { | 233 | if (__builtin_constant_p(size)) { |
215 | if (size > PAGE_SIZE) | 234 | if (size > PAGE_SIZE) |
216 | return kmalloc_large(size, flags); | 235 | return kmalloc_large(size, flags); |
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
221 | if (!s) | 240 | if (!s) |
222 | return ZERO_SIZE_PTR; | 241 | return ZERO_SIZE_PTR; |
223 | 242 | ||
224 | return kmem_cache_alloc(s, flags); | 243 | ret = kmem_cache_alloc_notrace(s, flags); |
244 | |||
245 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, | ||
246 | _THIS_IP_, ret, | ||
247 | size, s->size, flags); | ||
248 | |||
249 | return ret; | ||
225 | } | 250 | } |
226 | } | 251 | } |
227 | return __kmalloc(size, flags); | 252 | return __kmalloc(size, flags); |
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
231 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 256 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
232 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 257 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
233 | 258 | ||
259 | #ifdef CONFIG_KMEMTRACE | ||
260 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
261 | gfp_t gfpflags, | ||
262 | int node); | ||
263 | #else | ||
264 | static __always_inline void * | ||
265 | kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
266 | gfp_t gfpflags, | ||
267 | int node) | ||
268 | { | ||
269 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
270 | } | ||
271 | #endif | ||
272 | |||
234 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 273 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
235 | { | 274 | { |
275 | void *ret; | ||
276 | |||
236 | if (__builtin_constant_p(size) && | 277 | if (__builtin_constant_p(size) && |
237 | size <= PAGE_SIZE && !(flags & SLUB_DMA)) { | 278 | size <= PAGE_SIZE && !(flags & SLUB_DMA)) { |
238 | struct kmem_cache *s = kmalloc_slab(size); | 279 | struct kmem_cache *s = kmalloc_slab(size); |
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
240 | if (!s) | 281 | if (!s) |
241 | return ZERO_SIZE_PTR; | 282 | return ZERO_SIZE_PTR; |
242 | 283 | ||
243 | return kmem_cache_alloc_node(s, flags, node); | 284 | ret = kmem_cache_alloc_node_notrace(s, flags, node); |
285 | |||
286 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
287 | _THIS_IP_, ret, | ||
288 | size, s->size, flags, node); | ||
289 | |||
290 | return ret; | ||
244 | } | 291 | } |
245 | return __kmalloc_node(size, flags, node); | 292 | return __kmalloc_node(size, flags, node); |
246 | } | 293 | } |
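Editor's note: because kmalloc_large() now logs PAGE_SIZE << order as bytes_alloc, the waste from page rounding becomes visible in the trace. A worked example, assuming 4 KiB pages (the exact numbers depend on PAGE_SIZE):

    kmalloc(70000):
        pages needed  = ceil(70000 / 4096)  = 18
        order         = get_order(70000)    = 5    ->  2^5 = 32 pages
        bytes_req     = 70000
        bytes_alloc   = PAGE_SIZE << 5      = 131072
        fragmentation = 131072 - 70000      = 61072 bytes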
diff --git a/init/main.c b/init/main.c
index 17e9757bfde2..9711586aa7c9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,6 +70,7 @@ | |||
70 | #include <asm/setup.h> | 70 | #include <asm/setup.h> |
71 | #include <asm/sections.h> | 71 | #include <asm/sections.h> |
72 | #include <asm/cacheflush.h> | 72 | #include <asm/cacheflush.h> |
73 | #include <linux/kmemtrace.h> | ||
73 | 74 | ||
74 | #ifdef CONFIG_X86_LOCAL_APIC | 75 | #ifdef CONFIG_X86_LOCAL_APIC |
75 | #include <asm/smp.h> | 76 | #include <asm/smp.h> |
@@ -654,6 +655,7 @@ asmlinkage void __init start_kernel(void) | |||
654 | enable_debug_pagealloc(); | 655 | enable_debug_pagealloc(); |
655 | cpu_hotplug_init(); | 656 | cpu_hotplug_init(); |
656 | kmem_cache_init(); | 657 | kmem_cache_init(); |
658 | kmemtrace_init(); | ||
657 | debug_objects_mem_init(); | 659 | debug_objects_mem_init(); |
658 | idr_init_cache(); | 660 | idr_init_cache(); |
659 | setup_per_cpu_pageset(); | 661 | setup_per_cpu_pageset(); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..b5417e23ba94 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -803,6 +803,26 @@ config FIREWIRE_OHCI_REMOTE_DMA | |||
803 | 803 | ||
804 | If unsure, say N. | 804 | If unsure, say N. |
805 | 805 | ||
806 | config KMEMTRACE | ||
807 | bool "Kernel memory tracer (kmemtrace)" | ||
808 | depends on RELAY && DEBUG_FS && MARKERS | ||
809 | help | ||
810 | kmemtrace provides tracing for slab allocator functions, such as | ||
811 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc. Collected | ||
812 | data is then fed to the userspace application in order to analyse | ||
813 | allocation hotspots, internal fragmentation and so on, making it | ||
814 | possible to see how well an allocator performs, as well as debug | ||
815 | and profile kernel code. | ||
816 | |||
817 | This requires a userspace application to use. See | ||
818 | Documentation/vm/kmemtrace.txt for more information. | ||
819 | |||
820 | Saying Y will make the kernel somewhat larger and slower. However, | ||
821 | if you disable kmemtrace at run-time or boot-time, the performance | ||
822 | impact is minimal (depending on the arch the kernel is built for). | ||
823 | |||
824 | If unsure, say N. | ||
825 | |||
806 | menuconfig BUILD_DOCSRC | 826 | menuconfig BUILD_DOCSRC |
807 | bool "Build targets in Documentation/ tree" | 827 | bool "Build targets in Documentation/ tree" |
808 | depends on HEADERS_CHECK | 828 | depends on HEADERS_CHECK |
diff --git a/mm/Makefile b/mm/Makefile
index c06b45a1ff5f..3782eb66d4b3 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_MIGRATION) += migrate.o | |||
34 | obj-$(CONFIG_SMP) += allocpercpu.o | 34 | obj-$(CONFIG_SMP) += allocpercpu.o |
35 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 35 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
36 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o | 36 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o |
37 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
new file mode 100644
index 000000000000..2a70a805027c
--- /dev/null
+++ b/mm/kmemtrace.c
@@ -0,0 +1,333 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/debugfs.h> | ||
9 | #include <linux/relay.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/marker.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/kmemtrace.h> | ||
14 | |||
15 | #define KMEMTRACE_SUBBUF_SIZE 524288 | ||
16 | #define KMEMTRACE_DEF_N_SUBBUFS 20 | ||
17 | |||
18 | static struct rchan *kmemtrace_chan; | ||
19 | static u32 kmemtrace_buf_overruns; | ||
20 | |||
21 | static unsigned int kmemtrace_n_subbufs; | ||
22 | |||
23 | /* disabled by default */ | ||
24 | static unsigned int kmemtrace_enabled; | ||
25 | |||
26 | /* | ||
27 | * The sequence number is used for reordering kmemtrace packets | ||
28 | * in userspace, since they are logged as per-CPU data. | ||
29 | * | ||
30 | * atomic_t should always be a 32-bit signed integer. Wraparound is not | ||
31 | * likely to occur, but userspace can deal with it by expecting a certain | ||
32 | * sequence number in the next packet that will be read. | ||
33 | */ | ||
34 | static atomic_t kmemtrace_seq_num; | ||
35 | |||
36 | #define KMEMTRACE_ABI_VERSION 1 | ||
37 | |||
38 | static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION; | ||
39 | |||
40 | enum kmemtrace_event_id { | ||
41 | KMEMTRACE_EVENT_ALLOC = 0, | ||
42 | KMEMTRACE_EVENT_FREE, | ||
43 | }; | ||
44 | |||
45 | struct kmemtrace_event { | ||
46 | u8 event_id; | ||
47 | u8 type_id; | ||
48 | u16 event_size; | ||
49 | s32 seq_num; | ||
50 | u64 call_site; | ||
51 | u64 ptr; | ||
52 | } __attribute__ ((__packed__)); | ||
53 | |||
54 | struct kmemtrace_stats_alloc { | ||
55 | u64 bytes_req; | ||
56 | u64 bytes_alloc; | ||
57 | u32 gfp_flags; | ||
58 | s32 numa_node; | ||
59 | } __attribute__ ((__packed__)); | ||
60 | |||
61 | static void kmemtrace_probe_alloc(void *probe_data, void *call_data, | ||
62 | const char *format, va_list *args) | ||
63 | { | ||
64 | unsigned long flags; | ||
65 | struct kmemtrace_event *ev; | ||
66 | struct kmemtrace_stats_alloc *stats; | ||
67 | void *buf; | ||
68 | |||
69 | local_irq_save(flags); | ||
70 | |||
71 | buf = relay_reserve(kmemtrace_chan, | ||
72 | sizeof(struct kmemtrace_event) + | ||
73 | sizeof(struct kmemtrace_stats_alloc)); | ||
74 | if (!buf) | ||
75 | goto failed; | ||
76 | |||
77 | /* | ||
78 | * Don't convert this to use structure initializers, | ||
79 | * C99 does not guarantee the rvalues evaluation order. | ||
80 | */ | ||
81 | |||
82 | ev = buf; | ||
83 | ev->event_id = KMEMTRACE_EVENT_ALLOC; | ||
84 | ev->type_id = va_arg(*args, int); | ||
85 | ev->event_size = sizeof(struct kmemtrace_event) + | ||
86 | sizeof(struct kmemtrace_stats_alloc); | ||
87 | ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num); | ||
88 | ev->call_site = va_arg(*args, unsigned long); | ||
89 | ev->ptr = va_arg(*args, unsigned long); | ||
90 | |||
91 | stats = buf + sizeof(struct kmemtrace_event); | ||
92 | stats->bytes_req = va_arg(*args, unsigned long); | ||
93 | stats->bytes_alloc = va_arg(*args, unsigned long); | ||
94 | stats->gfp_flags = va_arg(*args, unsigned long); | ||
95 | stats->numa_node = va_arg(*args, int); | ||
96 | |||
97 | failed: | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | |||
101 | static void kmemtrace_probe_free(void *probe_data, void *call_data, | ||
102 | const char *format, va_list *args) | ||
103 | { | ||
104 | unsigned long flags; | ||
105 | struct kmemtrace_event *ev; | ||
106 | |||
107 | local_irq_save(flags); | ||
108 | |||
109 | ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event)); | ||
110 | if (!ev) | ||
111 | goto failed; | ||
112 | |||
113 | /* | ||
114 | * Don't convert this to use structure initializers, | ||
115 | * C99 does not guarantee the rvalues evaluation order. | ||
116 | */ | ||
117 | ev->event_id = KMEMTRACE_EVENT_FREE; | ||
118 | ev->type_id = va_arg(*args, int); | ||
119 | ev->event_size = sizeof(struct kmemtrace_event); | ||
120 | ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num); | ||
121 | ev->call_site = va_arg(*args, unsigned long); | ||
122 | ev->ptr = va_arg(*args, unsigned long); | ||
123 | |||
124 | failed: | ||
125 | local_irq_restore(flags); | ||
126 | } | ||
127 | |||
128 | static struct dentry * | ||
129 | kmemtrace_create_buf_file(const char *filename, struct dentry *parent, | ||
130 | int mode, struct rchan_buf *buf, int *is_global) | ||
131 | { | ||
132 | return debugfs_create_file(filename, mode, parent, buf, | ||
133 | &relay_file_operations); | ||
134 | } | ||
135 | |||
136 | static int kmemtrace_remove_buf_file(struct dentry *dentry) | ||
137 | { | ||
138 | debugfs_remove(dentry); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int kmemtrace_subbuf_start(struct rchan_buf *buf, | ||
144 | void *subbuf, | ||
145 | void *prev_subbuf, | ||
146 | size_t prev_padding) | ||
147 | { | ||
148 | if (relay_buf_full(buf)) { | ||
149 | /* | ||
150 | * We know it's not SMP-safe, but neither | ||
151 | * is debugfs_create_u32(). | ||
152 | */ | ||
153 | kmemtrace_buf_overruns++; | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | return 1; | ||
158 | } | ||
159 | |||
160 | static struct rchan_callbacks relay_callbacks = { | ||
161 | .create_buf_file = kmemtrace_create_buf_file, | ||
162 | .remove_buf_file = kmemtrace_remove_buf_file, | ||
163 | .subbuf_start = kmemtrace_subbuf_start, | ||
164 | }; | ||
165 | |||
166 | static struct dentry *kmemtrace_dir; | ||
167 | static struct dentry *kmemtrace_overruns_dentry; | ||
168 | static struct dentry *kmemtrace_abi_version_dentry; | ||
169 | |||
170 | static struct dentry *kmemtrace_enabled_dentry; | ||
171 | |||
172 | static int kmemtrace_start_probes(void) | ||
173 | { | ||
174 | int err; | ||
175 | |||
176 | err = marker_probe_register("kmemtrace_alloc", "type_id %d " | ||
177 | "call_site %lu ptr %lu " | ||
178 | "bytes_req %lu bytes_alloc %lu " | ||
179 | "gfp_flags %lu node %d", | ||
180 | kmemtrace_probe_alloc, NULL); | ||
181 | if (err) | ||
182 | return err; | ||
183 | err = marker_probe_register("kmemtrace_free", "type_id %d " | ||
184 | "call_site %lu ptr %lu", | ||
185 | kmemtrace_probe_free, NULL); | ||
186 | |||
187 | return err; | ||
188 | } | ||
189 | |||
190 | static void kmemtrace_stop_probes(void) | ||
191 | { | ||
192 | marker_probe_unregister("kmemtrace_alloc", | ||
193 | kmemtrace_probe_alloc, NULL); | ||
194 | marker_probe_unregister("kmemtrace_free", | ||
195 | kmemtrace_probe_free, NULL); | ||
196 | } | ||
197 | |||
198 | static int kmemtrace_enabled_get(void *data, u64 *val) | ||
199 | { | ||
200 | *val = *((int *) data); | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int kmemtrace_enabled_set(void *data, u64 val) | ||
206 | { | ||
207 | u64 old_val = kmemtrace_enabled; | ||
208 | |||
209 | *((int *) data) = !!val; | ||
210 | |||
211 | if (old_val == val) | ||
212 | return 0; | ||
213 | if (val) | ||
214 | kmemtrace_start_probes(); | ||
215 | else | ||
216 | kmemtrace_stop_probes(); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops, | ||
222 | kmemtrace_enabled_get, | ||
223 | kmemtrace_enabled_set, "%llu\n"); | ||
224 | |||
225 | static void kmemtrace_cleanup(void) | ||
226 | { | ||
227 | if (kmemtrace_enabled_dentry) | ||
228 | debugfs_remove(kmemtrace_enabled_dentry); | ||
229 | |||
230 | kmemtrace_stop_probes(); | ||
231 | |||
232 | if (kmemtrace_abi_version_dentry) | ||
233 | debugfs_remove(kmemtrace_abi_version_dentry); | ||
234 | if (kmemtrace_overruns_dentry) | ||
235 | debugfs_remove(kmemtrace_overruns_dentry); | ||
236 | |||
237 | relay_close(kmemtrace_chan); | ||
238 | kmemtrace_chan = NULL; | ||
239 | |||
240 | if (kmemtrace_dir) | ||
241 | debugfs_remove(kmemtrace_dir); | ||
242 | } | ||
243 | |||
244 | static int __init kmemtrace_setup_late(void) | ||
245 | { | ||
246 | if (!kmemtrace_chan) | ||
247 | goto failed; | ||
248 | |||
249 | kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL); | ||
250 | if (!kmemtrace_dir) | ||
251 | goto cleanup; | ||
252 | |||
253 | kmemtrace_abi_version_dentry = | ||
254 | debugfs_create_u32("abi_version", S_IRUSR, | ||
255 | kmemtrace_dir, &kmemtrace_abi_version); | ||
256 | kmemtrace_overruns_dentry = | ||
257 | debugfs_create_u32("total_overruns", S_IRUSR, | ||
258 | kmemtrace_dir, &kmemtrace_buf_overruns); | ||
259 | if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry) | ||
260 | goto cleanup; | ||
261 | |||
262 | kmemtrace_enabled_dentry = | ||
263 | debugfs_create_file("enabled", S_IRUSR | S_IWUSR, | ||
264 | kmemtrace_dir, &kmemtrace_enabled, | ||
265 | &kmemtrace_enabled_fops); | ||
266 | if (!kmemtrace_enabled_dentry) | ||
267 | goto cleanup; | ||
268 | |||
269 | if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir)) | ||
270 | goto cleanup; | ||
271 | |||
272 | printk(KERN_INFO "kmemtrace: fully up.\n"); | ||
273 | |||
274 | return 0; | ||
275 | |||
276 | cleanup: | ||
277 | kmemtrace_cleanup(); | ||
278 | failed: | ||
279 | return 1; | ||
280 | } | ||
281 | late_initcall(kmemtrace_setup_late); | ||
282 | |||
283 | static int __init kmemtrace_set_boot_enabled(char *str) | ||
284 | { | ||
285 | if (!str) | ||
286 | return -EINVAL; | ||
287 | |||
288 | if (!strcmp(str, "yes")) | ||
289 | kmemtrace_enabled = 1; | ||
290 | else if (!strcmp(str, "no")) | ||
291 | kmemtrace_enabled = 0; | ||
292 | else | ||
293 | return -EINVAL; | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | early_param("kmemtrace.enable", kmemtrace_set_boot_enabled); | ||
298 | |||
299 | static int __init kmemtrace_set_subbufs(char *str) | ||
300 | { | ||
301 | get_option(&str, &kmemtrace_n_subbufs); | ||
302 | return 0; | ||
303 | } | ||
304 | early_param("kmemtrace.subbufs", kmemtrace_set_subbufs); | ||
305 | |||
306 | void kmemtrace_init(void) | ||
307 | { | ||
308 | if (!kmemtrace_n_subbufs) | ||
309 | kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS; | ||
310 | |||
311 | kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE, | ||
312 | kmemtrace_n_subbufs, &relay_callbacks, | ||
313 | NULL); | ||
314 | if (!kmemtrace_chan) { | ||
315 | printk(KERN_ERR "kmemtrace: could not open relay channel.\n"); | ||
316 | return; | ||
317 | } | ||
318 | |||
319 | if (!kmemtrace_enabled) { | ||
320 | printk(KERN_INFO "kmemtrace: disabled. Pass " | ||
321 | "kmemtrace.enable=yes as kernel parameter for " | ||
322 | "boot-time tracing.\n"); | ||
323 | return; | ||
324 | } | ||
325 | if (kmemtrace_start_probes()) { | ||
326 | printk(KERN_ERR "kmemtrace: could not register marker probes!\n"); | ||
327 | kmemtrace_cleanup(); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | printk(KERN_INFO "kmemtrace: enabled.\n"); | ||
332 | } | ||
333 | |||
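Editor's note: the sequence-number comment in this file leaves wraparound handling to userspace. A reader that reorders the per-CPU streams can use serial-number-style comparison on the 32-bit values, as in this userspace sketch (not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True if sequence number 'a' logically precedes 'b', treating the
     * 32-bit space as circular (RFC 1982-style serial arithmetic).
     */
    static bool seq_before(int32_t a, int32_t b)
    {
            return (int32_t)((uint32_t) a - (uint32_t) b) < 0;
    }

Sorting a merge window of per-CPU events with seq_before() as the comparator recovers the global order even across a wraparound, provided the window is much smaller than 2^31 events.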
diff --git a/mm/slab.c b/mm/slab.c
@@ -112,6 +112,7 @@ | |||
112 | #include <linux/rtmutex.h> | 112 | #include <linux/rtmutex.h> |
113 | #include <linux/reciprocal_div.h> | 113 | #include <linux/reciprocal_div.h> |
114 | #include <linux/debugobjects.h> | 114 | #include <linux/debugobjects.h> |
115 | #include <linux/kmemtrace.h> | ||
115 | 116 | ||
116 | #include <asm/cacheflush.h> | 117 | #include <asm/cacheflush.h> |
117 | #include <asm/tlbflush.h> | 118 | #include <asm/tlbflush.h> |
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |||
568 | 569 | ||
569 | #endif | 570 | #endif |
570 | 571 | ||
572 | #ifdef CONFIG_KMEMTRACE | ||
573 | size_t slab_buffer_size(struct kmem_cache *cachep) | ||
574 | { | ||
575 | return cachep->buffer_size; | ||
576 | } | ||
577 | EXPORT_SYMBOL(slab_buffer_size); | ||
578 | #endif | ||
579 | |||
571 | /* | 580 | /* |
572 | * Do not go above this order unless 0 objects fit into the slab. | 581 | * Do not go above this order unless 0 objects fit into the slab. |
573 | */ | 582 | */ |
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) | |||
3613 | */ | 3622 | */ |
3614 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 3623 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) |
3615 | { | 3624 | { |
3616 | return __cache_alloc(cachep, flags, __builtin_return_address(0)); | 3625 | void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); |
3626 | |||
3627 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
3628 | obj_size(cachep), cachep->buffer_size, flags); | ||
3629 | |||
3630 | return ret; | ||
3617 | } | 3631 | } |
3618 | EXPORT_SYMBOL(kmem_cache_alloc); | 3632 | EXPORT_SYMBOL(kmem_cache_alloc); |
3619 | 3633 | ||
3634 | #ifdef CONFIG_KMEMTRACE | ||
3635 | void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) | ||
3636 | { | ||
3637 | return __cache_alloc(cachep, flags, __builtin_return_address(0)); | ||
3638 | } | ||
3639 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); | ||
3640 | #endif | ||
3641 | |||
3620 | /** | 3642 | /** |
3621 | * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. | 3643 | * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. |
3622 | * @cachep: the cache we're checking against | 3644 | * @cachep: the cache we're checking against |
@@ -3661,23 +3683,47 @@ out: | |||
3661 | #ifdef CONFIG_NUMA | 3683 | #ifdef CONFIG_NUMA |
3662 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 3684 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) |
3663 | { | 3685 | { |
3664 | return __cache_alloc_node(cachep, flags, nodeid, | 3686 | void *ret = __cache_alloc_node(cachep, flags, nodeid, |
3665 | __builtin_return_address(0)); | 3687 | __builtin_return_address(0)); |
3688 | |||
3689 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
3690 | obj_size(cachep), cachep->buffer_size, | ||
3691 | flags, nodeid); | ||
3692 | |||
3693 | return ret; | ||
3666 | } | 3694 | } |
3667 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 3695 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
3668 | 3696 | ||
3697 | #ifdef CONFIG_KMEMTRACE | ||
3698 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
3699 | gfp_t flags, | ||
3700 | int nodeid) | ||
3701 | { | ||
3702 | return __cache_alloc_node(cachep, flags, nodeid, | ||
3703 | __builtin_return_address(0)); | ||
3704 | } | ||
3705 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); | ||
3706 | #endif | ||
3707 | |||
3669 | static __always_inline void * | 3708 | static __always_inline void * |
3670 | __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) | 3709 | __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) |
3671 | { | 3710 | { |
3672 | struct kmem_cache *cachep; | 3711 | struct kmem_cache *cachep; |
3712 | void *ret; | ||
3673 | 3713 | ||
3674 | cachep = kmem_find_general_cachep(size, flags); | 3714 | cachep = kmem_find_general_cachep(size, flags); |
3675 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3715 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
3676 | return cachep; | 3716 | return cachep; |
3677 | return kmem_cache_alloc_node(cachep, flags, node); | 3717 | ret = kmem_cache_alloc_node_notrace(cachep, flags, node); |
3718 | |||
3719 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
3720 | (unsigned long) caller, ret, | ||
3721 | size, cachep->buffer_size, flags, node); | ||
3722 | |||
3723 | return ret; | ||
3678 | } | 3724 | } |
3679 | 3725 | ||
3680 | #ifdef CONFIG_DEBUG_SLAB | 3726 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE) |
3681 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 3727 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
3682 | { | 3728 | { |
3683 | return __do_kmalloc_node(size, flags, node, | 3729 | return __do_kmalloc_node(size, flags, node, |
@@ -3686,9 +3732,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
3686 | EXPORT_SYMBOL(__kmalloc_node); | 3732 | EXPORT_SYMBOL(__kmalloc_node); |
3687 | 3733 | ||
3688 | void *__kmalloc_node_track_caller(size_t size, gfp_t flags, | 3734 | void *__kmalloc_node_track_caller(size_t size, gfp_t flags, |
3689 | int node, void *caller) | 3735 | int node, unsigned long caller) |
3690 | { | 3736 | { |
3691 | return __do_kmalloc_node(size, flags, node, caller); | 3737 | return __do_kmalloc_node(size, flags, node, (void *)caller); |
3692 | } | 3738 | } |
3693 | EXPORT_SYMBOL(__kmalloc_node_track_caller); | 3739 | EXPORT_SYMBOL(__kmalloc_node_track_caller); |
3694 | #else | 3740 | #else |
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
3710 | void *caller) | 3756 | void *caller) |
3711 | { | 3757 | { |
3712 | struct kmem_cache *cachep; | 3758 | struct kmem_cache *cachep; |
3759 | void *ret; | ||
3713 | 3760 | ||
3714 | /* If you want to save a few bytes .text space: replace | 3761 | /* If you want to save a few bytes .text space: replace |
3715 | * __ with kmem_. | 3762 | * __ with kmem_. |
@@ -3719,20 +3766,26 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
3719 | cachep = __find_general_cachep(size, flags); | 3766 | cachep = __find_general_cachep(size, flags); |
3720 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3767 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
3721 | return cachep; | 3768 | return cachep; |
3722 | return __cache_alloc(cachep, flags, caller); | 3769 | ret = __cache_alloc(cachep, flags, caller); |
3770 | |||
3771 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, | ||
3772 | (unsigned long) caller, ret, | ||
3773 | size, cachep->buffer_size, flags); | ||
3774 | |||
3775 | return ret; | ||
3723 | } | 3776 | } |
3724 | 3777 | ||
3725 | 3778 | ||
3726 | #ifdef CONFIG_DEBUG_SLAB | 3779 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE) |
3727 | void *__kmalloc(size_t size, gfp_t flags) | 3780 | void *__kmalloc(size_t size, gfp_t flags) |
3728 | { | 3781 | { |
3729 | return __do_kmalloc(size, flags, __builtin_return_address(0)); | 3782 | return __do_kmalloc(size, flags, __builtin_return_address(0)); |
3730 | } | 3783 | } |
3731 | EXPORT_SYMBOL(__kmalloc); | 3784 | EXPORT_SYMBOL(__kmalloc); |
3732 | 3785 | ||
3733 | void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) | 3786 | void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) |
3734 | { | 3787 | { |
3735 | return __do_kmalloc(size, flags, caller); | 3788 | return __do_kmalloc(size, flags, (void *)caller); |
3736 | } | 3789 | } |
3737 | EXPORT_SYMBOL(__kmalloc_track_caller); | 3790 | EXPORT_SYMBOL(__kmalloc_track_caller); |
3738 | 3791 | ||
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
3762 | debug_check_no_obj_freed(objp, obj_size(cachep)); | 3815 | debug_check_no_obj_freed(objp, obj_size(cachep)); |
3763 | __cache_free(cachep, objp); | 3816 | __cache_free(cachep, objp); |
3764 | local_irq_restore(flags); | 3817 | local_irq_restore(flags); |
3818 | |||
3819 | kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp); | ||
3765 | } | 3820 | } |
3766 | EXPORT_SYMBOL(kmem_cache_free); | 3821 | EXPORT_SYMBOL(kmem_cache_free); |
3767 | 3822 | ||
@@ -3788,6 +3843,8 @@ void kfree(const void *objp) | |||
3788 | debug_check_no_obj_freed(objp, obj_size(c)); | 3843 | debug_check_no_obj_freed(objp, obj_size(c)); |
3789 | __cache_free(c, (void *)objp); | 3844 | __cache_free(c, (void *)objp); |
3790 | local_irq_restore(flags); | 3845 | local_irq_restore(flags); |
3846 | |||
3847 | kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp); | ||
3791 | } | 3848 | } |
3792 | EXPORT_SYMBOL(kfree); | 3849 | EXPORT_SYMBOL(kfree); |
3793 | 3850 | ||
diff --git a/mm/slob.c b/mm/slob.c
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/module.h> | 65 | #include <linux/module.h> |
66 | #include <linux/rcupdate.h> | 66 | #include <linux/rcupdate.h> |
67 | #include <linux/list.h> | 67 | #include <linux/list.h> |
68 | #include <linux/kmemtrace.h> | ||
68 | #include <asm/atomic.h> | 69 | #include <asm/atomic.h> |
69 | 70 | ||
70 | /* | 71 | /* |
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
463 | { | 464 | { |
464 | unsigned int *m; | 465 | unsigned int *m; |
465 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 466 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
467 | void *ret; | ||
466 | 468 | ||
467 | if (size < PAGE_SIZE - align) { | 469 | if (size < PAGE_SIZE - align) { |
468 | if (!size) | 470 | if (!size) |
469 | return ZERO_SIZE_PTR; | 471 | return ZERO_SIZE_PTR; |
470 | 472 | ||
471 | m = slob_alloc(size + align, gfp, align, node); | 473 | m = slob_alloc(size + align, gfp, align, node); |
474 | |||
472 | if (!m) | 475 | if (!m) |
473 | return NULL; | 476 | return NULL; |
474 | *m = size; | 477 | *m = size; |
475 | return (void *)m + align; | 478 | ret = (void *)m + align; |
479 | |||
480 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
481 | _RET_IP_, ret, | ||
482 | size, size + align, gfp, node); | ||
476 | } else { | 483 | } else { |
477 | void *ret; | 484 | unsigned int order = get_order(size); |
478 | 485 | ||
479 | ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node); | 486 | ret = slob_new_page(gfp | __GFP_COMP, order, node); |
480 | if (ret) { | 487 | if (ret) { |
481 | struct page *page; | 488 | struct page *page; |
482 | page = virt_to_page(ret); | 489 | page = virt_to_page(ret); |
483 | page->private = size; | 490 | page->private = size; |
484 | } | 491 | } |
485 | return ret; | 492 | |
493 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
494 | _RET_IP_, ret, | ||
495 | size, PAGE_SIZE << order, gfp, node); | ||
486 | } | 496 | } |
497 | |||
498 | return ret; | ||
487 | } | 499 | } |
488 | EXPORT_SYMBOL(__kmalloc_node); | 500 | EXPORT_SYMBOL(__kmalloc_node); |
489 | 501 | ||
@@ -501,6 +513,8 @@ void kfree(const void *block) | |||
501 | slob_free(m, *m + align); | 513 | slob_free(m, *m + align); |
502 | } else | 514 | } else |
503 | put_page(&sp->page); | 515 | put_page(&sp->page); |
516 | |||
517 | kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block); | ||
504 | } | 518 | } |
505 | EXPORT_SYMBOL(kfree); | 519 | EXPORT_SYMBOL(kfree); |
506 | 520 | ||
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | |||
569 | { | 583 | { |
570 | void *b; | 584 | void *b; |
571 | 585 | ||
572 | if (c->size < PAGE_SIZE) | 586 | if (c->size < PAGE_SIZE) { |
573 | b = slob_alloc(c->size, flags, c->align, node); | 587 | b = slob_alloc(c->size, flags, c->align, node); |
574 | else | 588 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, |
589 | _RET_IP_, b, c->size, | ||
590 | SLOB_UNITS(c->size) * SLOB_UNIT, | ||
591 | flags, node); | ||
592 | } else { | ||
575 | b = slob_new_page(flags, get_order(c->size), node); | 593 | b = slob_new_page(flags, get_order(c->size), node); |
594 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, | ||
595 | _RET_IP_, b, c->size, | ||
596 | PAGE_SIZE << get_order(c->size), | ||
597 | flags, node); | ||
598 | } | ||
576 | 599 | ||
577 | if (c->ctor) | 600 | if (c->ctor) |
578 | c->ctor(b); | 601 | c->ctor(b); |
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b) | |||
608 | } else { | 631 | } else { |
609 | __kmem_cache_free(b, c->size); | 632 | __kmem_cache_free(b, c->size); |
610 | } | 633 | } |
634 | |||
635 | kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b); | ||
611 | } | 636 | } |
612 | EXPORT_SYMBOL(kmem_cache_free); | 637 | EXPORT_SYMBOL(kmem_cache_free); |
613 | 638 | ||
diff --git a/mm/slub.c b/mm/slub.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/kallsyms.h> | 24 | #include <linux/kallsyms.h> |
25 | #include <linux/memory.h> | 25 | #include <linux/memory.h> |
26 | #include <linux/math64.h> | 26 | #include <linux/math64.h> |
27 | #include <linux/kmemtrace.h> | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * Lock order: | 30 | * Lock order: |
@@ -178,7 +179,7 @@ static LIST_HEAD(slab_caches); | |||
178 | * Tracking user of a slab. | 179 | * Tracking user of a slab. |
179 | */ | 180 | */ |
180 | struct track { | 181 | struct track { |
181 | void *addr; /* Called from address */ | 182 | unsigned long addr; /* Called from address */ |
182 | int cpu; /* Was running on cpu */ | 183 | int cpu; /* Was running on cpu */ |
183 | int pid; /* Pid context */ | 184 | int pid; /* Pid context */ |
184 | unsigned long when; /* When did the operation occur */ | 185 | unsigned long when; /* When did the operation occur */ |
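The switch from void *addr to unsigned long addr throughout slub.c matches the _RET_IP_ helper used by the new trace calls, which include/linux/kernel.h defines as the caller's return address cast to an integer:

	#define _RET_IP_	(unsigned long)__builtin_return_address(0)

Storing the address as an integer also explains the (void *)t->addr cast a few hunks below: printk()'s %pS specifier symbolizes a pointer, so the value is converted back only at the point where it is printed.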
@@ -367,7 +368,7 @@ static struct track *get_track(struct kmem_cache *s, void *object, | |||
367 | } | 368 | } |
368 | 369 | ||
369 | static void set_track(struct kmem_cache *s, void *object, | 370 | static void set_track(struct kmem_cache *s, void *object, |
370 | enum track_item alloc, void *addr) | 371 | enum track_item alloc, unsigned long addr) |
371 | { | 372 | { |
372 | struct track *p; | 373 | struct track *p; |
373 | 374 | ||
@@ -391,8 +392,8 @@ static void init_tracking(struct kmem_cache *s, void *object) | |||
391 | if (!(s->flags & SLAB_STORE_USER)) | 392 | if (!(s->flags & SLAB_STORE_USER)) |
392 | return; | 393 | return; |
393 | 394 | ||
394 | set_track(s, object, TRACK_FREE, NULL); | 395 | set_track(s, object, TRACK_FREE, 0UL); |
395 | set_track(s, object, TRACK_ALLOC, NULL); | 396 | set_track(s, object, TRACK_ALLOC, 0UL); |
396 | } | 397 | } |
397 | 398 | ||
398 | static void print_track(const char *s, struct track *t) | 399 | static void print_track(const char *s, struct track *t) |
@@ -401,7 +402,7 @@ static void print_track(const char *s, struct track *t) | |||
401 | return; | 402 | return; |
402 | 403 | ||
403 | printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", | 404 | printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", |
404 | s, t->addr, jiffies - t->when, t->cpu, t->pid); | 405 | s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); |
405 | } | 406 | } |
406 | 407 | ||
407 | static void print_tracking(struct kmem_cache *s, void *object) | 408 | static void print_tracking(struct kmem_cache *s, void *object) |
@@ -866,7 +867,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, | |||
866 | } | 867 | } |
867 | 868 | ||
868 | static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | 869 | static int alloc_debug_processing(struct kmem_cache *s, struct page *page, |
869 | void *object, void *addr) | 870 | void *object, unsigned long addr) |
870 | { | 871 | { |
871 | if (!check_slab(s, page)) | 872 | if (!check_slab(s, page)) |
872 | goto bad; | 873 | goto bad; |
@@ -906,7 +907,7 @@ bad: | |||
906 | } | 907 | } |
907 | 908 | ||
908 | static int free_debug_processing(struct kmem_cache *s, struct page *page, | 909 | static int free_debug_processing(struct kmem_cache *s, struct page *page, |
909 | void *object, void *addr) | 910 | void *object, unsigned long addr) |
910 | { | 911 | { |
911 | if (!check_slab(s, page)) | 912 | if (!check_slab(s, page)) |
912 | goto fail; | 913 | goto fail; |
@@ -1029,10 +1030,10 @@ static inline void setup_object_debug(struct kmem_cache *s, | |||
1029 | struct page *page, void *object) {} | 1030 | struct page *page, void *object) {} |
1030 | 1031 | ||
1031 | static inline int alloc_debug_processing(struct kmem_cache *s, | 1032 | static inline int alloc_debug_processing(struct kmem_cache *s, |
1032 | struct page *page, void *object, void *addr) { return 0; } | 1033 | struct page *page, void *object, unsigned long addr) { return 0; } |
1033 | 1034 | ||
1034 | static inline int free_debug_processing(struct kmem_cache *s, | 1035 | static inline int free_debug_processing(struct kmem_cache *s, |
1035 | struct page *page, void *object, void *addr) { return 0; } | 1036 | struct page *page, void *object, unsigned long addr) { return 0; } |
1036 | 1037 | ||
1037 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1038 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
1038 | { return 1; } | 1039 | { return 1; } |
@@ -1499,8 +1500,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node) | |||
1499 | * we need to allocate a new slab. This is the slowest path since it involves | 1500 | * we need to allocate a new slab. This is the slowest path since it involves |
1500 | * a call to the page allocator and the setup of a new slab. | 1501 | * a call to the page allocator and the setup of a new slab. |
1501 | */ | 1502 | */ |
1502 | static void *__slab_alloc(struct kmem_cache *s, | 1503 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
1503 | gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) | 1504 | unsigned long addr, struct kmem_cache_cpu *c) |
1504 | { | 1505 | { |
1505 | void **object; | 1506 | void **object; |
1506 | struct page *new; | 1507 | struct page *new; |
@@ -1584,7 +1585,7 @@ debug: | |||
1584 | * Otherwise we can simply pick the next object from the lockless free list. | 1585 | * Otherwise we can simply pick the next object from the lockless free list. |
1585 | */ | 1586 | */ |
1586 | static __always_inline void *slab_alloc(struct kmem_cache *s, | 1587 | static __always_inline void *slab_alloc(struct kmem_cache *s, |
1587 | gfp_t gfpflags, int node, void *addr) | 1588 | gfp_t gfpflags, int node, unsigned long addr) |
1588 | { | 1589 | { |
1589 | void **object; | 1590 | void **object; |
1590 | struct kmem_cache_cpu *c; | 1591 | struct kmem_cache_cpu *c; |
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1613 | 1614 | ||
1614 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 1615 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
1615 | { | 1616 | { |
1616 | return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); | 1617 | void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_); |
1618 | |||
1619 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1620 | s->objsize, s->size, gfpflags); | ||
1621 | |||
1622 | return ret; | ||
1617 | } | 1623 | } |
1618 | EXPORT_SYMBOL(kmem_cache_alloc); | 1624 | EXPORT_SYMBOL(kmem_cache_alloc); |
1619 | 1625 | ||
1626 | #ifdef CONFIG_KMEMTRACE | ||
1627 | void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
1628 | { | ||
1629 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | ||
1630 | } | ||
1631 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); | ||
1632 | #endif | ||
1633 | |||
1620 | #ifdef CONFIG_NUMA | 1634 | #ifdef CONFIG_NUMA |
1621 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | 1635 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) |
1622 | { | 1636 | { |
1623 | return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); | 1637 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); |
1638 | |||
1639 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1640 | s->objsize, s->size, gfpflags, node); | ||
1641 | |||
1642 | return ret; | ||
1624 | } | 1643 | } |
1625 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 1644 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
1626 | #endif | 1645 | #endif |
1627 | 1646 | ||
1647 | #ifdef CONFIG_KMEMTRACE | ||
1648 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
1649 | gfp_t gfpflags, | ||
1650 | int node) | ||
1651 | { | ||
1652 | return slab_alloc(s, gfpflags, node, _RET_IP_); | ||
1653 | } | ||
1654 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); | ||
1655 | #endif | ||
1656 | |||
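The _notrace entry points above exist so that the inline kmalloc() wrappers (in include/linux/slub_def.h, per the diffstat) can emit a single KMALLOC-typed event attributed to their own caller, instead of a duplicate CACHE-typed event from inside kmem_cache_alloc(). A simplified sketch of how such a wrapper is expected to use them; the body below is an assumption, not the exact slub_def.h code:

	static __always_inline void *kmalloc_via_cache(struct kmem_cache *s,
						       size_t size, gfp_t flags)
	{
		/* Allocate silently... */
		void *ret = kmem_cache_alloc_notrace(s, flags);

		/* ...then log one kmalloc event against our caller. */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
				     size, s->size, flags);
		return ret;
	}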

1628 | /* | 1657 | /* |
1629 | * Slow path handling. This may still be called frequently since objects | 1658 | * Slow path handling. This may still be called frequently since objects |
1630 | * have a longer lifetime than the cpu slabs in most processing loads. | 1659 | * have a longer lifetime than the cpu slabs in most processing loads. |
@@ -1634,7 +1663,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); | |||
1634 | * handling required then we can return immediately. | 1663 | * handling required then we can return immediately. |
1635 | */ | 1664 | */ |
1636 | static void __slab_free(struct kmem_cache *s, struct page *page, | 1665 | static void __slab_free(struct kmem_cache *s, struct page *page, |
1637 | void *x, void *addr, unsigned int offset) | 1666 | void *x, unsigned long addr, unsigned int offset) |
1638 | { | 1667 | { |
1639 | void *prior; | 1668 | void *prior; |
1640 | void **object = (void *)x; | 1669 | void **object = (void *)x; |
@@ -1704,7 +1733,7 @@ debug: | |||
1704 | * with all sorts of special processing. | 1733 | * with all sorts of special processing. |
1705 | */ | 1734 | */ |
1706 | static __always_inline void slab_free(struct kmem_cache *s, | 1735 | static __always_inline void slab_free(struct kmem_cache *s, |
1707 | struct page *page, void *x, void *addr) | 1736 | struct page *page, void *x, unsigned long addr) |
1708 | { | 1737 | { |
1709 | void **object = (void *)x; | 1738 | void **object = (void *)x; |
1710 | struct kmem_cache_cpu *c; | 1739 | struct kmem_cache_cpu *c; |
@@ -1731,7 +1760,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
1731 | 1760 | ||
1732 | page = virt_to_head_page(x); | 1761 | page = virt_to_head_page(x); |
1733 | 1762 | ||
1734 | slab_free(s, page, x, __builtin_return_address(0)); | 1763 | slab_free(s, page, x, _RET_IP_); |
1764 | |||
1765 | kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x); | ||
1735 | } | 1766 | } |
1736 | EXPORT_SYMBOL(kmem_cache_free); | 1767 | EXPORT_SYMBOL(kmem_cache_free); |
1737 | 1768 | ||
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) | |||
2650 | void *__kmalloc(size_t size, gfp_t flags) | 2681 | void *__kmalloc(size_t size, gfp_t flags) |
2651 | { | 2682 | { |
2652 | struct kmem_cache *s; | 2683 | struct kmem_cache *s; |
2684 | void *ret; | ||
2653 | 2685 | ||
2654 | if (unlikely(size > PAGE_SIZE)) | 2686 | if (unlikely(size > PAGE_SIZE)) |
2655 | return kmalloc_large(size, flags); | 2687 | return kmalloc_large(size, flags); |
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2659 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2691 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2660 | return s; | 2692 | return s; |
2661 | 2693 | ||
2662 | return slab_alloc(s, flags, -1, __builtin_return_address(0)); | 2694 | ret = slab_alloc(s, flags, -1, _RET_IP_); |
2695 | |||
2696 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2697 | size, s->size, flags); | ||
2698 | |||
2699 | return ret; | ||
2663 | } | 2700 | } |
2664 | EXPORT_SYMBOL(__kmalloc); | 2701 | EXPORT_SYMBOL(__kmalloc); |
2665 | 2702 | ||
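Note the two size fields in the __kmalloc() event: size is what the caller asked for, while s->size is the object size of the kmalloc cache that served it, so the pair directly exposes rounding slack. A worked example, assuming the default power-of-two kmalloc caches with no debug padding:

	#include <stddef.h>

	/* Round a request up to its kmalloc cache size (assumed layout:
	 * power-of-two caches starting at 8 bytes, no debug padding). */
	static size_t kmalloc_cache_size(size_t size)
	{
		size_t cache = 8;

		while (cache < size)
			cache <<= 1;
		return cache;
	}

	/* __kmalloc(100, ...) is served from the 128-byte cache, so the
	 * event reports 100 requested / 128 allocated: 28 bytes of slack. */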
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | |||
2678 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 2715 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
2679 | { | 2716 | { |
2680 | struct kmem_cache *s; | 2717 | struct kmem_cache *s; |
2718 | void *ret; | ||
2681 | 2719 | ||
2682 | if (unlikely(size > PAGE_SIZE)) | 2720 | if (unlikely(size > PAGE_SIZE)) { |
2683 | return kmalloc_large_node(size, flags, node); | 2721 | ret = kmalloc_large_node(size, flags, node); |
2722 | |||
2723 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
2724 | _RET_IP_, ret, | ||
2725 | size, PAGE_SIZE << get_order(size), | ||
2726 | flags, node); | ||
2727 | |||
2728 | return ret; | ||
2729 | } | ||
2684 | 2730 | ||
2685 | s = get_slab(size, flags); | 2731 | s = get_slab(size, flags); |
2686 | 2732 | ||
2687 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2733 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2688 | return s; | 2734 | return s; |
2689 | 2735 | ||
2690 | return slab_alloc(s, flags, node, __builtin_return_address(0)); | 2736 | ret = slab_alloc(s, flags, node, _RET_IP_); |
2737 | |||
2738 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2739 | size, s->size, flags, node); | ||
2740 | |||
2741 | return ret; | ||
2691 | } | 2742 | } |
2692 | EXPORT_SYMBOL(__kmalloc_node); | 2743 | EXPORT_SYMBOL(__kmalloc_node); |
2693 | #endif | 2744 | #endif |
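In the kmalloc_large_node() branch there is no kmem_cache, so the hook reports PAGE_SIZE << get_order(size) as the allocated footprint: whole pages rounded up to a power-of-two order. A small sketch of that rounding; the 4 KiB page size in the example is an assumption:

	/* Mirror of get_order(): smallest order whose span covers size. */
	static unsigned int order_for(size_t size, size_t page_size)
	{
		unsigned int order = 0;

		while ((page_size << order) < size)
			order++;
		return order;
	}

	/* With 4 KiB pages, a 12 KiB request is order 2, so the event's
	 * "allocated bytes" field reads 16384 against 12288 requested. */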
@@ -2744,7 +2795,9 @@ void kfree(const void *x) | |||
2744 | put_page(page); | 2795 | put_page(page); |
2745 | return; | 2796 | return; |
2746 | } | 2797 | } |
2747 | slab_free(page->slab, page, object, __builtin_return_address(0)); | 2798 | slab_free(page->slab, page, object, _RET_IP_); |
2799 | |||
2800 | kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x); | ||
2748 | } | 2801 | } |
2749 | EXPORT_SYMBOL(kfree); | 2802 | EXPORT_SYMBOL(kfree); |
2750 | 2803 | ||
@@ -3202,9 +3255,10 @@ static struct notifier_block __cpuinitdata slab_notifier = { | |||
3202 | 3255 | ||
3203 | #endif | 3256 | #endif |
3204 | 3257 | ||
3205 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) | 3258 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) |
3206 | { | 3259 | { |
3207 | struct kmem_cache *s; | 3260 | struct kmem_cache *s; |
3261 | void *ret; | ||
3208 | 3262 | ||
3209 | if (unlikely(size > PAGE_SIZE)) | 3263 | if (unlikely(size > PAGE_SIZE)) |
3210 | return kmalloc_large(size, gfpflags); | 3264 | return kmalloc_large(size, gfpflags); |
@@ -3214,13 +3268,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) | |||
3214 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3268 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3215 | return s; | 3269 | return s; |
3216 | 3270 | ||
3217 | return slab_alloc(s, gfpflags, -1, caller); | 3271 | ret = slab_alloc(s, gfpflags, -1, caller); |
3272 | |||
3273 | /* Honor the call site pointer we received. */ | ||
3274 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size, | ||
3275 | s->size, gfpflags); | ||
3276 | |||
3277 | return ret; | ||
3218 | } | 3278 | } |
3219 | 3279 | ||
3220 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | 3280 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
3221 | int node, void *caller) | 3281 | int node, unsigned long caller) |
3222 | { | 3282 | { |
3223 | struct kmem_cache *s; | 3283 | struct kmem_cache *s; |
3284 | void *ret; | ||
3224 | 3285 | ||
3225 | if (unlikely(size > PAGE_SIZE)) | 3286 | if (unlikely(size > PAGE_SIZE)) |
3226 | return kmalloc_large_node(size, gfpflags, node); | 3287 | return kmalloc_large_node(size, gfpflags, node); |
@@ -3230,7 +3291,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3230 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3291 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3231 | return s; | 3292 | return s; |
3232 | 3293 | ||
3233 | return slab_alloc(s, gfpflags, node, caller); | 3294 | ret = slab_alloc(s, gfpflags, node, caller); |
3295 | |||
3296 | /* Honor the call site pointer we received. */ | ||
3297 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret, | ||
3298 | size, s->size, gfpflags, node); | ||
3299 | |||
3300 | return ret; | ||
3234 | } | 3301 | } |
3235 | 3302 | ||
3236 | #ifdef CONFIG_SLUB_DEBUG | 3303 | #ifdef CONFIG_SLUB_DEBUG |
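The *_track_caller() variants take the call site as an explicit argument because they back helpers that allocate on someone else's behalf; forwarding the caller keeps both the SLAB_STORE_USER tracking and the new kmemtrace events pointed at the real call site. A hedged kernel-side sketch of such a helper (my_strdup is illustrative; the in-tree user is kstrdup() via the kmalloc_track_caller() macro):

	static char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		/* _RET_IP_ here is my_strdup()'s caller, so the allocation
		 * is attributed to them rather than to this helper. */
		char *buf = __kmalloc_track_caller(len, gfp, _RET_IP_);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}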
@@ -3429,7 +3496,7 @@ static void resiliency_test(void) {}; | |||
3429 | 3496 | ||
3430 | struct location { | 3497 | struct location { |
3431 | unsigned long count; | 3498 | unsigned long count; |
3432 | void *addr; | 3499 | unsigned long addr; |
3433 | long long sum_time; | 3500 | long long sum_time; |
3434 | long min_time; | 3501 | long min_time; |
3435 | long max_time; | 3502 | long max_time; |
@@ -3477,7 +3544,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, | |||
3477 | { | 3544 | { |
3478 | long start, end, pos; | 3545 | long start, end, pos; |
3479 | struct location *l; | 3546 | struct location *l; |
3480 | void *caddr; | 3547 | unsigned long caddr; |
3481 | unsigned long age = jiffies - track->when; | 3548 | unsigned long age = jiffies - track->when; |
3482 | 3549 | ||
3483 | start = -1; | 3550 | start = -1; |