 Documentation/kmemleak.txt        |  31 +
 arch/x86/kernel/aperture_64.c     |   6 +
 arch/x86/kernel/pci-dma.c         |   6 +
 arch/x86/mm/kmemcheck/kmemcheck.c |  14 +
 include/linux/kmemcheck.h         |   7 +
 include/linux/kmemleak.h          |  18 +-
 mm/bootmem.c                      |   6 +-
 mm/kmemleak.c                     | 336 ++++++++++++++++++++------------
 8 files changed, 323 insertions(+), 101 deletions(-)
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 89068030b01b..34f6638aa5ac 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -27,6 +27,13 @@ To trigger an intermediate memory scan:
 
   # echo scan > /sys/kernel/debug/kmemleak
 
+To clear the list of all current possible memory leaks:
+
+  # echo clear > /sys/kernel/debug/kmemleak
+
+New leaks will then come up upon reading /sys/kernel/debug/kmemleak
+again.
+
 Note that the orphan objects are listed in the order they were allocated
 and one object at the beginning of the list may cause other subsequent
 objects to be reported as orphan.
@@ -42,6 +49,9 @@ Memory scanning parameters can be modified at run-time by writing to the
   scan=<secs>	- set the automatic memory scanning period in seconds
 		  (default 600, 0 to stop the automatic scanning)
   scan		- trigger a memory scan
+  clear		- clear the list of current memory leak suspects by
+		  marking all currently reported unreferenced objects grey
+  dump=<addr>	- dump information about the object found at <addr>
 
 Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
 the kernel command line.
@@ -86,6 +96,27 @@ avoid this, kmemleak can also store the number of values pointing to an
 address inside the block address range that need to be found so that the
 block is not considered a leak. One example is __vmalloc().
 
+Testing specific sections with kmemleak
+---------------------------------------
+
+Upon initial bootup your /sys/kernel/debug/kmemleak output page may be
+quite extensive. This can also be the case if you have very buggy code
+when doing development. To work around these situations you can use the
+'clear' command to clear all reported unreferenced objects from the
+/sys/kernel/debug/kmemleak output. By issuing a 'scan' after a 'clear'
+you can find new unreferenced objects; this should help with testing
+specific sections of code.
+
+To test a critical section on demand with a clean kmemleak, do:
+
+  # echo clear > /sys/kernel/debug/kmemleak
+  ... test your kernel or modules ...
+  # echo scan > /sys/kernel/debug/kmemleak
+
+Then, as usual, get your report with:
+
+  # cat /sys/kernel/debug/kmemleak
+
 Kmemleak API
 ------------
 
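The clear/scan cycle documented above is easy to drive from a test harness. Below is a minimal user-space sketch (not part of this patch; names invented) that exercises the same debugfs interface. It assumes debugfs is mounted at /sys/kernel/debug and that the program runs as root.

/* kmemleak_cycle.c - clear, run a test, scan, then print the report */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char *kmemleak_path = "/sys/kernel/debug/kmemleak";

static int kmemleak_cmd(const char *cmd)
{
	int fd = open(kmemleak_path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	kmemleak_cmd("clear");		/* forget previously reported leaks */
	/* ... exercise the kernel code under test here ... */
	kmemleak_cmd("scan");		/* look for new unreferenced objects */

	fd = open(kmemleak_path, O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}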
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 676debfc1702..128111d8ffe0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
+#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void)
 	 * code for safe
 	 */
 	p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+	/*
+	 * Kmemleak should not scan this block as it may not be mapped via the
+	 * kernel direct mapping.
+	 */
+	kmemleak_ignore(p);
 	if (!p || __pa(p)+aper_size > 0xffffffff) {
 		printk(KERN_ERR
 			"Cannot allocate aperture memory hole (%p,%uK)\n",
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1a041bcf506b..fa80f60e9607 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -3,6 +3,7 @@
 #include <linux/dmar.h>
 #include <linux/bootmem.h>
 #include <linux/pci.h>
+#include <linux/kmemleak.h>
 
 #include <asm/proto.h>
 #include <asm/dma.h>
@@ -88,6 +89,11 @@ void __init dma32_reserve_bootmem(void)
 	size = roundup(dma32_bootmem_size, align);
 	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
 				 512ULL<<20);
+	/*
+	 * Kmemleak should not scan this block as it may not be mapped via the
+	 * kernel direct mapping.
+	 */
+	kmemleak_ignore(dma32_bootmem_ptr);
 	if (dma32_bootmem_ptr)
 		dma32_bootmem_size = size;
 	else
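Both arch hunks above use kmemleak_ignore() because the blocks may sit outside the kernel direct mapping, so scanning them could fault. For orientation, here is a hedged sketch (invented function and variable names, not from this patch) of how the related annotations differ for normally mapped memory:

#include <linux/slab.h>
#include <linux/kmemleak.h>

static void *stash;	/* referenced only from hardware/firmware state */
static void *dma_buf;	/* holds device data, not kernel pointers */

static int example_annotations(void)
{
	stash = kmalloc(128, GFP_KERNEL);
	if (stash)
		/* gray: never report as a leak, but keep scanning it */
		kmemleak_not_leak(stash);

	dma_buf = kmalloc(4096, GFP_KERNEL);
	if (dma_buf)
		/* keep leak tracking, but do not scan the contents */
		kmemleak_no_scan(dma_buf);

	/* kmemleak_ignore(ptr) would do both: never report, never scan */
	return 0;
}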
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 2c55ed098654..528bf954eb74 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
 	kmemcheck_shadow_set(shadow, size);
 }
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+	enum kmemcheck_shadow status;
+	void *shadow;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return true;
+
+	status = kmemcheck_shadow_test(shadow, size);
+
+	return status == KMEMCHECK_SHADOW_INITIALIZED;
+}
+
 /* Access may cross page boundary */
 static void kmemcheck_read(struct pt_regs *regs,
 			unsigned long addr, unsigned int size)
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e84..dc2fd545db00 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
 #else
 #define kmemcheck_enabled 0
 
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
 {
 }
 
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+	return true;
+}
+
 #endif /* CONFIG_KMEMCHECK */
 
 /*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714e..3c7497d46ee9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
 
 #ifdef CONFIG_DEBUG_KMEMLEAK
 
-extern void kmemleak_init(void);
-extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
-			   gfp_t gfp);
-extern void kmemleak_free(const void *ptr);
-extern void kmemleak_free_part(const void *ptr, size_t size);
-extern void kmemleak_padding(const void *ptr, unsigned long offset,
-			     size_t size);
-extern void kmemleak_not_leak(const void *ptr);
-extern void kmemleak_ignore(const void *ptr);
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-			       size_t length, gfp_t gfp);
-extern void kmemleak_no_scan(const void *ptr);
+extern void kmemleak_init(void) __ref;
+extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+			   gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_padding(const void *ptr, unsigned long offset,
+			     size_t size) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
+			       size_t length, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
 					    int min_count, unsigned long flags,
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 701740c9e81b..555d5d2731c6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -521,7 +521,11 @@ find_block:
 	region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
 			start_off);
 	memset(region, 0, size);
-	kmemleak_alloc(region, size, 1, 0);
+	/*
+	 * The min_count is set to 0 so that bootmem allocated blocks
+	 * are never reported as leaks.
+	 */
+	kmemleak_alloc(region, size, 0, 0);
 	return region;
 }
 
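The bootmem hunk relies on the min_count convention of kmemleak_alloc(). A short sketch of the three useful values (illustrative only - the calls below are alternatives for one block, and annotating the same pointer repeatedly would in reality make kmemleak complain about a duplicate object):

#include <linux/slab.h>
#include <linux/kmemleak.h>

static void min_count_examples(const void *ptr, size_t size)
{
	/* 1: report as a leak unless at least one pointer to it is found */
	kmemleak_alloc(ptr, size, 1, GFP_KERNEL);

	/* 0: gray - never reported (the bootmem case), but still scanned */
	kmemleak_alloc(ptr, size, 0, GFP_KERNEL);

	/* -1: black - neither reported nor scanned */
	kmemleak_alloc(ptr, size, -1, GFP_KERNEL);
}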
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 487267310a84..4ea4510e2996 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,11 +92,13 @@
 #include <linux/string.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/workqueue.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
+#include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
 
 /*
@@ -107,6 +109,7 @@
 #define SECS_FIRST_SCAN	60	/* delay before the first scan */
 #define SECS_SCAN_WAIT	600	/* subsequent auto scanning delay */
 #define GRAY_LIST_PASSES 25	/* maximum number of gray list scans */
+#define MAX_SCAN_SIZE	4096	/* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER sizeof(void *)
 
@@ -120,6 +123,9 @@ struct kmemleak_scan_area {
 	size_t length;
 };
 
+#define KMEMLEAK_GREY	0
+#define KMEMLEAK_BLACK	-1
+
 /*
  * Structure holding the metadata for each allocated memory block.
  * Modifications to such objects should be made while holding the
@@ -161,6 +167,15 @@ struct kmemleak_object {
 /* flag set on newly allocated objects */
 #define OBJECT_NEW		(1 << 3)
 
+/* number of bytes to print per line; must be 16 or 32 */
+#define HEX_ROW_SIZE		16
+/* number of bytes to print at a time (1, 2, 4, 8) */
+#define HEX_GROUP_SIZE		1
+/* include ASCII after the hex output */
+#define HEX_ASCII		1
+/* max number of lines to be printed */
+#define HEX_MAX_LINES		2
+
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
@@ -228,11 +243,14 @@ struct early_log {
 	int min_count;			/* minimum reference count */
 	unsigned long offset;		/* scan area offset */
 	size_t length;			/* scan area length */
+	unsigned long trace[MAX_TRACE];	/* stack trace */
+	unsigned int trace_len;		/* stack trace length */
 };
 
 /* early logging buffer and current position */
-static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
-static int crt_early_log;
+static struct early_log
+	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
+static int crt_early_log __initdata;
 
 static void kmemleak_disable(void);
 
@@ -255,6 +273,35 @@ static void kmemleak_disable(void);
 } while (0)
 
 /*
+ * Printing of the objects hex dump to the seq file. The number of lines to be
+ * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
+ * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
+ * with the object->lock held.
+ */
+static void hex_dump_object(struct seq_file *seq,
+			    struct kmemleak_object *object)
+{
+	const u8 *ptr = (const u8 *)object->pointer;
+	int i, len, remaining;
+	unsigned char linebuf[HEX_ROW_SIZE * 5];
+
+	/* limit the number of lines to HEX_MAX_LINES */
+	remaining = len =
+		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
+
+	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
+	for (i = 0; i < len; i += HEX_ROW_SIZE) {
+		int linelen = min(remaining, HEX_ROW_SIZE);
+
+		remaining -= HEX_ROW_SIZE;
+		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
+				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
+				   HEX_ASCII);
+		seq_printf(seq, "    %s\n", linebuf);
+	}
+}
+
+/*
  * Object colors, encoded with count and min_count:
  * - white - orphan object, not enough references to it (count < min_count)
 * - gray - not orphan, not marked as false positive (min_count == 0) or
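hex_dump_object() above leans on hex_dump_to_buffer() from lib/hexdump.c to format each row. A stand-alone sketch of that call (kernel context assumed; the data is invented, not from this patch):

#include <linux/kernel.h>

static void hexdump_example(void)
{
	const u8 data[16] = { 0xde, 0xad, 0xbe, 0xef };
	char line[16 * 5];	/* generous: hex digits, spacing, ASCII column */

	/* 16 bytes per row, 1-byte groups, with the ASCII column enabled */
	hex_dump_to_buffer(data, sizeof(data), 16, 1, line, sizeof(line), 1);
	printk(KERN_INFO "  %s\n", line);
}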
@@ -264,19 +311,21 @@ static void kmemleak_disable(void);
  * Newly created objects don't have any color assigned (object->count == -1)
  * before the next memory scan when they become white.
  */
-static int color_white(const struct kmemleak_object *object)
+static bool color_white(const struct kmemleak_object *object)
 {
-	return object->count != -1 && object->count < object->min_count;
+	return object->count != KMEMLEAK_BLACK &&
+		object->count < object->min_count;
 }
 
-static int color_gray(const struct kmemleak_object *object)
+static bool color_gray(const struct kmemleak_object *object)
 {
-	return object->min_count != -1 && object->count >= object->min_count;
+	return object->min_count != KMEMLEAK_BLACK &&
+		object->count >= object->min_count;
 }
 
-static int color_black(const struct kmemleak_object *object)
+static bool color_black(const struct kmemleak_object *object)
 {
-	return object->min_count == -1;
+	return object->min_count == KMEMLEAK_BLACK;
 }
 
 /*
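These predicates are the core of kmemleak's coloring: count is the number of references found during the last scan, min_count the number needed for the object not to be an orphan. The encoding can be checked with this stand-alone user-space transcription (plain C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

struct obj { int count, min_count; };

static bool white(const struct obj *o)	/* potential leak */
{
	return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
}

static bool gray(const struct obj *o)	/* referenced, or min_count == 0 */
{
	return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
}

static bool black(const struct obj *o)	/* ignored entirely */
{
	return o->min_count == KMEMLEAK_BLACK;
}

int main(void)
{
	const struct obj leak = { .count = 0, .min_count = 1 };
	const struct obj referenced = { .count = 2, .min_count = 1 };
	const struct obj ignored = { .count = 0, .min_count = KMEMLEAK_BLACK };

	printf("leak: white=%d  referenced: gray=%d  ignored: black=%d\n",
	       white(&leak), gray(&referenced), black(&ignored));
	return 0;
}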
@@ -284,7 +333,7 @@ static int color_black(const struct kmemleak_object *object)
  * not be deleted and have a minimum age to avoid false positives caused by
  * pointers temporarily stored in CPU registers.
  */
-static int unreferenced_object(struct kmemleak_object *object)
+static bool unreferenced_object(struct kmemleak_object *object)
 {
 	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
 		time_before_eq(object->jiffies + jiffies_min_age,
@@ -304,6 +353,7 @@ static void print_unreferenced(struct seq_file *seq,
 		   object->pointer, object->size);
 	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
 		   object->comm, object->pid, object->jiffies);
+	hex_dump_object(seq, object);
 	seq_printf(seq, "  backtrace:\n");
 
 	for (i = 0; i < object->trace_len; i++) {
@@ -330,6 +380,7 @@ static void dump_object_info(struct kmemleak_object *object)
 		  object->comm, object->pid, object->jiffies);
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
+	pr_notice("  flags = 0x%lx\n", object->flags);
 	pr_notice("  backtrace:\n");
 	print_stack_trace(&trace, 4);
 }
@@ -434,21 +485,36 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 }
 
 /*
+ * Save stack trace to the given array of MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+	struct stack_trace stack_trace;
+
+	stack_trace.max_entries = MAX_TRACE;
+	stack_trace.nr_entries = 0;
+	stack_trace.entries = trace;
+	stack_trace.skip = 2;
+	save_stack_trace(&stack_trace);
+
+	return stack_trace.nr_entries;
+}
+
+/*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
  * memory block and add it to the object_list and object_tree_root.
  */
-static void create_object(unsigned long ptr, size_t size, int min_count,
-			  gfp_t gfp)
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+					     int min_count, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct prio_tree_node *node;
-	struct stack_trace trace;
 
 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!object) {
 		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
-		return;
+		return NULL;
 	}
 
 	INIT_LIST_HEAD(&object->object_list);
@@ -482,18 +548,14 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	}
 
 	/* kernel backtrace */
-	trace.max_entries = MAX_TRACE;
-	trace.nr_entries = 0;
-	trace.entries = object->trace;
-	trace.skip = 1;
-	save_stack_trace(&trace);
-	object->trace_len = trace.nr_entries;
+	object->trace_len = __save_stack_trace(object->trace);
 
 	INIT_PRIO_TREE_NODE(&object->tree_node);
 	object->tree_node.start = ptr;
 	object->tree_node.last = ptr + size - 1;
 
 	write_lock_irqsave(&kmemleak_lock, flags);
+
 	min_addr = min(min_addr, ptr);
 	max_addr = max(max_addr, ptr + size);
 	node = prio_tree_insert(&object_tree_root, &object->tree_node);
@@ -504,20 +566,19 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	 * random memory blocks.
 	 */
 	if (node != &object->tree_node) {
-		unsigned long flags;
-
 		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
 			      "(already existing)\n", ptr);
 		object = lookup_object(ptr, 1);
-		spin_lock_irqsave(&object->lock, flags);
+		spin_lock(&object->lock);
 		dump_object_info(object);
-		spin_unlock_irqrestore(&object->lock, flags);
+		spin_unlock(&object->lock);
 
 		goto out;
 	}
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
 	write_unlock_irqrestore(&kmemleak_lock, flags);
+	return object;
 }
 
 /*
@@ -604,46 +665,55 @@ static void delete_object_part(unsigned long ptr, size_t size)
 
 	put_object(object);
 }
-/*
- * Make a object permanently as gray-colored so that it can no longer be
- * reported as a leak. This is used in general to mark a false positive.
- */
-static void make_gray_object(unsigned long ptr)
+
+static void __paint_it(struct kmemleak_object *object, int color)
+{
+	object->min_count = color;
+	if (color == KMEMLEAK_BLACK)
+		object->flags |= OBJECT_NO_SCAN;
+}
+
+static void paint_it(struct kmemleak_object *object, int color)
 {
 	unsigned long flags;
+
+	spin_lock_irqsave(&object->lock, flags);
+	__paint_it(object, color);
+	spin_unlock_irqrestore(&object->lock, flags);
+}
+
+static void paint_ptr(unsigned long ptr, int color)
+{
 	struct kmemleak_object *object;
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
+		kmemleak_warn("Trying to color unknown object "
+			      "at 0x%08lx as %s\n", ptr,
+			      (color == KMEMLEAK_GREY) ? "Grey" :
+			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 		return;
 	}
-
-	spin_lock_irqsave(&object->lock, flags);
-	object->min_count = 0;
-	spin_unlock_irqrestore(&object->lock, flags);
+	paint_it(object, color);
 	put_object(object);
 }
 
 /*
+ * Mark an object permanently as gray-colored so that it can no longer be
+ * reported as a leak. This is used in general to mark a false positive.
+ */
+static void make_gray_object(unsigned long ptr)
+{
+	paint_ptr(ptr, KMEMLEAK_GREY);
+}
+
+/*
  * Mark the object as black-colored so that it is ignored from scans and
  * reporting.
  */
 static void make_black_object(unsigned long ptr)
 {
-	unsigned long flags;
-	struct kmemleak_object *object;
-
-	object = find_and_get_object(ptr, 0);
-	if (!object) {
-		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
-		return;
-	}
-
-	spin_lock_irqsave(&object->lock, flags);
-	object->min_count = -1;
-	spin_unlock_irqrestore(&object->lock, flags);
-	put_object(object);
+	paint_ptr(ptr, KMEMLEAK_BLACK);
 }
 
 /*
@@ -715,14 +785,15 @@ static void object_no_scan(unsigned long ptr)
  * Log an early kmemleak_* call to the early_log buffer. These calls will be
  * processed later once kmemleak is fully initialized.
  */
-static void log_early(int op_type, const void *ptr, size_t size,
-		      int min_count, unsigned long offset, size_t length)
+static void __init log_early(int op_type, const void *ptr, size_t size,
+			     int min_count, unsigned long offset, size_t length)
 {
 	unsigned long flags;
 	struct early_log *log;
 
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		pr_warning("Early log buffer exceeded\n");
+		pr_warning("Early log buffer exceeded, "
+			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
 		kmemleak_disable();
 		return;
 	}
@@ -739,16 +810,45 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->min_count = min_count;
 	log->offset = offset;
 	log->length = length;
+	if (op_type == KMEMLEAK_ALLOC)
+		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
 	local_irq_restore(flags);
 }
 
 /*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc(struct early_log *log)
+{
+	struct kmemleak_object *object;
+	unsigned long flags;
+	int i;
+
+	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+		return;
+
+	/*
+	 * RCU locking needed to ensure object is not freed via put_object().
+	 */
+	rcu_read_lock();
+	object = create_object((unsigned long)log->ptr, log->size,
+			       log->min_count, GFP_KERNEL);
+	spin_lock_irqsave(&object->lock, flags);
+	for (i = 0; i < log->trace_len; i++)
+		object->trace[i] = log->trace[i];
+	object->trace_len = log->trace_len;
+	spin_unlock_irqrestore(&object->lock, flags);
+	rcu_read_unlock();
+}
+
+/*
  * Memory allocation function callback. This function is called from the
  * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
  * vmalloc etc.).
  */
-void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
+void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
+			  gfp_t gfp)
 {
 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 
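log_early() and early_alloc() together form a replay buffer: kmemleak_* calls that arrive before the allocator infrastructure is ready are recorded, then replayed from kmemleak_init(). The same idiom, boiled down to a runnable user-space sketch with invented names (not from this patch):

#include <stdio.h>
#include <stddef.h>

enum op { OP_ALLOC, OP_FREE };

struct early_rec {
	enum op op;
	const void *ptr;
	size_t size;
};

static struct early_rec early_buf[64];
static int early_count;
static int subsys_ready;

static void handle(enum op op, const void *ptr, size_t size)
{
	if (!subsys_ready) {
		/* too early: just record the call for later */
		if (early_count < 64)
			early_buf[early_count++] =
				(struct early_rec){ op, ptr, size };
		return;
	}
	printf("%s %p (%zu bytes)\n",
	       op == OP_ALLOC ? "alloc" : "free", ptr, size);
}

static void subsys_init(void)
{
	int i;

	subsys_ready = 1;
	/* replay everything that happened before we were ready */
	for (i = 0; i < early_count; i++)
		handle(early_buf[i].op, early_buf[i].ptr, early_buf[i].size);
}

int main(void)
{
	int x;

	handle(OP_ALLOC, &x, sizeof(x));	/* buffered */
	subsys_init();				/* replayed here */
	return 0;
}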
@@ -763,7 +863,7 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
  * Memory freeing function callback. This function is called from the kernel
  * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
  */
-void kmemleak_free(const void *ptr)
+void __ref kmemleak_free(const void *ptr)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -778,7 +878,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free);
  * Partial memory freeing function callback. This function is usually called
  * from bootmem allocator when (part of) a memory block is freed.
  */
-void kmemleak_free_part(const void *ptr, size_t size)
+void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -793,7 +893,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
  * Mark an already allocated memory block as a false positive. This will cause
  * the block to no longer be reported as leak and always be scanned.
  */
-void kmemleak_not_leak(const void *ptr)
+void __ref kmemleak_not_leak(const void *ptr)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -809,7 +909,7 @@ EXPORT_SYMBOL(kmemleak_not_leak);
  * corresponding block is not a leak and does not contain any references to
  * other allocated memory blocks.
  */
-void kmemleak_ignore(const void *ptr)
+void __ref kmemleak_ignore(const void *ptr)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -823,8 +923,8 @@ EXPORT_SYMBOL(kmemleak_ignore);
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
-			gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
+			      size_t length, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -838,7 +938,7 @@ EXPORT_SYMBOL(kmemleak_scan_area);
 /*
  * Inform kmemleak not to scan the given memory block.
  */
-void kmemleak_no_scan(const void *ptr)
+void __ref kmemleak_no_scan(const void *ptr)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
@@ -882,15 +982,22 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 
 	for (ptr = start; ptr < end; ptr++) {
-		unsigned long flags;
-		unsigned long pointer = *ptr;
 		struct kmemleak_object *object;
+		unsigned long flags;
+		unsigned long pointer;
 
 		if (allow_resched)
 			cond_resched();
 		if (scan_should_stop())
 			break;
 
+		/* don't scan uninitialized memory */
+		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
+						  BYTES_PER_POINTER))
+			continue;
+
+		pointer = *ptr;
+
 		object = find_and_get_object(pointer, 1);
 		if (!object)
 			continue;
@@ -949,10 +1056,21 @@ static void scan_object(struct kmemleak_object *object)
 	if (!(object->flags & OBJECT_ALLOCATED))
 		/* already freed object */
 		goto out;
-	if (hlist_empty(&object->area_list))
-		scan_block((void *)object->pointer,
-			   (void *)(object->pointer + object->size), object, 0);
-	else
+	if (hlist_empty(&object->area_list)) {
+		void *start = (void *)object->pointer;
+		void *end = (void *)(object->pointer + object->size);
+
+		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
+		       !(object->flags & OBJECT_NO_SCAN)) {
+			scan_block(start, min(start + MAX_SCAN_SIZE, end),
+				   object, 0);
+			start += MAX_SCAN_SIZE;
+
+			spin_unlock_irqrestore(&object->lock, flags);
+			cond_resched();
+			spin_lock_irqsave(&object->lock, flags);
+		}
+	} else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
 			scan_block((void *)(object->pointer + area->offset),
 				   (void *)(object->pointer + area->offset
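The scan_object() change bounds how long object->lock is held: large blocks are now scanned MAX_SCAN_SIZE bytes at a time with a cond_resched() window between chunks. Isolating the pattern (kernel-style sketch, only meaningful inside mm/kmemleak.c; note the flag re-check after the lock is re-acquired, because the object may have been freed or marked no-scan while the lock was dropped):

/* Sketch of the chunked scan above, with the locking made explicit. */
static void scan_in_chunks(struct kmemleak_object *object)
{
	void *start = (void *)object->pointer;
	void *end = (void *)(object->pointer + object->size);
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	while (start < end && (object->flags & OBJECT_ALLOCATED) &&
	       !(object->flags & OBJECT_NO_SCAN)) {
		scan_block(start, min(start + MAX_SCAN_SIZE, end), object, 0);
		start += MAX_SCAN_SIZE;

		/* give freeing paths and the scheduler a chance to run */
		spin_unlock_irqrestore(&object->lock, flags);
		cond_resched();
		spin_lock_irqsave(&object->lock, flags);
	}
	spin_unlock_irqrestore(&object->lock, flags);
}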
@@ -970,7 +1088,6 @@ static void kmemleak_scan(void)
 {
 	unsigned long flags;
 	struct kmemleak_object *object, *tmp;
-	struct task_struct *task;
 	int i;
 	int new_leaks = 0;
 	int gray_list_pass = 0;
@@ -1037,15 +1154,16 @@ static void kmemleak_scan(void)
 	}
 
 	/*
-	 * Scanning the task stacks may introduce false negatives and it is
-	 * not enabled by default.
+	 * Scanning the task stacks (may introduce false negatives).
 	 */
 	if (kmemleak_stack_scan) {
+		struct task_struct *p, *g;
+
 		read_lock(&tasklist_lock);
-		for_each_process(task)
-			scan_block(task_stack_page(task),
-				   task_stack_page(task) + THREAD_SIZE,
-				   NULL, 0);
+		do_each_thread(g, p) {
+			scan_block(task_stack_page(p), task_stack_page(p) +
+				   THREAD_SIZE, NULL, 0);
+		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	}
 
@@ -1170,7 +1288,7 @@ static int kmemleak_scan_thread(void *arg)
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
-void start_scan_thread(void)
+static void start_scan_thread(void)
 {
 	if (scan_thread)
 		return;
@@ -1185,7 +1303,7 @@ void start_scan_thread(void)
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
-void stop_scan_thread(void)
+static void stop_scan_thread(void)
 {
 	if (scan_thread) {
 		kthread_stop(scan_thread);
@@ -1294,6 +1412,49 @@ static int kmemleak_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
+static int dump_str_object_info(const char *str)
+{
+	unsigned long flags;
+	struct kmemleak_object *object;
+	unsigned long addr;
+
+	addr = simple_strtoul(str, NULL, 0);
+	object = find_and_get_object(addr, 0);
+	if (!object) {
+		pr_info("Unknown object at 0x%08lx\n", addr);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&object->lock, flags);
+	dump_object_info(object);
+	spin_unlock_irqrestore(&object->lock, flags);
+
+	put_object(object);
+	return 0;
+}
+
+/*
+ * We use grey instead of black to ensure we can do future scans on the same
+ * objects. If we did not do future scans these black objects could
+ * potentially contain references to newly allocated objects in the future and
+ * we'd end up with false positives.
+ */
+static void kmemleak_clear(void)
+{
+	struct kmemleak_object *object;
+	unsigned long flags;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&object->lock, flags);
+		if ((object->flags & OBJECT_REPORTED) &&
+		    unreferenced_object(object))
+			__paint_it(object, KMEMLEAK_GREY);
+		spin_unlock_irqrestore(&object->lock, flags);
+	}
+	rcu_read_unlock();
+}
+
 /*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1305,6 +1466,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
 *   scan=... - set the automatic memory scanning period in seconds (0 to
 *		disable it)
 *   scan	- trigger a memory scan
+*   clear	- mark all current reported unreferenced kmemleak objects as
+*		  grey to ignore printing them
+*   dump=...	- dump information about the object found at the given address
 */
 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 			      size_t size, loff_t *ppos)
@@ -1345,6 +1509,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 		}
 	} else if (strncmp(buf, "scan", 4) == 0)
 		kmemleak_scan();
+	else if (strncmp(buf, "clear", 5) == 0)
+		kmemleak_clear();
+	else if (strncmp(buf, "dump=", 5) == 0)
+		ret = dump_str_object_info(buf + 5);
 	else
 		ret = -EINVAL;
 
@@ -1371,7 +1539,7 @@ static const struct file_operations kmemleak_fops = {
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
-static int kmemleak_cleanup_thread(void *arg)
+static void kmemleak_do_cleanup(struct work_struct *work)
 {
 	struct kmemleak_object *object;
 
@@ -1383,22 +1551,9 @@ static int kmemleak_cleanup_thread(void *arg)
 		delete_object_full(object->pointer);
 	rcu_read_unlock();
 	mutex_unlock(&scan_mutex);
-
-	return 0;
 }
 
-/*
- * Start the clean-up thread.
- */
-static void kmemleak_cleanup(void)
-{
-	struct task_struct *cleanup_thread;
-
-	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
-				     "kmemleak-clean");
-	if (IS_ERR(cleanup_thread))
-		pr_warning("Failed to create the clean-up thread\n");
-}
+static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
 
 /*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
@@ -1416,7 +1571,7 @@ static void kmemleak_disable(void)
 
 	/* check whether it is too early for a kernel thread */
 	if (atomic_read(&kmemleak_initialized))
-		kmemleak_cleanup();
+		schedule_work(&cleanup_work);
 
 	pr_info("Kernel memory leak detector disabled\n");
 }
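The clean-up rework above is the standard kthread-to-workqueue conversion: a statically declared work item can be scheduled from almost any context, cannot fail at the call site the way kthread_run() can, and a second schedule_work() call is simply a no-op while the work is still queued. The idiom in isolation (hypothetical names, kernel module context assumed):

#include <linux/workqueue.h>

static void my_cleanup(struct work_struct *work)
{
	/* ... free resources; serialize with other users via a mutex ... */
}

static DECLARE_WORK(my_cleanup_work, my_cleanup);

static void my_disable(void)
{
	/* safe even if called twice: the work only queues once */
	schedule_work(&my_cleanup_work);
}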
@@ -1469,8 +1624,7 @@ void __init kmemleak_init(void)
 
 		switch (log->op_type) {
 		case KMEMLEAK_ALLOC:
-			kmemleak_alloc(log->ptr, log->size, log->min_count,
-				       GFP_KERNEL);
+			early_alloc(log);
 			break;
 		case KMEMLEAK_FREE:
 			kmemleak_free(log->ptr);
@@ -1513,7 +1667,7 @@ static int __init kmemleak_late_init(void)
 	 * after setting kmemleak_initialized and we may end up with
 	 * two clean-up threads but serialized by scan_mutex.
 	 */
-	kmemleak_cleanup();
+	schedule_work(&cleanup_work);
 	return -ENOMEM;
 }
 