| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 17:15:57 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 17:15:57 -0400 |
| commit | 512626a04e72aca60effe111fa0333ed0b195d21 | |
| tree | c22e23b0dcc2dd2ff5a9a96a007de6799e9223de | |
| parent | 8a1ca8cedd108c8e76a6ab34079d0bbb4f244799 | |
| parent | 3aa27bbe7a6536d1ec859d3a97caf3319b5081b7 | |
Merge branch 'for-linus' of git://linux-arm.org/linux-2.6
* 'for-linus' of git://linux-arm.org/linux-2.6:
kmemleak: Add the corresponding MAINTAINERS entry
kmemleak: Simple testing module for kmemleak
kmemleak: Enable the building of the memory leak detector
kmemleak: Remove some of the kmemleak false positives
kmemleak: Add modules support
kmemleak: Add kmemleak_alloc callback from alloc_large_system_hash
kmemleak: Add the vmalloc memory allocation/freeing hooks
kmemleak: Add the slub memory allocation/freeing hooks
kmemleak: Add the slob memory allocation/freeing hooks
kmemleak: Add the slab memory allocation/freeing hooks
kmemleak: Add documentation on the memory leak detector
kmemleak: Add the base support
Manual conflict resolution (with the slab/earlyboot changes) in:
drivers/char/vt.c
init/main.c
mm/slab.c
| -rw-r--r-- | Documentation/kernel-parameters.txt | 4 |
| -rw-r--r-- | Documentation/kmemleak.txt | 142 |
| -rw-r--r-- | MAINTAINERS | 6 |
| -rw-r--r-- | drivers/char/vt.c | 1 |
| -rw-r--r-- | fs/block_dev.c | 6 |
| -rw-r--r-- | include/linux/kmemleak.h | 96 |
| -rw-r--r-- | include/linux/percpu.h | 5 |
| -rw-r--r-- | include/linux/slab.h | 2 |
| -rw-r--r-- | init/main.c | 4 |
| -rw-r--r-- | kernel/module.c | 56 |
| -rw-r--r-- | lib/Kconfig.debug | 32 |
| -rw-r--r-- | mm/Makefile | 2 |
| -rw-r--r-- | mm/kmemleak-test.c | 111 |
| -rw-r--r-- | mm/kmemleak.c | 1498 |
| -rw-r--r-- | mm/page_alloc.c | 11 |
| -rw-r--r-- | mm/slab.c | 32 |
| -rw-r--r-- | mm/slob.c | 7 |
| -rw-r--r-- | mm/slub.c | 5 |
| -rw-r--r-- | mm/vmalloc.c | 30 |
19 files changed, 2043 insertions, 7 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 72d3bf08d79b..7bcdebffdab3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
| @@ -1083,6 +1083,10 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 1083 | Configure the RouterBoard 532 series on-chip | 1083 | Configure the RouterBoard 532 series on-chip |
| 1084 | Ethernet adapter MAC address. | 1084 | Ethernet adapter MAC address. |
| 1085 | 1085 | ||
| 1086 | kmemleak= [KNL] Boot-time kmemleak enable/disable | ||
| 1087 | Valid arguments: on, off | ||
| 1088 | Default: on | ||
| 1089 | |||
| 1086 | kstack=N [X86] Print N words from the kernel stack | 1090 | kstack=N [X86] Print N words from the kernel stack |
| 1087 | in oops dumps. | 1091 | in oops dumps. |
| 1088 | 1092 | ||
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
new file mode 100644
index 000000000000..0112da3b9ab8
--- /dev/null
+++ b/Documentation/kmemleak.txt
| @@ -0,0 +1,142 @@ | |||
| 1 | Kernel Memory Leak Detector | ||
| 2 | =========================== | ||
| 3 | |||
| 4 | Introduction | ||
| 5 | ------------ | ||
| 6 | |||
| 7 | Kmemleak provides a way of detecting possible kernel memory leaks in a | ||
| 8 | manner similar to a tracing garbage collector | ||
| 9 | (http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors), | ||
| 10 | with the difference that the orphan objects are not freed but only | ||
| 11 | reported via /sys/kernel/debug/kmemleak. A similar method is used by the | ||
| 12 | Valgrind tool (memcheck --leak-check) to detect the memory leaks in | ||
| 13 | user-space applications. | ||
| 14 | |||
| 15 | Usage | ||
| 16 | ----- | ||
| 17 | |||
| 18 | CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel | ||
| 19 | thread scans the memory every 10 minutes (by default) and prints any new | ||
| 20 | unreferenced objects found. To trigger an intermediate scan and display | ||
| 21 | all the possible memory leaks: | ||
| 22 | |||
| 23 | # mount -t debugfs nodev /sys/kernel/debug/ | ||
| 24 | # cat /sys/kernel/debug/kmemleak | ||
| 25 | |||
| 26 | Note that the orphan objects are listed in the order they were allocated | ||
| 27 | and one object at the beginning of the list may cause other subsequent | ||
| 28 | objects to be reported as orphan. | ||
| 29 | |||
| 30 | Memory scanning parameters can be modified at run-time by writing to the | ||
| 31 | /sys/kernel/debug/kmemleak file. The following parameters are supported: | ||
| 32 | |||
| 33 | off - disable kmemleak (irreversible) | ||
| 34 | stack=on - enable the task stacks scanning | ||
| 35 | stack=off - disable the task stacks scanning | ||
| 36 | scan=on - start the automatic memory scanning thread | ||
| 37 | scan=off - stop the automatic memory scanning thread | ||
| 38 | scan=<secs> - set the automatic memory scanning period in seconds (0 | ||
| 39 | to disable it) | ||
| 40 | |||
| 41 | Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on | ||
| 42 | the kernel command line. | ||
| 43 | |||
| 44 | Basic Algorithm | ||
| 45 | --------------- | ||
| 46 | |||
| 47 | The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and | ||
| 48 | friends are traced and the pointers, together with additional | ||
| 49 | information like size and stack trace, are stored in a prio search tree. | ||
| 50 | The corresponding freeing function calls are tracked and the pointers | ||
| 51 | removed from the kmemleak data structures. | ||
| 52 | |||
| 53 | An allocated block of memory is considered orphan if no pointer to its | ||
| 54 | start address or to any location inside the block can be found by | ||
| 55 | scanning the memory (including saved registers). This means that there | ||
| 56 | might be no way for the kernel to pass the address of the allocated | ||
| 57 | block to a freeing function and therefore the block is considered a | ||
| 58 | memory leak. | ||
| 59 | |||
| 60 | The scanning algorithm steps: | ||
| 61 | |||
| 62 | 1. mark all objects as white (remaining white objects will later be | ||
| 63 | considered orphan) | ||
| 64 | 2. scan the memory starting with the data section and stacks, checking | ||
| 65 | the values against the addresses stored in the prio search tree. If | ||
| 66 | a pointer to a white object is found, the object is added to the | ||
| 67 | gray list | ||
| 68 | 3. scan the gray objects for matching addresses (some white objects | ||
| 69 | can become gray and added at the end of the gray list) until the | ||
| 70 | gray set is finished | ||
| 71 | 4. the remaining white objects are considered orphan and reported via | ||
| 72 | /sys/kernel/debug/kmemleak | ||
| 73 | |||
| 74 | Some allocated memory blocks have pointers stored in the kernel's | ||
| 75 | internal data structures and they cannot be detected as orphans. To | ||
| 76 | avoid this, kmemleak can also store the number of values pointing to an | ||
| 77 | address inside the block address range that need to be found so that the | ||
| 78 | block is not considered a leak. One example is __vmalloc(). | ||
| 79 | |||
| 80 | Kmemleak API | ||
| 81 | ------------ | ||
| 82 | |||
| 83 | See the include/linux/kmemleak.h header for the function prototypes. | ||
| 84 | |||
| 85 | kmemleak_init - initialize kmemleak | ||
| 86 | kmemleak_alloc - notify of a memory block allocation | ||
| 87 | kmemleak_free - notify of a memory block freeing | ||
| 88 | kmemleak_not_leak - mark an object as not a leak | ||
| 89 | kmemleak_ignore - do not scan or report an object as leak | ||
| 90 | kmemleak_scan_area - add scan areas inside a memory block | ||
| 91 | kmemleak_no_scan - do not scan a memory block | ||
| 92 | kmemleak_erase - erase an old value in a pointer variable | ||
| 93 | kmemleak_alloc_recursive - as kmemleak_alloc but checks the recursiveness | ||
| 94 | kmemleak_free_recursive - as kmemleak_free but checks the recursiveness | ||
| 95 | |||
| 96 | Dealing with false positives/negatives | ||
| 97 | -------------------------------------- | ||
| 98 | |||
| 99 | The false negatives are real memory leaks (orphan objects) but not | ||
| 100 | reported by kmemleak because values found during the memory scanning | ||
| 101 | point to such objects. To reduce the number of false negatives, kmemleak | ||
| 102 | provides the kmemleak_ignore, kmemleak_scan_area, kmemleak_no_scan and | ||
| 103 | kmemleak_erase functions (see above). The task stacks also increase the | ||
| 104 | number of false negatives and their scanning is not enabled by default. | ||
| 105 | |||
| 106 | The false positives are objects wrongly reported as being memory leaks | ||
| 107 | (orphan). For objects known not to be leaks, kmemleak provides the | ||
| 108 | kmemleak_not_leak function. The kmemleak_ignore could also be used if | ||
| 109 | the memory block is known not to contain other pointers and it will no | ||
| 110 | longer be scanned. | ||
| 111 | |||
| 112 | Some of the reported leaks are only transient, especially on SMP | ||
| 113 | systems, because of pointers temporarily stored in CPU registers or | ||
| 114 | stacks. Kmemleak defines MSECS_MIN_AGE (defaulting to 5000) representing | ||
| 115 | the minimum age of an object to be reported as a memory leak. | ||
| 116 | |||
| 117 | Limitations and Drawbacks | ||
| 118 | ------------------------- | ||
| 119 | |||
| 120 | The main drawback is the reduced performance of memory allocation and | ||
| 121 | freeing. To avoid other penalties, the memory scanning is only performed | ||
| 122 | when the /sys/kernel/debug/kmemleak file is read. Anyway, this tool is | ||
| 123 | intended for debugging purposes where the performance might not be the | ||
| 124 | most important requirement. | ||
| 125 | |||
| 126 | To keep the algorithm simple, kmemleak scans for values pointing to any | ||
| 127 | address inside a block's address range. This may lead to an increased | ||
| 128 | number of false negatives. However, it is likely that a real memory leak | ||
| 129 | will eventually become visible. | ||
| 130 | |||
| 131 | Another source of false negatives is the data stored in non-pointer | ||
| 132 | values. In a future version, kmemleak could scan only the pointer | ||
| 133 | members in the allocated structures. This feature would solve many of | ||
| 134 | the false negative cases described above. | ||
| 135 | |||
| 136 | The tool can report false positives. These are cases where an allocated | ||
| 137 | block doesn't need to be freed (some cases in the init_call functions), | ||
| 138 | the pointer is calculated by other methods than the usual container_of | ||
| 139 | macro or the pointer is stored in a location not scanned by kmemleak. | ||
| 140 | |||
| 141 | Page allocations and ioremap are not tracked. Only the ARM and x86 | ||
| 142 | architectures are currently supported. | ||
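As an aside for readers of the API list above, here is a minimal sketch of how a driver might annotate an allocation whose only reference ends up outside normally scanned memory. It is not part of this patch; the function and variable names are hypothetical, and only the kmemleak calls documented above are assumed.

  #include <linux/slab.h>
  #include <linux/kmemleak.h>

  static void *example_buf;               /* hypothetical driver state */

  static int example_setup(void)
  {
          example_buf = kmalloc(4096, GFP_KERNEL);
          if (!example_buf)
                  return -ENOMEM;

          /*
           * The only reference to the buffer is written to a device
           * register below, so no in-memory pointer remains for the
           * scanner to find; mark the block so it is not reported.
           */
          kmemleak_not_leak(example_buf);

          /* writel(virt_to_phys(example_buf), regs + BUF_ADDR); */
          return 0;
  }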
diff --git a/MAINTAINERS b/MAINTAINERS
index 70f961d43d9c..1a0084e22cf3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -3370,6 +3370,12 @@ F: Documentation/trace/kmemtrace.txt | |||
| 3370 | F: include/trace/kmemtrace.h | 3370 | F: include/trace/kmemtrace.h |
| 3371 | F: kernel/trace/kmemtrace.c | 3371 | F: kernel/trace/kmemtrace.c |
| 3372 | 3372 | ||
| 3373 | KMEMLEAK | ||
| 3374 | P: Catalin Marinas | ||
| 3375 | M: catalin.marinas@arm.com | ||
| 3376 | L: linux-kernel@vger.kernel.org | ||
| 3377 | S: Maintained | ||
| 3378 | |||
| 3373 | KPROBES | 3379 | KPROBES |
| 3374 | P: Ananth N Mavinakayanahalli | 3380 | P: Ananth N Mavinakayanahalli |
| 3375 | M: ananth@in.ibm.com | 3381 | M: ananth@in.ibm.com |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c796a86ab7f3..de9ebee8657b 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
| @@ -103,6 +103,7 @@ | |||
| 103 | #include <linux/io.h> | 103 | #include <linux/io.h> |
| 104 | #include <asm/system.h> | 104 | #include <asm/system.h> |
| 105 | #include <linux/uaccess.h> | 105 | #include <linux/uaccess.h> |
| 106 | #include <linux/kmemleak.h> | ||
| 106 | 107 | ||
| 107 | #define MAX_NR_CON_DRIVER 16 | 108 | #define MAX_NR_CON_DRIVER 16 |
| 108 | 109 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2dfc6cdcebbe..931f6b8c4b2f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
| 26 | #include <linux/namei.h> | 26 | #include <linux/namei.h> |
| 27 | #include <linux/log2.h> | 27 | #include <linux/log2.h> |
| 28 | #include <linux/kmemleak.h> | ||
| 28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
| 29 | #include "internal.h" | 30 | #include "internal.h" |
| 30 | 31 | ||
| @@ -492,6 +493,11 @@ void __init bdev_cache_init(void) | |||
| 492 | bd_mnt = kern_mount(&bd_type); | 493 | bd_mnt = kern_mount(&bd_type); |
| 493 | if (IS_ERR(bd_mnt)) | 494 | if (IS_ERR(bd_mnt)) |
| 494 | panic("Cannot create bdev pseudo-fs"); | 495 | panic("Cannot create bdev pseudo-fs"); |
| 496 | /* | ||
| 497 | * This vfsmount structure is only used to obtain the | ||
| 498 | * blockdev_superblock, so tell kmemleak not to report it. | ||
| 499 | */ | ||
| 500 | kmemleak_not_leak(bd_mnt); | ||
| 495 | blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ | 501 | blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ |
| 496 | } | 502 | } |
| 497 | 503 | ||
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
new file mode 100644
index 000000000000..7796aed6cdd5
--- /dev/null
+++ b/include/linux/kmemleak.h
| @@ -0,0 +1,96 @@ | |||
| 1 | /* | ||
| 2 | * include/linux/kmemleak.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 ARM Limited | ||
| 5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __KMEMLEAK_H | ||
| 22 | #define __KMEMLEAK_H | ||
| 23 | |||
| 24 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 25 | |||
| 26 | extern void kmemleak_init(void); | ||
| 27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
| 28 | gfp_t gfp); | ||
| 29 | extern void kmemleak_free(const void *ptr); | ||
| 30 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | ||
| 31 | size_t size); | ||
| 32 | extern void kmemleak_not_leak(const void *ptr); | ||
| 33 | extern void kmemleak_ignore(const void *ptr); | ||
| 34 | extern void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
| 35 | size_t length, gfp_t gfp); | ||
| 36 | extern void kmemleak_no_scan(const void *ptr); | ||
| 37 | |||
| 38 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
| 39 | int min_count, unsigned long flags, | ||
| 40 | gfp_t gfp) | ||
| 41 | { | ||
| 42 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
| 43 | kmemleak_alloc(ptr, size, min_count, gfp); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
| 47 | { | ||
| 48 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
| 49 | kmemleak_free(ptr); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void kmemleak_erase(void **ptr) | ||
| 53 | { | ||
| 54 | *ptr = NULL; | ||
| 55 | } | ||
| 56 | |||
| 57 | #else | ||
| 58 | |||
| 59 | static inline void kmemleak_init(void) | ||
| 60 | { | ||
| 61 | } | ||
| 62 | static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
| 63 | gfp_t gfp) | ||
| 64 | { | ||
| 65 | } | ||
| 66 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
| 67 | int min_count, unsigned long flags, | ||
| 68 | gfp_t gfp) | ||
| 69 | { | ||
| 70 | } | ||
| 71 | static inline void kmemleak_free(const void *ptr) | ||
| 72 | { | ||
| 73 | } | ||
| 74 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
| 75 | { | ||
| 76 | } | ||
| 77 | static inline void kmemleak_not_leak(const void *ptr) | ||
| 78 | { | ||
| 79 | } | ||
| 80 | static inline void kmemleak_ignore(const void *ptr) | ||
| 81 | { | ||
| 82 | } | ||
| 83 | static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
| 84 | size_t length, gfp_t gfp) | ||
| 85 | { | ||
| 86 | } | ||
| 87 | static inline void kmemleak_erase(void **ptr) | ||
| 88 | { | ||
| 89 | } | ||
| 90 | static inline void kmemleak_no_scan(const void *ptr) | ||
| 91 | { | ||
| 92 | } | ||
| 93 | |||
| 94 | #endif /* CONFIG_DEBUG_KMEMLEAK */ | ||
| 95 | |||
| 96 | #endif /* __KMEMLEAK_H */ | ||
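One point worth spelling out about the header above: allocators are meant to call the *_recursive wrappers rather than kmemleak_alloc()/kmemleak_free() directly, so that caches created with SLAB_NOLEAKTRACE (kmemleak's own metadata caches, for instance) are skipped and recursion into the tracker is avoided. Below is a rough sketch of the intended call pattern, with invented helper names; it is not taken from the slab patches themselves.

  /* cache_flags stands for the kmem_cache's flags (e.g. s->flags) */
  static void *example_alloc_hook(struct kmem_cache *cachep, size_t objsize,
                                  unsigned long cache_flags, gfp_t gfp)
  {
          void *ptr = kmem_cache_alloc(cachep, gfp);

          if (ptr)
                  /* becomes a no-op when SLAB_NOLEAKTRACE is set */
                  kmemleak_alloc_recursive(ptr, objsize, 1, cache_flags, gfp);
          return ptr;
  }

  static void example_free_hook(struct kmem_cache *cachep,
                                unsigned long cache_flags, void *ptr)
  {
          kmemleak_free_recursive(ptr, cache_flags);
          kmem_cache_free(cachep, ptr);
  }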
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1581ff235c7e..26fd9d12f050 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
| @@ -86,7 +86,12 @@ struct percpu_data { | |||
| 86 | void *ptrs[1]; | 86 | void *ptrs[1]; |
| 87 | }; | 87 | }; |
| 88 | 88 | ||
| 89 | /* pointer disguising messes up the kmemleak object tracking */ | ||
| 90 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
| 89 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | 91 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
| 92 | #else | ||
| 93 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
| 94 | #endif | ||
| 90 | 95 | ||
| 91 | #define per_cpu_ptr(ptr, cpu) \ | 96 | #define per_cpu_ptr(ptr, cpu) \ |
| 92 | ({ \ | 97 | ({ \ |
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 24c5602bee99..48803064cedf 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
| @@ -62,6 +62,8 @@ | |||
| 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL | 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL |
| 63 | #endif | 63 | #endif |
| 64 | 64 | ||
| 65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ | ||
| 66 | |||
| 65 | /* The following flags affect the page allocator grouping pages by mobility */ | 67 | /* The following flags affect the page allocator grouping pages by mobility */ |
| 66 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | 68 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
| 67 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ | 69 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
diff --git a/init/main.c b/init/main.c
index 7917695bf71e..5616661eac01 100644
--- a/init/main.c
+++ b/init/main.c
| @@ -56,6 +56,7 @@ | |||
| 56 | #include <linux/debug_locks.h> | 56 | #include <linux/debug_locks.h> |
| 57 | #include <linux/debugobjects.h> | 57 | #include <linux/debugobjects.h> |
| 58 | #include <linux/lockdep.h> | 58 | #include <linux/lockdep.h> |
| 59 | #include <linux/kmemleak.h> | ||
| 59 | #include <linux/pid_namespace.h> | 60 | #include <linux/pid_namespace.h> |
| 60 | #include <linux/device.h> | 61 | #include <linux/device.h> |
| 61 | #include <linux/kthread.h> | 62 | #include <linux/kthread.h> |
| @@ -621,6 +622,7 @@ asmlinkage void __init start_kernel(void) | |||
| 621 | /* init some links before init_ISA_irqs() */ | 622 | /* init some links before init_ISA_irqs() */ |
| 622 | early_irq_init(); | 623 | early_irq_init(); |
| 623 | init_IRQ(); | 624 | init_IRQ(); |
| 625 | prio_tree_init(); | ||
| 624 | init_timers(); | 626 | init_timers(); |
| 625 | hrtimers_init(); | 627 | hrtimers_init(); |
| 626 | softirq_init(); | 628 | softirq_init(); |
| @@ -667,6 +669,7 @@ asmlinkage void __init start_kernel(void) | |||
| 667 | enable_debug_pagealloc(); | 669 | enable_debug_pagealloc(); |
| 668 | cpu_hotplug_init(); | 670 | cpu_hotplug_init(); |
| 669 | kmemtrace_init(); | 671 | kmemtrace_init(); |
| 672 | kmemleak_init(); | ||
| 670 | debug_objects_mem_init(); | 673 | debug_objects_mem_init(); |
| 671 | idr_init_cache(); | 674 | idr_init_cache(); |
| 672 | setup_per_cpu_pageset(); | 675 | setup_per_cpu_pageset(); |
| @@ -676,7 +679,6 @@ asmlinkage void __init start_kernel(void) | |||
| 676 | calibrate_delay(); | 679 | calibrate_delay(); |
| 677 | pidmap_init(); | 680 | pidmap_init(); |
| 678 | pgtable_cache_init(); | 681 | pgtable_cache_init(); |
| 679 | prio_tree_init(); | ||
| 680 | anon_vma_init(); | 682 | anon_vma_init(); |
| 681 | #ifdef CONFIG_X86 | 683 | #ifdef CONFIG_X86 |
| 682 | if (efi_enabled) | 684 | if (efi_enabled) |
diff --git a/kernel/module.c b/kernel/module.c
index 278e9b6762bb..35f7de00bf0d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -53,6 +53,7 @@ | |||
| 53 | #include <linux/ftrace.h> | 53 | #include <linux/ftrace.h> |
| 54 | #include <linux/async.h> | 54 | #include <linux/async.h> |
| 55 | #include <linux/percpu.h> | 55 | #include <linux/percpu.h> |
| 56 | #include <linux/kmemleak.h> | ||
| 56 | 57 | ||
| 57 | #if 0 | 58 | #if 0 |
| 58 | #define DEBUGP printk | 59 | #define DEBUGP printk |
| @@ -433,6 +434,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align, | |||
| 433 | unsigned long extra; | 434 | unsigned long extra; |
| 434 | unsigned int i; | 435 | unsigned int i; |
| 435 | void *ptr; | 436 | void *ptr; |
| 437 | int cpu; | ||
| 436 | 438 | ||
| 437 | if (align > PAGE_SIZE) { | 439 | if (align > PAGE_SIZE) { |
| 438 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | 440 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", |
| @@ -462,6 +464,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align, | |||
| 462 | if (!split_block(i, size)) | 464 | if (!split_block(i, size)) |
| 463 | return NULL; | 465 | return NULL; |
| 464 | 466 | ||
| 467 | /* add the per-cpu scanning areas */ | ||
| 468 | for_each_possible_cpu(cpu) | ||
| 469 | kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0, | ||
| 470 | GFP_KERNEL); | ||
| 471 | |||
| 465 | /* Mark allocated */ | 472 | /* Mark allocated */ |
| 466 | pcpu_size[i] = -pcpu_size[i]; | 473 | pcpu_size[i] = -pcpu_size[i]; |
| 467 | return ptr; | 474 | return ptr; |
| @@ -476,6 +483,7 @@ static void percpu_modfree(void *freeme) | |||
| 476 | { | 483 | { |
| 477 | unsigned int i; | 484 | unsigned int i; |
| 478 | void *ptr = __per_cpu_start + block_size(pcpu_size[0]); | 485 | void *ptr = __per_cpu_start + block_size(pcpu_size[0]); |
| 486 | int cpu; | ||
| 479 | 487 | ||
| 480 | /* First entry is core kernel percpu data. */ | 488 | /* First entry is core kernel percpu data. */ |
| 481 | for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { | 489 | for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { |
| @@ -487,6 +495,10 @@ static void percpu_modfree(void *freeme) | |||
| 487 | BUG(); | 495 | BUG(); |
| 488 | 496 | ||
| 489 | free: | 497 | free: |
| 498 | /* remove the per-cpu scanning areas */ | ||
| 499 | for_each_possible_cpu(cpu) | ||
| 500 | kmemleak_free(freeme + per_cpu_offset(cpu)); | ||
| 501 | |||
| 490 | /* Merge with previous? */ | 502 | /* Merge with previous? */ |
| 491 | if (pcpu_size[i-1] >= 0) { | 503 | if (pcpu_size[i-1] >= 0) { |
| 492 | pcpu_size[i-1] += pcpu_size[i]; | 504 | pcpu_size[i-1] += pcpu_size[i]; |
| @@ -1879,6 +1891,36 @@ static void *module_alloc_update_bounds(unsigned long size) | |||
| 1879 | return ret; | 1891 | return ret; |
| 1880 | } | 1892 | } |
| 1881 | 1893 | ||
| 1894 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 1895 | static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, | ||
| 1896 | Elf_Shdr *sechdrs, char *secstrings) | ||
| 1897 | { | ||
| 1898 | unsigned int i; | ||
| 1899 | |||
| 1900 | /* only scan the sections containing data */ | ||
| 1901 | kmemleak_scan_area(mod->module_core, (unsigned long)mod - | ||
| 1902 | (unsigned long)mod->module_core, | ||
| 1903 | sizeof(struct module), GFP_KERNEL); | ||
| 1904 | |||
| 1905 | for (i = 1; i < hdr->e_shnum; i++) { | ||
| 1906 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | ||
| 1907 | continue; | ||
| 1908 | if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0 | ||
| 1909 | && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) | ||
| 1910 | continue; | ||
| 1911 | |||
| 1912 | kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - | ||
| 1913 | (unsigned long)mod->module_core, | ||
| 1914 | sechdrs[i].sh_size, GFP_KERNEL); | ||
| 1915 | } | ||
| 1916 | } | ||
| 1917 | #else | ||
| 1918 | static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, | ||
| 1919 | Elf_Shdr *sechdrs, char *secstrings) | ||
| 1920 | { | ||
| 1921 | } | ||
| 1922 | #endif | ||
| 1923 | |||
| 1882 | /* Allocate and load the module: note that size of section 0 is always | 1924 | /* Allocate and load the module: note that size of section 0 is always |
| 1883 | zero, and we rely on this for optional sections. */ | 1925 | zero, and we rely on this for optional sections. */ |
| 1884 | static noinline struct module *load_module(void __user *umod, | 1926 | static noinline struct module *load_module(void __user *umod, |
| @@ -2049,6 +2091,12 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2049 | 2091 | ||
| 2050 | /* Do the allocs. */ | 2092 | /* Do the allocs. */ |
| 2051 | ptr = module_alloc_update_bounds(mod->core_size); | 2093 | ptr = module_alloc_update_bounds(mod->core_size); |
| 2094 | /* | ||
| 2095 | * The pointer to this block is stored in the module structure | ||
| 2096 | * which is inside the block. Just mark it as not being a | ||
| 2097 | * leak. | ||
| 2098 | */ | ||
| 2099 | kmemleak_not_leak(ptr); | ||
| 2052 | if (!ptr) { | 2100 | if (!ptr) { |
| 2053 | err = -ENOMEM; | 2101 | err = -ENOMEM; |
| 2054 | goto free_percpu; | 2102 | goto free_percpu; |
| @@ -2057,6 +2105,13 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2057 | mod->module_core = ptr; | 2105 | mod->module_core = ptr; |
| 2058 | 2106 | ||
| 2059 | ptr = module_alloc_update_bounds(mod->init_size); | 2107 | ptr = module_alloc_update_bounds(mod->init_size); |
| 2108 | /* | ||
| 2109 | * The pointer to this block is stored in the module structure | ||
| 2110 | * which is inside the block. This block doesn't need to be | ||
| 2111 | * scanned as it contains data and code that will be freed | ||
| 2112 | * after the module is initialized. | ||
| 2113 | */ | ||
| 2114 | kmemleak_ignore(ptr); | ||
| 2060 | if (!ptr && mod->init_size) { | 2115 | if (!ptr && mod->init_size) { |
| 2061 | err = -ENOMEM; | 2116 | err = -ENOMEM; |
| 2062 | goto free_core; | 2117 | goto free_core; |
| @@ -2087,6 +2142,7 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2087 | } | 2142 | } |
| 2088 | /* Module has been moved. */ | 2143 | /* Module has been moved. */ |
| 2089 | mod = (void *)sechdrs[modindex].sh_addr; | 2144 | mod = (void *)sechdrs[modindex].sh_addr; |
| 2145 | kmemleak_load_module(mod, hdr, sechdrs, secstrings); | ||
| 2090 | 2146 | ||
| 2091 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | 2147 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) |
| 2092 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), | 2148 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), |
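kmemleak_load_module() above narrows scanning of the module core area to the struct module itself plus the .data and .bss sections. The same kmemleak_scan_area() idiom applies to any block where only a small region can legitimately hold pointers; the sketch below is purely illustrative (the struct and function names are invented, not part of this patch).

  struct fw_image {
          u8 blob[8192];          /* opaque firmware bytes, no pointers */
          void *refs[16];         /* the only pointer-bearing region */
  };

  static struct fw_image *example_fw_alloc(void)
  {
          struct fw_image *img = kmalloc(sizeof(*img), GFP_KERNEL);

          if (img)
                  /* scan only refs[], not the 8 KB of raw data */
                  kmemleak_scan_area(img, offsetof(struct fw_image, refs),
                                     sizeof(img->refs), GFP_KERNEL);
          return img;
  }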
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6cdcf38f2da9..116a35051be6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -336,6 +336,38 @@ config SLUB_STATS | |||
| 336 | out which slabs are relevant to a particular load. | 336 | out which slabs are relevant to a particular load. |
| 337 | Try running: slabinfo -DA | 337 | Try running: slabinfo -DA |
| 338 | 338 | ||
| 339 | config DEBUG_KMEMLEAK | ||
| 340 | bool "Kernel memory leak detector" | ||
| 341 | depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \ | ||
| 342 | !MEMORY_HOTPLUG | ||
| 343 | select DEBUG_SLAB if SLAB | ||
| 344 | select SLUB_DEBUG if SLUB | ||
| 345 | select DEBUG_FS if SYSFS | ||
| 346 | select STACKTRACE if STACKTRACE_SUPPORT | ||
| 347 | select KALLSYMS | ||
| 348 | help | ||
| 349 | Say Y here if you want to enable the memory leak | ||
| 350 | detector. The memory allocation/freeing is traced in a way | ||
| 351 | similar to Boehm's conservative garbage collector, the | ||
| 352 | difference being that the orphan objects are not freed but | ||
| 353 | only shown in /sys/kernel/debug/kmemleak. Enabling this | ||
| 354 | feature will introduce an overhead to memory | ||
| 355 | allocations. See Documentation/kmemleak.txt for more | ||
| 356 | details. | ||
| 357 | |||
| 358 | In order to access the kmemleak file, debugfs needs to be | ||
| 359 | mounted (usually at /sys/kernel/debug). | ||
| 360 | |||
| 361 | config DEBUG_KMEMLEAK_TEST | ||
| 362 | tristate "Simple test for the kernel memory leak detector" | ||
| 363 | depends on DEBUG_KMEMLEAK | ||
| 364 | help | ||
| 365 | Say Y or M here to build a test for the kernel memory leak | ||
| 366 | detector. This option enables a module that explicitly leaks | ||
| 367 | memory. | ||
| 368 | |||
| 369 | If unsure, say N. | ||
| 370 | |||
| 339 | config DEBUG_PREEMPT | 371 | config DEBUG_PREEMPT |
| 340 | bool "Debug preemptible kernel" | 372 | bool "Debug preemptible kernel" |
| 341 | depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) | 373 | depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) |
diff --git a/mm/Makefile b/mm/Makefile
index ec73c68b6015..e89acb090b4d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
| @@ -38,3 +38,5 @@ obj-$(CONFIG_SMP) += allocpercpu.o | |||
| 38 | endif | 38 | endif |
| 39 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 39 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
| 40 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o | 40 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o |
| 41 | obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o | ||
| 42 | obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o | ||
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
new file mode 100644
index 000000000000..d5292fc6f523
--- /dev/null
+++ b/mm/kmemleak-test.c
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * mm/kmemleak-test.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 ARM Limited | ||
| 5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/vmalloc.h> | ||
| 26 | #include <linux/list.h> | ||
| 27 | #include <linux/percpu.h> | ||
| 28 | #include <linux/fdtable.h> | ||
| 29 | |||
| 30 | #include <linux/kmemleak.h> | ||
| 31 | |||
| 32 | struct test_node { | ||
| 33 | long header[25]; | ||
| 34 | struct list_head list; | ||
| 35 | long footer[25]; | ||
| 36 | }; | ||
| 37 | |||
| 38 | static LIST_HEAD(test_list); | ||
| 39 | static DEFINE_PER_CPU(void *, test_pointer); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Some very simple testing. This function needs to be extended for | ||
| 43 | * proper testing. | ||
| 44 | */ | ||
| 45 | static int __init kmemleak_test_init(void) | ||
| 46 | { | ||
| 47 | struct test_node *elem; | ||
| 48 | int i; | ||
| 49 | |||
| 50 | printk(KERN_INFO "Kmemleak testing\n"); | ||
| 51 | |||
| 52 | /* make some orphan objects */ | ||
| 53 | pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); | ||
| 54 | pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); | ||
| 55 | pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); | ||
| 56 | pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); | ||
| 57 | pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); | ||
| 58 | pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); | ||
| 59 | pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); | ||
| 60 | pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); | ||
| 61 | #ifndef CONFIG_MODULES | ||
| 62 | pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", | ||
| 63 | kmem_cache_alloc(files_cachep, GFP_KERNEL)); | ||
| 64 | pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", | ||
| 65 | kmem_cache_alloc(files_cachep, GFP_KERNEL)); | ||
| 66 | #endif | ||
| 67 | pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); | ||
| 68 | pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); | ||
| 69 | pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); | ||
| 70 | pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); | ||
| 71 | pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Add elements to a list. They should only appear as orphan | ||
| 75 | * after the module is removed. | ||
| 76 | */ | ||
| 77 | for (i = 0; i < 10; i++) { | ||
| 78 | elem = kmalloc(sizeof(*elem), GFP_KERNEL); | ||
| 79 | pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem); | ||
| 80 | if (!elem) | ||
| 81 | return -ENOMEM; | ||
| 82 | memset(elem, 0, sizeof(*elem)); | ||
| 83 | INIT_LIST_HEAD(&elem->list); | ||
| 84 | |||
| 85 | list_add_tail(&elem->list, &test_list); | ||
| 86 | } | ||
| 87 | |||
| 88 | for_each_possible_cpu(i) { | ||
| 89 | per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL); | ||
| 90 | pr_info("kmemleak: kmalloc(129) = %p\n", | ||
| 91 | per_cpu(test_pointer, i)); | ||
| 92 | } | ||
| 93 | |||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | module_init(kmemleak_test_init); | ||
| 97 | |||
| 98 | static void __exit kmemleak_test_exit(void) | ||
| 99 | { | ||
| 100 | struct test_node *elem, *tmp; | ||
| 101 | |||
| 102 | /* | ||
| 103 | * Remove the list elements without actually freeing the | ||
| 104 | * memory. | ||
| 105 | */ | ||
| 106 | list_for_each_entry_safe(elem, tmp, &test_list, list) | ||
| 107 | list_del(&elem->list); | ||
| 108 | } | ||
| 109 | module_exit(kmemleak_test_exit); | ||
| 110 | |||
| 111 | MODULE_LICENSE("GPL"); | ||
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
new file mode 100644
index 000000000000..58ec86c9e58a
--- /dev/null
+++ b/mm/kmemleak.c
| @@ -0,0 +1,1498 @@ | |||
| 1 | /* | ||
| 2 | * mm/kmemleak.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 ARM Limited | ||
| 5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | * | ||
| 20 | * | ||
| 21 | * For more information on the algorithm and kmemleak usage, please see | ||
| 22 | * Documentation/kmemleak.txt. | ||
| 23 | * | ||
| 24 | * Notes on locking | ||
| 25 | * ---------------- | ||
| 26 | * | ||
| 27 | * The following locks and mutexes are used by kmemleak: | ||
| 28 | * | ||
| 29 | * - kmemleak_lock (rwlock): protects the object_list modifications and | ||
| 30 | * accesses to the object_tree_root. The object_list is the main list | ||
| 31 | * holding the metadata (struct kmemleak_object) for the allocated memory | ||
| 32 | * blocks. The object_tree_root is a priority search tree used to look-up | ||
| 33 | * metadata based on a pointer to the corresponding memory block. The | ||
| 34 | * kmemleak_object structures are added to the object_list and | ||
| 35 | * object_tree_root in the create_object() function called from the | ||
| 36 | * kmemleak_alloc() callback and removed in delete_object() called from the | ||
| 37 | * kmemleak_free() callback | ||
| 38 | * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to | ||
| 39 | * the metadata (e.g. count) are protected by this lock. Note that some | ||
| 40 | * members of this structure may be protected by other means (atomic or | ||
| 41 | * kmemleak_lock). This lock is also held when scanning the corresponding | ||
| 42 | * memory block to avoid the kernel freeing it via the kmemleak_free() | ||
| 43 | * callback. This is less heavyweight than holding a global lock like | ||
| 44 | * kmemleak_lock during scanning | ||
| 45 | * - scan_mutex (mutex): ensures that only one thread may scan the memory for | ||
| 46 | * unreferenced objects at a time. The gray_list contains the objects which | ||
| 47 | * are already referenced or marked as false positives and need to be | ||
| 48 | * scanned. This list is only modified during a scanning episode when the | ||
| 49 | * scan_mutex is held. At the end of a scan, the gray_list is always empty. | ||
| 50 | * Note that the kmemleak_object.use_count is incremented when an object is | ||
| 51 | * added to the gray_list and therefore cannot be freed | ||
| 52 | * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs | ||
| 53 | * file together with modifications to the memory scanning parameters | ||
| 54 | * including the scan_thread pointer | ||
| 55 | * | ||
| 56 | * The kmemleak_object structures have a use_count incremented or decremented | ||
| 57 | * using the get_object()/put_object() functions. When the use_count becomes | ||
| 58 | * 0, this count can no longer be incremented and put_object() schedules the | ||
| 59 | * kmemleak_object freeing via an RCU callback. All calls to the get_object() | ||
| 60 | * function must be protected by rcu_read_lock() to avoid accessing a freed | ||
| 61 | * structure. | ||
| 62 | */ | ||
| 63 | |||
| 64 | #include <linux/init.h> | ||
| 65 | #include <linux/kernel.h> | ||
| 66 | #include <linux/list.h> | ||
| 67 | #include <linux/sched.h> | ||
| 68 | #include <linux/jiffies.h> | ||
| 69 | #include <linux/delay.h> | ||
| 70 | #include <linux/module.h> | ||
| 71 | #include <linux/kthread.h> | ||
| 72 | #include <linux/prio_tree.h> | ||
| 73 | #include <linux/gfp.h> | ||
| 74 | #include <linux/fs.h> | ||
| 75 | #include <linux/debugfs.h> | ||
| 76 | #include <linux/seq_file.h> | ||
| 77 | #include <linux/cpumask.h> | ||
| 78 | #include <linux/spinlock.h> | ||
| 79 | #include <linux/mutex.h> | ||
| 80 | #include <linux/rcupdate.h> | ||
| 81 | #include <linux/stacktrace.h> | ||
| 82 | #include <linux/cache.h> | ||
| 83 | #include <linux/percpu.h> | ||
| 84 | #include <linux/hardirq.h> | ||
| 85 | #include <linux/mmzone.h> | ||
| 86 | #include <linux/slab.h> | ||
| 87 | #include <linux/thread_info.h> | ||
| 88 | #include <linux/err.h> | ||
| 89 | #include <linux/uaccess.h> | ||
| 90 | #include <linux/string.h> | ||
| 91 | #include <linux/nodemask.h> | ||
| 92 | #include <linux/mm.h> | ||
| 93 | |||
| 94 | #include <asm/sections.h> | ||
| 95 | #include <asm/processor.h> | ||
| 96 | #include <asm/atomic.h> | ||
| 97 | |||
| 98 | #include <linux/kmemleak.h> | ||
| 99 | |||
| 100 | /* | ||
| 101 | * Kmemleak configuration and common defines. | ||
| 102 | */ | ||
| 103 | #define MAX_TRACE 16 /* stack trace length */ | ||
| 104 | #define REPORTS_NR 50 /* maximum number of reported leaks */ | ||
| 105 | #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ | ||
| 106 | #define MSECS_SCAN_YIELD 10 /* CPU yielding period */ | ||
| 107 | #define SECS_FIRST_SCAN 60 /* delay before the first scan */ | ||
| 108 | #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ | ||
| 109 | |||
| 110 | #define BYTES_PER_POINTER sizeof(void *) | ||
| 111 | |||
| 112 | /* scanning area inside a memory block */ | ||
| 113 | struct kmemleak_scan_area { | ||
| 114 | struct hlist_node node; | ||
| 115 | unsigned long offset; | ||
| 116 | size_t length; | ||
| 117 | }; | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Structure holding the metadata for each allocated memory block. | ||
| 121 | * Modifications to such objects should be made while holding the | ||
| 122 | * object->lock. Insertions or deletions from object_list, gray_list or | ||
| 123 | * tree_node are already protected by the corresponding locks or mutex (see | ||
| 124 | * the notes on locking above). These objects are reference-counted | ||
| 125 | * (use_count) and freed using the RCU mechanism. | ||
| 126 | */ | ||
| 127 | struct kmemleak_object { | ||
| 128 | spinlock_t lock; | ||
| 129 | unsigned long flags; /* object status flags */ | ||
| 130 | struct list_head object_list; | ||
| 131 | struct list_head gray_list; | ||
| 132 | struct prio_tree_node tree_node; | ||
| 133 | struct rcu_head rcu; /* object_list lockless traversal */ | ||
| 134 | /* object usage count; object freed when use_count == 0 */ | ||
| 135 | atomic_t use_count; | ||
| 136 | unsigned long pointer; | ||
| 137 | size_t size; | ||
| 138 | /* minimum number of pointers found before the object is considered a leak */ | ||
| 139 | int min_count; | ||
| 140 | /* the total number of pointers found pointing to this object */ | ||
| 141 | int count; | ||
| 142 | /* memory ranges to be scanned inside an object (empty for all) */ | ||
| 143 | struct hlist_head area_list; | ||
| 144 | unsigned long trace[MAX_TRACE]; | ||
| 145 | unsigned int trace_len; | ||
| 146 | unsigned long jiffies; /* creation timestamp */ | ||
| 147 | pid_t pid; /* pid of the current task */ | ||
| 148 | char comm[TASK_COMM_LEN]; /* executable name */ | ||
| 149 | }; | ||
| 150 | |||
| 151 | /* flag representing the memory block allocation status */ | ||
| 152 | #define OBJECT_ALLOCATED (1 << 0) | ||
| 153 | /* flag set after the first reporting of an unreferenced object */ | ||
| 154 | #define OBJECT_REPORTED (1 << 1) | ||
| 155 | /* flag set to not scan the object */ | ||
| 156 | #define OBJECT_NO_SCAN (1 << 2) | ||
| 157 | |||
| 158 | /* the list of all allocated objects */ | ||
| 159 | static LIST_HEAD(object_list); | ||
| 160 | /* the list of gray-colored objects (see color_gray comment below) */ | ||
| 161 | static LIST_HEAD(gray_list); | ||
| 162 | /* prio search tree for object boundaries */ | ||
| 163 | static struct prio_tree_root object_tree_root; | ||
| 164 | /* rw_lock protecting the access to object_list and prio_tree_root */ | ||
| 165 | static DEFINE_RWLOCK(kmemleak_lock); | ||
| 166 | |||
| 167 | /* allocation caches for kmemleak internal data */ | ||
| 168 | static struct kmem_cache *object_cache; | ||
| 169 | static struct kmem_cache *scan_area_cache; | ||
| 170 | |||
| 171 | /* set if tracing memory operations is enabled */ | ||
| 172 | static atomic_t kmemleak_enabled = ATOMIC_INIT(0); | ||
| 173 | /* set in the late_initcall if there were no errors */ | ||
| 174 | static atomic_t kmemleak_initialized = ATOMIC_INIT(0); | ||
| 175 | /* enables or disables early logging of the memory operations */ | ||
| 176 | static atomic_t kmemleak_early_log = ATOMIC_INIT(1); | ||
| 177 | /* set if a fatal kmemleak error has occurred */ | ||
| 178 | static atomic_t kmemleak_error = ATOMIC_INIT(0); | ||
| 179 | |||
| 180 | /* minimum and maximum address that may be valid pointers */ | ||
| 181 | static unsigned long min_addr = ULONG_MAX; | ||
| 182 | static unsigned long max_addr; | ||
| 183 | |||
| 184 | /* used for yielding the CPU to other tasks during scanning */ | ||
| 185 | static unsigned long next_scan_yield; | ||
| 186 | static struct task_struct *scan_thread; | ||
| 187 | static unsigned long jiffies_scan_yield; | ||
| 188 | static unsigned long jiffies_min_age; | ||
| 189 | /* delay between automatic memory scannings */ | ||
| 190 | static signed long jiffies_scan_wait; | ||
| 191 | /* enables or disables the task stacks scanning */ | ||
| 192 | static int kmemleak_stack_scan; | ||
| 193 | /* mutex protecting the memory scanning */ | ||
| 194 | static DEFINE_MUTEX(scan_mutex); | ||
| 195 | /* mutex protecting the access to the /sys/kernel/debug/kmemleak file */ | ||
| 196 | static DEFINE_MUTEX(kmemleak_mutex); | ||
| 197 | |||
| 198 | /* number of leaks reported (for limitation purposes) */ | ||
| 199 | static int reported_leaks; | ||
| 200 | |||
| 201 | /* | ||
| 202 | * Early object allocation/freeing logging. Kmemleak is initialized after the | ||
| 203 | * kernel allocator. However, both the kernel allocator and kmemleak may | ||
| 204 | * allocate memory blocks which need to be tracked. Kmemleak defines an | ||
| 205 | * arbitrary buffer to hold the allocation/freeing information before it is | ||
| 206 | * fully initialized. | ||
| 207 | */ | ||
| 208 | |||
| 209 | /* kmemleak operation type for early logging */ | ||
| 210 | enum { | ||
| 211 | KMEMLEAK_ALLOC, | ||
| 212 | KMEMLEAK_FREE, | ||
| 213 | KMEMLEAK_NOT_LEAK, | ||
| 214 | KMEMLEAK_IGNORE, | ||
| 215 | KMEMLEAK_SCAN_AREA, | ||
| 216 | KMEMLEAK_NO_SCAN | ||
| 217 | }; | ||
| 218 | |||
| 219 | /* | ||
| 220 | * Structure holding the information passed to kmemleak callbacks during the | ||
| 221 | * early logging. | ||
| 222 | */ | ||
| 223 | struct early_log { | ||
| 224 | int op_type; /* kmemleak operation type */ | ||
| 225 | const void *ptr; /* allocated/freed memory block */ | ||
| 226 | size_t size; /* memory block size */ | ||
| 227 | int min_count; /* minimum reference count */ | ||
| 228 | unsigned long offset; /* scan area offset */ | ||
| 229 | size_t length; /* scan area length */ | ||
| 230 | }; | ||
| 231 | |||
| 232 | /* early logging buffer and current position */ | ||
| 233 | static struct early_log early_log[200]; | ||
| 234 | static int crt_early_log; | ||
| 235 | |||
| 236 | static void kmemleak_disable(void); | ||
| 237 | |||
| 238 | /* | ||
| 239 | * Print a warning and dump the stack trace. | ||
| 240 | */ | ||
| 241 | #define kmemleak_warn(x...) do { \ | ||
| 242 | pr_warning(x); \ | ||
| 243 | dump_stack(); \ | ||
| 244 | } while (0) | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Macro invoked when a serious kmemleak condition occurred and cannot be | ||
| 248 | * recovered from. Kmemleak will be disabled and further allocation/freeing | ||
| 249 | * tracing is no longer available. | ||
| 250 | */ | ||
| 251 | #define kmemleak_panic(x...) do { \ | ||
| 252 | kmemleak_warn(x); \ | ||
| 253 | kmemleak_disable(); \ | ||
| 254 | } while (0) | ||
| 255 | |||
| 256 | /* | ||
| 257 | * Object colors, encoded with count and min_count: | ||
| 258 | * - white - orphan object, not enough references to it (count < min_count) | ||
| 259 | * - gray - not orphan, not marked as false positive (min_count == 0) or | ||
| 260 | * sufficient references to it (count >= min_count) | ||
| 261 | * - black - ignore, it doesn't contain references (e.g. text section) | ||
| 262 | * (min_count == -1). No function defined for this color. | ||
| 263 | * Newly created objects don't have any color assigned (object->count == -1) | ||
| 264 | * before the next memory scan when they become white. | ||
| 265 | */ | ||
| 266 | static int color_white(const struct kmemleak_object *object) | ||
| 267 | { | ||
| 268 | return object->count != -1 && object->count < object->min_count; | ||
| 269 | } | ||
| 270 | |||
| 271 | static int color_gray(const struct kmemleak_object *object) | ||
| 272 | { | ||
| 273 | return object->min_count != -1 && object->count >= object->min_count; | ||
| 274 | } | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Objects are considered referenced if their color is gray and they have not | ||
| 278 | * been deleted. | ||
| 279 | */ | ||
| 280 | static int referenced_object(struct kmemleak_object *object) | ||
| 281 | { | ||
| 282 | return (object->flags & OBJECT_ALLOCATED) && color_gray(object); | ||
| 283 | } | ||
| 284 | |||
| 285 | /* | ||
| 286 | * Objects are considered unreferenced only if their color is white, they have | ||
| 287 | * not been deleted and have a minimum age to avoid false positives caused by | ||
| 288 | * pointers temporarily stored in CPU registers. | ||
| 289 | */ | ||
| 290 | static int unreferenced_object(struct kmemleak_object *object) | ||
| 291 | { | ||
| 292 | return (object->flags & OBJECT_ALLOCATED) && color_white(object) && | ||
| 293 | time_is_before_eq_jiffies(object->jiffies + jiffies_min_age); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * Printing of the (un)referenced objects information, either to the seq file | ||
| 298 | * or to the kernel log. The print_referenced/print_unreferenced functions | ||
| 299 | * must be called with the object->lock held. | ||
| 300 | */ | ||
| 301 | #define print_helper(seq, x...) do { \ | ||
| 302 | struct seq_file *s = (seq); \ | ||
| 303 | if (s) \ | ||
| 304 | seq_printf(s, x); \ | ||
| 305 | else \ | ||
| 306 | pr_info(x); \ | ||
| 307 | } while (0) | ||
| 308 | |||
| 309 | static void print_referenced(struct kmemleak_object *object) | ||
| 310 | { | ||
| 311 | pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n", | ||
| 312 | object->pointer, object->size); | ||
| 313 | } | ||
| 314 | |||
| 315 | static void print_unreferenced(struct seq_file *seq, | ||
| 316 | struct kmemleak_object *object) | ||
| 317 | { | ||
| 318 | int i; | ||
| 319 | |||
| 320 | print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n", | ||
| 321 | object->pointer, object->size); | ||
| 322 | print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n", | ||
| 323 | object->comm, object->pid, object->jiffies); | ||
| 324 | print_helper(seq, " backtrace:\n"); | ||
| 325 | |||
| 326 | for (i = 0; i < object->trace_len; i++) { | ||
| 327 | void *ptr = (void *)object->trace[i]; | ||
| 328 | print_helper(seq, " [<%p>] %pS\n", ptr, ptr); | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | /* | ||
| 333 | * Print the kmemleak_object information. This function is used mainly for | ||
| 334 | * debugging special cases of kmemleak operations. It must be called with | ||
| 335 | * the object->lock held. | ||
| 336 | */ | ||
| 337 | static void dump_object_info(struct kmemleak_object *object) | ||
| 338 | { | ||
| 339 | struct stack_trace trace; | ||
| 340 | |||
| 341 | trace.nr_entries = object->trace_len; | ||
| 342 | trace.entries = object->trace; | ||
| 343 | |||
| 344 | pr_notice("kmemleak: Object 0x%08lx (size %zu):\n", | ||
| 345 | object->tree_node.start, object->size); | ||
| 346 | pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", | ||
| 347 | object->comm, object->pid, object->jiffies); | ||
| 348 | pr_notice(" min_count = %d\n", object->min_count); | ||
| 349 | pr_notice(" count = %d\n", object->count); | ||
| 350 | pr_notice(" backtrace:\n"); | ||
| 351 | print_stack_trace(&trace, 4); | ||
| 352 | } | ||
| 353 | |||
| 354 | /* | ||
| 355 | * Look-up a memory block metadata (kmemleak_object) in the priority search | ||
| 356 | * tree based on a pointer value. If alias is 0, only values pointing to the | ||
| 357 | * beginning of the memory block are allowed. The kmemleak_lock must be held | ||
| 358 | * when calling this function. | ||
| 359 | */ | ||
| 360 | static struct kmemleak_object *lookup_object(unsigned long ptr, int alias) | ||
| 361 | { | ||
| 362 | struct prio_tree_node *node; | ||
| 363 | struct prio_tree_iter iter; | ||
| 364 | struct kmemleak_object *object; | ||
| 365 | |||
| 366 | prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr); | ||
| 367 | node = prio_tree_next(&iter); | ||
| 368 | if (node) { | ||
| 369 | object = prio_tree_entry(node, struct kmemleak_object, | ||
| 370 | tree_node); | ||
| 371 | if (!alias && object->pointer != ptr) { | ||
| 372 | kmemleak_warn("kmemleak: Found object by alias"); | ||
| 373 | object = NULL; | ||
| 374 | } | ||
| 375 | } else | ||
| 376 | object = NULL; | ||
| 377 | |||
| 378 | return object; | ||
| 379 | } | ||
| 380 | |||
| 381 | /* | ||
| 382 | * Increment the object use_count. Return 1 if successful or 0 otherwise. Note | ||
| 383 | * that once an object's use_count reached 0, the RCU freeing was already | ||
| 384 | * registered and the object should no longer be used. This function must be | ||
| 385 | * called under the protection of rcu_read_lock(). | ||
| 386 | */ | ||
| 387 | static int get_object(struct kmemleak_object *object) | ||
| 388 | { | ||
| 389 | return atomic_inc_not_zero(&object->use_count); | ||
| 390 | } | ||
| 391 | |||
| 392 | /* | ||
| 393 | * RCU callback to free a kmemleak_object. | ||
| 394 | */ | ||
| 395 | static void free_object_rcu(struct rcu_head *rcu) | ||
| 396 | { | ||
| 397 | struct hlist_node *elem, *tmp; | ||
| 398 | struct kmemleak_scan_area *area; | ||
| 399 | struct kmemleak_object *object = | ||
| 400 | container_of(rcu, struct kmemleak_object, rcu); | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Once use_count is 0 (guaranteed by put_object), there is no other | ||
| 404 | * code accessing this object, hence no need for locking. | ||
| 405 | */ | ||
| 406 | hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) { | ||
| 407 | hlist_del(elem); | ||
| 408 | kmem_cache_free(scan_area_cache, area); | ||
| 409 | } | ||
| 410 | kmem_cache_free(object_cache, object); | ||
| 411 | } | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Decrement the object use_count. Once the count is 0, free the object using | ||
| 415 | * an RCU callback. Since put_object() may be called via the kmemleak_free() -> | ||
| 416 | * delete_object() path, the delayed RCU freeing ensures that there is no | ||
| 417 | * recursive call to the kernel allocator. Lock-less RCU object_list traversal | ||
| 418 | * is also possible. | ||
| 419 | */ | ||
| 420 | static void put_object(struct kmemleak_object *object) | ||
| 421 | { | ||
| 422 | if (!atomic_dec_and_test(&object->use_count)) | ||
| 423 | return; | ||
| 424 | |||
| 425 | /* should only get here after delete_object was called */ | ||
| 426 | WARN_ON(object->flags & OBJECT_ALLOCATED); | ||
| 427 | |||
| 428 | call_rcu(&object->rcu, free_object_rcu); | ||
| 429 | } | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Look up an object in the prio search tree and increase its use_count. | ||
| 433 | */ | ||
| 434 | static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) | ||
| 435 | { | ||
| 436 | unsigned long flags; | ||
| 437 | struct kmemleak_object *object = NULL; | ||
| 438 | |||
| 439 | rcu_read_lock(); | ||
| 440 | read_lock_irqsave(&kmemleak_lock, flags); | ||
| 441 | if (ptr >= min_addr && ptr < max_addr) | ||
| 442 | object = lookup_object(ptr, alias); | ||
| 443 | read_unlock_irqrestore(&kmemleak_lock, flags); | ||
| 444 | |||
| 445 | /* check whether the object is still available */ | ||
| 446 | if (object && !get_object(object)) | ||
| 447 | object = NULL; | ||
| 448 | rcu_read_unlock(); | ||
| 449 | |||
| 450 | return object; | ||
| 451 | } | ||
| 452 | |||
| 453 | /* | ||
| 454 | * Create the metadata (struct kmemleak_object) corresponding to an allocated | ||
| 455 | * memory block and add it to the object_list and object_tree_root. | ||
| 456 | */ | ||
| 457 | static void create_object(unsigned long ptr, size_t size, int min_count, | ||
| 458 | gfp_t gfp) | ||
| 459 | { | ||
| 460 | unsigned long flags; | ||
| 461 | struct kmemleak_object *object; | ||
| 462 | struct prio_tree_node *node; | ||
| 463 | struct stack_trace trace; | ||
| 464 | |||
| 465 | object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK); | ||
| 466 | if (!object) { | ||
| 467 | kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object " | ||
| 468 | "structure\n"); | ||
| 469 | return; | ||
| 470 | } | ||
| 471 | |||
| 472 | INIT_LIST_HEAD(&object->object_list); | ||
| 473 | INIT_LIST_HEAD(&object->gray_list); | ||
| 474 | INIT_HLIST_HEAD(&object->area_list); | ||
| 475 | spin_lock_init(&object->lock); | ||
| 476 | atomic_set(&object->use_count, 1); | ||
| 477 | object->flags = OBJECT_ALLOCATED; | ||
| 478 | object->pointer = ptr; | ||
| 479 | object->size = size; | ||
| 480 | object->min_count = min_count; | ||
| 481 | object->count = -1; /* no color initially */ | ||
| 482 | object->jiffies = jiffies; | ||
| 483 | |||
| 484 | /* task information */ | ||
| 485 | if (in_irq()) { | ||
| 486 | object->pid = 0; | ||
| 487 | strncpy(object->comm, "hardirq", sizeof(object->comm)); | ||
| 488 | } else if (in_softirq()) { | ||
| 489 | object->pid = 0; | ||
| 490 | strncpy(object->comm, "softirq", sizeof(object->comm)); | ||
| 491 | } else { | ||
| 492 | object->pid = current->pid; | ||
| 493 | /* | ||
| 494 | * There is a small chance of a race with set_task_comm(), | ||
| 495 | * however using get_task_comm() here may cause locking | ||
| 496 | * dependency issues with current->alloc_lock. In the worst | ||
| 497 | * case, the command line is not correct. | ||
| 498 | */ | ||
| 499 | strncpy(object->comm, current->comm, sizeof(object->comm)); | ||
| 500 | } | ||
| 501 | |||
| 502 | /* kernel backtrace */ | ||
| 503 | trace.max_entries = MAX_TRACE; | ||
| 504 | trace.nr_entries = 0; | ||
| 505 | trace.entries = object->trace; | ||
| 506 | trace.skip = 1; | ||
| 507 | save_stack_trace(&trace); | ||
| 508 | object->trace_len = trace.nr_entries; | ||
| 509 | |||
| 510 | INIT_PRIO_TREE_NODE(&object->tree_node); | ||
| 511 | object->tree_node.start = ptr; | ||
| 512 | object->tree_node.last = ptr + size - 1; | ||
| 513 | |||
| 514 | write_lock_irqsave(&kmemleak_lock, flags); | ||
| 515 | min_addr = min(min_addr, ptr); | ||
| 516 | max_addr = max(max_addr, ptr + size); | ||
| 517 | node = prio_tree_insert(&object_tree_root, &object->tree_node); | ||
| 518 | /* | ||
| 519 | * The code requesting this allocation does not yet have the pointer | ||
| 520 | * to the memory block, so it cannot free it. However, we still hold | ||
| 521 | * the kmemleak_lock here in case parts of the kernel started freeing | ||
| 522 | * random memory blocks. | ||
| 523 | */ | ||
| 524 | if (node != &object->tree_node) { | ||
| 525 | unsigned long flags; | ||
| 526 | |||
| 527 | kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object " | ||
| 528 | "search tree (already existing)\n", ptr); | ||
| 529 | object = lookup_object(ptr, 1); | ||
| 530 | spin_lock_irqsave(&object->lock, flags); | ||
| 531 | dump_object_info(object); | ||
| 532 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 533 | |||
| 534 | goto out; | ||
| 535 | } | ||
| 536 | list_add_tail_rcu(&object->object_list, &object_list); | ||
| 537 | out: | ||
| 538 | write_unlock_irqrestore(&kmemleak_lock, flags); | ||
| 539 | } | ||
| 540 | |||
| 541 | /* | ||
| 542 | * Remove the metadata (struct kmemleak_object) for a memory block from the | ||
| 543 | * object_list and object_tree_root and decrement its use_count. | ||
| 544 | */ | ||
| 545 | static void delete_object(unsigned long ptr) | ||
| 546 | { | ||
| 547 | unsigned long flags; | ||
| 548 | struct kmemleak_object *object; | ||
| 549 | |||
| 550 | write_lock_irqsave(&kmemleak_lock, flags); | ||
| 551 | object = lookup_object(ptr, 0); | ||
| 552 | if (!object) { | ||
| 553 | kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n", | ||
| 554 | ptr); | ||
| 555 | write_unlock_irqrestore(&kmemleak_lock, flags); | ||
| 556 | return; | ||
| 557 | } | ||
| 558 | prio_tree_remove(&object_tree_root, &object->tree_node); | ||
| 559 | list_del_rcu(&object->object_list); | ||
| 560 | write_unlock_irqrestore(&kmemleak_lock, flags); | ||
| 561 | |||
| 562 | WARN_ON(!(object->flags & OBJECT_ALLOCATED)); | ||
| 563 | WARN_ON(atomic_read(&object->use_count) < 1); | ||
| 564 | |||
| 565 | /* | ||
| 566 | * Locking here also ensures that the corresponding memory block | ||
| 567 | * cannot be freed when it is being scanned. | ||
| 568 | */ | ||
| 569 | spin_lock_irqsave(&object->lock, flags); | ||
| 570 | if (object->flags & OBJECT_REPORTED) | ||
| 571 | print_referenced(object); | ||
| 572 | object->flags &= ~OBJECT_ALLOCATED; | ||
| 573 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 574 | put_object(object); | ||
| 575 | } | ||
| 576 | |||
| 577 | /* | ||
| 578 | * Mark an object permanently as gray-colored so that it can no longer be | ||
| 579 | * reported as a leak. This is used in general to mark a false positive. | ||
| 580 | */ | ||
| 581 | static void make_gray_object(unsigned long ptr) | ||
| 582 | { | ||
| 583 | unsigned long flags; | ||
| 584 | struct kmemleak_object *object; | ||
| 585 | |||
| 586 | object = find_and_get_object(ptr, 0); | ||
| 587 | if (!object) { | ||
| 588 | kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n", | ||
| 589 | ptr); | ||
| 590 | return; | ||
| 591 | } | ||
| 592 | |||
| 593 | spin_lock_irqsave(&object->lock, flags); | ||
| 594 | object->min_count = 0; | ||
| 595 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 596 | put_object(object); | ||
| 597 | } | ||
| 598 | |||
| 599 | /* | ||
| 600 | * Mark the object as black-colored so that it is ignored from scans and | ||
| 601 | * reporting. | ||
| 602 | */ | ||
| 603 | static void make_black_object(unsigned long ptr) | ||
| 604 | { | ||
| 605 | unsigned long flags; | ||
| 606 | struct kmemleak_object *object; | ||
| 607 | |||
| 608 | object = find_and_get_object(ptr, 0); | ||
| 609 | if (!object) { | ||
| 610 | kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n", | ||
| 611 | ptr); | ||
| 612 | return; | ||
| 613 | } | ||
| 614 | |||
| 615 | spin_lock_irqsave(&object->lock, flags); | ||
| 616 | object->min_count = -1; | ||
| 617 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 618 | put_object(object); | ||
| 619 | } | ||
| 620 | |||
| 621 | /* | ||
| 622 | * Add a scanning area to the object. If at least one such area is added, | ||
| 623 | * kmemleak will only scan these ranges rather than the whole memory block. | ||
| 624 | */ | ||
| 625 | static void add_scan_area(unsigned long ptr, unsigned long offset, | ||
| 626 | size_t length, gfp_t gfp) | ||
| 627 | { | ||
| 628 | unsigned long flags; | ||
| 629 | struct kmemleak_object *object; | ||
| 630 | struct kmemleak_scan_area *area; | ||
| 631 | |||
| 632 | object = find_and_get_object(ptr, 0); | ||
| 633 | if (!object) { | ||
| 634 | kmemleak_warn("kmemleak: Adding scan area to unknown " | ||
| 635 | "object at 0x%08lx\n", ptr); | ||
| 636 | return; | ||
| 637 | } | ||
| 638 | |||
| 639 | area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK); | ||
| 640 | if (!area) { | ||
| 641 | kmemleak_warn("kmemleak: Cannot allocate a scan area\n"); | ||
| 642 | goto out; | ||
| 643 | } | ||
| 644 | |||
| 645 | spin_lock_irqsave(&object->lock, flags); | ||
| 646 | if (offset + length > object->size) { | ||
| 647 | kmemleak_warn("kmemleak: Scan area larger than object " | ||
| 648 | "0x%08lx\n", ptr); | ||
| 649 | dump_object_info(object); | ||
| 650 | kmem_cache_free(scan_area_cache, area); | ||
| 651 | goto out_unlock; | ||
| 652 | } | ||
| 653 | |||
| 654 | INIT_HLIST_NODE(&area->node); | ||
| 655 | area->offset = offset; | ||
| 656 | area->length = length; | ||
| 657 | |||
| 658 | hlist_add_head(&area->node, &object->area_list); | ||
| 659 | out_unlock: | ||
| 660 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 661 | out: | ||
| 662 | put_object(object); | ||
| 663 | } | ||
| 664 | |||
| 665 | /* | ||
| 666 | * Set the OBJECT_NO_SCAN flag for the object corresponding to the given | ||
| 667 | * pointer. Such an object will not be scanned by kmemleak but references | ||
| 668 | * to it are still searched. | ||
| 669 | */ | ||
| 670 | static void object_no_scan(unsigned long ptr) | ||
| 671 | { | ||
| 672 | unsigned long flags; | ||
| 673 | struct kmemleak_object *object; | ||
| 674 | |||
| 675 | object = find_and_get_object(ptr, 0); | ||
| 676 | if (!object) { | ||
| 677 | kmemleak_warn("kmemleak: Not scanning unknown object at " | ||
| 678 | "0x%08lx\n", ptr); | ||
| 679 | return; | ||
| 680 | } | ||
| 681 | |||
| 682 | spin_lock_irqsave(&object->lock, flags); | ||
| 683 | object->flags |= OBJECT_NO_SCAN; | ||
| 684 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 685 | put_object(object); | ||
| 686 | } | ||
| 687 | |||
| 688 | /* | ||
| 689 | * Log an early kmemleak_* call to the early_log buffer. These calls will be | ||
| 690 | * processed later once kmemleak is fully initialized. | ||
| 691 | */ | ||
| 692 | static void log_early(int op_type, const void *ptr, size_t size, | ||
| 693 | int min_count, unsigned long offset, size_t length) | ||
| 694 | { | ||
| 695 | unsigned long flags; | ||
| 696 | struct early_log *log; | ||
| 697 | |||
| 698 | if (crt_early_log >= ARRAY_SIZE(early_log)) { | ||
| 699 | kmemleak_panic("kmemleak: Early log buffer exceeded\n"); | ||
| 700 | return; | ||
| 701 | } | ||
| 702 | |||
| 703 | /* | ||
| 704 | * There is no need for locking since the kernel is still in UP mode | ||
| 705 | * at this stage. Disabling the IRQs is enough. | ||
| 706 | */ | ||
| 707 | local_irq_save(flags); | ||
| 708 | log = &early_log[crt_early_log]; | ||
| 709 | log->op_type = op_type; | ||
| 710 | log->ptr = ptr; | ||
| 711 | log->size = size; | ||
| 712 | log->min_count = min_count; | ||
| 713 | log->offset = offset; | ||
| 714 | log->length = length; | ||
| 715 | crt_early_log++; | ||
| 716 | local_irq_restore(flags); | ||
| 717 | } | ||
| 718 | |||
| 719 | /* | ||
| 720 | * Memory allocation function callback. This function is called from the | ||
| 721 | * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc, | ||
| 722 | * vmalloc etc.). | ||
| 723 | */ | ||
| 724 | void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) | ||
| 725 | { | ||
| 726 | pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); | ||
| 727 | |||
| 728 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 729 | create_object((unsigned long)ptr, size, min_count, gfp); | ||
| 730 | else if (atomic_read(&kmemleak_early_log)) | ||
| 731 | log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0); | ||
| 732 | } | ||
| 733 | EXPORT_SYMBOL_GPL(kmemleak_alloc); | ||
| 734 | |||
| 735 | /* | ||
| 736 | * Memory freeing function callback. This function is called from the kernel | ||
| 737 | * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.). | ||
| 738 | */ | ||
| 739 | void kmemleak_free(const void *ptr) | ||
| 740 | { | ||
| 741 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 742 | |||
| 743 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 744 | delete_object((unsigned long)ptr); | ||
| 745 | else if (atomic_read(&kmemleak_early_log)) | ||
| 746 | log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); | ||
| 747 | } | ||
| 748 | EXPORT_SYMBOL_GPL(kmemleak_free); | ||
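/*
 * Editorial sketch (not part of this patch): a custom allocator that hands
 * out memory kmemleak cannot see by itself (e.g. whole pages) can pair the
 * two callbacks above. pool_alloc()/pool_free() are hypothetical names.
 */
static void *pool_alloc(size_t size, gfp_t gfp)
{
	void *ptr = (void *)__get_free_pages(gfp, get_order(size));

	if (ptr)
		/* min_count = 1: report the block if no reference is found */
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void pool_free(void *ptr, size_t size)
{
	kmemleak_free(ptr);
	free_pages((unsigned long)ptr, get_order(size));
}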
| 749 | |||
| 750 | /* | ||
| 751 | * Mark an already allocated memory block as a false positive. This will cause | ||
| 752 | * the block to no longer be reported as a leak and to always be scanned. | ||
| 753 | */ | ||
| 754 | void kmemleak_not_leak(const void *ptr) | ||
| 755 | { | ||
| 756 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 757 | |||
| 758 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 759 | make_gray_object((unsigned long)ptr); | ||
| 760 | else if (atomic_read(&kmemleak_early_log)) | ||
| 761 | log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0); | ||
| 762 | } | ||
| 763 | EXPORT_SYMBOL(kmemleak_not_leak); | ||
| 764 | |||
| 765 | /* | ||
| 766 | * Ignore a memory block. This is usually done when it is known that the | ||
| 767 | * corresponding block is not a leak and does not contain any references to | ||
| 768 | * other allocated memory blocks. | ||
| 769 | */ | ||
| 770 | void kmemleak_ignore(const void *ptr) | ||
| 771 | { | ||
| 772 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 773 | |||
| 774 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 775 | make_black_object((unsigned long)ptr); | ||
| 776 | else if (atomic_read(&kmemleak_early_log)) | ||
| 777 | log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0); | ||
| 778 | } | ||
| 779 | EXPORT_SYMBOL(kmemleak_ignore); | ||
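/*
 * Editorial sketch (not part of this patch) contrasting the two annotations
 * above; my_dev_init(), my_hw_program() and the buffer layout are
 * hypothetical. A block marked with kmemleak_not_leak() is still scanned
 * for references to other objects, while kmemleak_ignore() also excludes
 * it from scanning.
 */
static int my_dev_init(void)
{
	/* table of kernel pointers handed to the device by physical address */
	void **ptab = kmalloc(64 * sizeof(void *), GFP_KERNEL);
	/* raw sample buffer that never contains kernel pointers */
	u8 *raw = kmalloc(4096, GFP_KERNEL);

	if (!ptab || !raw) {
		kfree(ptab);
		kfree(raw);
		return -ENOMEM;
	}

	my_hw_program(virt_to_phys(ptab), virt_to_phys(raw));

	kmemleak_not_leak(ptab);	/* known false positive, keep scanning it */
	kmemleak_ignore(raw);		/* neither reported nor scanned */
	return 0;
}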
| 780 | |||
| 781 | /* | ||
| 782 | * Limit the range to be scanned in an allocated memory block. | ||
| 783 | */ | ||
| 784 | void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length, | ||
| 785 | gfp_t gfp) | ||
| 786 | { | ||
| 787 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 788 | |||
| 789 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 790 | add_scan_area((unsigned long)ptr, offset, length, gfp); | ||
| 791 | else if (atomic_read(&kmemleak_early_log)) | ||
| 792 | log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length); | ||
| 793 | } | ||
| 794 | EXPORT_SYMBOL(kmemleak_scan_area); | ||
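/*
 * Editorial sketch (not part of this patch), mirroring the slab use of this
 * hook later in the commit: only the field that can legitimately hold
 * pointers is scanned, so stale data in the payload cannot create false
 * references. struct my_node and my_node_alloc() are hypothetical.
 */
struct my_node {
	u8 payload[512];		/* raw data, no kernel pointers */
	struct list_head list;		/* the only field holding pointers */
};

static struct my_node *my_node_alloc(gfp_t gfp)
{
	struct my_node *node = kmalloc(sizeof(*node), gfp);

	if (node)
		kmemleak_scan_area(node, offsetof(struct my_node, list),
				   sizeof(struct list_head), gfp);
	return node;
}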
| 795 | |||
| 796 | /* | ||
| 797 | * Inform kmemleak not to scan the given memory block. | ||
| 798 | */ | ||
| 799 | void kmemleak_no_scan(const void *ptr) | ||
| 800 | { | ||
| 801 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 802 | |||
| 803 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 804 | object_no_scan((unsigned long)ptr); | ||
| 805 | else if (atomic_read(&kmemleak_early_log)) | ||
| 806 | log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0); | ||
| 807 | } | ||
| 808 | EXPORT_SYMBOL(kmemleak_no_scan); | ||
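/*
 * Editorial sketch (not part of this patch), similar to the array_cache
 * handling in the slab hunk of this commit: a block whose slots may keep
 * stale pointers to already handed-out objects is excluded from scanning
 * so those pointers are not counted as references. The names are
 * hypothetical.
 */
struct my_cache {
	unsigned int avail;
	void *slots[64];	/* may keep stale pointers to freed objects */
};

static struct my_cache *my_cache_alloc(gfp_t gfp)
{
	struct my_cache *cache = kmalloc(sizeof(*cache), gfp);

	if (cache) {
		cache->avail = 0;
		kmemleak_no_scan(cache);
	}
	return cache;
}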
| 809 | |||
| 810 | /* | ||
| 811 | * Yield the CPU so that other tasks get a chance to run. The yielding is | ||
| 812 | * rate-limited to avoid an excessive number of calls to the schedule() | ||
| 813 | * function during memory scanning. | ||
| 814 | */ | ||
| 815 | static void scan_yield(void) | ||
| 816 | { | ||
| 817 | might_sleep(); | ||
| 818 | |||
| 819 | if (time_is_before_eq_jiffies(next_scan_yield)) { | ||
| 820 | schedule(); | ||
| 821 | next_scan_yield = jiffies + jiffies_scan_yield; | ||
| 822 | } | ||
| 823 | } | ||
| 824 | |||
| 825 | /* | ||
| 826 | * Memory scanning is a long process and it needs to be interruptible. This | ||
| 827 | * function checks whether such an interrupt condition occurred. | ||
| 828 | */ | ||
| 829 | static int scan_should_stop(void) | ||
| 830 | { | ||
| 831 | if (!atomic_read(&kmemleak_enabled)) | ||
| 832 | return 1; | ||
| 833 | |||
| 834 | /* | ||
| 835 | * This function may be called from either process or kthread context, | ||
| 836 | * hence the need to check for both stop conditions. | ||
| 837 | */ | ||
| 838 | if (current->mm) | ||
| 839 | return signal_pending(current); | ||
| 840 | else | ||
| 841 | return kthread_should_stop(); | ||
| 842 | |||
| 843 | return 0; | ||
| 844 | } | ||
| 845 | |||
| 846 | /* | ||
| 847 | * Scan a memory block (exclusive range) for valid pointers and add those | ||
| 848 | * found to the gray list. | ||
| 849 | */ | ||
| 850 | static void scan_block(void *_start, void *_end, | ||
| 851 | struct kmemleak_object *scanned) | ||
| 852 | { | ||
| 853 | unsigned long *ptr; | ||
| 854 | unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); | ||
| 855 | unsigned long *end = _end - (BYTES_PER_POINTER - 1); | ||
| 856 | |||
| 857 | for (ptr = start; ptr < end; ptr++) { | ||
| 858 | unsigned long flags; | ||
| 859 | unsigned long pointer = *ptr; | ||
| 860 | struct kmemleak_object *object; | ||
| 861 | |||
| 862 | if (scan_should_stop()) | ||
| 863 | break; | ||
| 864 | |||
| 865 | /* | ||
| 866 | * When scanning a memory block with a corresponding | ||
| 867 | * kmemleak_object, the CPU yielding is handled in the calling | ||
| 868 | * code since it holds the object->lock to prevent the block | ||
| 869 | * from being freed. | ||
| 870 | */ | ||
| 871 | if (!scanned) | ||
| 872 | scan_yield(); | ||
| 873 | |||
| 874 | object = find_and_get_object(pointer, 1); | ||
| 875 | if (!object) | ||
| 876 | continue; | ||
| 877 | if (object == scanned) { | ||
| 878 | /* self referenced, ignore */ | ||
| 879 | put_object(object); | ||
| 880 | continue; | ||
| 881 | } | ||
| 882 | |||
| 883 | /* | ||
| 884 | * Avoid the lockdep recursive warning on object->lock being | ||
| 885 | * previously acquired in scan_object(). These locks are | ||
| 886 | * enclosed by scan_mutex. | ||
| 887 | */ | ||
| 888 | spin_lock_irqsave_nested(&object->lock, flags, | ||
| 889 | SINGLE_DEPTH_NESTING); | ||
| 890 | if (!color_white(object)) { | ||
| 891 | /* non-orphan, ignored or new */ | ||
| 892 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 893 | put_object(object); | ||
| 894 | continue; | ||
| 895 | } | ||
| 896 | |||
| 897 | /* | ||
| 898 | * Increase the object's reference count (number of pointers | ||
| 899 | * to the memory block). If this count reaches the required | ||
| 900 | * minimum, the object's color will become gray and it will be | ||
| 901 | * added to the gray_list. | ||
| 902 | */ | ||
| 903 | object->count++; | ||
| 904 | if (color_gray(object)) | ||
| 905 | list_add_tail(&object->gray_list, &gray_list); | ||
| 906 | else | ||
| 907 | put_object(object); | ||
| 908 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 909 | } | ||
| 910 | } | ||
| 911 | |||
| 912 | /* | ||
| 913 | * Scan a memory block corresponding to a kmemleak_object. The caller must | ||
| 914 | * hold a reference, i.e. object->use_count >= 1. | ||
| 915 | */ | ||
| 916 | static void scan_object(struct kmemleak_object *object) | ||
| 917 | { | ||
| 918 | struct kmemleak_scan_area *area; | ||
| 919 | struct hlist_node *elem; | ||
| 920 | unsigned long flags; | ||
| 921 | |||
| 922 | /* | ||
| 923 | * Once the object->lock is acquired, the corresponding memory block | ||
| 924 | * cannot be freed (the same lock is acquired in delete_object). | ||
| 925 | */ | ||
| 926 | spin_lock_irqsave(&object->lock, flags); | ||
| 927 | if (object->flags & OBJECT_NO_SCAN) | ||
| 928 | goto out; | ||
| 929 | if (!(object->flags & OBJECT_ALLOCATED)) | ||
| 930 | /* already freed object */ | ||
| 931 | goto out; | ||
| 932 | if (hlist_empty(&object->area_list)) | ||
| 933 | scan_block((void *)object->pointer, | ||
| 934 | (void *)(object->pointer + object->size), object); | ||
| 935 | else | ||
| 936 | hlist_for_each_entry(area, elem, &object->area_list, node) | ||
| 937 | scan_block((void *)(object->pointer + area->offset), | ||
| 938 | (void *)(object->pointer + area->offset | ||
| 939 | + area->length), object); | ||
| 940 | out: | ||
| 941 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 942 | } | ||
| 943 | |||
| 944 | /* | ||
| 945 | * Scan data sections and all the referenced memory blocks allocated via the | ||
| 946 | * kernel's standard allocators. This function must be called with the | ||
| 947 | * scan_mutex held. | ||
| 948 | */ | ||
| 949 | static void kmemleak_scan(void) | ||
| 950 | { | ||
| 951 | unsigned long flags; | ||
| 952 | struct kmemleak_object *object, *tmp; | ||
| 953 | struct task_struct *task; | ||
| 954 | int i; | ||
| 955 | |||
| 956 | /* prepare the kmemleak_object structures */ | ||
| 957 | rcu_read_lock(); | ||
| 958 | list_for_each_entry_rcu(object, &object_list, object_list) { | ||
| 959 | spin_lock_irqsave(&object->lock, flags); | ||
| 960 | #ifdef DEBUG | ||
| 961 | /* | ||
| 962 | * With a few exceptions there should be a maximum of | ||
| 963 | * 1 reference to any object at this point. | ||
| 964 | */ | ||
| 965 | if (atomic_read(&object->use_count) > 1) { | ||
| 966 | pr_debug("kmemleak: object->use_count = %d\n", | ||
| 967 | atomic_read(&object->use_count)); | ||
| 968 | dump_object_info(object); | ||
| 969 | } | ||
| 970 | #endif | ||
| 971 | /* reset the reference count (whiten the object) */ | ||
| 972 | object->count = 0; | ||
| 973 | if (color_gray(object) && get_object(object)) | ||
| 974 | list_add_tail(&object->gray_list, &gray_list); | ||
| 975 | |||
| 976 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 977 | } | ||
| 978 | rcu_read_unlock(); | ||
| 979 | |||
| 980 | /* data/bss scanning */ | ||
| 981 | scan_block(_sdata, _edata, NULL); | ||
| 982 | scan_block(__bss_start, __bss_stop, NULL); | ||
| 983 | |||
| 984 | #ifdef CONFIG_SMP | ||
| 985 | /* per-cpu sections scanning */ | ||
| 986 | for_each_possible_cpu(i) | ||
| 987 | scan_block(__per_cpu_start + per_cpu_offset(i), | ||
| 988 | __per_cpu_end + per_cpu_offset(i), NULL); | ||
| 989 | #endif | ||
| 990 | |||
| 991 | /* | ||
| 992 | * Struct page scanning for each node. The code below is not yet safe | ||
| 993 | * with MEMORY_HOTPLUG. | ||
| 994 | */ | ||
| 995 | for_each_online_node(i) { | ||
| 996 | pg_data_t *pgdat = NODE_DATA(i); | ||
| 997 | unsigned long start_pfn = pgdat->node_start_pfn; | ||
| 998 | unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; | ||
| 999 | unsigned long pfn; | ||
| 1000 | |||
| 1001 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { | ||
| 1002 | struct page *page; | ||
| 1003 | |||
| 1004 | if (!pfn_valid(pfn)) | ||
| 1005 | continue; | ||
| 1006 | page = pfn_to_page(pfn); | ||
| 1007 | /* only scan if page is in use */ | ||
| 1008 | if (page_count(page) == 0) | ||
| 1009 | continue; | ||
| 1010 | scan_block(page, page + 1, NULL); | ||
| 1011 | } | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | /* | ||
| 1015 | * Scanning the task stacks may introduce false negatives and it is | ||
| 1016 | * not enabled by default. | ||
| 1017 | */ | ||
| 1018 | if (kmemleak_stack_scan) { | ||
| 1019 | read_lock(&tasklist_lock); | ||
| 1020 | for_each_process(task) | ||
| 1021 | scan_block(task_stack_page(task), | ||
| 1022 | task_stack_page(task) + THREAD_SIZE, NULL); | ||
| 1023 | read_unlock(&tasklist_lock); | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | /* | ||
| 1027 | * Scan the objects already referenced from the sections scanned | ||
| 1028 | * above. More objects will be referenced and, if there are no memory | ||
| 1029 | * leaks, all the objects will be scanned. The list traversal is safe | ||
| 1030 | * for both tail additions and removals from inside the loop. The | ||
| 1031 | * kmemleak objects cannot be freed from outside the loop because their | ||
| 1032 | * use_count was increased. | ||
| 1033 | */ | ||
| 1034 | object = list_entry(gray_list.next, typeof(*object), gray_list); | ||
| 1035 | while (&object->gray_list != &gray_list) { | ||
| 1036 | scan_yield(); | ||
| 1037 | |||
| 1038 | /* may add new objects to the list */ | ||
| 1039 | if (!scan_should_stop()) | ||
| 1040 | scan_object(object); | ||
| 1041 | |||
| 1042 | tmp = list_entry(object->gray_list.next, typeof(*object), | ||
| 1043 | gray_list); | ||
| 1044 | |||
| 1045 | /* remove the object from the list and release it */ | ||
| 1046 | list_del(&object->gray_list); | ||
| 1047 | put_object(object); | ||
| 1048 | |||
| 1049 | object = tmp; | ||
| 1050 | } | ||
| 1051 | WARN_ON(!list_empty(&gray_list)); | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | /* | ||
| 1055 | * Thread function performing automatic memory scanning. Unreferenced objects | ||
| 1056 | * at the end of a memory scan are reported, but only the first time. | ||
| 1057 | */ | ||
| 1058 | static int kmemleak_scan_thread(void *arg) | ||
| 1059 | { | ||
| 1060 | static int first_run = 1; | ||
| 1061 | |||
| 1062 | pr_info("kmemleak: Automatic memory scanning thread started\n"); | ||
| 1063 | |||
| 1064 | /* | ||
| 1065 | * Wait before the first scan to allow the system to fully initialize. | ||
| 1066 | */ | ||
| 1067 | if (first_run) { | ||
| 1068 | first_run = 0; | ||
| 1069 | ssleep(SECS_FIRST_SCAN); | ||
| 1070 | } | ||
| 1071 | |||
| 1072 | while (!kthread_should_stop()) { | ||
| 1073 | struct kmemleak_object *object; | ||
| 1074 | signed long timeout = jiffies_scan_wait; | ||
| 1075 | |||
| 1076 | mutex_lock(&scan_mutex); | ||
| 1077 | |||
| 1078 | kmemleak_scan(); | ||
| 1079 | reported_leaks = 0; | ||
| 1080 | |||
| 1081 | rcu_read_lock(); | ||
| 1082 | list_for_each_entry_rcu(object, &object_list, object_list) { | ||
| 1083 | unsigned long flags; | ||
| 1084 | |||
| 1085 | if (reported_leaks >= REPORTS_NR) | ||
| 1086 | break; | ||
| 1087 | spin_lock_irqsave(&object->lock, flags); | ||
| 1088 | if (!(object->flags & OBJECT_REPORTED) && | ||
| 1089 | unreferenced_object(object)) { | ||
| 1090 | print_unreferenced(NULL, object); | ||
| 1091 | object->flags |= OBJECT_REPORTED; | ||
| 1092 | reported_leaks++; | ||
| 1093 | } else if ((object->flags & OBJECT_REPORTED) && | ||
| 1094 | referenced_object(object)) { | ||
| 1095 | print_referenced(object); | ||
| 1096 | object->flags &= ~OBJECT_REPORTED; | ||
| 1097 | } | ||
| 1098 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 1099 | } | ||
| 1100 | rcu_read_unlock(); | ||
| 1101 | |||
| 1102 | mutex_unlock(&scan_mutex); | ||
| 1103 | /* wait before the next scan */ | ||
| 1104 | while (timeout && !kthread_should_stop()) | ||
| 1105 | timeout = schedule_timeout_interruptible(timeout); | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | pr_info("kmemleak: Automatic memory scanning thread ended\n"); | ||
| 1109 | |||
| 1110 | return 0; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | /* | ||
| 1114 | * Start the automatic memory scanning thread. This function must be called | ||
| 1115 | * with the kmemleak_mutex held. | ||
| 1116 | */ | ||
| 1117 | void start_scan_thread(void) | ||
| 1118 | { | ||
| 1119 | if (scan_thread) | ||
| 1120 | return; | ||
| 1121 | scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); | ||
| 1122 | if (IS_ERR(scan_thread)) { | ||
| 1123 | pr_warning("kmemleak: Failed to create the scan thread\n"); | ||
| 1124 | scan_thread = NULL; | ||
| 1125 | } | ||
| 1126 | } | ||
| 1127 | |||
| 1128 | /* | ||
| 1129 | * Stop the automatic memory scanning thread. This function must be called | ||
| 1130 | * with the kmemleak_mutex held. | ||
| 1131 | */ | ||
| 1132 | void stop_scan_thread(void) | ||
| 1133 | { | ||
| 1134 | if (scan_thread) { | ||
| 1135 | kthread_stop(scan_thread); | ||
| 1136 | scan_thread = NULL; | ||
| 1137 | } | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | /* | ||
| 1141 | * Iterate over the object_list and return the first valid object at or after | ||
| 1142 | * the required position with its use_count incremented. The function triggers | ||
| 1143 | * a memory scan when the pos argument points to the first position. | ||
| 1144 | */ | ||
| 1145 | static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) | ||
| 1146 | { | ||
| 1147 | struct kmemleak_object *object; | ||
| 1148 | loff_t n = *pos; | ||
| 1149 | |||
| 1150 | if (!n) { | ||
| 1151 | kmemleak_scan(); | ||
| 1152 | reported_leaks = 0; | ||
| 1153 | } | ||
| 1154 | if (reported_leaks >= REPORTS_NR) | ||
| 1155 | return NULL; | ||
| 1156 | |||
| 1157 | rcu_read_lock(); | ||
| 1158 | list_for_each_entry_rcu(object, &object_list, object_list) { | ||
| 1159 | if (n-- > 0) | ||
| 1160 | continue; | ||
| 1161 | if (get_object(object)) | ||
| 1162 | goto out; | ||
| 1163 | } | ||
| 1164 | object = NULL; | ||
| 1165 | out: | ||
| 1166 | rcu_read_unlock(); | ||
| 1167 | return object; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | /* | ||
| 1171 | * Return the next object in the object_list. The function decrements the | ||
| 1172 | * use_count of the previous object and increases that of the next one. | ||
| 1173 | */ | ||
| 1174 | static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
| 1175 | { | ||
| 1176 | struct kmemleak_object *prev_obj = v; | ||
| 1177 | struct kmemleak_object *next_obj = NULL; | ||
| 1178 | struct list_head *n = &prev_obj->object_list; | ||
| 1179 | |||
| 1180 | ++(*pos); | ||
| 1181 | if (reported_leaks >= REPORTS_NR) | ||
| 1182 | goto out; | ||
| 1183 | |||
| 1184 | rcu_read_lock(); | ||
| 1185 | list_for_each_continue_rcu(n, &object_list) { | ||
| 1186 | next_obj = list_entry(n, struct kmemleak_object, object_list); | ||
| 1187 | if (get_object(next_obj)) | ||
| 1188 | break; | ||
| 1189 | } | ||
| 1190 | rcu_read_unlock(); | ||
| 1191 | out: | ||
| 1192 | put_object(prev_obj); | ||
| 1193 | return next_obj; | ||
| 1194 | } | ||
| 1195 | |||
| 1196 | /* | ||
| 1197 | * Decrement the use_count of the last object required, if any. | ||
| 1198 | */ | ||
| 1199 | static void kmemleak_seq_stop(struct seq_file *seq, void *v) | ||
| 1200 | { | ||
| 1201 | if (v) | ||
| 1202 | put_object(v); | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | /* | ||
| 1206 | * Print the information for an unreferenced object to the seq file. | ||
| 1207 | */ | ||
| 1208 | static int kmemleak_seq_show(struct seq_file *seq, void *v) | ||
| 1209 | { | ||
| 1210 | struct kmemleak_object *object = v; | ||
| 1211 | unsigned long flags; | ||
| 1212 | |||
| 1213 | spin_lock_irqsave(&object->lock, flags); | ||
| 1214 | if (!unreferenced_object(object)) | ||
| 1215 | goto out; | ||
| 1216 | print_unreferenced(seq, object); | ||
| 1217 | reported_leaks++; | ||
| 1218 | out: | ||
| 1219 | spin_unlock_irqrestore(&object->lock, flags); | ||
| 1220 | return 0; | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | static const struct seq_operations kmemleak_seq_ops = { | ||
| 1224 | .start = kmemleak_seq_start, | ||
| 1225 | .next = kmemleak_seq_next, | ||
| 1226 | .stop = kmemleak_seq_stop, | ||
| 1227 | .show = kmemleak_seq_show, | ||
| 1228 | }; | ||
| 1229 | |||
| 1230 | static int kmemleak_open(struct inode *inode, struct file *file) | ||
| 1231 | { | ||
| 1232 | int ret = 0; | ||
| 1233 | |||
| 1234 | if (!atomic_read(&kmemleak_enabled)) | ||
| 1235 | return -EBUSY; | ||
| 1236 | |||
| 1237 | ret = mutex_lock_interruptible(&kmemleak_mutex); | ||
| 1238 | if (ret < 0) | ||
| 1239 | goto out; | ||
| 1240 | if (file->f_mode & FMODE_READ) { | ||
| 1241 | ret = mutex_lock_interruptible(&scan_mutex); | ||
| 1242 | if (ret < 0) | ||
| 1243 | goto kmemleak_unlock; | ||
| 1244 | ret = seq_open(file, &kmemleak_seq_ops); | ||
| 1245 | if (ret < 0) | ||
| 1246 | goto scan_unlock; | ||
| 1247 | } | ||
| 1248 | return ret; | ||
| 1249 | |||
| 1250 | scan_unlock: | ||
| 1251 | mutex_unlock(&scan_mutex); | ||
| 1252 | kmemleak_unlock: | ||
| 1253 | mutex_unlock(&kmemleak_mutex); | ||
| 1254 | out: | ||
| 1255 | return ret; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | static int kmemleak_release(struct inode *inode, struct file *file) | ||
| 1259 | { | ||
| 1260 | int ret = 0; | ||
| 1261 | |||
| 1262 | if (file->f_mode & FMODE_READ) { | ||
| 1263 | seq_release(inode, file); | ||
| 1264 | mutex_unlock(&scan_mutex); | ||
| 1265 | } | ||
| 1266 | mutex_unlock(&kmemleak_mutex); | ||
| 1267 | |||
| 1268 | return ret; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | /* | ||
| 1272 | * File write operation to configure kmemleak at run-time. The following | ||
| 1273 | * commands can be written to the /sys/kernel/debug/kmemleak file: | ||
| 1274 | * off - disable kmemleak (irreversible) | ||
| 1275 | * stack=on - enable the task stacks scanning | ||
| 1276 | * stack=off - disable the tasks stacks scanning | ||
| 1277 | * scan=on - start the automatic memory scanning thread | ||
| 1278 | * scan=off - stop the automatic memory scanning thread | ||
| 1279 | * scan=... - set the automatic memory scanning period in seconds (0 to | ||
| 1280 | * disable it) | ||
| 1281 | */ | ||
| 1282 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | ||
| 1283 | size_t size, loff_t *ppos) | ||
| 1284 | { | ||
| 1285 | char buf[64]; | ||
| 1286 | int buf_size; | ||
| 1287 | |||
| 1288 | if (!atomic_read(&kmemleak_enabled)) | ||
| 1289 | return -EBUSY; | ||
| 1290 | |||
| 1291 | buf_size = min(size, (sizeof(buf) - 1)); | ||
| 1292 | if (strncpy_from_user(buf, user_buf, buf_size) < 0) | ||
| 1293 | return -EFAULT; | ||
| 1294 | buf[buf_size] = 0; | ||
| 1295 | |||
| 1296 | if (strncmp(buf, "off", 3) == 0) | ||
| 1297 | kmemleak_disable(); | ||
| 1298 | else if (strncmp(buf, "stack=on", 8) == 0) | ||
| 1299 | kmemleak_stack_scan = 1; | ||
| 1300 | else if (strncmp(buf, "stack=off", 9) == 0) | ||
| 1301 | kmemleak_stack_scan = 0; | ||
| 1302 | else if (strncmp(buf, "scan=on", 7) == 0) | ||
| 1303 | start_scan_thread(); | ||
| 1304 | else if (strncmp(buf, "scan=off", 8) == 0) | ||
| 1305 | stop_scan_thread(); | ||
| 1306 | else if (strncmp(buf, "scan=", 5) == 0) { | ||
| 1307 | unsigned long secs; | ||
| 1308 | int err; | ||
| 1309 | |||
| 1310 | err = strict_strtoul(buf + 5, 0, &secs); | ||
| 1311 | if (err < 0) | ||
| 1312 | return err; | ||
| 1313 | stop_scan_thread(); | ||
| 1314 | if (secs) { | ||
| 1315 | jiffies_scan_wait = msecs_to_jiffies(secs * 1000); | ||
| 1316 | start_scan_thread(); | ||
| 1317 | } | ||
| 1318 | } else | ||
| 1319 | return -EINVAL; | ||
| 1320 | |||
| 1321 | /* ignore the rest of the buffer, only one command at a time */ | ||
| 1322 | *ppos += size; | ||
| 1323 | return size; | ||
| 1324 | } | ||
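/*
 * Editorial user-space sketch (not part of this patch) driving the debugfs
 * interface documented above; it assumes debugfs is mounted at
 * /sys/kernel/debug. Reading the same file prints the currently detected
 * unreferenced objects.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* e.g. kmemleak_command("scan=600") or kmemleak_command("stack=off") */
static int kmemleak_command(const char *cmd)
{
	int ret = -1;
	int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) == (ssize_t)strlen(cmd))
		ret = 0;
	close(fd);
	return ret;
}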
| 1325 | |||
| 1326 | static const struct file_operations kmemleak_fops = { | ||
| 1327 | .owner = THIS_MODULE, | ||
| 1328 | .open = kmemleak_open, | ||
| 1329 | .read = seq_read, | ||
| 1330 | .write = kmemleak_write, | ||
| 1331 | .llseek = seq_lseek, | ||
| 1332 | .release = kmemleak_release, | ||
| 1333 | }; | ||
| 1334 | |||
| 1335 | /* | ||
| 1336 | * Perform the freeing of the kmemleak internal objects after waiting for any | ||
| 1337 | * current memory scan to complete. | ||
| 1338 | */ | ||
| 1339 | static int kmemleak_cleanup_thread(void *arg) | ||
| 1340 | { | ||
| 1341 | struct kmemleak_object *object; | ||
| 1342 | |||
| 1343 | mutex_lock(&kmemleak_mutex); | ||
| 1344 | stop_scan_thread(); | ||
| 1345 | mutex_unlock(&kmemleak_mutex); | ||
| 1346 | |||
| 1347 | mutex_lock(&scan_mutex); | ||
| 1348 | rcu_read_lock(); | ||
| 1349 | list_for_each_entry_rcu(object, &object_list, object_list) | ||
| 1350 | delete_object(object->pointer); | ||
| 1351 | rcu_read_unlock(); | ||
| 1352 | mutex_unlock(&scan_mutex); | ||
| 1353 | |||
| 1354 | return 0; | ||
| 1355 | } | ||
| 1356 | |||
| 1357 | /* | ||
| 1358 | * Start the clean-up thread. | ||
| 1359 | */ | ||
| 1360 | static void kmemleak_cleanup(void) | ||
| 1361 | { | ||
| 1362 | struct task_struct *cleanup_thread; | ||
| 1363 | |||
| 1364 | cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL, | ||
| 1365 | "kmemleak-clean"); | ||
| 1366 | if (IS_ERR(cleanup_thread)) | ||
| 1367 | pr_warning("kmemleak: Failed to create the clean-up thread\n"); | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | /* | ||
| 1371 | * Disable kmemleak. No memory allocation/freeing will be traced once this | ||
| 1372 | * function is called. Disabling kmemleak is an irreversible operation. | ||
| 1373 | */ | ||
| 1374 | static void kmemleak_disable(void) | ||
| 1375 | { | ||
| 1376 | /* atomically check whether it was already invoked */ | ||
| 1377 | if (atomic_cmpxchg(&kmemleak_error, 0, 1)) | ||
| 1378 | return; | ||
| 1379 | |||
| 1380 | /* stop any memory operation tracing */ | ||
| 1381 | atomic_set(&kmemleak_early_log, 0); | ||
| 1382 | atomic_set(&kmemleak_enabled, 0); | ||
| 1383 | |||
| 1384 | /* check whether it is too early for a kernel thread */ | ||
| 1385 | if (atomic_read(&kmemleak_initialized)) | ||
| 1386 | kmemleak_cleanup(); | ||
| 1387 | |||
| 1388 | pr_info("Kernel memory leak detector disabled\n"); | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | /* | ||
| 1392 | * Allow boot-time kmemleak disabling (enabled by default). | ||
| 1393 | */ | ||
| 1394 | static int kmemleak_boot_config(char *str) | ||
| 1395 | { | ||
| 1396 | if (!str) | ||
| 1397 | return -EINVAL; | ||
| 1398 | if (strcmp(str, "off") == 0) | ||
| 1399 | kmemleak_disable(); | ||
| 1400 | else if (strcmp(str, "on") != 0) | ||
| 1401 | return -EINVAL; | ||
| 1402 | return 0; | ||
| 1403 | } | ||
| 1404 | early_param("kmemleak", kmemleak_boot_config); | ||
| 1405 | |||
| 1406 | /* | ||
| 1407 | * Kmemleak initialization. | ||
| 1408 | */ | ||
| 1409 | void __init kmemleak_init(void) | ||
| 1410 | { | ||
| 1411 | int i; | ||
| 1412 | unsigned long flags; | ||
| 1413 | |||
| 1414 | jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD); | ||
| 1415 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); | ||
| 1416 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); | ||
| 1417 | |||
| 1418 | object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); | ||
| 1419 | scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); | ||
| 1420 | INIT_PRIO_TREE_ROOT(&object_tree_root); | ||
| 1421 | |||
| 1422 | /* the kernel is still in UP mode, so disabling the IRQs is enough */ | ||
| 1423 | local_irq_save(flags); | ||
| 1424 | if (!atomic_read(&kmemleak_error)) { | ||
| 1425 | atomic_set(&kmemleak_enabled, 1); | ||
| 1426 | atomic_set(&kmemleak_early_log, 0); | ||
| 1427 | } | ||
| 1428 | local_irq_restore(flags); | ||
| 1429 | |||
| 1430 | /* | ||
| 1431 | * This is the point where tracking allocations is safe. Automatic | ||
| 1432 | * scanning is started during the late initcall. Add the early logged | ||
| 1433 | * callbacks to the kmemleak infrastructure. | ||
| 1434 | */ | ||
| 1435 | for (i = 0; i < crt_early_log; i++) { | ||
| 1436 | struct early_log *log = &early_log[i]; | ||
| 1437 | |||
| 1438 | switch (log->op_type) { | ||
| 1439 | case KMEMLEAK_ALLOC: | ||
| 1440 | kmemleak_alloc(log->ptr, log->size, log->min_count, | ||
| 1441 | GFP_KERNEL); | ||
| 1442 | break; | ||
| 1443 | case KMEMLEAK_FREE: | ||
| 1444 | kmemleak_free(log->ptr); | ||
| 1445 | break; | ||
| 1446 | case KMEMLEAK_NOT_LEAK: | ||
| 1447 | kmemleak_not_leak(log->ptr); | ||
| 1448 | break; | ||
| 1449 | case KMEMLEAK_IGNORE: | ||
| 1450 | kmemleak_ignore(log->ptr); | ||
| 1451 | break; | ||
| 1452 | case KMEMLEAK_SCAN_AREA: | ||
| 1453 | kmemleak_scan_area(log->ptr, log->offset, log->length, | ||
| 1454 | GFP_KERNEL); | ||
| 1455 | break; | ||
| 1456 | case KMEMLEAK_NO_SCAN: | ||
| 1457 | kmemleak_no_scan(log->ptr); | ||
| 1458 | break; | ||
| 1459 | default: | ||
| 1460 | WARN_ON(1); | ||
| 1461 | } | ||
| 1462 | } | ||
| 1463 | } | ||
| 1464 | |||
| 1465 | /* | ||
| 1466 | * Late initialization function. | ||
| 1467 | */ | ||
| 1468 | static int __init kmemleak_late_init(void) | ||
| 1469 | { | ||
| 1470 | struct dentry *dentry; | ||
| 1471 | |||
| 1472 | atomic_set(&kmemleak_initialized, 1); | ||
| 1473 | |||
| 1474 | if (atomic_read(&kmemleak_error)) { | ||
| 1475 | /* | ||
| 1476 | * Some error occurred and kmemleak was disabled. There is a | ||
| 1477 | * small chance that kmemleak_disable() was called immediately | ||
| 1478 | * after setting kmemleak_initialized and we may end up with | ||
| 1479 | * two clean-up threads but serialized by scan_mutex. | ||
| 1480 | */ | ||
| 1481 | kmemleak_cleanup(); | ||
| 1482 | return -ENOMEM; | ||
| 1483 | } | ||
| 1484 | |||
| 1485 | dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, | ||
| 1486 | &kmemleak_fops); | ||
| 1487 | if (!dentry) | ||
| 1488 | pr_warning("kmemleak: Failed to create the debugfs kmemleak " | ||
| 1489 | "file\n"); | ||
| 1490 | mutex_lock(&kmemleak_mutex); | ||
| 1491 | start_scan_thread(); | ||
| 1492 | mutex_unlock(&kmemleak_mutex); | ||
| 1493 | |||
| 1494 | pr_info("Kernel memory leak detector initialized\n"); | ||
| 1495 | |||
| 1496 | return 0; | ||
| 1497 | } | ||
| 1498 | late_initcall(kmemleak_late_init); | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 474c7e9dd51a..17d5f539a9aa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/page-isolation.h> | 46 | #include <linux/page-isolation.h> |
| 47 | #include <linux/page_cgroup.h> | 47 | #include <linux/page_cgroup.h> |
| 48 | #include <linux/debugobjects.h> | 48 | #include <linux/debugobjects.h> |
| 49 | #include <linux/kmemleak.h> | ||
| 49 | 50 | ||
| 50 | #include <asm/tlbflush.h> | 51 | #include <asm/tlbflush.h> |
| 51 | #include <asm/div64.h> | 52 | #include <asm/div64.h> |
| @@ -4546,6 +4547,16 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
| 4546 | if (_hash_mask) | 4547 | if (_hash_mask) |
| 4547 | *_hash_mask = (1 << log2qty) - 1; | 4548 | *_hash_mask = (1 << log2qty) - 1; |
| 4548 | 4549 | ||
| 4550 | /* | ||
| 4551 | * If hashdist is set, the table allocation is done with __vmalloc() | ||
| 4552 | * which invokes the kmemleak_alloc() callback. This function may also | ||
| 4553 | * be called before the slab and kmemleak are initialised, in which | ||
| 4554 | * case kmemleak simply buffers the request to be executed later | ||
| 4555 | * (GFP_ATOMIC flag ignored in this case). | ||
| 4556 | */ | ||
| 4557 | if (!hashdist) | ||
| 4558 | kmemleak_alloc(table, size, 1, GFP_ATOMIC); | ||
| 4559 | |||
| 4549 | return table; | 4560 | return table; |
| 4550 | } | 4561 | } |
| 4551 | 4562 | ||
| @@ -107,6 +107,7 @@ | |||
| 107 | #include <linux/string.h> | 107 | #include <linux/string.h> |
| 108 | #include <linux/uaccess.h> | 108 | #include <linux/uaccess.h> |
| 109 | #include <linux/nodemask.h> | 109 | #include <linux/nodemask.h> |
| 110 | #include <linux/kmemleak.h> | ||
| 110 | #include <linux/mempolicy.h> | 111 | #include <linux/mempolicy.h> |
| 111 | #include <linux/mutex.h> | 112 | #include <linux/mutex.h> |
| 112 | #include <linux/fault-inject.h> | 113 | #include <linux/fault-inject.h> |
| @@ -178,13 +179,13 @@ | |||
| 178 | SLAB_STORE_USER | \ | 179 | SLAB_STORE_USER | \ |
| 179 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ | 180 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ |
| 180 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ | 181 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ |
| 181 | SLAB_DEBUG_OBJECTS) | 182 | SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE) |
| 182 | #else | 183 | #else |
| 183 | # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ | 184 | # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ |
| 184 | SLAB_CACHE_DMA | \ | 185 | SLAB_CACHE_DMA | \ |
| 185 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ | 186 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ |
| 186 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ | 187 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ |
| 187 | SLAB_DEBUG_OBJECTS) | 188 | SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE) |
| 188 | #endif | 189 | #endif |
| 189 | 190 | ||
| 190 | /* | 191 | /* |
| @@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries, | |||
| 964 | struct array_cache *nc = NULL; | 965 | struct array_cache *nc = NULL; |
| 965 | 966 | ||
| 966 | nc = kmalloc_node(memsize, gfp, node); | 967 | nc = kmalloc_node(memsize, gfp, node); |
| 968 | /* | ||
| 969 | * The array_cache structures contain pointers to free objects. | ||
| 970 | * However, when such objects are allocated or transferred to another | ||
| 971 | * cache the pointers are not cleared and they could be counted as | ||
| 972 | * valid references during a kmemleak scan. Therefore, kmemleak must | ||
| 973 | * not scan such objects. | ||
| 974 | */ | ||
| 975 | kmemleak_no_scan(nc); | ||
| 967 | if (nc) { | 976 | if (nc) { |
| 968 | nc->avail = 0; | 977 | nc->avail = 0; |
| 969 | nc->limit = entries; | 978 | nc->limit = entries; |
| @@ -2625,6 +2634,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | |||
| 2625 | /* Slab management obj is off-slab. */ | 2634 | /* Slab management obj is off-slab. */ |
| 2626 | slabp = kmem_cache_alloc_node(cachep->slabp_cache, | 2635 | slabp = kmem_cache_alloc_node(cachep->slabp_cache, |
| 2627 | local_flags, nodeid); | 2636 | local_flags, nodeid); |
| 2637 | /* | ||
| 2638 | * If the first object in the slab is leaked (it's allocated | ||
| 2639 | * but no one has a reference to it), we want to make sure | ||
| 2640 | * kmemleak does not treat the ->s_mem pointer as a reference | ||
| 2641 | * to the object. Otherwise we will not report the leak. | ||
| 2642 | */ | ||
| 2643 | kmemleak_scan_area(slabp, offsetof(struct slab, list), | ||
| 2644 | sizeof(struct list_head), local_flags); | ||
| 2628 | if (!slabp) | 2645 | if (!slabp) |
| 2629 | return NULL; | 2646 | return NULL; |
| 2630 | } else { | 2647 | } else { |
| @@ -3145,6 +3162,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
| 3145 | STATS_INC_ALLOCMISS(cachep); | 3162 | STATS_INC_ALLOCMISS(cachep); |
| 3146 | objp = cache_alloc_refill(cachep, flags); | 3163 | objp = cache_alloc_refill(cachep, flags); |
| 3147 | } | 3164 | } |
| 3165 | /* | ||
| 3166 | * To avoid a false negative, if an object that is in one of the | ||
| 3167 | * per-CPU caches is leaked, we need to make sure kmemleak doesn't | ||
| 3168 | * treat the array pointers as a reference to the object. | ||
| 3169 | */ | ||
| 3170 | kmemleak_erase(&ac->entry[ac->avail]); | ||
| 3148 | return objp; | 3171 | return objp; |
| 3149 | } | 3172 | } |
| 3150 | 3173 | ||
| @@ -3364,6 +3387,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
| 3364 | out: | 3387 | out: |
| 3365 | local_irq_restore(save_flags); | 3388 | local_irq_restore(save_flags); |
| 3366 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); | 3389 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); |
| 3390 | kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, | ||
| 3391 | flags); | ||
| 3367 | 3392 | ||
| 3368 | if (unlikely((flags & __GFP_ZERO) && ptr)) | 3393 | if (unlikely((flags & __GFP_ZERO) && ptr)) |
| 3369 | memset(ptr, 0, obj_size(cachep)); | 3394 | memset(ptr, 0, obj_size(cachep)); |
| @@ -3419,6 +3444,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | |||
| 3419 | objp = __do_cache_alloc(cachep, flags); | 3444 | objp = __do_cache_alloc(cachep, flags); |
| 3420 | local_irq_restore(save_flags); | 3445 | local_irq_restore(save_flags); |
| 3421 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); | 3446 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); |
| 3447 | kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, | ||
| 3448 | flags); | ||
| 3422 | prefetchw(objp); | 3449 | prefetchw(objp); |
| 3423 | 3450 | ||
| 3424 | if (unlikely((flags & __GFP_ZERO) && objp)) | 3451 | if (unlikely((flags & __GFP_ZERO) && objp)) |
| @@ -3534,6 +3561,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) | |||
| 3534 | struct array_cache *ac = cpu_cache_get(cachep); | 3561 | struct array_cache *ac = cpu_cache_get(cachep); |
| 3535 | 3562 | ||
| 3536 | check_irq_off(); | 3563 | check_irq_off(); |
| 3564 | kmemleak_free_recursive(objp, cachep->flags); | ||
| 3537 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); | 3565 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); |
| 3538 | 3566 | ||
| 3539 | /* | 3567 | /* |
| @@ -67,6 +67,7 @@ | |||
| 67 | #include <linux/rcupdate.h> | 67 | #include <linux/rcupdate.h> |
| 68 | #include <linux/list.h> | 68 | #include <linux/list.h> |
| 69 | #include <linux/kmemtrace.h> | 69 | #include <linux/kmemtrace.h> |
| 70 | #include <linux/kmemleak.h> | ||
| 70 | #include <asm/atomic.h> | 71 | #include <asm/atomic.h> |
| 71 | 72 | ||
| 72 | /* | 73 | /* |
| @@ -509,6 +510,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 509 | size, PAGE_SIZE << order, gfp, node); | 510 | size, PAGE_SIZE << order, gfp, node); |
| 510 | } | 511 | } |
| 511 | 512 | ||
| 513 | kmemleak_alloc(ret, size, 1, gfp); | ||
| 512 | return ret; | 514 | return ret; |
| 513 | } | 515 | } |
| 514 | EXPORT_SYMBOL(__kmalloc_node); | 516 | EXPORT_SYMBOL(__kmalloc_node); |
| @@ -521,6 +523,7 @@ void kfree(const void *block) | |||
| 521 | 523 | ||
| 522 | if (unlikely(ZERO_OR_NULL_PTR(block))) | 524 | if (unlikely(ZERO_OR_NULL_PTR(block))) |
| 523 | return; | 525 | return; |
| 526 | kmemleak_free(block); | ||
| 524 | 527 | ||
| 525 | sp = slob_page(block); | 528 | sp = slob_page(block); |
| 526 | if (is_slob_page(sp)) { | 529 | if (is_slob_page(sp)) { |
| @@ -584,12 +587,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
| 584 | } else if (flags & SLAB_PANIC) | 587 | } else if (flags & SLAB_PANIC) |
| 585 | panic("Cannot create slab cache %s\n", name); | 588 | panic("Cannot create slab cache %s\n", name); |
| 586 | 589 | ||
| 590 | kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); | ||
| 587 | return c; | 591 | return c; |
| 588 | } | 592 | } |
| 589 | EXPORT_SYMBOL(kmem_cache_create); | 593 | EXPORT_SYMBOL(kmem_cache_create); |
| 590 | 594 | ||
| 591 | void kmem_cache_destroy(struct kmem_cache *c) | 595 | void kmem_cache_destroy(struct kmem_cache *c) |
| 592 | { | 596 | { |
| 597 | kmemleak_free(c); | ||
| 593 | slob_free(c, sizeof(struct kmem_cache)); | 598 | slob_free(c, sizeof(struct kmem_cache)); |
| 594 | } | 599 | } |
| 595 | EXPORT_SYMBOL(kmem_cache_destroy); | 600 | EXPORT_SYMBOL(kmem_cache_destroy); |
| @@ -613,6 +618,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | |||
| 613 | if (c->ctor) | 618 | if (c->ctor) |
| 614 | c->ctor(b); | 619 | c->ctor(b); |
| 615 | 620 | ||
| 621 | kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); | ||
| 616 | return b; | 622 | return b; |
| 617 | } | 623 | } |
| 618 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 624 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
| @@ -635,6 +641,7 @@ static void kmem_rcu_free(struct rcu_head *head) | |||
| 635 | 641 | ||
| 636 | void kmem_cache_free(struct kmem_cache *c, void *b) | 642 | void kmem_cache_free(struct kmem_cache *c, void *b) |
| 637 | { | 643 | { |
| 644 | kmemleak_free_recursive(b, c->flags); | ||
| 638 | if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { | 645 | if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { |
| 639 | struct slob_rcu *slob_rcu; | 646 | struct slob_rcu *slob_rcu; |
| 640 | slob_rcu = b + (c->size - sizeof(struct slob_rcu)); | 647 | slob_rcu = b + (c->size - sizeof(struct slob_rcu)); |
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/kmemtrace.h> | 20 | #include <linux/kmemtrace.h> |
| 21 | #include <linux/cpu.h> | 21 | #include <linux/cpu.h> |
| 22 | #include <linux/cpuset.h> | 22 | #include <linux/cpuset.h> |
| 23 | #include <linux/kmemleak.h> | ||
| 23 | #include <linux/mempolicy.h> | 24 | #include <linux/mempolicy.h> |
| 24 | #include <linux/ctype.h> | 25 | #include <linux/ctype.h> |
| 25 | #include <linux/debugobjects.h> | 26 | #include <linux/debugobjects.h> |
| @@ -143,7 +144,7 @@ | |||
| 143 | * Set of flags that will prevent slab merging | 144 | * Set of flags that will prevent slab merging |
| 144 | */ | 145 | */ |
| 145 | #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ | 146 | #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ |
| 146 | SLAB_TRACE | SLAB_DESTROY_BY_RCU) | 147 | SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE) |
| 147 | 148 | ||
| 148 | #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ | 149 | #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ |
| 149 | SLAB_CACHE_DMA) | 150 | SLAB_CACHE_DMA) |
| @@ -1617,6 +1618,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
| 1617 | if (unlikely((gfpflags & __GFP_ZERO) && object)) | 1618 | if (unlikely((gfpflags & __GFP_ZERO) && object)) |
| 1618 | memset(object, 0, objsize); | 1619 | memset(object, 0, objsize); |
| 1619 | 1620 | ||
| 1621 | kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags); | ||
| 1620 | return object; | 1622 | return object; |
| 1621 | } | 1623 | } |
| 1622 | 1624 | ||
| @@ -1746,6 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s, | |||
| 1746 | struct kmem_cache_cpu *c; | 1748 | struct kmem_cache_cpu *c; |
| 1747 | unsigned long flags; | 1749 | unsigned long flags; |
| 1748 | 1750 | ||
| 1751 | kmemleak_free_recursive(x, s->flags); | ||
| 1749 | local_irq_save(flags); | 1752 | local_irq_save(flags); |
| 1750 | c = get_cpu_slab(s, smp_processor_id()); | 1753 | c = get_cpu_slab(s, smp_processor_id()); |
| 1751 | debug_check_no_locks_freed(object, c->objsize); | 1754 | debug_check_no_locks_freed(object, c->objsize); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 323513858c20..f8189a4b3e13 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/radix-tree.h> | 24 | #include <linux/radix-tree.h> |
| 25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
| 26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
| 27 | #include <linux/kmemleak.h> | ||
| 27 | 28 | ||
| 28 | #include <asm/atomic.h> | 29 | #include <asm/atomic.h> |
| 29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
| @@ -1326,6 +1327,9 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
| 1326 | void vfree(const void *addr) | 1327 | void vfree(const void *addr) |
| 1327 | { | 1328 | { |
| 1328 | BUG_ON(in_interrupt()); | 1329 | BUG_ON(in_interrupt()); |
| 1330 | |||
| 1331 | kmemleak_free(addr); | ||
| 1332 | |||
| 1329 | __vunmap(addr, 1); | 1333 | __vunmap(addr, 1); |
| 1330 | } | 1334 | } |
| 1331 | EXPORT_SYMBOL(vfree); | 1335 | EXPORT_SYMBOL(vfree); |
| @@ -1438,8 +1442,17 @@ fail: | |||
| 1438 | 1442 | ||
| 1439 | void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | 1443 | void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) |
| 1440 | { | 1444 | { |
| 1441 | return __vmalloc_area_node(area, gfp_mask, prot, -1, | 1445 | void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1, |
| 1442 | __builtin_return_address(0)); | 1446 | __builtin_return_address(0)); |
| 1447 | |||
| 1448 | /* | ||
| 1449 | * A ref_count = 3 is needed because the vm_struct and vmap_area | ||
| 1450 | * structures allocated in the __get_vm_area_node() function contain | ||
| 1451 | * references to the virtual address of the vmalloc'ed block. | ||
| 1452 | */ | ||
| 1453 | kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask); | ||
| 1454 | |||
| 1455 | return addr; | ||
| 1443 | } | 1456 | } |
| 1444 | 1457 | ||
| 1445 | /** | 1458 | /** |
| @@ -1458,6 +1471,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
| 1458 | int node, void *caller) | 1471 | int node, void *caller) |
| 1459 | { | 1472 | { |
| 1460 | struct vm_struct *area; | 1473 | struct vm_struct *area; |
| 1474 | void *addr; | ||
| 1475 | unsigned long real_size = size; | ||
| 1461 | 1476 | ||
| 1462 | size = PAGE_ALIGN(size); | 1477 | size = PAGE_ALIGN(size); |
| 1463 | if (!size || (size >> PAGE_SHIFT) > num_physpages) | 1478 | if (!size || (size >> PAGE_SHIFT) > num_physpages) |
| @@ -1469,7 +1484,16 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
| 1469 | if (!area) | 1484 | if (!area) |
| 1470 | return NULL; | 1485 | return NULL; |
| 1471 | 1486 | ||
| 1472 | return __vmalloc_area_node(area, gfp_mask, prot, node, caller); | 1487 | addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); |
| 1488 | |||
| 1489 | /* | ||
| 1490 | * A ref_count = 3 is needed because the vm_struct and vmap_area | ||
| 1491 | * structures allocated in the __get_vm_area_node() function contain | ||
| 1492 | * references to the virtual address of the vmalloc'ed block. | ||
| 1493 | */ | ||
| 1494 | kmemleak_alloc(addr, real_size, 3, gfp_mask); | ||
| 1495 | |||
| 1496 | return addr; | ||
| 1473 | } | 1497 | } |
| 1474 | 1498 | ||
| 1475 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 1499 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
