 lib/Kconfig.debug |   7
 lib/scatterlist.c |  23
 mm/kmemleak.c     | 100
 3 files changed, 101 insertions(+), 29 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 79e0dff1cdcb..9e06b7f5ecf1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -410,6 +410,13 @@ config DEBUG_KMEMLEAK_TEST
 
 	  If unsure, say N.
 
+config DEBUG_KMEMLEAK_DEFAULT_OFF
+	bool "Default kmemleak to off"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y here to disable kmemleak by default. It can then be enabled
+	  on the command line via kmemleak=on.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
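For context (an illustration, not part of the commit): with this option selected, kmemleak is still compiled in but stays dormant unless requested at boot. A build along the lines of

	CONFIG_DEBUG_KMEMLEAK=y
	CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y

behaves like an ordinary kmemleak kernel when booted with "kmemleak=on" on the command line, and skips kmemleak initialization entirely when booted without it (see the kmemleak_boot_config() and kmemleak_init() changes in mm/kmemleak.c below).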
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9afa25b52a83..a5ec42868f99 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
+#include <linux/kmemleak.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -115,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
-		return (struct scatterlist *) __get_free_page(gfp_mask);
-	else
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		/*
+		 * Kmemleak doesn't track page allocations as they are not
+		 * commonly used (in a raw form) for kernel data structures.
+		 * As we chain together a list of pages and then a normal
+		 * kmalloc (tracked by kmemleak), in order for that last
+		 * allocation not to become decoupled (and thus a
+		 * false-positive) we need to inform kmemleak of all the
+		 * intermediate allocations.
+		 */
+		void *ptr = (void *) __get_free_page(gfp_mask);
+		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+		return ptr;
+	} else
 		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		kmemleak_free(sg);
 		free_page((unsigned long) sg);
-	else
+	} else
 		kfree(sg);
 }
 
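The same annotation pattern applies outside scatterlist whenever kmalloc()ed objects are reachable only through memory taken straight from the page allocator. Below is a minimal, hedged sketch of that pattern; "struct foo_table" and its helpers are hypothetical, while kmemleak_alloc() and kmemleak_free() are the callbacks documented in mm/kmemleak.c further down.

/*
 * Hedged sketch, not part of this patch: register a raw page with kmemleak
 * so that the kmalloc()ed objects it points to stay "referenced".
 */
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

struct foo_table {
	void *entries[64];	/* pointers to kmalloc()ed objects */
};

static struct foo_table *foo_table_alloc(gfp_t gfp_mask)
{
	struct foo_table *t = (struct foo_table *)__get_free_page(gfp_mask);

	if (t)
		/*
		 * Register the page (min_count = 1) so the pointers stored
		 * in it are scanned, mirroring sg_kmalloc() above.
		 */
		kmemleak_alloc(t, PAGE_SIZE, 1, gfp_mask);
	return t;
}

static void foo_table_free(struct foo_table *t)
{
	kmemleak_free(t);	/* unregister before handing the page back */
	free_page((unsigned long)t);
}

Without the kmemleak_alloc()/kmemleak_free() pair, the objects referenced only from this page would appear unreferenced to the scanner and be reported as false-positive leaks.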
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2c0d032ac898..bd9bc214091b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -211,6 +211,9 @@ static signed long jiffies_scan_wait;
 static int kmemleak_stack_scan = 1;
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
+/* setting kmemleak=on will set this var, skipping the disable */
+static int kmemleak_skip_disable;
+
 
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
@@ -398,7 +401,9 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 		object = prio_tree_entry(node, struct kmemleak_object,
 					 tree_node);
 		if (!alias && object->pointer != ptr) {
-			kmemleak_warn("Found object by alias");
+			pr_warning("Found object by alias at 0x%08lx\n", ptr);
+			dump_stack();
+			dump_object_info(object);
 			object = NULL;
 		}
 	} else
@@ -695,7 +700,7 @@ static void paint_ptr(unsigned long ptr, int color)
 }
 
 /*
- * Make a object permanently as gray-colored so that it can no longer be
+ * Mark an object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
  */
 static void make_gray_object(unsigned long ptr)
@@ -838,10 +843,19 @@ out:
 	rcu_read_unlock();
 }
 
-/*
- * Memory allocation function callback. This function is called from the
- * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
- * vmalloc etc.).
+/**
+ * kmemleak_alloc - register a newly allocated object
+ * @ptr:	pointer to beginning of the object
+ * @size:	size of the object
+ * @min_count:	minimum number of references to this object. If during memory
+ *		scanning a number of references less than @min_count is found,
+ *		the object is reported as a memory leak. If @min_count is 0,
+ *		the object is never reported as a leak. If @min_count is -1,
+ *		the object is ignored (not scanned and not reported as a leak)
+ * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is called from the kernel allocators when a new object
+ * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
  */
 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 			  gfp_t gfp)
@@ -855,9 +869,12 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
-/*
- * Memory freeing function callback. This function is called from the kernel
- * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
+/**
+ * kmemleak_free - unregister a previously registered object
+ * @ptr:	pointer to beginning of the object
+ *
+ * This function is called from the kernel allocators when an object (memory
+ * block) is freed (kmem_cache_free, kfree, vfree etc.).
  */
 void __ref kmemleak_free(const void *ptr)
 {
@@ -870,9 +887,14 @@ void __ref kmemleak_free(const void *ptr)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
-/*
- * Partial memory freeing function callback. This function is usually called
- * from bootmem allocator when (part of) a memory block is freed.
+/**
+ * kmemleak_free_part - partially unregister a previously registered object
+ * @ptr:	pointer to the beginning or inside the object. This also
+ *		represents the start of the range to be freed
+ * @size:	size to be unregistered
+ *
+ * This function is called when only a part of a memory block is freed
+ * (usually from the bootmem allocator).
  */
 void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
@@ -885,9 +907,12 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
-/*
- * Mark an already allocated memory block as a false positive. This will cause
- * the block to no longer be reported as leak and always be scanned.
+/**
+ * kmemleak_not_leak - mark an allocated object as false positive
+ * @ptr:	pointer to beginning of the object
+ *
+ * Calling this function on an object will cause the memory block to no longer
+ * be reported as a leak and always be scanned.
  */
 void __ref kmemleak_not_leak(const void *ptr)
 {
@@ -900,10 +925,14 @@ void __ref kmemleak_not_leak(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
-/*
- * Ignore a memory block. This is usually done when it is known that the
- * corresponding block is not a leak and does not contain any references to
- * other allocated memory blocks.
+/**
+ * kmemleak_ignore - ignore an allocated object
+ * @ptr:	pointer to beginning of the object
+ *
+ * Calling this function on an object will cause the memory block to be
+ * ignored (not scanned and not reported as a leak). This is usually done when
+ * it is known that the corresponding block is not a leak and does not contain
+ * any references to other allocated memory blocks.
  */
 void __ref kmemleak_ignore(const void *ptr)
 {
@@ -916,8 +945,16 @@ void __ref kmemleak_ignore(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
-/*
- * Limit the range to be scanned in an allocated memory block.
+/**
+ * kmemleak_scan_area - limit the range to be scanned in an allocated object
+ * @ptr:	pointer to beginning or inside the object. This also
+ *		represents the start of the scan area
+ * @size:	size of the scan area
+ * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is used when it is known that only certain parts of an object
+ * contain references to other objects. Kmemleak will only scan these areas,
+ * reducing the number of false negatives.
  */
 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
@@ -930,8 +967,14 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
-/*
- * Inform kmemleak not to scan the given memory block.
+/**
+ * kmemleak_no_scan - do not scan an allocated object
+ * @ptr:	pointer to beginning of the object
+ *
+ * This function notifies kmemleak not to scan the given memory block. Useful
+ * in situations where it is known that the given object does not contain any
+ * references to other objects. Kmemleak will not scan such objects, reducing
+ * the number of false negatives.
  */
 void __ref kmemleak_no_scan(const void *ptr)
 {
@@ -1602,7 +1645,9 @@ static int kmemleak_boot_config(char *str)
 		return -EINVAL;
 	if (strcmp(str, "off") == 0)
 		kmemleak_disable();
-	else if (strcmp(str, "on") != 0)
+	else if (strcmp(str, "on") == 0)
+		kmemleak_skip_disable = 1;
+	else
 		return -EINVAL;
 	return 0;
 }
@@ -1616,6 +1661,13 @@ void __init kmemleak_init(void)
 	int i;
 	unsigned long flags;
 
+#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
+	if (!kmemleak_skip_disable) {
+		kmemleak_disable();
+		return;
+	}
+#endif
+
 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
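As a rough illustration of how the annotations whose kernel-doc is added above are typically combined, here is a hedged sketch; it is not part of this patch, and "struct my_ctx" and my_ctx_create() are made-up names, while kmemleak_scan_area() and kmemleak_not_leak() are the exported helpers documented above.

/*
 * Hedged sketch: restrict scanning to the one pointer-bearing field of an
 * object and keep an object referenced only from outside kernel memory
 * from being reported as a leak.
 */
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ctx {
	struct list_head link;	/* the only field holding kernel pointers */
	u8 payload[512];	/* opaque data, nothing for kmemleak to follow */
};

static struct my_ctx *my_ctx_create(gfp_t gfp)
{
	struct my_ctx *ctx = kmalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return NULL;

	/* Only scan the pointer-bearing field, reducing false negatives. */
	kmemleak_scan_area(&ctx->link, sizeof(ctx->link), gfp);

	/*
	 * If the only reference to ctx ends up somewhere kmemleak cannot
	 * scan (e.g. a physical address programmed into a device), mark it
	 * so it is never reported as a leak.
	 */
	kmemleak_not_leak(ctx);

	return ctx;
}

If the object held no kernel pointers at all, kmemleak_no_scan() would be the stronger choice, and kmemleak_ignore() would additionally suppress any leak report for it.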
