 include/linux/kmemleak.h |   6
 kernel/module.c          |  13
 lib/Kconfig.debug        |   1
 mm/kmemleak.c            | 188
 mm/slab.c                |  10
 5 files changed, 118 insertions(+), 100 deletions(-)
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d46ee9..99d9a6766f7e 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
                              size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-                               size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
-                                      size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 }
 static inline void kmemleak_erase(void **ptr)
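The header change above replaces the (base pointer, offset, length) triple with a pointer directly into the object plus a size. Below is a minimal, illustrative caller under the new convention; struct mixed_buf and mixed_buf_create() are made-up names, not part of this patch. Registering a scan area makes kmemleak scan only that range of the block instead of the whole allocation, as described by the add_scan_area() comment later in mm/kmemleak.c.

#include <linux/slab.h>
#include <linux/kmemleak.h>

struct mixed_buf {
        void *refs[4];          /* pointers that keep other objects alive */
        char payload[4096];     /* raw data, contains no pointers */
};

static struct mixed_buf *mixed_buf_create(void)
{
        struct mixed_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL);

        if (!buf)
                return NULL;
        /* old API: kmemleak_scan_area(buf, 0, sizeof(buf->refs), GFP_KERNEL); */
        /* new API: pass the address of the area itself and its size */
        kmemleak_scan_area(buf->refs, sizeof(buf->refs), GFP_KERNEL);
        return buf;
}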
diff --git a/kernel/module.c b/kernel/module.c
index a65dc787a27b..e96b8ed1cb6a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1910,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
        unsigned int i;
 
        /* only scan the sections containing data */
-       kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-                          (unsigned long)mod->module_core,
-                          sizeof(struct module), GFP_KERNEL);
+       kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
        for (i = 1; i < hdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1921,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
                    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
                        continue;
 
-               kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-                                  (unsigned long)mod->module_core,
-                                  sechdrs[i].sh_size, GFP_KERNEL);
+               kmemleak_scan_area((void *)sechdrs[i].sh_addr,
+                                  sechdrs[i].sh_size, GFP_KERNEL);
        }
 }
@@ -2250,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
                                         "_ftrace_events",
                                         sizeof(*mod->trace_events),
                                         &mod->num_trace_events);
+       /*
+        * This section contains pointers to allocated objects in the trace
+        * code and not scanning it leads to false positives.
+        */
+       kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+                          mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
        /* sechdrs[0].sh_size is always zero */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8cf9938dd147..25c3ed594c54 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
        select DEBUG_FS if SYSFS
        select STACKTRACE if STACKTRACE_SUPPORT
        select KALLSYMS
+       select CRC32
        help
          Say Y here if you want to enable the memory leak
          detector. The memory allocation/freeing is traced in a way
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 13f33b3081ec..5b069e4f5e48 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +109,6 @@
 #define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
 #define SECS_FIRST_SCAN         60      /* delay before the first scan */
 #define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES        25      /* maximum number of gray list scans */
 #define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER       sizeof(void *)
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
        struct hlist_node node;
-       unsigned long offset;
-       size_t length;
+       unsigned long start;
+       size_t size;
 };
 
 #define KMEMLEAK_GREY   0
@@ -149,6 +149,8 @@ struct kmemleak_object {
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
+       /* checksum for detecting modified objects */
+       u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@
 #define OBJECT_REPORTED         (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN          (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW              (1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE            16
@@ -241,8 +241,6 @@ struct early_log {
        const void *ptr;                /* allocated/freed memory block */
        size_t size;                    /* memory block size */
        int min_count;                  /* minimum reference count */
-       unsigned long offset;           /* scan area offset */
-       size_t length;                  /* scan area length */
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
 };
@@ -323,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
                object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-       return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-       return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+       return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
 }
@@ -348,11 +341,13 @@ static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
 {
        int i;
+       unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
-       seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
-                  object->comm, object->pid, object->jiffies);
+       seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+                  object->comm, object->pid, object->jiffies,
+                  msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, " backtrace:\n");
 
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
        pr_notice(" min_count = %d\n", object->min_count);
        pr_notice(" count = %d\n", object->count);
        pr_notice(" flags = 0x%lx\n", object->flags);
+       pr_notice(" checksum = %d\n", object->checksum);
        pr_notice(" backtrace:\n");
        print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+       object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->min_count = min_count;
-       object->count = -1;                     /* no color initially */
+       object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
+       object->checksum = 0;
 
        /* task information */
        if (in_irq()) {
@@ -720,14 +717,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-                         size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;
 
-       object = find_and_get_object(ptr, 0);
+       object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
@@ -741,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
        }
 
        spin_lock_irqsave(&object->lock, flags);
-       if (offset + length > object->size) {
+       if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
@@ -749,8 +745,8 @@
        }
 
        INIT_HLIST_NODE(&area->node);
-       area->offset = offset;
-       area->length = length;
+       area->start = ptr;
+       area->size = size;
 
        hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +782,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-                            int min_count, unsigned long offset, size_t length)
+                            int min_count)
 {
        unsigned long flags;
        struct early_log *log;
@@ -808,8 +804,6 @@
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
-       log->offset = offset;
-       log->length = length;
        if (op_type == KMEMLEAK_ALLOC)
                log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
@@ -858,7 +852,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+               log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +867,7 @@ void __ref kmemleak_free(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +882,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+               log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +897,7 @@ void __ref kmemleak_not_leak(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +913,21 @@ void __ref kmemleak_ignore(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-                             size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-               add_scan_area((unsigned long)ptr, offset, length, gfp);
+               add_scan_area((unsigned long)ptr, size, gfp);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+               log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -948,11 +941,25 @@ void __ref kmemleak_no_scan(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
 /*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+       u32 old_csum = object->checksum;
+
+       if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+               return false;
+
+       object->checksum = crc32(0, (void *)object->pointer, object->size);
+       return object->checksum != old_csum;
+}
+
+/*
  * Memory scanning is a long process and it needs to be interruptable. This
  * function checks whether such interrupt condition occured.
  */
@@ -1031,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
                 * added to the gray_list.
                 */
                object->count++;
-               if (color_gray(object))
+               if (color_gray(object)) {
                        list_add_tail(&object->gray_list, &gray_list);
-               else
-                       put_object(object);
+                       spin_unlock_irqrestore(&object->lock, flags);
+                       continue;
+               }
+
                spin_unlock_irqrestore(&object->lock, flags);
+               put_object(object);
        }
 }
 
@@ -1075,14 +1085,47 @@ static void scan_object(struct kmemleak_object *object)
                }
        } else
                hlist_for_each_entry(area, elem, &object->area_list, node)
-                       scan_block((void *)(object->pointer + area->offset),
-                                  (void *)(object->pointer + area->offset
-                                           + area->length), object, 0);
+                       scan_block((void *)area->start,
+                                  (void *)(area->start + area->size),
+                                  object, 0);
 out:
        spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+       struct kmemleak_object *object, *tmp;
+
+       /*
+        * The list traversal is safe for both tail additions and removals
+        * from inside the loop. The kmemleak objects cannot be freed from
+        * outside the loop because their use_count was incremented.
+        */
+       object = list_entry(gray_list.next, typeof(*object), gray_list);
+       while (&object->gray_list != &gray_list) {
+               cond_resched();
+
+               /* may add new objects to the list */
+               if (!scan_should_stop())
+                       scan_object(object);
+
+               tmp = list_entry(object->gray_list.next, typeof(*object),
+                                gray_list);
+
+               /* remove the object from the list and release it */
+               list_del(&object->gray_list);
+               put_object(object);
+
+               object = tmp;
+       }
+       WARN_ON(!list_empty(&gray_list));
+}
+
+/*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
  * scan_mutex held.
@@ -1090,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
        unsigned long flags;
-       struct kmemleak_object *object, *tmp;
+       struct kmemleak_object *object;
        int i;
        int new_leaks = 0;
-       int gray_list_pass = 0;
 
        jiffies_last_scan = jiffies;
 
@@ -1114,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
                /* reset the reference count (whiten the object) */
                object->count = 0;
-               object->flags &= ~OBJECT_NEW;
                if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);
 
@@ -1172,62 +1213,36 @@
 
        /*
         * Scan the objects already referenced from the sections scanned
-        * above. More objects will be referenced and, if there are no memory
-        * leaks, all the objects will be scanned. The list traversal is safe
-        * for both tail additions and removals from inside the loop. The
-        * kmemleak objects cannot be freed from outside the loop because their
-        * use_count was increased.
+        * above.
         */
-repeat:
-       object = list_entry(gray_list.next, typeof(*object), gray_list);
-       while (&object->gray_list != &gray_list) {
-               cond_resched();
-
-               /* may add new objects to the list */
-               if (!scan_should_stop())
-                       scan_object(object);
-
-               tmp = list_entry(object->gray_list.next, typeof(*object),
-                                gray_list);
-
-               /* remove the object from the list and release it */
-               list_del(&object->gray_list);
-               put_object(object);
-
-               object = tmp;
-       }
-
-       if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-               goto scan_end;
+       scan_gray_list();
 
        /*
-        * Check for new objects allocated during this scanning and add them
-        * to the gray list.
+        * Check for new or unreferenced objects modified since the previous
+        * scan and color them gray until the next scan.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
-               if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-                   get_object(object)) {
-                       object->flags &= ~OBJECT_NEW;
+               if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+                   && update_checksum(object) && get_object(object)) {
+                       /* color it gray temporarily */
+                       object->count = object->min_count;
                        list_add_tail(&object->gray_list, &gray_list);
                }
                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
-       if (!list_empty(&gray_list))
-               goto repeat;
-
-scan_end:
-       WARN_ON(!list_empty(&gray_list));
+       /*
+        * Re-scan the gray list for modified unreferenced objects.
+        */
+       scan_gray_list();
 
        /*
-        * If scanning was stopped or new objects were being allocated at a
-        * higher rate than gray list scanning, do not report any new
-        * unreferenced objects.
+        * If scanning was stopped do not report any new unreferenced objects.
         */
-       if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+       if (scan_should_stop())
                return;
 
        /*
@@ -1642,8 +1657,7 @@ void __init kmemleak_init(void)
                        kmemleak_ignore(log->ptr);
                        break;
                case KMEMLEAK_SCAN_AREA:
-                       kmemleak_scan_area(log->ptr, log->offset, log->length,
-                                          GFP_KERNEL);
+                       kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
                        break;
                case KMEMLEAK_NO_SCAN:
                        kmemleak_no_scan(log->ptr);
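With the update_checksum() helper added above, an object that is still white after a full pass is only re-queued for scanning if its contents changed since the previous scan, detected by comparing CRC32 checksums (hence the CRC32 select in Kconfig). A stripped-down sketch of the same change-detection idea follows; struct tracked and tracked_modified() are illustrative names, not from this patch.

#include <linux/crc32.h>
#include <linux/types.h>

struct tracked {
        const void *ptr;        /* start of the memory block */
        size_t size;            /* block size in bytes */
        u32 checksum;           /* CRC of the block at the last scan */
};

/* returns true if the block was written to since the previous call */
static bool tracked_modified(struct tracked *t)
{
        u32 old_csum = t->checksum;

        t->checksum = crc32(0, t->ptr, t->size);
        return t->checksum != old_csum;
}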
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        /*
         * Determine if the slab management is 'on' or 'off' slab.
         * (bootstrapping cannot cope with offslab caches so don't do
-        * it too early on.)
+        * it too early on. Always use on-slab management when
+        * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
         */
-       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+           !(flags & SLAB_NOLEAKTRACE))
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                 * kmemleak does not treat the ->s_mem pointer as a reference
                 * to the object. Otherwise we will not report the leak.
                 */
-               kmemleak_scan_area(slabp, offsetof(struct slab, list),
-                                  sizeof(struct list_head), local_flags);
+               kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+                                  local_flags);
                if (!slabp)
                        return NULL;
        } else {
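The point of the narrowed call in the mm/slab.c hunk above is that only the descriptor's list linkage is registered for scanning, so the descriptor's pointer back to the memory it manages (->s_mem) does not count as a reference and a leak of that memory can still be reported. A hypothetical sketch of the same pattern, with struct desc and desc_register() as made-up names:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/list.h>

struct desc {
        struct list_head list;  /* scanned: keeps list neighbours alive */
        void *managed;          /* deliberately NOT scanned */
};

static void desc_register(struct desc *d)
{
        /* scan only the link field, not the whole descriptor */
        kmemleak_scan_area(&d->list, sizeof(d->list), GFP_KERNEL);
}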
