Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r--  mm/kmemleak.c  326
1 file changed, 84 insertions(+), 242 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f6e602918dac..03a8d84badad 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -168,6 +168,8 @@ struct kmemleak_object {
 #define OBJECT_REPORTED	(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
+/* flag set to fully scan the object when scan_area allocation failed */
+#define OBJECT_FULL_SCAN	(1 << 3)
 
 #define HEX_PREFIX		" "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -183,6 +185,10 @@ struct kmemleak_object {
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
 static LIST_HEAD(gray_list);
+/* memory pool allocation */
+static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
+static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
+static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* rw_lock protecting the access to object_list and object_tree_root */
@@ -193,13 +199,11 @@ static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static int kmemleak_enabled;
+static int kmemleak_enabled = 1;
 /* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
+static int kmemleak_free_enabled = 1;
 /* set in the late_initcall if there were no errors */
 static int kmemleak_initialized;
-/* enables or disables early logging of the memory operations */
-static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
 static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
@@ -227,49 +231,6 @@ static bool kmemleak_found_leaks;
 static bool kmemleak_verbose;
 module_param_named(verbose, kmemleak_verbose, bool, 0600);
 
-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
-	KMEMLEAK_ALLOC,
-	KMEMLEAK_ALLOC_PERCPU,
-	KMEMLEAK_FREE,
-	KMEMLEAK_FREE_PART,
-	KMEMLEAK_FREE_PERCPU,
-	KMEMLEAK_NOT_LEAK,
-	KMEMLEAK_IGNORE,
-	KMEMLEAK_SCAN_AREA,
-	KMEMLEAK_NO_SCAN,
-	KMEMLEAK_SET_EXCESS_REF
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
-	int op_type;			/* kmemleak operation type */
-	int min_count;			/* minimum reference count */
-	const void *ptr;		/* allocated/freed memory block */
-	union {
-		size_t size;		/* memory block size */
-		unsigned long excess_ref; /* surplus reference passing */
-	};
-	unsigned long trace[MAX_TRACE];	/* stack trace */
-	unsigned int trace_len;		/* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
-	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
-
 static void kmemleak_disable(void);
 
 /*
@@ -450,6 +411,54 @@ static int get_object(struct kmemleak_object *object)
 }
 
 /*
+ * Memory pool allocation and freeing. kmemleak_lock must not be held.
+ */
+static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+{
+	unsigned long flags;
+	struct kmemleak_object *object;
+
+	/* try the slab allocator first */
+	if (object_cache) {
+		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+		if (object)
+			return object;
+	}
+
+	/* slab allocation failed, try the memory pool */
+	write_lock_irqsave(&kmemleak_lock, flags);
+	object = list_first_entry_or_null(&mem_pool_free_list,
+					  typeof(*object), object_list);
+	if (object)
+		list_del(&object->object_list);
+	else if (mem_pool_free_count)
+		object = &mem_pool[--mem_pool_free_count];
+	else
+		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
+	write_unlock_irqrestore(&kmemleak_lock, flags);
+
+	return object;
+}
+
+/*
+ * Return the object to either the slab allocator or the memory pool.
+ */
+static void mem_pool_free(struct kmemleak_object *object)
+{
+	unsigned long flags;
+
+	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
+		kmem_cache_free(object_cache, object);
+		return;
+	}
+
+	/* add the object to the memory pool free list */
+	write_lock_irqsave(&kmemleak_lock, flags);
+	list_add(&object->object_list, &mem_pool_free_list);
+	write_unlock_irqrestore(&kmemleak_lock, flags);
+}
+
+/*
  * RCU callback to free a kmemleak_object.
  */
 static void free_object_rcu(struct rcu_head *rcu)
@@ -467,7 +476,7 @@ static void free_object_rcu(struct rcu_head *rcu)
 		hlist_del(&area->node);
 		kmem_cache_free(scan_area_cache, area);
 	}
-	kmem_cache_free(object_cache, object);
+	mem_pool_free(object);
 }
 
 /*
@@ -485,7 +494,15 @@ static void put_object(struct kmemleak_object *object)
 	/* should only get here after delete_object was called */
 	WARN_ON(object->flags & OBJECT_ALLOCATED);
 
-	call_rcu(&object->rcu, free_object_rcu);
+	/*
+	 * It may be too early for the RCU callbacks, however, there is no
+	 * concurrent object_list traversal when !object_cache and all objects
+	 * came from the memory pool. Free the object directly.
+	 */
+	if (object_cache)
+		call_rcu(&object->rcu, free_object_rcu);
+	else
+		free_object_rcu(&object->rcu);
 }
 
 /*
@@ -550,7 +567,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	struct rb_node **link, *rb_parent;
 	unsigned long untagged_ptr;
 
-	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+	object = mem_pool_alloc(gfp);
 	if (!object) {
 		pr_warn("Cannot allocate a kmemleak_object structure\n");
 		kmemleak_disable();
@@ -689,9 +706,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
 	/*
 	 * Create one or two objects that may result from the memory block
 	 * split. Note that partial freeing is only done by free_bootmem() and
-	 * this happens before kmemleak_init() is called. The path below is
-	 * only executed during early log recording in kmemleak_init(), so
-	 * GFP_KERNEL is enough.
+	 * this happens before kmemleak_init() is called.
 	 */
 	start = object->pointer;
 	end = object->pointer + object->size;
@@ -763,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
-	struct kmemleak_scan_area *area;
+	struct kmemleak_scan_area *area = NULL;
 
 	object = find_and_get_object(ptr, 1);
 	if (!object) {
@@ -772,13 +787,16 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 		return;
 	}
 
-	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
-	if (!area) {
-		pr_warn("Cannot allocate a scan area\n");
-		goto out;
-	}
+	if (scan_area_cache)
+		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
 	spin_lock_irqsave(&object->lock, flags);
+	if (!area) {
+		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+		/* mark the object for full scan to avoid false positives */
+		object->flags |= OBJECT_FULL_SCAN;
+		goto out_unlock;
+	}
 	if (size == SIZE_MAX) {
 		size = object->pointer + object->size - ptr;
 	} else if (ptr + size > object->pointer + object->size) {
@@ -795,7 +813,6 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
 	spin_unlock_irqrestore(&object->lock, flags);
-out:
 	put_object(object);
 }
 
@@ -845,86 +862,6 @@ static void object_no_scan(unsigned long ptr)
 	put_object(object);
 }
 
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count)
-{
-	unsigned long flags;
-	struct early_log *log;
-
-	if (kmemleak_error) {
-		/* kmemleak stopped recording, just count the requests */
-		crt_early_log++;
-		return;
-	}
-
-	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		crt_early_log++;
-		kmemleak_disable();
-		return;
-	}
-
-	/*
-	 * There is no need for locking since the kernel is still in UP mode
-	 * at this stage. Disabling the IRQs is enough.
-	 */
-	local_irq_save(flags);
-	log = &early_log[crt_early_log];
-	log->op_type = op_type;
-	log->ptr = ptr;
-	log->size = size;
-	log->min_count = min_count;
-	log->trace_len = __save_stack_trace(log->trace);
-	crt_early_log++;
-	local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
-	struct kmemleak_object *object;
-	unsigned long flags;
-	int i;
-
-	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
-		return;
-
-	/*
-	 * RCU locking needed to ensure object is not freed via put_object().
-	 */
-	rcu_read_lock();
-	object = create_object((unsigned long)log->ptr, log->size,
-			       log->min_count, GFP_ATOMIC);
-	if (!object)
-		goto out;
-	spin_lock_irqsave(&object->lock, flags);
-	for (i = 0; i < log->trace_len; i++)
-		object->trace[i] = log->trace[i];
-	object->trace_len = log->trace_len;
-	spin_unlock_irqrestore(&object->lock, flags);
-out:
-	rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
-	unsigned int cpu;
-	const void __percpu *ptr = log->ptr;
-
-	for_each_possible_cpu(cpu) {
-		log->ptr = per_cpu_ptr(ptr, cpu);
-		early_alloc(log);
-	}
-}
-
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr: pointer to beginning of the object
@@ -946,8 +883,6 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -975,8 +910,6 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 		for_each_possible_cpu(cpu)
 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 				      size, 0, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -1001,11 +934,6 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
 		create_object((unsigned long)area->addr, size, 2, gfp);
 		object_set_excess_ref((unsigned long)area,
 				      (unsigned long)area->addr);
-	} else if (kmemleak_early_log) {
-		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
-		/* reusing early_log.size for storing area->addr */
-		log_early(KMEMLEAK_SET_EXCESS_REF,
-			  area, (unsigned long)area->addr, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
@@ -1023,8 +951,6 @@ void __ref kmemleak_free(const void *ptr)
 
 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -1043,8 +969,6 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1065,8 +989,6 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 		for_each_possible_cpu(cpu)
 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 								      cpu));
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
@@ -1117,8 +1039,6 @@ void __ref kmemleak_not_leak(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -1137,8 +1057,6 @@ void __ref kmemleak_ignore(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1159,8 +1077,6 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 
 	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
 		add_scan_area((unsigned long)ptr, size, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -1179,8 +1095,6 @@ void __ref kmemleak_no_scan(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -1408,7 +1322,8 @@ static void scan_object(struct kmemleak_object *object)
 	if (!(object->flags & OBJECT_ALLOCATED))
 		/* already freed object */
 		goto out;
-	if (hlist_empty(&object->area_list)) {
+	if (hlist_empty(&object->area_list) ||
+	    object->flags & OBJECT_FULL_SCAN) {
 		void *start = (void *)object->pointer;
 		void *end = (void *)(object->pointer + object->size);
 		void *next;
@@ -1966,7 +1881,6 @@ static void kmemleak_disable(void)
 
 	/* stop any memory operation tracing */
 	kmemleak_enabled = 0;
-	kmemleak_early_log = 0;
 
 	/* check whether it is too early for a kernel thread */
 	if (kmemleak_initialized)
@@ -1994,20 +1908,11 @@ static int __init kmemleak_boot_config(char *str)
 }
 early_param("kmemleak", kmemleak_boot_config);
 
-static void __init print_log_trace(struct early_log *log)
-{
-	pr_notice("Early log backtrace:\n");
-	stack_trace_print(log->trace, log->trace_len, 2);
-}
-
 /*
  * Kmemleak initialization.
  */
 void __init kmemleak_init(void)
 {
-	int i;
-	unsigned long flags;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
 		kmemleak_disable();
@@ -2015,28 +1920,15 @@ void __init kmemleak_init(void)
 	}
 #endif
 
+	if (kmemleak_error)
+		return;
+
 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 
-	if (crt_early_log > ARRAY_SIZE(early_log))
-		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
-			crt_early_log);
-
-	/* the kernel is still in UP mode, so disabling the IRQs is enough */
-	local_irq_save(flags);
-	kmemleak_early_log = 0;
-	if (kmemleak_error) {
-		local_irq_restore(flags);
-		return;
-	} else {
-		kmemleak_enabled = 1;
-		kmemleak_free_enabled = 1;
-	}
-	local_irq_restore(flags);
-
 	/* register the data/bss sections */
 	create_object((unsigned long)_sdata, _edata - _sdata,
 		      KMEMLEAK_GREY, GFP_ATOMIC);
@@ -2047,57 +1939,6 @@ void __init kmemleak_init(void)
 	create_object((unsigned long)__start_ro_after_init,
 		      __end_ro_after_init - __start_ro_after_init,
 		      KMEMLEAK_GREY, GFP_ATOMIC);
-
-	/*
-	 * This is the point where tracking allocations is safe. Automatic
-	 * scanning is started during the late initcall. Add the early logged
-	 * callbacks to the kmemleak infrastructure.
-	 */
-	for (i = 0; i < crt_early_log; i++) {
-		struct early_log *log = &early_log[i];
-
-		switch (log->op_type) {
-		case KMEMLEAK_ALLOC:
-			early_alloc(log);
-			break;
-		case KMEMLEAK_ALLOC_PERCPU:
-			early_alloc_percpu(log);
-			break;
-		case KMEMLEAK_FREE:
-			kmemleak_free(log->ptr);
-			break;
-		case KMEMLEAK_FREE_PART:
-			kmemleak_free_part(log->ptr, log->size);
-			break;
-		case KMEMLEAK_FREE_PERCPU:
-			kmemleak_free_percpu(log->ptr);
-			break;
-		case KMEMLEAK_NOT_LEAK:
-			kmemleak_not_leak(log->ptr);
-			break;
-		case KMEMLEAK_IGNORE:
-			kmemleak_ignore(log->ptr);
-			break;
-		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
-			break;
-		case KMEMLEAK_NO_SCAN:
-			kmemleak_no_scan(log->ptr);
-			break;
-		case KMEMLEAK_SET_EXCESS_REF:
-			object_set_excess_ref((unsigned long)log->ptr,
-					      log->excess_ref);
-			break;
-		default:
-			kmemleak_warn("Unknown early log operation: %d\n",
-				      log->op_type);
-		}
-
-		if (kmemleak_warning) {
-			print_log_trace(log);
-			kmemleak_warning = 0;
-		}
-	}
 }
 
 /*
@@ -2126,7 +1967,8 @@ static int __init kmemleak_late_init(void)
 		mutex_unlock(&scan_mutex);
 	}
 
-	pr_info("Kernel memory leak detector initialized\n");
+	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
+		mem_pool_free_count);
 
 	return 0;
 }