author	Catalin Marinas <catalin.marinas@arm.com>	2019-09-23 18:34:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 18:54:07 -0400
commit	c5665868183fec689dbab9fb8505188b2c4f0757 (patch)
tree	55c3277578a7444491e77fb3887e9783d3679e82
parent	0647398a8c7bd55e0b7565c5076e86b7c3c204c5 (diff)
mm: kmemleak: use the memory pool for early allocations
Currently kmemleak uses a static early_log buffer to trace all memory
allocation/freeing before the slab allocator is initialised. This early
log is replayed during kmemleak_init() to properly initialise the
kmemleak metadata for objects allocated up to that point. With a memory
pool that does not rely on the slab allocator, it is possible to skip
this early log entirely.

In order to remove the early logging, consider kmemleak_enabled == 1 by
default while the kmem_cache availability is checked directly on the
object_cache and scan_area_cache variables. The RCU callback is only
invoked after object_cache has been initialised, as we wouldn't have any
concurrent list traversal before this.

In order to reduce the number of callbacks before kmemleak is fully
initialised, move the kmemleak_init() call to mm_init().

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: remove WARN_ON(), per Catalin]
Link: http://lkml.kernel.org/r/20190812160642.52134-4-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	init/main.c	2
-rw-r--r--	lib/Kconfig.debug	11
-rw-r--r--	mm/kmemleak.c	265
3 files changed, 33 insertions, 245 deletions
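The core idea of the patch, before the diff itself: while object_cache is still NULL, kmemleak carves its tracking metadata directly out of the static mem_pool instead of logging the call for later replay; once the slab allocator is up, the pool remains as an emergency reserve. A minimal userspace sketch of this slab-first, pool-fallback pattern follows. It is illustrative only, with invented names (obj_alloc, obj_free, POOL_SIZE), not the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 8			/* stand-in for CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE */

struct object {
	struct object *next;		/* links returned pool slots, like mem_pool_free_list */
	char payload[32];
};

static struct object pool[POOL_SIZE];
static int pool_free_count = POOL_SIZE;	/* virgin slots not yet handed out */
static struct object *pool_free_list;	/* slots that were handed out and freed */
static int allocator_ready;		/* stand-in for object_cache != NULL */

static struct object *obj_alloc(void)
{
	struct object *obj;

	/* try the real allocator first, if it has been initialised */
	if (allocator_ready) {
		obj = malloc(sizeof(*obj));
		if (obj)
			return obj;
	}

	/* fall back to the static pool: reused slots first, then virgin ones */
	if (pool_free_list) {
		obj = pool_free_list;
		pool_free_list = obj->next;
		return obj;
	}
	if (pool_free_count)
		return &pool[--pool_free_count];

	fprintf(stderr, "pool empty, increase POOL_SIZE\n");
	return NULL;
}

static void obj_free(struct object *obj)
{
	/* pool slots go back on the free list; everything else came from malloc() */
	if (obj >= pool && obj < pool + POOL_SIZE) {
		obj->next = pool_free_list;
		pool_free_list = obj;
	} else {
		free(obj);
	}
}

int main(void)
{
	struct object *early = obj_alloc();	/* served from the static pool */

	allocator_ready = 1;			/* "slab is up" */
	struct object *late = obj_alloc();	/* served by malloc() */

	obj_free(early);
	obj_free(late);
	return 0;
}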
diff --git a/init/main.c b/init/main.c
index 653693da8da6..3ca67e8b92fd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -556,6 +556,7 @@ static void __init mm_init(void)
 	report_meminit();
 	mem_init();
 	kmem_cache_init();
+	kmemleak_init();
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
@@ -740,7 +741,6 @@ asmlinkage __visible void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
 	acpi_early_init();
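For clarity, the resulting early-init ordering in mm_init(), assembled from the hunk above (elisions marked): kmemleak_init() now runs immediately after kmem_cache_init(), so the KMEM_CACHE() calls it makes can succeed, and far earlier than its old slot in start_kernel().

static void __init mm_init(void)
{
	/* ... */
	mem_init();
	kmem_cache_init();	/* slab allocator is ready here ... */
	kmemleak_init();	/* ... so kmemleak can create its caches */
	pgtable_init();
	debug_objects_mem_init();
	vmalloc_init();
	/* ... */
}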
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3c88e54da86c..c6975cded461 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -576,17 +576,18 @@ config DEBUG_KMEMLEAK
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
-config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
-	int "Maximum kmemleak early log entries"
+config DEBUG_KMEMLEAK_MEM_POOL_SIZE
+	int "Kmemleak memory pool size"
 	depends on DEBUG_KMEMLEAK
 	range 200 40000
 	default 16000
 	help
 	  Kmemleak must track all the memory allocations to avoid
 	  reporting false positives. Since memory may be allocated or
-	  freed before kmemleak is initialised, an early log buffer is
-	  used to store these actions. If kmemleak reports "early log
-	  buffer exceeded", please increase this value.
+	  freed before kmemleak is fully initialised, use a static pool
+	  of metadata objects to track such callbacks. After kmemleak is
+	  fully initialised, this memory pool acts as an emergency one
+	  if slab allocations fail.
 
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
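For reference, a .config fragment with the renamed option (16000 is simply the default from the range above; raise it if the pool-empty warning added in mm/kmemleak.c below ever fires):

CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000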
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2fb86524d70b..b8bbe9ac5472 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -180,15 +180,13 @@ struct kmemleak_object {
 #define HEX_ASCII		1
 /* max number of lines to be printed */
 #define HEX_MAX_LINES		2
-/* memory pool size */
-#define MEM_POOL_SIZE		16000
 
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
 static LIST_HEAD(gray_list);
 /* memory pool allocation */
-static struct kmemleak_object mem_pool[MEM_POOL_SIZE];
+static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
 static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
@@ -201,13 +199,11 @@ static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static int kmemleak_enabled;
+static int kmemleak_enabled = 1;
 /* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
+static int kmemleak_free_enabled = 1;
 /* set in the late_initcall if there were no errors */
 static int kmemleak_initialized;
-/* enables or disables early logging of the memory operations */
-static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
 static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
@@ -235,49 +231,6 @@ static bool kmemleak_found_leaks;
 static bool kmemleak_verbose;
 module_param_named(verbose, kmemleak_verbose, bool, 0600);
 
-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
-	KMEMLEAK_ALLOC,
-	KMEMLEAK_ALLOC_PERCPU,
-	KMEMLEAK_FREE,
-	KMEMLEAK_FREE_PART,
-	KMEMLEAK_FREE_PERCPU,
-	KMEMLEAK_NOT_LEAK,
-	KMEMLEAK_IGNORE,
-	KMEMLEAK_SCAN_AREA,
-	KMEMLEAK_NO_SCAN,
-	KMEMLEAK_SET_EXCESS_REF
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
-	int op_type;			/* kmemleak operation type */
-	int min_count;			/* minimum reference count */
-	const void *ptr;		/* allocated/freed memory block */
-	union {
-		size_t size;		/* memory block size */
-		unsigned long excess_ref; /* surplus reference passing */
-	};
-	unsigned long trace[MAX_TRACE];	/* stack trace */
-	unsigned int trace_len;		/* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
-	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
-
 static void kmemleak_disable(void);
 
 /*
@@ -466,9 +419,11 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 	struct kmemleak_object *object;
 
 	/* try the slab allocator first */
-	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
-	if (object)
-		return object;
+	if (object_cache) {
+		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+		if (object)
+			return object;
+	}
 
 	/* slab allocation failed, try the memory pool */
 	write_lock_irqsave(&kmemleak_lock, flags);
@@ -478,6 +433,8 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 		list_del(&object->object_list);
 	else if (mem_pool_free_count)
 		object = &mem_pool[--mem_pool_free_count];
+	else
+		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
 	write_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
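The hunks above only touch the allocation side. For context, here is a sketch of the counterpart free path that makes mixed slab/pool objects work, inferred from the mem_pool and mem_pool_free_list declarations earlier in this diff; the actual helper was introduced by the parent commit 0647398a8c7b, so treat the exact shape and locking here as assumptions rather than quoted code:

/* Sketch (assumed shape, not quoted from this patch): objects whose
 * address falls inside mem_pool[] go back on the pool free list;
 * anything else must have come from object_cache. */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list for reuse */
	write_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);
}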
@@ -537,7 +494,15 @@ static void put_object(struct kmemleak_object *object)
 	/* should only get here after delete_object was called */
 	WARN_ON(object->flags & OBJECT_ALLOCATED);
 
-	call_rcu(&object->rcu, free_object_rcu);
+	/*
+	 * It may be too early for the RCU callbacks, however, there is no
+	 * concurrent object_list traversal when !object_cache and all objects
+	 * came from the memory pool. Free the object directly.
+	 */
+	if (object_cache)
+		call_rcu(&object->rcu, free_object_rcu);
+	else
+		free_object_rcu(&object->rcu);
 }
 
 /*
@@ -741,9 +706,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
 	/*
 	 * Create one or two objects that may result from the memory block
 	 * split. Note that partial freeing is only done by free_bootmem() and
-	 * this happens before kmemleak_init() is called. The path below is
-	 * only executed during early log recording in kmemleak_init(), so
-	 * GFP_KERNEL is enough.
+	 * this happens before kmemleak_init() is called.
 	 */
 	start = object->pointer;
 	end = object->pointer + object->size;
@@ -815,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
-	struct kmemleak_scan_area *area;
+	struct kmemleak_scan_area *area = NULL;
 
 	object = find_and_get_object(ptr, 1);
 	if (!object) {
@@ -824,7 +787,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 		return;
 	}
 
-	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
+	if (scan_area_cache)
+		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
 	spin_lock_irqsave(&object->lock, flags);
 	if (!area) {
@@ -898,86 +862,6 @@ static void object_no_scan(unsigned long ptr)
 	put_object(object);
 }
 
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count)
-{
-	unsigned long flags;
-	struct early_log *log;
-
-	if (kmemleak_error) {
-		/* kmemleak stopped recording, just count the requests */
-		crt_early_log++;
-		return;
-	}
-
-	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		crt_early_log++;
-		kmemleak_disable();
-		return;
-	}
-
-	/*
-	 * There is no need for locking since the kernel is still in UP mode
-	 * at this stage. Disabling the IRQs is enough.
-	 */
-	local_irq_save(flags);
-	log = &early_log[crt_early_log];
-	log->op_type = op_type;
-	log->ptr = ptr;
-	log->size = size;
-	log->min_count = min_count;
-	log->trace_len = __save_stack_trace(log->trace);
-	crt_early_log++;
-	local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
-	struct kmemleak_object *object;
-	unsigned long flags;
-	int i;
-
-	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
-		return;
-
-	/*
-	 * RCU locking needed to ensure object is not freed via put_object().
-	 */
-	rcu_read_lock();
-	object = create_object((unsigned long)log->ptr, log->size,
-			       log->min_count, GFP_ATOMIC);
-	if (!object)
-		goto out;
-	spin_lock_irqsave(&object->lock, flags);
-	for (i = 0; i < log->trace_len; i++)
-		object->trace[i] = log->trace[i];
-	object->trace_len = log->trace_len;
-	spin_unlock_irqrestore(&object->lock, flags);
-out:
-	rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
-	unsigned int cpu;
-	const void __percpu *ptr = log->ptr;
-
-	for_each_possible_cpu(cpu) {
-		log->ptr = per_cpu_ptr(ptr, cpu);
-		early_alloc(log);
-	}
-}
-
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr: pointer to beginning of the object
@@ -999,8 +883,6 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -1028,8 +910,6 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 		for_each_possible_cpu(cpu)
 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 				      size, 0, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -1054,11 +934,6 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
 		create_object((unsigned long)area->addr, size, 2, gfp);
 		object_set_excess_ref((unsigned long)area,
 				      (unsigned long)area->addr);
-	} else if (kmemleak_early_log) {
-		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
-		/* reusing early_log.size for storing area->addr */
-		log_early(KMEMLEAK_SET_EXCESS_REF,
-			  area, (unsigned long)area->addr, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
@@ -1076,8 +951,6 @@ void __ref kmemleak_free(const void *ptr)
 
 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -1096,8 +969,6 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1118,8 +989,6 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 		for_each_possible_cpu(cpu)
 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 								      cpu));
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
@@ -1170,8 +1039,6 @@ void __ref kmemleak_not_leak(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -1190,8 +1057,6 @@ void __ref kmemleak_ignore(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1212,8 +1077,6 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 
 	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
 		add_scan_area((unsigned long)ptr, size, gfp);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -1232,8 +1095,6 @@ void __ref kmemleak_no_scan(const void *ptr)
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
-	else if (kmemleak_early_log)
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -2020,7 +1881,6 @@ static void kmemleak_disable(void)
 
 	/* stop any memory operation tracing */
 	kmemleak_enabled = 0;
-	kmemleak_early_log = 0;
 
 	/* check whether it is too early for a kernel thread */
 	if (kmemleak_initialized)
@@ -2048,20 +1908,11 @@ static int __init kmemleak_boot_config(char *str)
 }
 early_param("kmemleak", kmemleak_boot_config);
 
-static void __init print_log_trace(struct early_log *log)
-{
-	pr_notice("Early log backtrace:\n");
-	stack_trace_print(log->trace, log->trace_len, 2);
-}
-
 /*
  * Kmemleak initialization.
  */
 void __init kmemleak_init(void)
 {
-	int i;
-	unsigned long flags;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
 		kmemleak_disable();
@@ -2069,28 +1920,15 @@ void __init kmemleak_init(void)
 	}
 #endif
 
+	if (kmemleak_error)
+		return;
+
 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 
-	if (crt_early_log > ARRAY_SIZE(early_log))
-		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
-			crt_early_log);
-
-	/* the kernel is still in UP mode, so disabling the IRQs is enough */
-	local_irq_save(flags);
-	kmemleak_early_log = 0;
-	if (kmemleak_error) {
-		local_irq_restore(flags);
-		return;
-	} else {
-		kmemleak_enabled = 1;
-		kmemleak_free_enabled = 1;
-	}
-	local_irq_restore(flags);
-
 	/* register the data/bss sections */
 	create_object((unsigned long)_sdata, _edata - _sdata,
 		      KMEMLEAK_GREY, GFP_ATOMIC);
@@ -2101,57 +1939,6 @@ void __init kmemleak_init(void)
 	create_object((unsigned long)__start_ro_after_init,
 		      __end_ro_after_init - __start_ro_after_init,
 		      KMEMLEAK_GREY, GFP_ATOMIC);
-
-	/*
-	 * This is the point where tracking allocations is safe. Automatic
-	 * scanning is started during the late initcall. Add the early logged
-	 * callbacks to the kmemleak infrastructure.
-	 */
-	for (i = 0; i < crt_early_log; i++) {
-		struct early_log *log = &early_log[i];
-
-		switch (log->op_type) {
-		case KMEMLEAK_ALLOC:
-			early_alloc(log);
-			break;
-		case KMEMLEAK_ALLOC_PERCPU:
-			early_alloc_percpu(log);
-			break;
-		case KMEMLEAK_FREE:
-			kmemleak_free(log->ptr);
-			break;
-		case KMEMLEAK_FREE_PART:
-			kmemleak_free_part(log->ptr, log->size);
-			break;
-		case KMEMLEAK_FREE_PERCPU:
-			kmemleak_free_percpu(log->ptr);
-			break;
-		case KMEMLEAK_NOT_LEAK:
-			kmemleak_not_leak(log->ptr);
-			break;
-		case KMEMLEAK_IGNORE:
-			kmemleak_ignore(log->ptr);
-			break;
-		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
-			break;
-		case KMEMLEAK_NO_SCAN:
-			kmemleak_no_scan(log->ptr);
-			break;
-		case KMEMLEAK_SET_EXCESS_REF:
-			object_set_excess_ref((unsigned long)log->ptr,
-					      log->excess_ref);
-			break;
-		default:
-			kmemleak_warn("Unknown early log operation: %d\n",
-				      log->op_type);
-		}
-
-		if (kmemleak_warning) {
-			print_log_trace(log);
-			kmemleak_warning = 0;
-		}
-	}
 }
 
 /*