Diffstat (limited to 'mm/kmemleak.c')

 mm/kmemleak.c | 49 +++++++++++++++++++++++----------------------------
 1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8bf765c4f58d..96106358e042 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
         struct hlist_node node;
-        unsigned long offset;
-        size_t length;
+        unsigned long start;
+        size_t size;
 };
 
 #define KMEMLEAK_GREY   0
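For orientation, a minimal userspace sketch (not kernel code) of what the field change means for the scanner: the old pair described a range relative to the enclosing object, while the new pair records the absolute start address and size, so the range no longer depends on the object's base pointer. The structure names below are simplified stand-ins.

#include <stddef.h>

struct object_info   { unsigned long pointer; size_t size; };  /* enclosing block */
struct scan_area_old { unsigned long offset;  size_t length; }; /* relative form */
struct scan_area_new { unsigned long start;   size_t size; };   /* absolute form */

/* old scheme: the object's base pointer is needed to compute the range */
static unsigned long old_end(const struct object_info *obj,
                             const struct scan_area_old *a)
{
        return obj->pointer + a->offset + a->length;
}

/* new scheme: the range is self-contained */
static unsigned long new_end(const struct scan_area_new *a)
{
        return a->start + a->size;
}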
@@ -241,8 +241,6 @@ struct early_log {
         const void *ptr;                /* allocated/freed memory block */
         size_t size;                    /* memory block size */
         int min_count;                  /* minimum reference count */
-        unsigned long offset;           /* scan area offset */
-        size_t length;                  /* scan area length */
         unsigned long trace[MAX_TRACE]; /* stack trace */
         unsigned int trace_len;         /* stack trace length */
 };
@@ -720,14 +718,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-                          size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
         unsigned long flags;
         struct kmemleak_object *object;
         struct kmemleak_scan_area *area;
 
-        object = find_and_get_object(ptr, 0);
+        object = find_and_get_object(ptr, 1);
         if (!object) {
                 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                               ptr);
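The second argument of find_and_get_object() is the alias flag; switching it from 0 to 1 lets the lookup succeed when ptr points somewhere inside an object rather than exactly at its start, which is needed now that callers pass the scan area's own address. A rough sketch of that containment test, assuming a plain object record rather than kmemleak's real lookup tree:

#include <stdbool.h>
#include <stddef.h>

struct obj { unsigned long pointer; size_t size; };

/* alias == 0: ptr must be the object's start address
 * alias != 0: ptr may point anywhere inside [pointer, pointer + size)
 */
static bool obj_matches(const struct obj *o, unsigned long ptr, int alias)
{
        if (ptr < o->pointer || ptr >= o->pointer + o->size)
                return false;
        return alias || ptr == o->pointer;
}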
@@ -741,7 +738,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
         }
 
         spin_lock_irqsave(&object->lock, flags);
-        if (offset + length > object->size) {
+        if (ptr + size > object->pointer + object->size) {
                 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                 dump_object_info(object);
                 kmem_cache_free(scan_area_cache, area);
@@ -749,8 +746,8 @@
         }
 
         INIT_HLIST_NODE(&area->node);
-        area->offset = offset;
-        area->length = length;
+        area->start = ptr;
+        area->size = size;
 
         hlist_add_head(&area->node, &object->area_list);
 out_unlock:
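Taken together, the validation and bookkeeping in add_scan_area() now work entirely in absolute addresses: the area's end is checked against the object's end, then the absolute start and size are recorded. A condensed illustrative sketch of that logic, with locking and error reporting omitted:

#include <stdbool.h>
#include <stddef.h>

struct obj       { unsigned long pointer; size_t size; };
struct scan_area { unsigned long start;   size_t size; };

/* Reject areas that run past the end of the enclosing object, otherwise
 * record the absolute start address and size.
 */
static bool add_area(const struct obj *o, struct scan_area *area,
                     unsigned long ptr, size_t size)
{
        if (ptr + size > o->pointer + o->size)
                return false;   /* "Scan area larger than object" */
        area->start = ptr;
        area->size = size;
        return true;
}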
@@ -786,7 +783,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-                             int min_count, unsigned long offset, size_t length)
+                             int min_count)
 {
         unsigned long flags;
         struct early_log *log;
@@ -808,8 +805,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
         log->ptr = ptr;
         log->size = size;
         log->min_count = min_count;
-        log->offset = offset;
-        log->length = length;
         if (op_type == KMEMLEAK_ALLOC)
                 log->trace_len = __save_stack_trace(log->trace);
         crt_early_log++;
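With the dedicated offset/length fields gone, an early KMEMLEAK_SCAN_AREA request can ride entirely in the generic fields: ptr holds the area's start address, size its length, and min_count is unused. A minimal sketch of the recording side under that assumption (the names and the fixed-size buffer are illustrative, not the kernel's):

#include <stddef.h>

enum op { OP_ALLOC, OP_SCAN_AREA /* ... */ };

struct early_entry {
        enum op op_type;
        const void *ptr;        /* block address, or scan-area start */
        size_t size;            /* block size, or scan-area size */
        int min_count;          /* unused for scan-area requests */
};

static struct early_entry early_log[32];
static int crt_early_log;

static void record_scan_area(const void *start, size_t size)
{
        struct early_entry *log;

        if (crt_early_log >= 32)
                return;         /* early-log buffer full */
        log = &early_log[crt_early_log++];
        log->op_type = OP_SCAN_AREA;
        log->ptr = start;
        log->size = size;
        log->min_count = 0;
}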
@@ -858,7 +853,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 create_object((unsigned long)ptr, size, min_count, gfp);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +868,7 @@ void __ref kmemleak_free(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 delete_object_full((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +883,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 delete_object_part((unsigned long)ptr, size);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +898,7 @@ void __ref kmemleak_not_leak(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 make_gray_object((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +914,21 @@ void __ref kmemleak_ignore(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 make_black_object((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-                              size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-                add_scan_area((unsigned long)ptr, offset, length, gfp);
+                add_scan_area((unsigned long)ptr, size, gfp);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
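For callers, the new prototype takes the address and size of the area itself instead of an object pointer plus offset/length. A hypothetical user of the new signature (struct foo and foo_create() are made up for illustration, not part of this patch) that wants kmemleak to scan only the one pointer-bearing field of a larger allocation:

#include <linux/slab.h>
#include <linux/kmemleak.h>

struct foo {
        char buf[128];          /* raw data, no pointers to follow */
        void *ref;              /* the only reference kmemleak must see */
};

static struct foo *foo_create(void)
{
        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;
        /* new signature: absolute start address and size of the scan area */
        kmemleak_scan_area(&f->ref, sizeof(f->ref), GFP_KERNEL);
        return f;
}

With the old prototype the same caller would have passed f together with offsetof(struct foo, ref) and sizeof(f->ref).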
@@ -948,7 +942,7 @@ void __ref kmemleak_no_scan(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 object_no_scan((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -1075,9 +1069,9 @@ static void scan_object(struct kmemleak_object *object)
                 }
         } else
                 hlist_for_each_entry(area, elem, &object->area_list, node)
-                        scan_block((void *)(object->pointer + area->offset),
-                                   (void *)(object->pointer + area->offset
-                                            + area->length), object, 0);
+                        scan_block((void *)area->start,
+                                   (void *)(area->start + area->size),
+                                   object, 0);
 out:
         spin_unlock_irqrestore(&object->lock, flags);
 }
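As a result, the scanning loop can hand the stored range straight to scan_block() without consulting object->pointer. A simplified userspace model of that loop (plain array iteration rather than the kernel's hlist walk):

#include <stddef.h>

struct scan_area { unsigned long start; size_t size; };

/* placeholder for the real pointer-scanning routine */
static void scan_block(void *start, void *end)
{
        (void)start;
        (void)end;
}

static void scan_object_areas(const struct scan_area *areas, size_t n)
{
        for (size_t i = 0; i < n; i++)
                scan_block((void *)areas[i].start,
                           (void *)(areas[i].start + areas[i].size));
}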
@@ -1642,8 +1636,7 @@ void __init kmemleak_init(void)
                         kmemleak_ignore(log->ptr);
                         break;
                 case KMEMLEAK_SCAN_AREA:
-                        kmemleak_scan_area(log->ptr, log->offset, log->length,
-                                           GFP_KERNEL);
+                        kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
                         break;
                 case KMEMLEAK_NO_SCAN:
                         kmemleak_no_scan(log->ptr);