-rw-r--r--	include/linux/kmemleak.h |   4
-rw-r--r--	include/linux/slub_def.h |   2
-rw-r--r--	kernel/pid.c             |   7
-rw-r--r--	mm/bootmem.c             |   6
-rw-r--r--	mm/kmemleak.c            | 236
-rw-r--r--	mm/page_alloc.c          |  14
-rw-r--r--	mm/slub.c                |  10
7 files changed, 185 insertions, 94 deletions
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 7796aed6cdd5..6a63807f714e 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -27,6 +27,7 @@ extern void kmemleak_init(void);
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp);
 extern void kmemleak_free(const void *ptr);
+extern void kmemleak_free_part(const void *ptr, size_t size);
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
			     size_t size);
 extern void kmemleak_not_leak(const void *ptr);
@@ -71,6 +72,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
 static inline void kmemleak_free(const void *ptr)
 {
 }
+static inline void kmemleak_free_part(const void *ptr, size_t size)
+{
+}
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4dcbc2c71491..c1c862b1d01a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,6 +11,7 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -233,6 +234,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
+	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
	return ret;
diff --git a/kernel/pid.c b/kernel/pid.c
index 5fa1db48d8b7..31310b5d3f50 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,7 +36,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
-#include <linux/kmemleak.h>
 
 #define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -513,12 +512,6 @@ void __init pidhash_init(void)
	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
-	/*
-	 * pid_hash contains references to allocated struct pid objects and it
-	 * must be scanned by kmemleak to avoid false positives.
-	 */
-	kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
-		       GFP_KERNEL);
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
 }
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d2a9ce952768..701740c9e81b 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -12,6 +12,7 @@
 #include <linux/pfn.h>
 #include <linux/bootmem.h>
 #include <linux/module.h>
+#include <linux/kmemleak.h>
 
 #include <asm/bug.h>
 #include <asm/io.h>
@@ -335,6 +336,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
	unsigned long start, end;
 
+	kmemleak_free_part(__va(physaddr), size);
+
	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);
 
@@ -354,6 +357,8 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 {
	unsigned long start, end;
 
+	kmemleak_free_part(__va(addr), size);
+
	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);
 
@@ -516,6 +521,7 @@ find_block:
		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
+		kmemleak_alloc(region, size, 1, 0);
		return region;
	}
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e766e1da09d2..5aabd41ffb8f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -103,10 +103,10 @@
  * Kmemleak configuration and common defines.
  */
 #define MAX_TRACE		16	/* stack trace length */
-#define REPORTS_NR		50	/* maximum number of reported leaks */
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
 
 #define BYTES_PER_POINTER	sizeof(void *)
 
@@ -158,6 +158,8 @@ struct kmemleak_object {
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW		(1 << 3)
 
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
@@ -196,9 +198,6 @@ static int kmemleak_stack_scan = 1;
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
 
-/* number of leaks reported (for limitation purposes) */
-static int reported_leaks;
-
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
  * kernel allocator. However, both the kernel allocator and kmemleak may
@@ -211,6 +210,7 @@ static int reported_leaks;
 enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
+	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
@@ -274,6 +274,11 @@ static int color_gray(const struct kmemleak_object *object)
	return object->min_count != -1 && object->count >= object->min_count;
 }
 
+static int color_black(const struct kmemleak_object *object)
+{
+	return object->min_count == -1;
+}
+
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -451,7 +456,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
@@ -519,27 +524,17 @@ out:
  * Remove the metadata (struct kmemleak_object) for a memory block from the
  * object_list and object_tree_root and decrement its use_count.
  */
-static void delete_object(unsigned long ptr)
+static void __delete_object(struct kmemleak_object *object)
 {
	unsigned long flags;
-	struct kmemleak_object *object;
 
	write_lock_irqsave(&kmemleak_lock, flags);
-	object = lookup_object(ptr, 0);
-	if (!object) {
-#ifdef DEBUG
-		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-			      ptr);
-#endif
-		write_unlock_irqrestore(&kmemleak_lock, flags);
-		return;
-	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);
 
	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
-	WARN_ON(atomic_read(&object->use_count) < 1);
+	WARN_ON(atomic_read(&object->use_count) < 2);
 
	/*
	 * Locking here also ensures that the corresponding memory block
@@ -552,6 +547,64 @@ static void delete_object(unsigned long ptr)
 }
 
 /*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it.
+ */
+static void delete_object_full(unsigned long ptr)
+{
+	struct kmemleak_object *object;
+
+	object = find_and_get_object(ptr, 0);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
+			      ptr);
+#endif
+		return;
+	}
+	__delete_object(object);
+	put_object(object);
+}
+
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it. If the memory block is partially freed, the function may create
+ * additional metadata for the remaining parts of the block.
+ */
+static void delete_object_part(unsigned long ptr, size_t size)
+{
+	struct kmemleak_object *object;
+	unsigned long start, end;
+
+	object = find_and_get_object(ptr, 1);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
+			      "(size %zu)\n", ptr, size);
+#endif
+		return;
+	}
+	__delete_object(object);
+
+	/*
+	 * Create one or two objects that may result from the memory block
+	 * split. Note that partial freeing is only done by free_bootmem() and
+	 * this happens before kmemleak_init() is called. The path below is
+	 * only executed during early log recording in kmemleak_init(), so
+	 * GFP_KERNEL is enough.
+	 */
+	start = object->pointer;
+	end = object->pointer + object->size;
+	if (ptr > start)
+		create_object(start, ptr - start, object->min_count,
+			      GFP_KERNEL);
+	if (ptr + size < end)
+		create_object(ptr + size, end - ptr - size, object->min_count,
+			      GFP_KERNEL);
+
+	put_object(object);
+}
+/*
  * Make a object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
  */
@@ -715,13 +768,28 @@ void kmemleak_free(const void *ptr)
	pr_debug("%s(0x%p)\n", __func__, ptr);
 
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		delete_object((unsigned long)ptr);
+		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
 /*
+ * Partial memory freeing function callback. This function is usually called
+ * from bootmem allocator when (part of) a memory block is freed.
+ */
+void kmemleak_free_part(const void *ptr, size_t size)
+{
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		delete_object_part((unsigned long)ptr, size);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_part);
+
+/*
  * Mark an already allocated memory block as a false positive. This will cause
  * the block to no longer be reported as leak and always be scanned.
  */
@@ -807,7 +875,7 @@ static int scan_should_stop(void)
  * found to the gray list.
  */
 static void scan_block(void *_start, void *_end,
-		       struct kmemleak_object *scanned)
+		       struct kmemleak_object *scanned, int allow_resched)
 {
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
@@ -818,6 +886,8 @@ static void scan_block(void *_start, void *_end,
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;
 
+		if (allow_resched)
+			cond_resched();
		if (scan_should_stop())
			break;
 
@@ -881,12 +951,12 @@ static void scan_object(struct kmemleak_object *object)
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
-			   (void *)(object->pointer + object->size), object);
+			   (void *)(object->pointer + object->size), object, 0);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
-					    + area->length), object);
+					    + area->length), object, 0);
 out:
	spin_unlock_irqrestore(&object->lock, flags);
 }
@@ -903,6 +973,7 @@ static void kmemleak_scan(void)
	struct task_struct *task;
	int i;
	int new_leaks = 0;
+	int gray_list_pass = 0;
 
	jiffies_last_scan = jiffies;
 
@@ -923,6 +994,7 @@ static void kmemleak_scan(void)
 #endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
+		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);
 
@@ -931,14 +1003,14 @@ static void kmemleak_scan(void)
	rcu_read_unlock();
 
	/* data/bss scanning */
-	scan_block(_sdata, _edata, NULL);
-	scan_block(__bss_start, __bss_stop, NULL);
+	scan_block(_sdata, _edata, NULL, 1);
+	scan_block(__bss_start, __bss_stop, NULL, 1);
 
 #ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
-			   __per_cpu_end + per_cpu_offset(i), NULL);
+			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
 #endif
 
	/*
@@ -960,7 +1032,7 @@ static void kmemleak_scan(void)
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
-			scan_block(page, page + 1, NULL);
+			scan_block(page, page + 1, NULL, 1);
		}
	}
 
@@ -972,7 +1044,8 @@ static void kmemleak_scan(void)
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
-				   task_stack_page(task) + THREAD_SIZE, NULL);
+				   task_stack_page(task) + THREAD_SIZE,
+				   NULL, 0);
		read_unlock(&tasklist_lock);
	}
 
@@ -984,6 +1057,7 @@ static void kmemleak_scan(void)
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
+repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();
@@ -1001,12 +1075,38 @@ static void kmemleak_scan(void)
 
		object = tmp;
	}
+
+	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+		goto scan_end;
+
+	/*
+	 * Check for new objects allocated during this scanning and add them
+	 * to the gray list.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&object->lock, flags);
+		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+		    get_object(object)) {
+			object->flags &= ~OBJECT_NEW;
+			list_add_tail(&object->gray_list, &gray_list);
+		}
+		spin_unlock_irqrestore(&object->lock, flags);
+	}
+	rcu_read_unlock();
+
+	if (!list_empty(&gray_list))
+		goto repeat;
+
+scan_end:
	WARN_ON(!list_empty(&gray_list));
 
	/*
-	 * If scanning was stopped do not report any new unreferenced objects.
+	 * If scanning was stopped or new objects were being allocated at a
+	 * higher rate than gray list scanning, do not report any new
+	 * unreferenced objects.
	 */
-	if (scan_should_stop())
+	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;
 
	/*
@@ -1039,6 +1139,7 @@ static int kmemleak_scan_thread(void *arg)
	static int first_run = 1;
 
	pr_info("Automatic memory scanning thread started\n");
+	set_user_nice(current, 10);
 
	/*
	 * Wait before the first scan to allow the system to fully initialize.
@@ -1101,11 +1202,11 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
 {
	struct kmemleak_object *object;
	loff_t n = *pos;
+	int err;
 
-	if (!n)
-		reported_leaks = 0;
-	if (reported_leaks >= REPORTS_NR)
-		return NULL;
+	err = mutex_lock_interruptible(&scan_mutex);
+	if (err < 0)
+		return ERR_PTR(err);
 
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
@@ -1131,8 +1232,6 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct list_head *n = &prev_obj->object_list;
 
	++(*pos);
-	if (reported_leaks >= REPORTS_NR)
-		goto out;
 
	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
@@ -1141,7 +1240,7 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
			break;
	}
	rcu_read_unlock();
-out:
+
	put_object(prev_obj);
	return next_obj;
 }
@@ -1151,8 +1250,15 @@
  */
 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
 {
-	if (v)
-		put_object(v);
+	if (!IS_ERR(v)) {
+		/*
+		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
+		 * waiting was interrupted, so only release it if !IS_ERR.
+		 */
+		mutex_unlock(&scan_mutex);
+		if (v)
+			put_object(v);
+	}
 }
 
 /*
@@ -1164,10 +1270,8 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
	unsigned long flags;
 
	spin_lock_irqsave(&object->lock, flags);
-	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
-		reported_leaks++;
-	}
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
 }
@@ -1181,36 +1285,15 @@ static const struct seq_operations kmemleak_seq_ops = {
 
 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-	int ret = 0;
-
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;
 
-	ret = mutex_lock_interruptible(&scan_mutex);
-	if (ret < 0)
-		goto out;
-	if (file->f_mode & FMODE_READ) {
-		ret = seq_open(file, &kmemleak_seq_ops);
-		if (ret < 0)
-			goto scan_unlock;
-	}
-	return ret;
-
-scan_unlock:
-	mutex_unlock(&scan_mutex);
-out:
-	return ret;
+	return seq_open(file, &kmemleak_seq_ops);
 }
 
 static int kmemleak_release(struct inode *inode, struct file *file)
 {
-	int ret = 0;
-
-	if (file->f_mode & FMODE_READ)
-		seq_release(inode, file);
-	mutex_unlock(&scan_mutex);
-
-	return ret;
+	return seq_release(inode, file);
 }
 
 /*
@@ -1230,15 +1313,17 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 {
	char buf[64];
	int buf_size;
-
-	if (!atomic_read(&kmemleak_enabled))
-		return -EBUSY;
+	int ret;
 
	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;
 
+	ret = mutex_lock_interruptible(&scan_mutex);
+	if (ret < 0)
+		return ret;
+
	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
@@ -1251,11 +1336,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
-		int err;
 
-		err = strict_strtoul(buf + 5, 0, &secs);
-		if (err < 0)
-			return err;
+		ret = strict_strtoul(buf + 5, 0, &secs);
+		if (ret < 0)
+			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
@@ -1264,7 +1348,12 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else
-		return -EINVAL;
+		ret = -EINVAL;
+
+out:
+	mutex_unlock(&scan_mutex);
+	if (ret < 0)
+		return ret;
 
	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
@@ -1293,7 +1382,7 @@ static int kmemleak_cleanup_thread(void *arg)
 
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
-		delete_object(object->pointer);
+		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
 
@@ -1388,6 +1477,9 @@ void __init kmemleak_init(void)
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
+		case KMEMLEAK_FREE_PART:
+			kmemleak_free_part(log->ptr, log->size);
+			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a35eeab2724c..caa92689aac9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4745,8 +4745,10 @@ void *__init alloc_large_system_hash(const char *tablename,
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
-			if (get_order(size) < MAX_ORDER)
+			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
+				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4764,16 +4766,6 @@ void *__init alloc_large_system_hash(const char *tablename,
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;
 
-	/*
-	 * If hashdist is set, the table allocation is done with __vmalloc()
-	 * which invokes the kmemleak_alloc() callback. This function may also
-	 * be called before the slab and kmemleak are initialised when
-	 * kmemleak simply buffers the request to be executed later
-	 * (GFP_ATOMIC flag ignored in this case).
-	 */
-	if (!hashdist)
-		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
	return table;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -21,7 +21,6 @@
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -2835,13 +2834,15 @@ EXPORT_SYMBOL(__kmalloc);
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
	struct page *page;
+	void *ptr = NULL;
 
	flags |= __GFP_COMP | __GFP_NOTRACK;
	page = alloc_pages_node(node, flags, get_order(size));
	if (page)
-		return page_address(page);
-	else
-		return NULL;
+		ptr = page_address(page);
+
+	kmemleak_alloc(ptr, size, 1, flags);
+	return ptr;
 }
 
 #ifdef CONFIG_NUMA
@@ -2926,6 +2927,7 @@ void kfree(const void *x)
	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		BUG_ON(!PageCompound(page));
+		kmemleak_free(x);
		put_page(page);
		return;
	}
