diff options
| -rw-r--r-- | include/linux/kmemleak.h | 4 | ||||
| -rw-r--r-- | mm/kmemleak.c | 95 |
2 files changed, 85 insertions, 14 deletions
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 7796aed6cdd5..6a63807f714e 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
| @@ -27,6 +27,7 @@ extern void kmemleak_init(void); | |||
| 27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | 27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, |
| 28 | gfp_t gfp); | 28 | gfp_t gfp); |
| 29 | extern void kmemleak_free(const void *ptr); | 29 | extern void kmemleak_free(const void *ptr); |
| 30 | extern void kmemleak_free_part(const void *ptr, size_t size); | ||
| 30 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | 31 | extern void kmemleak_padding(const void *ptr, unsigned long offset, |
| 31 | size_t size); | 32 | size_t size); |
| 32 | extern void kmemleak_not_leak(const void *ptr); | 33 | extern void kmemleak_not_leak(const void *ptr); |
| @@ -71,6 +72,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | |||
| 71 | static inline void kmemleak_free(const void *ptr) | 72 | static inline void kmemleak_free(const void *ptr) |
| 72 | { | 73 | { |
| 73 | } | 74 | } |
| 75 | static inline void kmemleak_free_part(const void *ptr, size_t size) | ||
| 76 | { | ||
| 77 | } | ||
| 74 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | 78 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) |
| 75 | { | 79 | { |
| 76 | } | 80 | } |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 466d39007264..5aabd41ffb8f 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -210,6 +210,7 @@ static DEFINE_MUTEX(scan_mutex); | |||
| 210 | enum { | 210 | enum { |
| 211 | KMEMLEAK_ALLOC, | 211 | KMEMLEAK_ALLOC, |
| 212 | KMEMLEAK_FREE, | 212 | KMEMLEAK_FREE, |
| 213 | KMEMLEAK_FREE_PART, | ||
| 213 | KMEMLEAK_NOT_LEAK, | 214 | KMEMLEAK_NOT_LEAK, |
| 214 | KMEMLEAK_IGNORE, | 215 | KMEMLEAK_IGNORE, |
| 215 | KMEMLEAK_SCAN_AREA, | 216 | KMEMLEAK_SCAN_AREA, |
| @@ -523,27 +524,17 @@ out: | |||
| 523 | * Remove the metadata (struct kmemleak_object) for a memory block from the | 524 | * Remove the metadata (struct kmemleak_object) for a memory block from the |
| 524 | * object_list and object_tree_root and decrement its use_count. | 525 | * object_list and object_tree_root and decrement its use_count. |
| 525 | */ | 526 | */ |
| 526 | static void delete_object(unsigned long ptr) | 527 | static void __delete_object(struct kmemleak_object *object) |
| 527 | { | 528 | { |
| 528 | unsigned long flags; | 529 | unsigned long flags; |
| 529 | struct kmemleak_object *object; | ||
| 530 | 530 | ||
| 531 | write_lock_irqsave(&kmemleak_lock, flags); | 531 | write_lock_irqsave(&kmemleak_lock, flags); |
| 532 | object = lookup_object(ptr, 0); | ||
| 533 | if (!object) { | ||
| 534 | #ifdef DEBUG | ||
| 535 | kmemleak_warn("Freeing unknown object at 0x%08lx\n", | ||
| 536 | ptr); | ||
| 537 | #endif | ||
| 538 | write_unlock_irqrestore(&kmemleak_lock, flags); | ||
| 539 | return; | ||
| 540 | } | ||
| 541 | prio_tree_remove(&object_tree_root, &object->tree_node); | 532 | prio_tree_remove(&object_tree_root, &object->tree_node); |
| 542 | list_del_rcu(&object->object_list); | 533 | list_del_rcu(&object->object_list); |
| 543 | write_unlock_irqrestore(&kmemleak_lock, flags); | 534 | write_unlock_irqrestore(&kmemleak_lock, flags); |
| 544 | 535 | ||
| 545 | WARN_ON(!(object->flags & OBJECT_ALLOCATED)); | 536 | WARN_ON(!(object->flags & OBJECT_ALLOCATED)); |
| 546 | WARN_ON(atomic_read(&object->use_count) < 1); | 537 | WARN_ON(atomic_read(&object->use_count) < 2); |
| 547 | 538 | ||
| 548 | /* | 539 | /* |
| 549 | * Locking here also ensures that the corresponding memory block | 540 | * Locking here also ensures that the corresponding memory block |
| @@ -556,6 +547,64 @@ static void delete_object(unsigned long ptr) | |||
| 556 | } | 547 | } |
| 557 | 548 | ||
| 558 | /* | 549 | /* |
| 550 | * Look up the metadata (struct kmemleak_object) corresponding to ptr and | ||
| 551 | * delete it. | ||
| 552 | */ | ||
| 553 | static void delete_object_full(unsigned long ptr) | ||
| 554 | { | ||
| 555 | struct kmemleak_object *object; | ||
| 556 | |||
| 557 | object = find_and_get_object(ptr, 0); | ||
| 558 | if (!object) { | ||
| 559 | #ifdef DEBUG | ||
| 560 | kmemleak_warn("Freeing unknown object at 0x%08lx\n", | ||
| 561 | ptr); | ||
| 562 | #endif | ||
| 563 | return; | ||
| 564 | } | ||
| 565 | __delete_object(object); | ||
| 566 | put_object(object); | ||
| 567 | } | ||
| 568 | |||
| 569 | /* | ||
| 570 | * Look up the metadata (struct kmemleak_object) corresponding to ptr and | ||
| 571 | * delete it. If the memory block is partially freed, the function may create | ||
| 572 | * additional metadata for the remaining parts of the block. | ||
| 573 | */ | ||
| 574 | static void delete_object_part(unsigned long ptr, size_t size) | ||
| 575 | { | ||
| 576 | struct kmemleak_object *object; | ||
| 577 | unsigned long start, end; | ||
| 578 | |||
| 579 | object = find_and_get_object(ptr, 1); | ||
| 580 | if (!object) { | ||
| 581 | #ifdef DEBUG | ||
| 582 | kmemleak_warn("Partially freeing unknown object at 0x%08lx " | ||
| 583 | "(size %zu)\n", ptr, size); | ||
| 584 | #endif | ||
| 585 | return; | ||
| 586 | } | ||
| 587 | __delete_object(object); | ||
| 588 | |||
| 589 | /* | ||
| 590 | * Create one or two objects that may result from the memory block | ||
| 591 | * split. Note that partial freeing is only done by free_bootmem() and | ||
| 592 | * this happens before kmemleak_init() is called. The path below is | ||
| 593 | * only executed during early log recording in kmemleak_init(), so | ||
| 594 | * GFP_KERNEL is enough. | ||
| 595 | */ | ||
| 596 | start = object->pointer; | ||
| 597 | end = object->pointer + object->size; | ||
| 598 | if (ptr > start) | ||
| 599 | create_object(start, ptr - start, object->min_count, | ||
| 600 | GFP_KERNEL); | ||
| 601 | if (ptr + size < end) | ||
| 602 | create_object(ptr + size, end - ptr - size, object->min_count, | ||
| 603 | GFP_KERNEL); | ||
| 604 | |||
| 605 | put_object(object); | ||
| 606 | } | ||
| 607 | /* | ||
| 559 | * Mark an object permanently as gray-colored so that it can no longer be | 608 | * Mark an object permanently as gray-colored so that it can no longer be |
| 560 | * reported as a leak. This is used in general to mark a false positive. | 609 | * reported as a leak. This is used in general to mark a false positive. |
| 561 | */ | 610 | */ |
| @@ -719,13 +768,28 @@ void kmemleak_free(const void *ptr) | |||
| 719 | pr_debug("%s(0x%p)\n", __func__, ptr); | 768 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 720 | 769 | ||
| 721 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 770 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) |
| 722 | delete_object((unsigned long)ptr); | 771 | delete_object_full((unsigned long)ptr); |
| 723 | else if (atomic_read(&kmemleak_early_log)) | 772 | else if (atomic_read(&kmemleak_early_log)) |
| 724 | log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); | 773 | log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); |
| 725 | } | 774 | } |
| 726 | EXPORT_SYMBOL_GPL(kmemleak_free); | 775 | EXPORT_SYMBOL_GPL(kmemleak_free); |
| 727 | 776 | ||
| 728 | /* | 777 | /* |
| 778 | * Partial memory freeing function callback. This function is usually called | ||
| 779 | * from the bootmem allocator when (part of) a memory block is freed. | ||
| 780 | */ | ||
| 781 | void kmemleak_free_part(const void *ptr, size_t size) | ||
| 782 | { | ||
| 783 | pr_debug("%s(0x%p)\n", __func__, ptr); | ||
| 784 | |||
| 785 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | ||
| 786 | delete_object_part((unsigned long)ptr, size); | ||
| 787 | else if (atomic_read(&kmemleak_early_log)) | ||
| 788 | log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); | ||
| 789 | } | ||
| 790 | EXPORT_SYMBOL_GPL(kmemleak_free_part); | ||
| 791 | |||
| 792 | /* | ||
| 729 | * Mark an already allocated memory block as a false positive. This will cause | 793 | * Mark an already allocated memory block as a false positive. This will cause |
| 730 | * the block to no longer be reported as leak and always be scanned. | 794 | * the block to no longer be reported as leak and always be scanned. |
| 731 | */ | 795 | */ |
| @@ -1318,7 +1382,7 @@ static int kmemleak_cleanup_thread(void *arg) | |||
| 1318 | 1382 | ||
| 1319 | rcu_read_lock(); | 1383 | rcu_read_lock(); |
| 1320 | list_for_each_entry_rcu(object, &object_list, object_list) | 1384 | list_for_each_entry_rcu(object, &object_list, object_list) |
| 1321 | delete_object(object->pointer); | 1385 | delete_object_full(object->pointer); |
| 1322 | rcu_read_unlock(); | 1386 | rcu_read_unlock(); |
| 1323 | mutex_unlock(&scan_mutex); | 1387 | mutex_unlock(&scan_mutex); |
| 1324 | 1388 | ||
| @@ -1413,6 +1477,9 @@ void __init kmemleak_init(void) | |||
| 1413 | case KMEMLEAK_FREE: | 1477 | case KMEMLEAK_FREE: |
| 1414 | kmemleak_free(log->ptr); | 1478 | kmemleak_free(log->ptr); |
| 1415 | break; | 1479 | break; |
| 1480 | case KMEMLEAK_FREE_PART: | ||
| 1481 | kmemleak_free_part(log->ptr, log->size); | ||
| 1482 | break; | ||
| 1416 | case KMEMLEAK_NOT_LEAK: | 1483 | case KMEMLEAK_NOT_LEAK: |
| 1417 | kmemleak_not_leak(log->ptr); | 1484 | kmemleak_not_leak(log->ptr); |
| 1418 | break; | 1485 | break; |
