author     Catalin Marinas <catalin.marinas@arm.com>   2009-08-27 09:29:17 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>   2009-08-27 09:29:17 -0400
commit     fd6789675ebfb9185cb4fb68dc51010b4e95d952 (patch)
tree       788cf2a0ac4a1db7f2cf6db5c586085548742c85 /mm/kmemleak.c
parent     a6186d89c913b176e7339f37a4ec6ccb38b2c5c0 (diff)
kmemleak: Save the stack trace for early allocations
Before slab is initialised, kmemleak saves the allocations in an early log buffer. They are later recorded as normal memory allocations. This patch adds stack trace saving to the early log buffer as well; otherwise, the information shown for such objects only refers to the kmemleak_init() function.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
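As a rough illustration of that flow, here is a small userspace sketch (not the kernel code: the struct layouts are simplified and the backtrace capture is simulated by fake_save_trace(), a made-up stand-in for save_stack_trace()). Allocations performed before the object cache exists are appended to a static early_log array together with a trace, and the trace is copied into the object when the log is replayed.

#include <stdio.h>
#include <string.h>

#define MAX_TRACE      16
#define MAX_EARLY_LOG  32

/* One early allocation remembered before the allocator is up. */
struct early_log {
        const void *ptr;                /* allocated block */
        size_t size;                    /* block size */
        unsigned long trace[MAX_TRACE]; /* trace saved at log time */
        unsigned int trace_len;
};

/* The metadata created later, once tracking is fully initialised. */
struct object {
        const void *ptr;
        size_t size;
        unsigned long trace[MAX_TRACE]; /* trace reported for this object */
        unsigned int trace_len;
};

static struct early_log early_log[MAX_EARLY_LOG];
static int crt_early_log;

/* Stand-in for save_stack_trace(): record a single caller "address". */
static unsigned int fake_save_trace(unsigned long *trace, unsigned long caller)
{
        trace[0] = caller;
        return 1;
}

/* Called for early allocations: remember pointer, size and trace. */
static void log_early(const void *ptr, size_t size, unsigned long caller)
{
        struct early_log *log;

        if (crt_early_log >= MAX_EARLY_LOG)
                return;
        log = &early_log[crt_early_log++];
        log->ptr = ptr;
        log->size = size;
        log->trace_len = fake_save_trace(log->trace, caller);
}

/* Replay: create the object and reuse the trace captured at log time. */
static void early_alloc(const struct early_log *log, struct object *obj)
{
        obj->ptr = log->ptr;
        obj->size = log->size;
        memcpy(obj->trace, log->trace, log->trace_len * sizeof(*log->trace));
        obj->trace_len = log->trace_len;
}

int main(void)
{
        struct object obj;

        log_early("block0", 64, 0xdeadbeef);  /* "early" allocation */
        early_alloc(&early_log[0], &obj);     /* replayed after init */

        printf("ptr=%p size=%zu trace[0]=%#lx\n",
               (void *)obj.ptr, obj.size, obj.trace[0]);
        return 0;
}

Without the copy in the replay step, the object's trace would be captured at replay time and would point into kmemleak_init() rather than at the original caller, which is what the patch below addresses in the real code.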
Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r--  mm/kmemleak.c  64
1 file changed, 52 insertions, 12 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 576c0a4cec52..8172154502a6 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -229,6 +229,8 @@ struct early_log {
 	int min_count;			/* minimum reference count */
 	unsigned long offset;		/* scan area offset */
 	size_t length;			/* scan area length */
+	unsigned long trace[MAX_TRACE];	/* stack trace */
+	unsigned int trace_len;		/* stack trace length */
 };
 
 /* early logging buffer and current position */
@@ -437,21 +439,36 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 }
 
 /*
+ * Save stack trace to the given array of MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+	struct stack_trace stack_trace;
+
+	stack_trace.max_entries = MAX_TRACE;
+	stack_trace.nr_entries = 0;
+	stack_trace.entries = trace;
+	stack_trace.skip = 2;
+	save_stack_trace(&stack_trace);
+
+	return stack_trace.nr_entries;
+}
+
+/*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
  * memory block and add it to the object_list and object_tree_root.
  */
-static void create_object(unsigned long ptr, size_t size, int min_count,
-			  gfp_t gfp)
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+					     int min_count, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct prio_tree_node *node;
-	struct stack_trace trace;
 
 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!object) {
 		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
-		return;
+		return NULL;
 	}
 
 	INIT_LIST_HEAD(&object->object_list);
@@ -485,12 +502,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	}
 
 	/* kernel backtrace */
-	trace.max_entries = MAX_TRACE;
-	trace.nr_entries = 0;
-	trace.entries = object->trace;
-	trace.skip = 1;
-	save_stack_trace(&trace);
-	object->trace_len = trace.nr_entries;
+	object->trace_len = __save_stack_trace(object->trace);
 
 	INIT_PRIO_TREE_NODE(&object->tree_node);
 	object->tree_node.start = ptr;
@@ -521,6 +533,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
 	write_unlock_irqrestore(&kmemleak_lock, flags);
+	return object;
 }
 
 /*
@@ -743,11 +756,39 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->min_count = min_count;
 	log->offset = offset;
 	log->length = length;
+	if (op_type == KMEMLEAK_ALLOC)
+		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
 	local_irq_restore(flags);
 }
 
 /*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc(struct early_log *log)
+{
+	struct kmemleak_object *object;
+	unsigned long flags;
+	int i;
+
+	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+		return;
+
+	/*
+	 * RCU locking needed to ensure object is not freed via put_object().
+	 */
+	rcu_read_lock();
+	object = create_object((unsigned long)log->ptr, log->size,
+			       log->min_count, GFP_KERNEL);
+	spin_lock_irqsave(&object->lock, flags);
+	for (i = 0; i < log->trace_len; i++)
+		object->trace[i] = log->trace[i];
+	object->trace_len = log->trace_len;
+	spin_unlock_irqrestore(&object->lock, flags);
+	rcu_read_unlock();
+}
+
+/*
  * Memory allocation function callback. This function is called from the
  * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
  * vmalloc etc.).
@@ -1509,8 +1550,7 @@ void __init kmemleak_init(void)
 
 		switch (log->op_type) {
 		case KMEMLEAK_ALLOC:
-			kmemleak_alloc(log->ptr, log->size, log->min_count,
-				       GFP_KERNEL);
+			early_alloc(log);
 			break;
 		case KMEMLEAK_FREE:
 			kmemleak_free(log->ptr);