about summary refs log tree commit diff stats
path: root/mm/slub.c
diff options
context:
space:
mode:
author	Christoph Lameter <clameter@sgi.com>	2007-05-06 17:49:45 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:54 -0400
commit	88a420e4e21c1ff6592a668cf4e8af42eff30bad (patch)
tree	4e946ac5b187d13382d5e790404f7864b324b28e /mm/slub.c
parent	e95eed571e85d7ad4cde73576296c615f305f59f (diff)
slub: add ability to list alloc / free callers per slab
This patch enables listing the callers who allocated or freed objects in a
cache. For example to list the allocators for kmalloc-128 do

cat /sys/slab/kmalloc-128/alloc_calls
      7 sn_io_slot_fixup+0x40/0x700
      7 sn_io_slot_fixup+0x80/0x700
      9 sn_bus_fixup+0xe0/0x380
      6 param_sysfs_setup+0xf0/0x280
    276 percpu_populate+0xf0/0x1a0
     19 __register_chrdev_region+0x30/0x360
      8 expand_files+0x2e0/0x6e0
      1 sys_epoll_create+0x60/0x200
      1 __mounts_open+0x140/0x2c0
     65 kmem_alloc+0x110/0x280
      3 alloc_disk_node+0xe0/0x200
     33 as_get_io_context+0x90/0x280
     74 kobject_kset_add_dir+0x40/0x140
     12 pci_create_bus+0x2a0/0x5c0
      1 acpi_ev_create_gpe_block+0x120/0x9e0
     41 con_insert_unipair+0x100/0x1c0
      1 uart_open+0x1c0/0xba0
      1 dma_pool_create+0xe0/0x340
      2 neigh_table_init_no_netlink+0x260/0x4c0
      6 neigh_parms_alloc+0x30/0x200
      1 netlink_kernel_create+0x130/0x320
      5 fz_hash_alloc+0x50/0xe0
      2 sn_common_hubdev_init+0xd0/0x6e0
     28 kernel_param_sysfs_setup+0x30/0x180
     72 process_zones+0x70/0x2e0

cat /sys/slab/kmalloc-128/free_calls
    558 <not-available>
      3 sn_io_slot_fixup+0x600/0x700
     84 free_fdtable_rcu+0x120/0x260
      2 seq_release+0x40/0x60
      6 kmem_free+0x70/0xc0
     24 free_as_io_context+0x20/0x200
      1 acpi_get_object_info+0x3a0/0x3e0
      1 acpi_add_single_object+0xcf0/0x1e40
      2 con_release_unimap+0x80/0x140
      1 free+0x20/0x40

SLAB_STORE_USER must be enabled for a slab cache by either booting with
"slab_debug" or enabling user tracking specifically for the slab of
interest.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	184
1 files changed, 181 insertions, 3 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 4251917c5da1..a6231963cae5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -97,9 +97,6 @@
97 * 97 *
98 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 98 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
99 * 99 *
100 * - Support DEBUG_SLAB_LEAK. Trouble is we do not know where the full
101 * slabs are in SLUB.
102 *
103 * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of 100 * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
104 * it. 101 * it.
105 * 102 *
@@ -2659,6 +2656,169 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
2659 return count; 2656 return count;
2660} 2657}
2661 2658
2659/*
2660 * Generate lists of locations where slabcache objects are allocated
2661 * and freed.
2662 */
2663
2664struct location {
2665 unsigned long count;
2666 void *addr;
2667};
2668
2669struct loc_track {
2670 unsigned long max;
2671 unsigned long count;
2672 struct location *loc;
2673};
2674
2675static void free_loc_track(struct loc_track *t)
2676{
2677 if (t->max)
2678 free_pages((unsigned long)t->loc,
2679 get_order(sizeof(struct location) * t->max));
2680}
2681
2682static int alloc_loc_track(struct loc_track *t, unsigned long max)
2683{
2684 struct location *l;
2685 int order;
2686
2687 if (!max)
2688 max = PAGE_SIZE / sizeof(struct location);
2689
2690 order = get_order(sizeof(struct location) * max);
2691
2692 l = (void *)__get_free_pages(GFP_KERNEL, order);
2693
2694 if (!l)
2695 return 0;
2696
2697 if (t->count) {
2698 memcpy(l, t->loc, sizeof(struct location) * t->count);
2699 free_loc_track(t);
2700 }
2701 t->max = max;
2702 t->loc = l;
2703 return 1;
2704}
2705
2706static int add_location(struct loc_track *t, struct kmem_cache *s,
2707 void *addr)
2708{
2709 long start, end, pos;
2710 struct location *l;
2711 void *caddr;
2712
2713 start = -1;
2714 end = t->count;
2715
2716 for ( ; ; ) {
2717 pos = start + (end - start + 1) / 2;
2718
2719 /*
2720 * There is nothing at "end". If we end up there
2721 * we need to add something to before end.
2722 */
2723 if (pos == end)
2724 break;
2725
2726 caddr = t->loc[pos].addr;
2727 if (addr == caddr) {
2728 t->loc[pos].count++;
2729 return 1;
2730 }
2731
2732 if (addr < caddr)
2733 end = pos;
2734 else
2735 start = pos;
2736 }
2737
2738 /*
2739 * Not found. Insert new tracking element
2740 */
2741 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
2742 return 0;
2743
2744 l = t->loc + pos;
2745 if (pos < t->count)
2746 memmove(l + 1, l,
2747 (t->count - pos) * sizeof(struct location));
2748 t->count++;
2749 l->count = 1;
2750 l->addr = addr;
2751 return 1;
2752}
2753
2754static void process_slab(struct loc_track *t, struct kmem_cache *s,
2755 struct page *page, enum track_item alloc)
2756{
2757 void *addr = page_address(page);
2758 unsigned long map[BITS_TO_LONGS(s->objects)];
2759 void *p;
2760
2761 bitmap_zero(map, s->objects);
2762 for (p = page->freelist; p; p = get_freepointer(s, p))
2763 set_bit((p - addr) / s->size, map);
2764
2765 for (p = addr; p < addr + s->objects * s->size; p += s->size)
2766 if (!test_bit((p - addr) / s->size, map)) {
2767 void *addr = get_track(s, p, alloc)->addr;
2768
2769 add_location(t, s, addr);
2770 }
2771}
2772
2773static int list_locations(struct kmem_cache *s, char *buf,
2774 enum track_item alloc)
2775{
2776 int n = 0;
2777 unsigned long i;
2778 struct loc_track t;
2779 int node;
2780
2781 t.count = 0;
2782 t.max = 0;
2783
2784 /* Push back cpu slabs */
2785 flush_all(s);
2786
2787 for_each_online_node(node) {
2788 struct kmem_cache_node *n = get_node(s, node);
2789 unsigned long flags;
2790 struct page *page;
2791
2792 if (!atomic_read(&n->nr_slabs))
2793 continue;
2794
2795 spin_lock_irqsave(&n->list_lock, flags);
2796 list_for_each_entry(page, &n->partial, lru)
2797 process_slab(&t, s, page, alloc);
2798 list_for_each_entry(page, &n->full, lru)
2799 process_slab(&t, s, page, alloc);
2800 spin_unlock_irqrestore(&n->list_lock, flags);
2801 }
2802
2803 for (i = 0; i < t.count; i++) {
2804 void *addr = t.loc[i].addr;
2805
2806 if (n > PAGE_SIZE - 100)
2807 break;
2808 n += sprintf(buf + n, "%7ld ", t.loc[i].count);
2809 if (addr)
2810 n += sprint_symbol(buf + n, (unsigned long)t.loc[i].addr);
2811 else
2812 n += sprintf(buf + n, "<not-available>");
2813 n += sprintf(buf + n, "\n");
2814 }
2815
2816 free_loc_track(&t);
2817 if (!t.count)
2818 n += sprintf(buf, "No data\n");
2819 return n;
2820}
2821
2662static unsigned long count_partial(struct kmem_cache_node *n) 2822static unsigned long count_partial(struct kmem_cache_node *n)
2663{ 2823{
2664 unsigned long flags; 2824 unsigned long flags;
@@ -3009,6 +3169,22 @@ static ssize_t validate_store(struct kmem_cache *s,
3009} 3169}
3010SLAB_ATTR(validate); 3170SLAB_ATTR(validate);
3011 3171
3172static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3173{
3174 if (!(s->flags & SLAB_STORE_USER))
3175 return -ENOSYS;
3176 return list_locations(s, buf, TRACK_ALLOC);
3177}
3178SLAB_ATTR_RO(alloc_calls);
3179
3180static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3181{
3182 if (!(s->flags & SLAB_STORE_USER))
3183 return -ENOSYS;
3184 return list_locations(s, buf, TRACK_FREE);
3185}
3186SLAB_ATTR_RO(free_calls);
3187
3012#ifdef CONFIG_NUMA 3188#ifdef CONFIG_NUMA
3013static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf) 3189static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
3014{ 3190{
@@ -3049,6 +3225,8 @@ static struct attribute * slab_attrs[] = {
3049 &poison_attr.attr, 3225 &poison_attr.attr,
3050 &store_user_attr.attr, 3226 &store_user_attr.attr,
3051 &validate_attr.attr, 3227 &validate_attr.attr,
3228 &alloc_calls_attr.attr,
3229 &free_calls_attr.attr,
3052#ifdef CONFIG_ZONE_DMA 3230#ifdef CONFIG_ZONE_DMA
3053 &cache_dma_attr.attr, 3231 &cache_dma_attr.attr,
3054#endif 3232#endif