 mm/slub.c | 184 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 181 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4251917c5da1..a6231963cae5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -97,9 +97,6 @@
  *
  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  *
- * - Support DEBUG_SLAB_LEAK. Trouble is we do not know where the full
- *   slabs are in SLUB.
- *
  * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
  *   it.
  *
@@ -2659,6 +2656,169 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
 	return count;
 }
 
+/*
+ * Generate lists of locations where slabcache objects are allocated
+ * and freed.
+ */
+
+struct location {
+	unsigned long count;
+	void *addr;
+};
+
+struct loc_track {
+	unsigned long max;
+	unsigned long count;
+	struct location *loc;
+};
+
+static void free_loc_track(struct loc_track *t)
+{
+	if (t->max)
+		free_pages((unsigned long)t->loc,
+			get_order(sizeof(struct location) * t->max));
+}
+
+static int alloc_loc_track(struct loc_track *t, unsigned long max)
+{
+	struct location *l;
+	int order;
+
+	if (!max)
+		max = PAGE_SIZE / sizeof(struct location);
+
+	order = get_order(sizeof(struct location) * max);
+
+	l = (void *)__get_free_pages(GFP_KERNEL, order);
+
+	if (!l)
+		return 0;
+
+	if (t->count) {
+		memcpy(l, t->loc, sizeof(struct location) * t->count);
+		free_loc_track(t);
+	}
+	t->max = max;
+	t->loc = l;
+	return 1;
+}
+
+static int add_location(struct loc_track *t, struct kmem_cache *s,
+						void *addr)
+{
+	long start, end, pos;
+	struct location *l;
+	void *caddr;
+
+	start = -1;
+	end = t->count;
+
+	for ( ; ; ) {
+		pos = start + (end - start + 1) / 2;
+
+		/*
+		 * There is nothing at "end". If we end up there
+		 * we need to add something to before end.
+		 */
+		if (pos == end)
+			break;
+
+		caddr = t->loc[pos].addr;
+		if (addr == caddr) {
+			t->loc[pos].count++;
+			return 1;
+		}
+
+		if (addr < caddr)
+			end = pos;
+		else
+			start = pos;
+	}
+
+	/*
+	 * Not found. Insert new tracking element
+	 */
+	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
+		return 0;
+
+	l = t->loc + pos;
+	if (pos < t->count)
+		memmove(l + 1, l,
+			(t->count - pos) * sizeof(struct location));
+	t->count++;
+	l->count = 1;
+	l->addr = addr;
+	return 1;
+}
+
+static void process_slab(struct loc_track *t, struct kmem_cache *s,
+		struct page *page, enum track_item alloc)
+{
+	void *addr = page_address(page);
+	unsigned long map[BITS_TO_LONGS(s->objects)];
+	void *p;
+
+	bitmap_zero(map, s->objects);
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit((p - addr) / s->size, map);
+
+	for (p = addr; p < addr + s->objects * s->size; p += s->size)
+		if (!test_bit((p - addr) / s->size, map)) {
+			void *addr = get_track(s, p, alloc)->addr;
+
+			add_location(t, s, addr);
+		}
+}
+
+static int list_locations(struct kmem_cache *s, char *buf,
+					enum track_item alloc)
+{
+	int n = 0;
+	unsigned long i;
+	struct loc_track t;
+	int node;
+
+	t.count = 0;
+	t.max = 0;
+
+	/* Push back cpu slabs */
+	flush_all(s);
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long flags;
+		struct page *page;
+
+		if (!atomic_read(&n->nr_slabs))
+			continue;
+
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(page, &n->partial, lru)
+			process_slab(&t, s, page, alloc);
+		list_for_each_entry(page, &n->full, lru)
+			process_slab(&t, s, page, alloc);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
+
+	for (i = 0; i < t.count; i++) {
+		void *addr = t.loc[i].addr;
+
+		if (n > PAGE_SIZE - 100)
+			break;
+		n += sprintf(buf + n, "%7ld ", t.loc[i].count);
+		if (addr)
+			n += sprint_symbol(buf + n, (unsigned long)t.loc[i].addr);
+		else
+			n += sprintf(buf + n, "<not-available>");
+		n += sprintf(buf + n, "\n");
+	}
+
+	free_loc_track(&t);
+	if (!t.count)
+		n += sprintf(buf, "No data\n");
+	return n;
+}
+
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
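add_location() above is the core of the reporting pass: it keeps the caller
list as an array sorted by address, finds an entry with a binary search,
bumps its count on a match, and otherwise shifts the tail up and inserts,
doubling the array via alloc_loc_track() when it fills. The following is a
minimal userspace sketch of that same sorted-insert technique; it mirrors
the patch's names but substitutes malloc()/realloc() for the kernel page
allocator, so it is an illustration, not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct location {
	unsigned long count;
	void *addr;
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
};

static int add_location(struct loc_track *t, void *addr)
{
	long start = -1, end = t->count, pos;
	struct location *l;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;
		if (pos == end)		/* nothing at "end": insert before it */
			break;
		if (addr == t->loc[pos].addr) {
			t->loc[pos].count++;	/* repeated caller: just count it */
			return 1;
		}
		/* raw pointer ordering, as in the kernel version */
		if (addr < t->loc[pos].addr)
			end = pos;
		else
			start = pos;
	}

	if (t->count >= t->max) {	/* grow by doubling, like alloc_loc_track() */
		unsigned long max = t->max ? 2 * t->max : 16;

		l = realloc(t->loc, max * sizeof(*l));
		if (!l)
			return 0;
		t->loc = l;
		t->max = max;
	}

	/* shift the tail up and drop the new element into its sorted slot */
	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l, (t->count - pos) * sizeof(*l));
	t->count++;
	l->count = 1;
	l->addr = addr;
	return 1;
}

int main(void)
{
	struct loc_track t = { 0, 0, NULL };
	void *samples[] = { (void *)0x30, (void *)0x10, (void *)0x30, (void *)0x20 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		add_location(&t, samples[i]);
	/* prints each distinct address once, in sorted order, with its count */
	for (unsigned long i = 0; i < t.count; i++)
		printf("%7ld %p\n", t.loc[i].count, t.loc[i].addr);
	free(t.loc);
	return 0;
}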
@@ -3009,6 +3169,22 @@ static ssize_t validate_store(struct kmem_cache *s,
 }
 SLAB_ATTR(validate);
 
+static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return -ENOSYS;
+	return list_locations(s, buf, TRACK_ALLOC);
+}
+SLAB_ATTR_RO(alloc_calls);
+
+static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return -ENOSYS;
+	return list_locations(s, buf, TRACK_FREE);
+}
+SLAB_ATTR_RO(free_calls);
+
 #ifdef CONFIG_NUMA
 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
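The SLAB_ATTR_RO() uses above lean on slub.c's existing sysfs glue, which
this diff does not show. Treat the exact definition as an assumption to be
checked against the file, but the macro expands to roughly:

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

so alloc_calls and free_calls become read-only attribute files whose reads
invoke the *_show() routines defined above.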
@@ -3049,6 +3225,8 @@ static struct attribute * slab_attrs[] = {
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
+	&alloc_calls_attr.attr,
+	&free_calls_attr.attr,
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
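Once a cache runs with SLAB_STORE_USER set (for example via the slub_debug=U
boot parameter), the two new files appear in SLUB's sysfs directory for that
cache. A sketch of a session, with the cache name and all numbers purely
illustrative:

	cat /sys/slab/kmalloc-64/alloc_calls
	   4526 kmem_cache_alloc+0x.../0x...
	    213 __alloc_skb+0x.../0x...

Each line is the %7ld occurrence count printed by list_locations() followed
by the symbolized caller recorded in the object's track data; reading either
file on a cache without SLAB_STORE_USER returns -ENOSYS.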