author		Christoph Lameter <clameter@sgi.com>	2007-05-17 01:11:00 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-17 08:23:04 -0400
commit		3ec0974210fe1b7c0618ad6e39a882a4237d7de2 (patch)
tree		a707c84e2dc18cfba9a44e198a44fbb2605f771c /mm/slub.c
parent		a35afb830f8d71ec211531aeb9a621b09a2efb39 (diff)
SLUB: Simplify debug code
Consolidate functionality into the #ifdef section.
Extract tracing into one subroutine.
Move object debug processing into the #ifdef section so that the
code in __slab_alloc and __slab_free becomes minimal.
Reduce the number of functions we need to provide stubs for in the !SLUB_DEBUG case.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
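
The heart of the patch is the long-standing kernel idiom of keeping the real debug routines inside the #ifdef CONFIG_SLUB_DEBUG block and pairing each with an empty (or constant-returning) inline stub in the #else branch, so that hot-path callers such as __slab_alloc() and __slab_free() contain one unconditional call. Below is a minimal user-space sketch of that idiom; the names (DEBUG_BUILD, debug_processing, do_work) are illustrative only and not taken from this patch.

/*
 * Sketch of the #ifdef/stub consolidation this commit applies to SLUB.
 * Build with -DDEBUG_BUILD to compile in the debug path; without it,
 * the inline stub returns a constant and the branch at the call site
 * becomes dead code the compiler can drop.
 */
#include <stdio.h>

#ifdef DEBUG_BUILD
/* Real version: all checking/tracing consolidated in one routine. */
static int debug_processing(void *object, void *addr)
{
	printf("debug: object=%p caller=%p\n", object, addr);
	return 1;		/* checks passed */
}
#else
/* Stub: call sites need no #ifdef of their own. */
static inline int debug_processing(void *object, void *addr)
{
	(void)object;
	(void)addr;
	return 0;
}
#endif

static void do_work(void *object)
{
	/* One call replaces a block of per-flag debug code at the caller. */
	if (debug_processing(object, __builtin_return_address(0)))
		printf("debug path taken\n");
}

int main(void)
{
	int x;

	do_work(&x);
	return 0;
}

Passing the caller's address down mirrors how alloc_debug_processing() and free_debug_processing() gain a void *addr parameter in the diff below, so SLAB_STORE_USER tracking no longer has to be performed at the call site.
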
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	112
1 file changed, 57 insertions, 55 deletions
@@ -742,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+	if (s->flags & SLAB_TRACE) {
+		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+			s->name,
+			alloc ? "alloc" : "free",
+			object, page->inuse,
+			page->freelist);
+
+		if (!alloc)
+			print_section("Object", (void *)object, s->objsize);
+
+		dump_stack();
+	}
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -766,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
-static int alloc_object_checks(struct kmem_cache *s, struct page *page,
-						void *object)
+static void setup_object_debug(struct kmem_cache *s, struct page *page,
+						void *object)
+{
+	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+		return;
+
+	init_object(s, object, 0);
+	init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -782,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 		goto bad;
 	}
 
-	if (!object)
-		return 1;
-
-	if (!check_object(s, page, object, 0))
+	if (object && !check_object(s, page, object, 0))
 		goto bad;
 
+	/* Success perform special debug activities for allocs */
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, object, TRACK_ALLOC, addr);
+	trace(s, page, object, 1);
+	init_object(s, object, 1);
 	return 1;
+
 bad:
 	if (PageSlab(page)) {
 		/*
@@ -806,8 +835,8 @@ bad:
 	return 0;
 }
 
-static int free_object_checks(struct kmem_cache *s, struct page *page,
-							void *object)
+static int free_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -841,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
 			"to slab %s", object, page->slab->name);
 		goto fail;
 	}
+
+	/* Special debug activities for freeing objects */
+	if (!SlabFrozen(page) && !page->freelist)
+		remove_full(s, page);
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, object, TRACK_FREE, addr);
+	trace(s, page, object, 0);
+	init_object(s, object, 0);
 	return 1;
+
 fail:
 	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
 						s->name, page, object);
 	return 0;
 }
 
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
-{
-	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
-			s->name,
-			alloc ? "alloc" : "free",
-			object, page->inuse,
-			page->freelist);
-
-		if (!alloc)
-			print_section("Object", (void *)object, s->objsize);
-
-		dump_stack();
-	}
-}
-
 static int __init setup_slub_debug(char *str)
 {
 	if (!str || *str != '=')
@@ -932,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	s->flags |= slub_debug;
 }
 #else
+static inline void setup_object_debug(struct kmem_cache *s,
+			struct page *page, void *object) {}
 
-static inline int alloc_object_checks(struct kmem_cache *s,
-		struct page *page, void *object) { return 0; }
+static inline int alloc_debug_processing(struct kmem_cache *s,
+	struct page *page, void *object, void *addr) { return 0; }
 
-static inline int free_object_checks(struct kmem_cache *s,
-		struct page *page, void *object) { return 0; }
+static inline int free_debug_processing(struct kmem_cache *s,
+	struct page *page, void *object, void *addr) { return 0; }
 
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline void trace(struct kmem_cache *s, struct page *page,
-			void *object, int alloc) {}
-static inline void init_object(struct kmem_cache *s,
-			void *object, int active) {}
-static inline void init_tracking(struct kmem_cache *s, void *object) {}
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
-static inline void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr) {}
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
 #define slub_debug 0
 #endif
@@ -988,11 +1004,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
-	if (SlabDebug(page)) {
-		init_object(s, object, 0);
-		init_tracking(s, object);
-	}
-
+	setup_object_debug(s, page, object);
 	if (unlikely(s->ctor))
 		s->ctor(object, s, 0);
 }
@@ -1449,12 +1461,8 @@ new_slab:
 	return NULL;
 debug:
 	object = page->freelist;
-	if (!alloc_object_checks(s, page, object))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_ALLOC, addr);
-	trace(s, page, object, 1);
-	init_object(s, object, 1);
 
 	page->inuse++;
 	page->freelist = object[page->offset];
@@ -1561,14 +1569,8 @@ slab_empty:
 	return;
 
 debug:
-	if (!free_object_checks(s, page, x))
+	if (!free_debug_processing(s, page, x, addr))
 		goto out_unlock;
-	if (!SlabFrozen(page) && !page->freelist)
-		remove_full(s, page);
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, x, TRACK_FREE, addr);
-	trace(s, page, object, 0);
-	init_object(s, object, 0);
 	goto checks_ok;
 }
 
@@ -1805,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 	page->freelist = get_freepointer(kmalloc_caches, n);
 	page->inuse++;
 	kmalloc_caches->node[node] = n;
-	init_object(kmalloc_caches, n, 1);
+	setup_object_debug(kmalloc_caches, page, n);
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);