Diffstat (limited to 'lib/debugobjects.c')

 lib/debugobjects.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 90e46fa12721..fdcda3dbcd35 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
@@ -884,6 +884,63 @@ void __init debug_objects_early_init(void)
 }
 
 /*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
+/*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  * prevents that the debug code is called on kmem_cache_free() for the
@@ -898,8 +955,11 @@ void __init debug_objects_mem_init(void)
 			      sizeof (struct debug_obj), 0,
 			      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }
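
The pattern in debug_objects_replace_static_objects() is worth spelling out: every replacement object is allocated up front, so once the swap begins it cannot fail, and the swap runs with interrupts disabled while only the boot CPU is up. What follows is a minimal userspace C sketch of the same allocate-first-then-swap idea. It is an illustration only, not kernel code: struct node stands in for hlist_node, the pool and hash sizes are toy values, and plain pointers replace the kernel's hlist primitives.

/*
 * Minimal userspace sketch of the allocate-first-then-swap pattern
 * used by debug_objects_replace_static_objects(). Illustrative only:
 * names, sizes and the list type are stand-ins, not kernel API.
 *
 * Build and run:  cc -o swap swap.c && ./swap
 */
#include <stdio.h>
#include <stdlib.h>

struct node {				/* stand-in for hlist_node */
	struct node *next;
};

struct obj {				/* stand-in for debug_obj */
	struct node node;
	int payload;
};

#define POOL_SIZE 4			/* stands in for ODEBUG_POOL_SIZE */
#define HASH_SIZE 2			/* stands in for ODEBUG_HASH_SIZE */

static struct obj static_pool[POOL_SIZE];	/* like obj_static_pool */
static struct node *free_pool;			/* like obj_pool */
static struct node *hash[HASH_SIZE];		/* like obj_hash buckets */

static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

static int replace_static_objects(void)
{
	struct node *fresh = NULL, *n;
	int i, cnt = 0;

	/* Allocate every replacement first, so the swap cannot fail. */
	for (i = 0; i < POOL_SIZE; i++) {
		struct obj *new = calloc(1, sizeof(*new));
		if (!new)
			goto fail;
		push(&fresh, &new->node);
	}

	/* The kernel disables interrupts here; we are single-threaded. */

	/* Drop the static free-pool entries, install the heap ones. */
	free_pool = fresh;

	/* Re-point every bucket at copies made from the new pool. */
	for (i = 0; i < HASH_SIZE; i++) {
		struct node *old = hash[i];

		hash[i] = NULL;
		for (n = old; n; n = n->next) {
			struct obj *new = (struct obj *)free_pool;

			free_pool = free_pool->next;
			*new = *(struct obj *)n;	/* copy object data */
			push(&hash[i], &new->node);
			cnt++;
		}
	}
	printf("%d active objects replaced\n", cnt);
	return 0;
fail:
	/* Unwind: free only what we allocated; original state intact. */
	while (fresh) {
		n = fresh;
		fresh = fresh->next;
		free(n);		/* node is obj's first member */
	}
	return -1;
}

int main(void)
{
	/* Pretend two static-pool objects are active in the hash. */
	static_pool[0].payload = 1;
	static_pool[1].payload = 2;
	push(&hash[0], &static_pool[0].node);
	push(&hash[1], &static_pool[1].node);

	return replace_static_objects() ? 1 : 0;
}

The two properties carried over from the patch: the failure path frees only the partially built replacement list and leaves the original state untouched, and active objects are migrated by copying each static entry into a dynamically allocated one, so after the swap no hash bucket references the __initdata pool and its memory can safely be discarded.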