author	Thomas Gleixner <tglx@linutronix.de>	2009-03-16 13:53:18 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-03-17 07:28:30 -0400
commit	1be1cb7b47f0744141ed61cdb25648819ae1a56f (patch)
tree	2d13d83492881e333f633f5513aec79b4816ff78 /lib
parent	3e8ebb5c433f016dff5824587436642d87fc2e6c (diff)
debugobjects: replace static objects when slab cache becomes available
Impact: refactor/consolidate object management, prepare for delayed free

debugobjects allocates static reference objects to track objects which
are initialized or activated before the slab cache becomes available.
These static reference objects have to be handled separately in
free_object(). The handling of these objects is in the way of
implementing a delayed free functionality. The delayed free is required
to avoid callbacks into the mm code from debug_check_no_obj_freed().

Replace the static object references with dynamic ones after the slab
cache has been initialized. The static objects are now marked initdata.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <200903162049.58058.nickpiggin@yahoo.com.au>
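The pattern the patch implements can be summarized outside the kernel: pre-allocate one
dynamic replacement for every static bootstrap object, and only after all allocations have
succeeded copy the payloads and switch the live references over, so an allocation failure
leaves the static objects untouched. Below is a minimal userspace C sketch of that idea; it
is not code from the patch, and the names (struct obj, static_pool, tracked, NR_TRACKED,
replace_static_objects) are illustrative only.

/*
 * Illustrative sketch only, not kernel code: swap live references from a
 * static bootstrap pool to heap-allocated copies once an allocator exists.
 * All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_TRACKED 4

struct obj {
        int id;
};

static struct obj static_pool[NR_TRACKED];      /* pre-allocator storage      */
static struct obj *tracked[NR_TRACKED];         /* live references to replace */

static int replace_static_objects(void)
{
        struct obj *copies[NR_TRACKED];
        int i;

        /* Allocate every replacement first; a failure here leaves the
         * static objects untouched and still usable. */
        for (i = 0; i < NR_TRACKED; i++) {
                copies[i] = calloc(1, sizeof(struct obj));
                if (!copies[i])
                        goto free;
        }

        /* Copy the payload and redirect each reference to its dynamic
         * copy; the static pool can then be discarded (__initdata in the
         * kernel patch). */
        for (i = 0; i < NR_TRACKED; i++) {
                *copies[i] = *tracked[i];
                tracked[i] = copies[i];
        }
        return 0;

free:
        while (i-- > 0)
                free(copies[i]);
        return -1;
}

int main(void)
{
        int i;

        /* Early users point at the static objects. */
        for (i = 0; i < NR_TRACKED; i++) {
                static_pool[i].id = i;
                tracked[i] = &static_pool[i];
        }

        if (replace_static_objects() == 0)
                printf("replaced %d static objects\n", NR_TRACKED);
        return 0;
}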
Diffstat (limited to 'lib')
-rw-r--r--	lib/debugobjects.c	66
1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 90e46fa12721..fdcda3dbcd35 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
@@ -884,6 +884,63 @@ void __init debug_objects_early_init(void)
 }
 
 /*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
+/*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  * prevents that the debug code is called on kmem_cache_free() for the
@@ -898,8 +955,11 @@ void __init debug_objects_mem_init(void)
 					      sizeof (struct debug_obj), 0,
 					      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }