 Documentation/kernel-parameters.txt |   3 +
 lib/debugobjects.c                  | 127 ++++++++++++++++++++++++++++------
 2 files changed, 115 insertions(+), 15 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 421920897a37..0b7351b0582c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -617,6 +617,9 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	debug_objects	[KNL] Enable object debugging
 
+	no_debug_objects
+			[KNL] Disable object debugging
+
 	debugpat	[X86] Enable PAT debugging
 
 	decnet.addr=	[HW,NET]
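
With this hunk applied, object debugging can be switched off from the boot
loader even on kernels built with it enabled by default. A hypothetical GRUB
kernel line might look like this (image path and the other options are
placeholders):

    linux /boot/vmlinuz ro root=/dev/sda1 no_debug_objects
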
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 5d99be1fd988..2755a3bd16a1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
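
Tagging the static pool __initdata moves it into the .init.data section,
which the kernel reclaims via free_initmem() once boot completes. That is
only safe because a later hunk replaces every reference into this pool with
a dynamically allocated copy before init memory is discarded. A minimal
sketch of the attribute's contract (the identifiers here are hypothetical):

    #include <linux/init.h>

    /* Lives in .init.data; the memory is reclaimed after boot. */
    static int boot_scratch[16] __initdata;

    static int __init consume_scratch(void)
    {
    	/* Fine: __init code runs before init memory is freed. */
    	return boot_scratch[0];
    }
    /*
     * After free_initmem(), any leftover pointer into boot_scratch
     * would reference freed memory.
     */
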
@@ -50,12 +50,23 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr	*descr_test  __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
 	debug_objects_enabled = 1;
 	return 0;
 }
+
+static int __init disable_object_debug(char *str)
+{
+	debug_objects_enabled = 0;
+	return 0;
+}
+
 early_param("debug_objects", enable_object_debug);
+early_param("no_debug_objects", disable_object_debug);
 
 static const char *obj_states[ODEBUG_STATE_MAX] = {
 	[ODEBUG_STATE_NONE]		= "none",
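
The disable handler mirrors the existing enable one; together they form the
usual pattern for a boolean boot switch. early_param() callbacks run from
parse_early_param(), very early in start_kernel(), so they should do nothing
beyond setting a flag. A hedged sketch of the same pattern for a made-up
switch (all names below are hypothetical):

    #include <linux/init.h>

    static int my_checks_enabled __read_mostly = 1;	/* hypothetical default */

    static int __init parse_no_my_checks(char *str)
    {
    	/*
    	 * Runs from parse_early_param(), before most subsystems
    	 * exist: just flip the flag and return 0 for success.
    	 */
    	my_checks_enabled = 0;
    	return 0;
    }
    early_param("no_my_checks", parse_no_my_checks);
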
@@ -146,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&obj->node, &obj_pool);
-		obj_pool_free++;
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-	} else {
-		spin_lock_irqsave(&pool_lock, flags);
-		obj_pool_used--;
+	spin_lock_irqsave(&pool_lock, flags);
+	while (obj_pool_free > ODEBUG_POOL_SIZE) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		obj_pool_free--;
+		/*
+		 * We release pool_lock across kmem_cache_free() to
+		 * avoid contention on pool_lock.
+		 */
 		spin_unlock_irqrestore(&pool_lock, flags);
 		kmem_cache_free(obj_cache, obj);
+		spin_lock_irqsave(&pool_lock, flags);
 	}
+	spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long flags;
+	int sched = 0;
+
+	spin_lock_irqsave(&pool_lock, flags);
+	/*
+	 * schedule work when the pool is filled and the cache is
+	 * initialized:
+	 */
+	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+		sched = !work_pending(&debug_obj_work);
+	hlist_add_head(&obj->node, &obj_pool);
+	obj_pool_free++;
+	obj_pool_used--;
+	spin_unlock_irqrestore(&pool_lock, flags);
+	if (sched)
+		schedule_work(&debug_obj_work);
 }
 
 /*
@@ -876,6 +913,63 @@ void __init debug_objects_early_init(void)
 }
 
 /*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
+/*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  * prevents that the debug code is called on kmem_cache_free() for the
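
debug_objects_replace_static_objects() leans on hlist_move_list(), which
transplants an entire list from one head to another in O(1) and leaves the
source head empty; each hash bucket is drained into a scratch list and
refilled with fresh heap copies, so no pointer into the soon-to-vanish
static pool survives. A minimal illustration of the transplant semantics
(the function name is hypothetical):

    #include <linux/list.h>

    static void transplant_demo(struct hlist_head *src)
    {
    	HLIST_HEAD(dst);

    	hlist_move_list(src, &dst);	/* O(1): dst takes all nodes */
    	/*
    	 * hlist_empty(src) is now true; the nodes live on dst and
    	 * can be walked or re-homed without touching src again.
    	 */
    }
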
@@ -890,8 +984,11 @@ void __init debug_objects_mem_init(void)
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }
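
Both outcomes of debug_objects_mem_init() are visible in the kernel log: on
success the replacement pass reports how many active objects were migrated
(at KERN_DEBUG, so a sufficiently verbose loglevel is needed), and on
allocation failure the facility shuts itself down with a warning. Something
like the following could be expected in dmesg (the object count here is
illustrative):

    $ dmesg | grep ODEBUG
    ODEBUG: 3 of 3 active objects replaced

    # or, if the pool allocation failed:
    ODEBUG: out of memory.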