Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c  128
1 file changed, 113 insertions(+), 15 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 5d99be1fd988..eae56fddfa3b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -9,6 +9,7 @@
  */
 #include <linux/debugobjects.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/hash.h>
@@ -30,7 +31,7 @@ struct debug_bucket {
 
 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
@@ -50,12 +51,23 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr *descr_test __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
         debug_objects_enabled = 1;
         return 0;
 }
+
+static int __init disable_object_debug(char *str)
+{
+        debug_objects_enabled = 0;
+        return 0;
+}
+
 early_param("debug_objects", enable_object_debug);
+early_param("no_debug_objects", disable_object_debug);
 
 static const char *obj_states[ODEBUG_STATE_MAX] = {
         [ODEBUG_STATE_NONE]     = "none",
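
The two lines added above wire up a deferred-free mechanism: DECLARE_WORK() statically binds free_obj_work() to a work item, and schedule_work() (used further down in free_object()) queues that item on the kernel's shared workqueue so the actual freeing happens later in process context. The extra early_param() hook also means that booting with no_debug_objects on the command line now disables the facility. As a rough, self-contained sketch of the same work-item pattern (module and function names below are made up for illustration, not part of this patch):

/* Minimal sketch of the deferred-work pattern used by debugobjects here:
 * a statically declared work item that a fast path queues and the
 * workqueue later runs in process context. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("deferred work ran in process context\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
        /* queueing is cheap and safe from atomic context */
        if (!work_pending(&my_work))
                schedule_work(&my_work);
        return 0;
}

static void __exit my_exit(void)
{
        flush_work(&my_work);   /* make sure the handler has finished */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
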
@@ -146,25 +158,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-        unsigned long idx = (unsigned long)(obj - obj_static_pool);
+        struct debug_obj *obj;
         unsigned long flags;
 
-        if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-                spin_lock_irqsave(&pool_lock, flags);
-                hlist_add_head(&obj->node, &obj_pool);
-                obj_pool_free++;
-                obj_pool_used--;
-                spin_unlock_irqrestore(&pool_lock, flags);
-        } else {
-                spin_lock_irqsave(&pool_lock, flags);
-                obj_pool_used--;
+        spin_lock_irqsave(&pool_lock, flags);
+        while (obj_pool_free > ODEBUG_POOL_SIZE) {
+                obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+                hlist_del(&obj->node);
+                obj_pool_free--;
+                /*
+                 * We release pool_lock across kmem_cache_free() to
+                 * avoid contention on pool_lock.
+                 */
                 spin_unlock_irqrestore(&pool_lock, flags);
                 kmem_cache_free(obj_cache, obj);
+                spin_lock_irqsave(&pool_lock, flags);
         }
+        spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+        unsigned long flags;
+        int sched = 0;
+
+        spin_lock_irqsave(&pool_lock, flags);
+        /*
+         * schedule work when the pool is filled and the cache is
+         * initialized:
+         */
+        if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+                sched = !work_pending(&debug_obj_work);
+        hlist_add_head(&obj->node, &obj_pool);
+        obj_pool_free++;
+        obj_pool_used--;
+        spin_unlock_irqrestore(&pool_lock, flags);
+        if (sched)
+                schedule_work(&debug_obj_work);
 }
 
 /*
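
With this hunk, free_object() only returns objects to obj_pool and, when the pool has grown past ODEBUG_POOL_SIZE and the slab cache exists, schedules free_obj_work(). The work function drains the excess and deliberately drops pool_lock around each kmem_cache_free(); because the while condition is re-checked after the lock is retaken, the loop stays correct even if other CPUs refill the pool in between. A self-contained user-space analogue of this "free the excess above a watermark, outside the lock" shape (all names and sizes here are illustrative) might look like:

/* User-space sketch of a pool drained to a watermark, with the lock
 * dropped around the expensive free().  Compile with: cc -pthread drain.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define WATERMARK 4

struct node { struct node *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool;
static int pool_count;

/* put an object back; cheap, lock held only briefly */
static void pool_put(struct node *n)
{
        pthread_mutex_lock(&pool_lock);
        n->next = pool;
        pool = n;
        pool_count++;
        pthread_mutex_unlock(&pool_lock);
}

/* free everything above the watermark, dropping the lock around free() */
static void pool_drain(void)
{
        pthread_mutex_lock(&pool_lock);
        while (pool_count > WATERMARK) {
                struct node *n = pool;
                pool = n->next;
                pool_count--;
                pthread_mutex_unlock(&pool_lock);
                free(n);                /* expensive part, lock not held */
                pthread_mutex_lock(&pool_lock);
        }
        pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                pool_put(malloc(sizeof(struct node)));
        pool_drain();
        printf("%d objects left in pool\n", pool_count);        /* prints 4 */
        return 0;
}

The split keeps the fast path (pool_put, like free_object()) free of allocator calls, while the slow path does the expensive frees without holding the lock across them.
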
@@ -876,6 +914,63 @@ void __init debug_objects_early_init(void)
 }
 
 /*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+        struct debug_bucket *db = obj_hash;
+        struct hlist_node *node, *tmp;
+        struct debug_obj *obj, *new;
+        HLIST_HEAD(objects);
+        int i, cnt = 0;
+
+        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+                if (!obj)
+                        goto free;
+                hlist_add_head(&obj->node, &objects);
+        }
+
+        /*
+         * When debug_objects_mem_init() is called we know that only
+         * one CPU is up, so disabling interrupts is enough
+         * protection. This avoids the lockdep hell of lock ordering.
+         */
+        local_irq_disable();
+
+        /* Remove the statically allocated objects from the pool */
+        hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+                hlist_del(&obj->node);
+        /* Move the allocated objects to the pool */
+        hlist_move_list(&objects, &obj_pool);
+
+        /* Replace the active object references */
+        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+                hlist_move_list(&db->list, &objects);
+
+                hlist_for_each_entry(obj, node, &objects, node) {
+                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
+                        hlist_del(&new->node);
+                        /* copy object data */
+                        *new = *obj;
+                        hlist_add_head(&new->node, &db->list);
+                        cnt++;
+                }
+        }
+
+        printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+               obj_pool_used);
+        local_irq_enable();
+        return 0;
+free:
+        hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+                hlist_del(&obj->node);
+                kmem_cache_free(obj_cache, obj);
+        }
+        return -ENOMEM;
+}
+
+/*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  * prevents that the debug code is called on kmem_cache_free() for the
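
debug_objects_replace_static_objects() always has enough replacements on hand: every active object was handed out from the static pool, so obj_pool_used cannot exceed ODEBUG_POOL_SIZE, and the ODEBUG_POOL_SIZE dynamic objects allocated up front cover all of them. Once every bucket entry points at a dynamic copy, nothing references obj_static_pool any more, which is what makes the __initdata annotation added earlier in this patch safe. A much-simplified user-space sketch of the core idea, replacing statically allocated list nodes with heap copies while keeping the list intact (single bucket, illustrative names, not the kernel hlist API):

/* Swap static list nodes for heap copies, preserving list order. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj { int data; struct obj *next; };

#define NSTATIC 3
static struct obj static_pool[NSTATIC];
static struct obj *bucket;              /* one "hash bucket" for brevity */

int main(void)
{
        /* pretend two static objects are active in the bucket */
        static_pool[0] = (struct obj){ .data = 1, .next = NULL };
        static_pool[1] = (struct obj){ .data = 2, .next = &static_pool[0] };
        bucket = &static_pool[1];

        /* replace each static object with a heap copy, keeping list order */
        for (struct obj **pp = &bucket; *pp; pp = &(*pp)->next) {
                struct obj *copy = malloc(sizeof(*copy));
                memcpy(copy, *pp, sizeof(*copy));       /* copy object data */
                *pp = copy;                             /* relink to the copy */
        }

        for (struct obj *o = bucket; o; o = o->next)
                printf("data=%d (heap copy)\n", o->data);
        return 0;
}
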
@@ -890,8 +985,11 @@ void __init debug_objects_mem_init(void)
                               sizeof (struct debug_obj), 0,
                               SLAB_DEBUG_OBJECTS, NULL);
 
-        if (!obj_cache)
+        if (!obj_cache || debug_objects_replace_static_objects()) {
                 debug_objects_enabled = 0;
-        else
+                if (obj_cache)
+                        kmem_cache_destroy(obj_cache);
+                printk(KERN_WARNING "ODEBUG: out of memory.\n");
+        } else
                 debug_objects_selftest();
 }