Diffstat (limited to 'lib/debugobjects.c')
 -rw-r--r--  lib/debugobjects.c | 53
 1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fdcda3dbcd35..2755a3bd16a1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -50,6 +50,9 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr *descr_test __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
 	debug_objects_enabled = 1;
@@ -154,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&obj->node, &obj_pool);
-		obj_pool_free++;
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-	} else {
-		spin_lock_irqsave(&pool_lock, flags);
-		obj_pool_used--;
+	spin_lock_irqsave(&pool_lock, flags);
+	while (obj_pool_free > ODEBUG_POOL_SIZE) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		obj_pool_free--;
+		/*
+		 * We release pool_lock across kmem_cache_free() to
+		 * avoid contention on pool_lock.
+		 */
 		spin_unlock_irqrestore(&pool_lock, flags);
 		kmem_cache_free(obj_cache, obj);
+		spin_lock_irqsave(&pool_lock, flags);
 	}
+	spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long flags;
+	int sched = 0;
+
+	spin_lock_irqsave(&pool_lock, flags);
+	/*
+	 * schedule work when the pool is filled and the cache is
+	 * initialized:
+	 */
+	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+		sched = !work_pending(&debug_obj_work);
+	hlist_add_head(&obj->node, &obj_pool);
+	obj_pool_free++;
+	obj_pool_used--;
+	spin_unlock_irqrestore(&pool_lock, flags);
+	if (sched)
+		schedule_work(&debug_obj_work);
 }
 
 /*
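
For context, the patch above batches the expensive kmem_cache_free() calls: free_object() now only parks the object back on obj_pool and, when the pool has grown past ODEBUG_POOL_SIZE and obj_cache exists, schedules debug_obj_work; free_obj_work() later drains the surplus, dropping pool_lock around each kmem_cache_free() to avoid contention on the lock. What follows is a minimal userspace sketch of that deferred-free pattern, not kernel code: a detached pthread stands in for the workqueue, a "draining" flag stands in for work_pending(), and names such as pool_limit, release_node() and drain_pool() are made up for illustration.

/*
 * Sketch of the deferred-free pattern: the fast path only relinks the
 * object under the lock; the actual free() calls happen later in a
 * worker, with the lock dropped around each free.
 */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *pool;            /* surplus objects parked here    */
static int pool_free;                /* number of nodes in the pool    */
static int draining;                 /* a drain worker is in flight    */
static const int pool_limit = 256;   /* analogue of ODEBUG_POOL_SIZE   */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: free surplus nodes, dropping the lock around each free(). */
static void *drain_pool(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pool_lock);
	while (pool_free > pool_limit) {
		struct node *n = pool;

		pool = n->next;
		pool_free--;
		pthread_mutex_unlock(&pool_lock);  /* free() outside the lock */
		free(n);
		pthread_mutex_lock(&pool_lock);
	}
	draining = 0;
	pthread_mutex_unlock(&pool_lock);
	return NULL;
}

/* Fast path: park the node; kick the worker only when over the limit. */
static void release_node(struct node *n)
{
	pthread_t t;
	int kick;

	pthread_mutex_lock(&pool_lock);
	n->next = pool;
	pool = n;
	pool_free++;
	kick = pool_free > pool_limit && !draining;
	if (kick)
		draining = 1;
	pthread_mutex_unlock(&pool_lock);

	if (kick && pthread_create(&t, NULL, drain_pool, NULL) == 0)
		pthread_detach(t);
}

int main(void)
{
	/* Park more nodes than the limit so the worker has work to do. */
	for (int i = 0; i < 2 * pool_limit; i++) {
		struct node *n = malloc(sizeof(*n));

		if (n)
			release_node(n);
	}
	return 0;
}

As in the patch, the point of the split is that the caller of release_node() never blocks on the slow free path: it takes the lock briefly, updates the counters, and at most kicks the worker once while a drain is already pending.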