path: root/lib/debugobjects.c
author     Thomas Gleixner <tglx@linutronix.de>   2009-03-16 05:04:53 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2009-03-17 07:28:30 -0400
commit     337fff8b5ed0573ea106491c6de47bd7fe623500 (patch)
tree       7fd379d4a3d23e055f19be2d447de2ea64ecd069 /lib/debugobjects.c
parent     1be1cb7b47f0744141ed61cdb25648819ae1a56f (diff)
debugobjects: delay free of internal objects
Impact: avoid recursive kfree calls, less slab activity on heavy load

debugobjects checks on kfree whether tracked objects are freed. When a
tracked object is freed, debugobjects frees the internal reference object
as well. The debug object slab cache is marked to not recurse into
debugobjects when a slab object is freed, but the recursive call can be
problematic versus locking in the memory allocator.

Defer the freeing of debug slab objects via schedule_work. The reasons
not to use RCU are:

1) RCU makes the data structure larger
2) there is no real need for RCU as nothing references the obj after we
   freed it
3) under heavy load it is easier to reuse the to-be-freed objects instead
   of allocating new objects from the slab. This lowered the slab activity
   significantly in a heavy-load networking test where lots of timers are
   created/destroyed.

The workqueue-based delayed free allows us to simply put the to-be-freed
objects back into the object pool and reuse them right away.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <200903162049.58058.nickpiggin@yahoo.com.au>
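For orientation, here is a compact, self-contained sketch of the same idea: the release path only parks the tracking object in a pool under a spinlock and, once the pool has grown past its target size (and the slab cache exists), schedules a work item that trims the surplus back to the cache outside the original kfree() path. All names in the sketch (tracker, POOL_TARGET, tracker_release, tracker_trim_work) are hypothetical stand-ins, not part of this patch; the real implementation is the free_object()/free_obj_work() pair in the diff below.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical tracker pool; trimmed back to at most POOL_TARGET entries. */
#define POOL_TARGET	256

struct tracker {
	struct hlist_node	node;
	void			*addr;
};

/* Assumed to be created with kmem_cache_create() during init. */
static struct kmem_cache *tracker_cache;

static HLIST_HEAD(tracker_pool);
static int tracker_pool_free;
static DEFINE_SPINLOCK(tracker_pool_lock);

/*
 * Work function: trim the pool back to its target size. The lock is
 * dropped around kmem_cache_free() so the slab call never runs under
 * the pool lock.
 */
static void tracker_trim_work(struct work_struct *work)
{
	struct tracker *t;
	unsigned long flags;

	spin_lock_irqsave(&tracker_pool_lock, flags);
	while (tracker_pool_free > POOL_TARGET) {
		t = hlist_entry(tracker_pool.first, typeof(*t), node);
		hlist_del(&t->node);
		tracker_pool_free--;
		spin_unlock_irqrestore(&tracker_pool_lock, flags);
		kmem_cache_free(tracker_cache, t);
		spin_lock_irqsave(&tracker_pool_lock, flags);
	}
	spin_unlock_irqrestore(&tracker_pool_lock, flags);
}

static DECLARE_WORK(tracker_trim, tracker_trim_work);

/*
 * Release path: never hand the object straight back to the slab cache
 * (that would recurse into the allocator from the kfree() path being
 * debugged); park it in the pool and let the work item trim the surplus.
 */
static void tracker_release(struct tracker *t)
{
	unsigned long flags;
	bool sched = false;

	spin_lock_irqsave(&tracker_pool_lock, flags);
	if (tracker_pool_free > POOL_TARGET && tracker_cache)
		sched = !work_pending(&tracker_trim);
	hlist_add_head(&t->node, &tracker_pool);
	tracker_pool_free++;
	spin_unlock_irqrestore(&tracker_pool_lock, flags);

	if (sched)
		schedule_work(&tracker_trim);
}

Note the ordering in tracker_release(): work_pending() is sampled and the object is parked while the lock is held, but schedule_work() itself runs only after the lock is dropped, mirroring how the patch below avoids calling into the workqueue code with pool_lock held.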
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--   lib/debugobjects.c   53
1 file changed, 41 insertions, 12 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fdcda3dbcd35..2755a3bd16a1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -50,6 +50,9 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr *descr_test __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
 	debug_objects_enabled = 1;
@@ -154,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&obj->node, &obj_pool);
-		obj_pool_free++;
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-	} else {
-		spin_lock_irqsave(&pool_lock, flags);
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-		kmem_cache_free(obj_cache, obj);
+	spin_lock_irqsave(&pool_lock, flags);
+	while (obj_pool_free > ODEBUG_POOL_SIZE) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		obj_pool_free--;
+		/*
+		 * We release pool_lock across kmem_cache_free() to
+		 * avoid contention on pool_lock.
+		 */
+		spin_unlock_irqrestore(&pool_lock, flags);
+		kmem_cache_free(obj_cache, obj);
+		spin_lock_irqsave(&pool_lock, flags);
 	}
+	spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long flags;
+	int sched = 0;
+
+	spin_lock_irqsave(&pool_lock, flags);
+	/*
+	 * schedule work when the pool is filled and the cache is
+	 * initialized:
+	 */
+	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+		sched = !work_pending(&debug_obj_work);
+	hlist_add_head(&obj->node, &obj_pool);
+	obj_pool_free++;
+	obj_pool_used--;
+	spin_unlock_irqrestore(&pool_lock, flags);
+	if (sched)
+		schedule_work(&debug_obj_work);
 }
 
 /*