-rw-r--r--	lib/debugobjects.c | 63
1 files changed, 22 insertions, 41 deletions

diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e31273b45da5..3e79c100271f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -201,18 +201,13 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  * workqueue function to free objects.
  *
  * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
  */
-#define ODEBUG_FREE_BATCH	4
-
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *objs[ODEBUG_FREE_BATCH];
 	struct hlist_node *tmp;
 	struct debug_obj *obj;
 	unsigned long flags;
-	int i;
 	HLIST_HEAD(tofree);
 
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
@@ -240,26 +235,6 @@ static void free_obj_work(struct work_struct *work)
 		hlist_move_list(&obj_to_free, &tofree);
 		obj_nr_tofree = 0;
 	}
-
-	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
-			objs[i] = hlist_entry(obj_pool.first,
-					      typeof(*objs[0]), node);
-			hlist_del(&objs[i]->node);
-		}
-
-		obj_pool_free -= ODEBUG_FREE_BATCH;
-		debug_objects_freed += ODEBUG_FREE_BATCH;
-		/*
-		 * We release pool_lock across kmem_cache_free() to
-		 * avoid contention on pool_lock.
-		 */
-		raw_spin_unlock_irqrestore(&pool_lock, flags);
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
-			kmem_cache_free(obj_cache, objs[i]);
-		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
-			return;
-	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 
 	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
@@ -268,27 +243,33 @@ static void free_obj_work(struct work_struct *work)
 	}
 }
 
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
 {
 	unsigned long flags;
-	int sched = 0;
+	bool work;
 
 	raw_spin_lock_irqsave(&pool_lock, flags);
-	/*
-	 * schedule work when the pool is filled and the cache is
-	 * initialized:
-	 */
-	if (obj_pool_free > debug_objects_pool_size && obj_cache)
-		sched = 1;
-	hlist_add_head(&obj->node, &obj_pool);
-	obj_pool_free++;
+	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
 	obj_pool_used--;
+
+	if (work) {
+		obj_nr_tofree++;
+		hlist_add_head(&obj->node, &obj_to_free);
+	} else {
+		obj_pool_free++;
+		hlist_add_head(&obj->node, &obj_pool);
+	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	if (sched)
+	return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	if (__free_object(obj))
 		schedule_work(&debug_obj_work);
 }
 
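The hunk above replaces the inline batch freeing with a handoff to the global obj_to_free list: __free_object() parks surplus objects on that list and reports whether the worker needs to run, and free_object() only schedules the work. As a rough illustration of that split, here is a hedged user-space sketch (not kernel code): release_object, free_work, pool, pool_free, pool_size and nr_tofree are illustrative stand-ins for __free_object(), free_obj_work(), obj_pool, obj_pool_free, debug_objects_pool_size and obj_nr_tofree; locking, obj_cache and the used-object accounting are deliberately omitted.

/*
 * Hedged sketch of the deferred-free pattern introduced above; single
 * threaded, no pool_lock, names are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

static struct obj *pool;		/* objects kept around for reuse   */
static struct obj *to_free;		/* surplus objects queued for free */
static int pool_free, nr_tofree;
static const int pool_size = 4;		/* stand-in for debug_objects_pool_size */

/* Analogue of __free_object(): park the object, report whether work is needed. */
static bool release_object(struct obj *o)
{
	bool work = pool_free > pool_size;

	if (work) {
		o->next = to_free;	/* queue on the "global free list" */
		to_free = o;
		nr_tofree++;
	} else {
		o->next = pool;		/* refill the reuse pool */
		pool = o;
		pool_free++;
	}
	return work;
}

/* Analogue of free_obj_work(): free the queued surplus in one batch. */
static void free_work(void)
{
	while (to_free) {
		struct obj *o = to_free;

		to_free = o->next;
		nr_tofree--;
		free(o);
	}
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		if (release_object(o))	/* free_object() would schedule_work() here */
			free_work();
	}
	printf("pooled=%d queued=%d\n", pool_free, nr_tofree);
	free_work();
	return 0;
}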