path: root/lib/debugobjects.c
author     Yang Shi <yang.shi@linux.alibaba.com>    2018-02-05 18:18:27 -0500
committer  Thomas Gleixner <tglx@linutronix.de>     2018-02-13 04:58:59 -0500
commit     636e1970fd7deaa0d0ee0dfb6ac65fbd690b32d2 (patch)
tree       4cec59377bf77600013af40888433dcb57716722 /lib/debugobjects.c
parent     36c4ead6f6dfbbe777d3d7e9cc8702530b71a94f (diff)
debugobjects: Use global free list in free_object()
The newly added global free list makes it possible to avoid lengthy pool list iterations in free_obj_work(): objects are now put either on the pool list, when the fill level of the pool is below the maximum, or on the global free list immediately. As the pool is guaranteed to never exceed the maximum fill level, the batch removal from the pool list in free_obj_work() can be removed.

Split free_object() into two parts, so that the actual queueing function can be reused without invoking schedule_work() on every invocation.

[ tglx: Remove the batch removal from pool list and massage changelog ]

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/1517872708-24207-4-git-send-email-yang.shi@linux.alibaba.com
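The benefit of the split is easiest to see from a caller that releases many objects in one go: each object can be queued with __free_object(), and the workqueue is kicked once at the end rather than once per object. Below is a minimal sketch of such a bulk-free helper; free_objects_bulk() is a hypothetical name and not part of this patch, only __free_object(), debug_obj_work and schedule_work() come from the code being changed.

/*
 * Hypothetical bulk-free helper (not part of this patch): queue every
 * object via __free_object() and schedule the workqueue at most once,
 * instead of invoking schedule_work() per freed object.
 */
static void free_objects_bulk(struct debug_obj **objs, int cnt)
{
        bool work = false;
        int i;

        for (i = 0; i < cnt; i++)
                work |= __free_object(objs[i]);

        /* A single workqueue kick covers the whole batch. */
        if (work)
                schedule_work(&debug_obj_work);
}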
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c  63
 1 file changed, 22 insertions(+), 41 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e31273b45da5..3e79c100271f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -201,18 +201,13 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  * workqueue function to free objects.
  *
  * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
  */
-#define ODEBUG_FREE_BATCH       4
-
 static void free_obj_work(struct work_struct *work)
 {
-        struct debug_obj *objs[ODEBUG_FREE_BATCH];
         struct hlist_node *tmp;
         struct debug_obj *obj;
         unsigned long flags;
-        int i;
         HLIST_HEAD(tofree);
 
         if (!raw_spin_trylock_irqsave(&pool_lock, flags))
@@ -240,26 +235,6 @@ static void free_obj_work(struct work_struct *work)
                 hlist_move_list(&obj_to_free, &tofree);
                 obj_nr_tofree = 0;
         }
-
-        while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
-                for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
-                        objs[i] = hlist_entry(obj_pool.first,
-                                              typeof(*objs[0]), node);
-                        hlist_del(&objs[i]->node);
-                }
-
-                obj_pool_free -= ODEBUG_FREE_BATCH;
-                debug_objects_freed += ODEBUG_FREE_BATCH;
-                /*
-                 * We release pool_lock across kmem_cache_free() to
-                 * avoid contention on pool_lock.
-                 */
-                raw_spin_unlock_irqrestore(&pool_lock, flags);
-                for (i = 0; i < ODEBUG_FREE_BATCH; i++)
-                        kmem_cache_free(obj_cache, objs[i]);
-                if (!raw_spin_trylock_irqsave(&pool_lock, flags))
-                        return;
-        }
         raw_spin_unlock_irqrestore(&pool_lock, flags);
 
         hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
@@ -268,27 +243,33 @@ static void free_obj_work(struct work_struct *work)
         }
 }
 
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
 {
         unsigned long flags;
-        int sched = 0;
+        bool work;
 
         raw_spin_lock_irqsave(&pool_lock, flags);
-        /*
-         * schedule work when the pool is filled and the cache is
-         * initialized:
-         */
-        if (obj_pool_free > debug_objects_pool_size && obj_cache)
-                sched = 1;
-        hlist_add_head(&obj->node, &obj_pool);
-        obj_pool_free++;
+        work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
         obj_pool_used--;
+
+        if (work) {
+                obj_nr_tofree++;
+                hlist_add_head(&obj->node, &obj_to_free);
+        } else {
+                obj_pool_free++;
+                hlist_add_head(&obj->node, &obj_pool);
+        }
         raw_spin_unlock_irqrestore(&pool_lock, flags);
-        if (sched)
+        return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+        if (__free_object(obj))
                 schedule_work(&debug_obj_work);
 }
 
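For context, the consumer side of the global free list is free_obj_work(), which appears only partially above: it detaches obj_to_free in O(1) while holding pool_lock and calls kmem_cache_free() only after dropping the lock. The following is a condensed sketch of that core pattern, using the identifiers visible in the hunks above; the full function in the kernel does more than this.

/*
 * Condensed sketch of the workqueue side (not the complete function):
 * move the global free list onto a local list under pool_lock, then
 * free the objects with the lock dropped to keep contention low.
 */
static void free_obj_work_sketch(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        /* Detach the whole global free list in O(1) while holding the lock. */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                obj_nr_tofree = 0;
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        /* Free the detached objects outside of pool_lock. */
        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}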