author		Yang Shi <yang.shi@linux.alibaba.com>	2018-02-05 18:18:26 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2018-02-13 04:58:58 -0500
commit		36c4ead6f6dfbbe777d3d7e9cc8702530b71a94f (patch)
tree		cd659d5e9c986440e5734debcabcb5496376856c /lib/debugobjects.c
parent		bd9dcd046509cd5355605e43791eacee8bf5e40f (diff)
debugobjects: Add global free list and the counter
free_object() adds objects to the pool list and schedules work when the
pool list is larger than the pool size. The worker handles the actual
kfree() of the object by iterating the pool list until the pool size is
below the maximum pool size again. To iterate the pool list, pool_lock
has to be held and the objects which should be freed need to be put into
temporary storage so pool_lock can be dropped for the actual
kmem_cache_free() invocation. That's a pointless and expensive exercise
if there is a large number of objects to free.

In such a case it's better to evaluate the fill level of the pool in
free_object() and queue the object to free either on the pool list or,
if the pool is full, on a separate global free list.

The worker can then do the following simpler operation:

 - Move objects back from the global free list to the pool list if the
   pool list is no longer full.

 - Remove the remaining objects in a single list move operation from the
   global free list and do the kmem_cache_free() operation lockless from
   the temporary list head.

In fill_pool() the global free list is checked as well to avoid real
allocations from the kmem cache.

Add the necessary list head and a counter for the number of objects on
the global free list and export that counter via sysfs:

  max_chain     :79
  max_loops     :8147
  warnings      :0
  fixups        :0
  pool_free     :1697
  pool_min_free :346
  pool_used     :15356
  pool_max_used :23933
  on_free_list  :39
  objs_allocated:32617
  objs_freed    :16588

Nothing queues objects on the global free list yet. This happens in a
follow up change.

[ tglx: Simplified implementation and massaged changelog ]

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/1517872708-24207-3-git-send-email-yang.shi@linux.alibaba.com
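
As an aside (editorial note, not part of the patch), the two-stage free
path described above can be modelled in a few lines of plain userspace C.
In the sketch below pool_lock becomes a pthread mutex, the hlists become
singly linked lists, kmem_cache_free() becomes free(), and all names
(node, free_node, worker_fn, POOL_MAX) are invented for the illustration:

	/*
	 * Minimal userspace model of the free path described above.
	 * Sketch only: names and data structures are invented here and
	 * do not correspond to anything in lib/debugobjects.c.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	#define POOL_MAX 4		/* stand-in for debug_objects_pool_size */

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pool_head;		/* models obj_pool */
	static struct node *to_free_head;	/* models obj_to_free */
	static int pool_free, nr_tofree;	/* models obj_pool_free / obj_nr_tofree */

	/* Free path: cache the object in the pool if it has room, otherwise
	 * park it on the global free list for the worker. */
	static void free_node(struct node *n)
	{
		pthread_mutex_lock(&pool_lock);
		if (pool_free < POOL_MAX) {
			n->next = pool_head;
			pool_head = n;
			pool_free++;
		} else {
			n->next = to_free_head;
			to_free_head = n;
			nr_tofree++;
		}
		pthread_mutex_unlock(&pool_lock);
	}

	/* Worker: first top up the pool from the free list, then detach
	 * whatever is left in one pointer move and free it outside the lock. */
	static void worker_fn(void)
	{
		struct node *n, *tofree;

		pthread_mutex_lock(&pool_lock);
		while (nr_tofree && pool_free < POOL_MAX) {
			n = to_free_head;
			to_free_head = n->next;
			nr_tofree--;
			n->next = pool_head;
			pool_head = n;
			pool_free++;
		}
		tofree = to_free_head;		/* single list move ... */
		to_free_head = NULL;
		nr_tofree = 0;
		pthread_mutex_unlock(&pool_lock);

		while (tofree) {		/* ... actual frees are lockless */
			n = tofree;
			tofree = n->next;
			free(n);
		}
	}

	int main(void)
	{
		for (int i = 0; i < 2 * POOL_MAX; i++) {
			struct node *n = malloc(sizeof(*n));
			if (n)
				free_node(n);
		}
		worker_fn();	/* POOL_MAX nodes stay cached, the rest are freed */
		return 0;
	}

The detail the changelog emphasises is visible at the end of worker_fn():
the surplus is detached in a single pointer move while the lock is held,
and the actual frees happen afterwards, outside the locked region.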
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--	lib/debugobjects.c	58
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index f6d57a11c927..e31273b45da5 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,11 +42,14 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
 
 static int obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int obj_pool_free = ODEBUG_POOL_SIZE;
 static int obj_pool_used;
 static int obj_pool_max_used;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
 static struct kmem_cache *obj_cache;
 
 static int debug_objects_maxchain __read_mostly;
@@ -97,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new;
+	struct debug_obj *new, *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
+	/*
+	 * Reuse objs from the global free list; they will be reinitialized
+	 * when allocating.
+	 */
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+		/*
+		 * Recheck with the lock held as the worker thread might have
+		 * won the race and freed the global free list already.
+		 */
+		if (obj_nr_tofree) {
+			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+			hlist_del(&obj->node);
+			obj_nr_tofree--;
+			hlist_add_head(&obj->node, &obj_pool);
+			obj_pool_free++;
+		}
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
+	}
+
 	if (unlikely(!obj_cache))
 		return;
 
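A side note on the hunk above (editorial, not part of the patch): the
obj_nr_tofree test at the top of the while loop runs without pool_lock,
so the worker may have emptied obj_to_free by the time the lock is taken;
that is why the count is rechecked under the lock before an object is
moved. The same unlocked-check / lock / recheck shape, reduced to a
minimal userspace sketch with invented names (shared_count, shared_lock,
take_one):

	#include <pthread.h>

	static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
	static int shared_count;	/* stands in for obj_nr_tofree */

	static int take_one(void)
	{
		int taken = 0;

		/* Cheap, unlocked early-out; a plain read is good enough
		 * here because the result is rechecked under the lock. */
		if (!shared_count)
			return 0;

		pthread_mutex_lock(&shared_lock);
		/* Recheck: another thread may have emptied it meanwhile. */
		if (shared_count) {
			shared_count--;
			taken = 1;
		}
		pthread_mutex_unlock(&shared_lock);
		return taken;
	}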
@@ -186,11 +209,38 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_obj_work(struct work_struct *work)
 {
 	struct debug_obj *objs[ODEBUG_FREE_BATCH];
+	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	unsigned long flags;
 	int i;
+	HLIST_HEAD(tofree);
 
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
+
+	/*
+	 * The objs on the pool list might be allocated before the work is
+	 * run, so recheck if pool list it full or not, if not fill pool
+	 * list from the global free list
+	 */
+	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_nr_tofree--;
+	}
+
+	/*
+	 * Pool list is already full and there are still objs on the free
+	 * list. Move remaining free objs to a temporary list to free the
+	 * memory outside the pool_lock held region.
+	 */
+	if (obj_nr_tofree) {
+		hlist_move_list(&obj_to_free, &tofree);
+		obj_nr_tofree = 0;
+	}
+
 	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
 		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
 			objs[i] = hlist_entry(obj_pool.first,
@@ -211,6 +261,11 @@ static void free_obj_work(struct work_struct *work)
 			return;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
 }
 
 /*
@@ -793,6 +848,7 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;