diff options
Diffstat (limited to 'lib/debugobjects.c')
| -rw-r--r-- | lib/debugobjects.c | 58 |
1 files changed, 46 insertions, 12 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..8c28cbd7e104 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -52,9 +52,18 @@ static int debug_objects_fixups __read_mostly; | |||
| 52 | static int debug_objects_warnings __read_mostly; | 52 | static int debug_objects_warnings __read_mostly; |
| 53 | static int debug_objects_enabled __read_mostly | 53 | static int debug_objects_enabled __read_mostly |
| 54 | = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; | 54 | = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; |
| 55 | 55 | static int debug_objects_pool_size __read_mostly | |
| 56 | = ODEBUG_POOL_SIZE; | ||
| 57 | static int debug_objects_pool_min_level __read_mostly | ||
| 58 | = ODEBUG_POOL_MIN_LEVEL; | ||
| 56 | static struct debug_obj_descr *descr_test __read_mostly; | 59 | static struct debug_obj_descr *descr_test __read_mostly; |
| 57 | 60 | ||
| 61 | /* | ||
| 62 | * Track numbers of kmem_cache_alloc()/free() calls done. | ||
| 63 | */ | ||
| 64 | static int debug_objects_allocated; | ||
| 65 | static int debug_objects_freed; | ||
| 66 | |||
| 58 | static void free_obj_work(struct work_struct *work); | 67 | static void free_obj_work(struct work_struct *work); |
| 59 | static DECLARE_WORK(debug_obj_work, free_obj_work); | 68 | static DECLARE_WORK(debug_obj_work, free_obj_work); |
| 60 | 69 | ||
| @@ -88,13 +97,13 @@ static void fill_pool(void) | |||
| 88 | struct debug_obj *new; | 97 | struct debug_obj *new; |
| 89 | unsigned long flags; | 98 | unsigned long flags; |
| 90 | 99 | ||
| 91 | if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) | 100 | if (likely(obj_pool_free >= debug_objects_pool_min_level)) |
| 92 | return; | 101 | return; |
| 93 | 102 | ||
| 94 | if (unlikely(!obj_cache)) | 103 | if (unlikely(!obj_cache)) |
| 95 | return; | 104 | return; |
| 96 | 105 | ||
| 97 | while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { | 106 | while (obj_pool_free < debug_objects_pool_min_level) { |
| 98 | 107 | ||
| 99 | new = kmem_cache_zalloc(obj_cache, gfp); | 108 | new = kmem_cache_zalloc(obj_cache, gfp); |
| 100 | if (!new) | 109 | if (!new) |
| @@ -102,6 +111,7 @@ static void fill_pool(void) | |||
| 102 | 111 | ||
| 103 | raw_spin_lock_irqsave(&pool_lock, flags); | 112 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 104 | hlist_add_head(&new->node, &obj_pool); | 113 | hlist_add_head(&new->node, &obj_pool); |
| 114 | debug_objects_allocated++; | ||
| 105 | obj_pool_free++; | 115 | obj_pool_free++; |
| 106 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 116 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 107 | } | 117 | } |
| @@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 162 | 172 | ||
| 163 | /* | 173 | /* |
| 164 | * workqueue function to free objects. | 174 | * workqueue function to free objects. |
| 175 | * | ||
| 176 | * To reduce contention on the global pool_lock, the actual freeing of | ||
| 177 | * debug objects will be delayed if the pool_lock is busy. We also free | ||
| 178 | * the objects in a batch of 4 for each lock/unlock cycle. | ||
| 165 | */ | 179 | */ |
| 180 | #define ODEBUG_FREE_BATCH 4 | ||
| 181 | |||
| 166 | static void free_obj_work(struct work_struct *work) | 182 | static void free_obj_work(struct work_struct *work) |
| 167 | { | 183 | { |
| 168 | struct debug_obj *obj; | 184 | struct debug_obj *objs[ODEBUG_FREE_BATCH]; |
| 169 | unsigned long flags; | 185 | unsigned long flags; |
| 186 | int i; | ||
| 170 | 187 | ||
| 171 | raw_spin_lock_irqsave(&pool_lock, flags); | 188 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) |
| 172 | while (obj_pool_free > ODEBUG_POOL_SIZE) { | 189 | return; |
| 173 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 190 | while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) { |
| 174 | hlist_del(&obj->node); | 191 | for (i = 0; i < ODEBUG_FREE_BATCH; i++) { |
| 175 | obj_pool_free--; | 192 | objs[i] = hlist_entry(obj_pool.first, |
| 193 | typeof(*objs[0]), node); | ||
| 194 | hlist_del(&objs[i]->node); | ||
| 195 | } | ||
| 196 | |||
| 197 | obj_pool_free -= ODEBUG_FREE_BATCH; | ||
| 198 | debug_objects_freed += ODEBUG_FREE_BATCH; | ||
| 176 | /* | 199 | /* |
| 177 | * We release pool_lock across kmem_cache_free() to | 200 | * We release pool_lock across kmem_cache_free() to |
| 178 | * avoid contention on pool_lock. | 201 | * avoid contention on pool_lock. |
| 179 | */ | 202 | */ |
| 180 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 203 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 181 | kmem_cache_free(obj_cache, obj); | 204 | for (i = 0; i < ODEBUG_FREE_BATCH; i++) |
| 182 | raw_spin_lock_irqsave(&pool_lock, flags); | 205 | kmem_cache_free(obj_cache, objs[i]); |
| 206 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) | ||
| 207 | return; | ||
| 183 | } | 208 | } |
| 184 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 209 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 185 | } | 210 | } |
| @@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj) | |||
| 198 | * schedule work when the pool is filled and the cache is | 223 | * schedule work when the pool is filled and the cache is |
| 199 | * initialized: | 224 | * initialized: |
| 200 | */ | 225 | */ |
| 201 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) | 226 | if (obj_pool_free > debug_objects_pool_size && obj_cache) |
| 202 | sched = 1; | 227 | sched = 1; |
| 203 | hlist_add_head(&obj->node, &obj_pool); | 228 | hlist_add_head(&obj->node, &obj_pool); |
| 204 | obj_pool_free++; | 229 | obj_pool_free++; |
| @@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v) | |||
| 758 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); | 783 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); |
| 759 | seq_printf(m, "pool_used :%d\n", obj_pool_used); | 784 | seq_printf(m, "pool_used :%d\n", obj_pool_used); |
| 760 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); | 785 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); |
| 786 | seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); | ||
| 787 | seq_printf(m, "objs_freed :%d\n", debug_objects_freed); | ||
| 761 | return 0; | 788 | return 0; |
| 762 | } | 789 | } |
| 763 | 790 | ||
| @@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void) | |||
| 1116 | pr_warn("out of memory.\n"); | 1143 | pr_warn("out of memory.\n"); |
| 1117 | } else | 1144 | } else |
| 1118 | debug_objects_selftest(); | 1145 | debug_objects_selftest(); |
| 1146 | |||
| 1147 | /* | ||
| 1148 | * Increase the thresholds for allocating and freeing objects | ||
| 1149 | * according to the number of possible CPUs available in the system. | ||
| 1150 | */ | ||
| 1151 | debug_objects_pool_size += num_possible_cpus() * 32; | ||
| 1152 | debug_objects_pool_min_level += num_possible_cpus() * 4; | ||
| 1119 | } | 1153 | } |
