-rw-r--r--   include/linux/slab_def.h |   3
-rw-r--r--   lib/Kconfig.debug        |   4
-rw-r--r--   mm/slab.c                | 226
3 files changed, 1 insertion(+), 232 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 9a5eafb7145b..abc7de77b988 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -61,9 +61,6 @@ struct kmem_cache {
 	atomic_t allocmiss;
 	atomic_t freehit;
 	atomic_t freemiss;
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	atomic_t store_user_clean;
-#endif
 
 	/*
 	 * If debugging is enabled, then the allocator can add additional
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fdfa173651eb..eae43952902e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -542,10 +542,6 @@ config DEBUG_SLAB
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
-config DEBUG_SLAB_LEAK
-	bool "Memory leak debugging"
-	depends on DEBUG_SLAB
-
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
 	depends on SLUB && SLUB_DEBUG
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -362,29 +362,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline bool is_store_user_clean(struct kmem_cache *cachep)
-{
-	return atomic_read(&cachep->store_user_clean) == 1;
-}
-
-static inline void set_store_user_clean(struct kmem_cache *cachep)
-{
-	atomic_set(&cachep->store_user_clean, 1);
-}
-
-static inline void set_store_user_dirty(struct kmem_cache *cachep)
-{
-	if (is_store_user_clean(cachep))
-		atomic_set(&cachep->store_user_clean, 0);
-}
-
-#else
-static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
-
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
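The helpers removed above implemented the dirty-flag handshake used by the leak scanner: the scanner marks a cache clean before a pass, any alloc/free that records a new caller marks it dirty, and a pass only counts if the flag is still clean at the end. Note that set_store_user_dirty() wrote the flag only when it was currently clean, so the hot paths usually did a plain read rather than an atomic store. Below is a minimal userspace sketch of the same pattern, using C11 atomics instead of the kernel's atomic_t; all names are illustrative, not taken from the kernel.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-cache store_user_clean flag. */
static atomic_int store_user_clean;

static bool is_clean(void)  { return atomic_load(&store_user_clean) == 1; }
static void set_clean(void) { atomic_store(&store_user_clean, 1); }

/* Hot-path side: store only when a scan is in progress, so the
 * common case is a read rather than an atomic write. */
static void set_dirty(void)
{
	if (is_clean())
		atomic_store(&store_user_clean, 0);
}

int main(void)
{
	set_clean();                                  /* scanner begins a pass */
	set_dirty();                                  /* concurrent alloc/free */
	printf("pass still valid: %d\n", is_clean()); /* prints 0 */
	return 0;
}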
@@ -2552,11 +2529,6 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
 	page->active++;
 
-#if DEBUG
-	if (cachep->flags & SLAB_STORE_USER)
-		set_store_user_dirty(cachep);
-#endif
-
 	return objp;
 }
 
@@ -2762,10 +2734,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
-	if (cachep->flags & SLAB_STORE_USER) {
-		set_store_user_dirty(cachep);
+	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = (void *)caller;
-	}
 
 	objnr = obj_to_index(cachep, page, objp);
 
@@ -4184,200 +4154,6 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 	return res;
 }
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline int add_caller(unsigned long *n, unsigned long v)
-{
-	unsigned long *p;
-	int l;
-	if (!v)
-		return 1;
-	l = n[1];
-	p = n + 2;
-	while (l) {
-		int i = l/2;
-		unsigned long *q = p + 2 * i;
-		if (*q == v) {
-			q[1]++;
-			return 1;
-		}
-		if (*q > v) {
-			l = i;
-		} else {
-			p = q + 2;
-			l -= i + 1;
-		}
-	}
-	if (++n[1] == n[0])
-		return 0;
-	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
-	p[0] = v;
-	p[1] = 1;
-	return 1;
-}
-
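add_caller() above keeps its results in one flat unsigned long array: n[0] is the capacity in entries, n[1] the number of entries in use, and sorted (caller address, hit count) pairs start at n + 2. Lookup is a binary search over the pairs; a miss shifts the tail with memmove() and inserts a new pair with count 1, and the function returns 0 only once the table is full. Below is a self-contained userspace sketch of the same layout; the capacity of 8 and the sample addresses are made up for illustration.

#include <stdio.h>
#include <string.h>

/*
 * Same flat layout as the removed add_caller(): n[0] = capacity in
 * entries, n[1] = entries used, sorted (address, count) pairs from n + 2.
 * Returns 0 only when the table is full.
 */
static int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p = n + 2;
	unsigned long l = n[1];

	if (!v)
		return 1;
	while (l) {
		unsigned long i = l / 2;
		unsigned long *q = p + 2 * i;

		if (*q == v) {		/* known caller: bump its count */
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;		/* continue in the lower half */
		} else {
			p = q + 2;	/* continue in the upper half */
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	/* open a slot at p, then insert (v, 1) there */
	memmove(p + 2, p,
		n[1] * 2 * sizeof(unsigned long) - ((char *)p - (char *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

int main(void)
{
	unsigned long table[2 + 2 * 8] = { 8, 0 };	/* capacity 8, empty */
	unsigned long callers[] = { 0xc0de, 0xbeef, 0xc0de, 0xf00d };
	unsigned long i;

	for (i = 0; i < sizeof(callers) / sizeof(callers[0]); i++)
		add_caller(table, callers[i]);
	for (i = 0; i < table[1]; i++)
		printf("%#lx hit %lu time(s)\n",
		       table[2 + 2 * i], table[3 + 2 * i]);
	return 0;
}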
-static void handle_slab(unsigned long *n, struct kmem_cache *c,
-			struct page *page)
-{
-	void *p;
-	int i, j;
-	unsigned long v;
-
-	if (n[0] == n[1])
-		return;
-	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-		bool active = true;
-
-		for (j = page->active; j < c->num; j++) {
-			if (get_free_obj(page, j) == i) {
-				active = false;
-				break;
-			}
-		}
-
-		if (!active)
-			continue;
-
-		/*
-		 * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
-		 * mapping is established when actual object allocation and
-		 * we could mistakenly access the unmapped object in the cpu
-		 * cache.
-		 */
-		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
-			continue;
-
-		if (!add_caller(n, v))
-			return;
-	}
-}
-
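handle_slab() above treats an object as live unless its index appears in the free part of the slab's freelist (positions page->active onward), and reads the recorded caller through probe_kernel_read() so that a DEBUG_PAGEALLOC-unmapped object cannot oops the scan. A simplified userspace sketch of just the liveness test, assuming a plain array stands in for the slab freelist:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for get_free_obj(page, j). */
static unsigned int get_free_obj(const unsigned int *freelist, unsigned int j)
{
	return freelist[j];
}

/*
 * An object index counts as "active" (allocated) unless it appears in
 * the free part of the freelist, positions active..num-1; this is the
 * same test handle_slab() used before reading the stored caller.
 */
static bool obj_is_active(const unsigned int *freelist, unsigned int active,
			  unsigned int num, unsigned int idx)
{
	unsigned int j;

	for (j = active; j < num; j++)
		if (get_free_obj(freelist, j) == idx)
			return false;
	return true;
}

int main(void)
{
	/* 4 objects per slab; indexes 3 and 1 are still free. */
	unsigned int freelist[4] = { 0, 2, 3, 1 };
	unsigned int active = 2, num = 4, i;

	for (i = 0; i < num; i++)
		printf("obj %u: %s\n", i,
		       obj_is_active(freelist, active, num, i) ? "allocated" : "free");
	return 0;
}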
-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-	unsigned long offset, size;
-	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
-
-	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
-		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
-		if (modname[0])
-			seq_printf(m, " [%s]", modname);
-		return;
-	}
-#endif
-	seq_printf(m, "%px", (void *)address);
-}
-
-static int leaks_show(struct seq_file *m, void *p)
-{
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
-					       root_caches_node);
-	struct page *page;
-	struct kmem_cache_node *n;
-	const char *name;
-	unsigned long *x = m->private;
-	int node;
-	int i;
-
-	if (!(cachep->flags & SLAB_STORE_USER))
-		return 0;
-	if (!(cachep->flags & SLAB_RED_ZONE))
-		return 0;
-
-	/*
-	 * Set store_user_clean and start to grab stored user information
-	 * for all objects on this cache. If some alloc/free requests comes
-	 * during the processing, information would be wrong so restart
-	 * whole processing.
-	 */
-	do {
-		drain_cpu_caches(cachep);
-		/*
-		 * drain_cpu_caches() could make kmemleak_object and
-		 * debug_objects_cache dirty, so reset afterwards.
-		 */
-		set_store_user_clean(cachep);
-
-		x[1] = 0;
-
-		for_each_kmem_cache_node(cachep, node, n) {
-
-			check_irq_on();
-			spin_lock_irq(&n->list_lock);
-
-			list_for_each_entry(page, &n->slabs_full, slab_list)
-				handle_slab(x, cachep, page);
-			list_for_each_entry(page, &n->slabs_partial, slab_list)
-				handle_slab(x, cachep, page);
-			spin_unlock_irq(&n->list_lock);
-		}
-	} while (!is_store_user_clean(cachep));
-
-	name = cachep->name;
-	if (x[0] == x[1]) {
-		/* Increase the buffer size */
-		mutex_unlock(&slab_mutex);
-		m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
-				     GFP_KERNEL);
-		if (!m->private) {
-			/* Too bad, we are really out */
-			m->private = x;
-			mutex_lock(&slab_mutex);
-			return -ENOMEM;
-		}
-		*(unsigned long *)m->private = x[0] * 2;
-		kfree(x);
-		mutex_lock(&slab_mutex);
-		/* Now make sure this entry will be retried */
-		m->count = m->size;
-		return 0;
-	}
-	for (i = 0; i < x[1]; i++) {
-		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
-		show_symbol(m, x[2*i+2]);
-		seq_putc(m, '\n');
-	}
-
-	return 0;
-}
-
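leaks_show() tied the pieces together: drain the per-CPU arrays, mark the cache clean, walk the full and partial slab lists under each node's list_lock feeding every live object's caller into the table, and redo the whole pass if a concurrent alloc/free dirtied the cache; when the table fills, it drops slab_mutex, doubles the buffer, and asks seq_file to retry the record. Going by the seq_printf() calls above, each /proc/slab_allocators line had the shape "<cache>: <count> <symbol>+<offset>/<size> [module]", for example (values invented purely to show the format):

dentry: 1120 __d_alloc+0x25/0x180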
-static const struct seq_operations slabstats_op = {
-	.start = slab_start,
-	.next = slab_next,
-	.stop = slab_stop,
-	.show = leaks_show,
-};
-
-static int slabstats_open(struct inode *inode, struct file *file)
-{
-	unsigned long *n;
-
-	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
-	if (!n)
-		return -ENOMEM;
-
-	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
-
-	return 0;
-}
-
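slabstats_open() sized the initial table with one page via __seq_open_private(), whose first word records the capacity, PAGE_SIZE / (2 * sizeof(unsigned long)): that is 4096 / 16 = 256 entries on a typical 64-bit configuration (page size and word size assumed here only for the arithmetic). leaks_show() doubled that capacity whenever a scan filled the table.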
-static const struct file_operations proc_slabstats_operations = {
-	.open = slabstats_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release_private,
-};
-#endif
-
-static int __init slab_proc_init(void)
-{
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
-#endif
-	return 0;
-}
-module_init(slab_proc_init);
-
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied