-rw-r--r--  fs/proc/proc_misc.c  |  37
-rw-r--r--  include/linux/slab.h |   6
-rw-r--r--  lib/Kconfig.debug    |   4
-rw-r--r--  mm/slab.c            | 180
-rw-r--r--  mm/util.c            |   4
-rw-r--r--  net/core/skbuff.c    |   2
6 files changed, 222 insertions, 11 deletions
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 826c131994c3..1e9ea37d457e 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -485,6 +485,40 @@ static struct file_operations proc_slabinfo_operations = {
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+extern struct seq_operations slabstats_op;
+static int slabstats_open(struct inode *inode, struct file *file)
+{
+	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	int ret = -ENOMEM;
+	if (n) {
+		ret = seq_open(file, &slabstats_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+			m->private = n;
+			n = NULL;
+		}
+		kfree(n);
+	}
+	return ret;
+}
+
+static int slabstats_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	kfree(m->private);
+	return seq_release(inode, file);
+}
+
+static struct file_operations proc_slabstats_operations = {
+	.open		= slabstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= slabstats_release,
+};
+#endif
 #endif
 
 static int show_stat(struct seq_file *p, void *v)
@@ -744,6 +778,9 @@ void __init proc_misc_init(void)
 	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
 #ifdef CONFIG_SLAB
 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
+#endif
 #endif
 	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
 	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
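
The new /proc/slab_allocators entry is an ordinary seq_file and can be read like any other /proc file. As a rough illustration (not part of the patch), a minimal userspace dump of it might look like the sketch below; each output line has the form "<cache>: <count> <symbol>+<offset>/<size> [module]", as produced by leaks_show() and show_symbol() in the mm/slab.c hunks further down.

/* Hypothetical userspace sketch: dump /proc/slab_allocators. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/slab_allocators", "r");

	if (!f) {
		perror("/proc/slab_allocators");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "size-512: 3 alloc_foo+0x20/0x80" (made-up line) */
	fclose(f);
	return 0;
}
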
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e2ee5b268797..f88e08a5802c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -77,11 +77,12 @@ struct cache_sizes {
 };
 extern struct cache_sizes malloc_sizes[];
 
-#ifndef CONFIG_DEBUG_SLAB
 extern void *__kmalloc(size_t, gfp_t);
+#ifndef CONFIG_DEBUG_SLAB
+#define ____kmalloc(size, flags) __kmalloc(size, flags)
 #else
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define __kmalloc(size, flags) \
+#define ____kmalloc(size, flags) \
 	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
 #endif
 
@@ -173,6 +174,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kmem_ptr_validate(a, b) (0)
 #define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
 #define kmalloc_node(s, f, n) kmalloc(s, f)
+#define ____kmalloc kmalloc
 
 #endif /* CONFIG_SLOB */
 
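
The ____kmalloc indirection above is about caller attribution: __kmalloc is now always a real function, while ____kmalloc stays a macro under CONFIG_DEBUG_SLAB, so a wrapper that calls ____kmalloc expands __builtin_return_address(0) in its own body and the allocation is charged to the wrapper's caller. That is why kzalloc(), kstrdup() and __alloc_skb() are switched to ____kmalloc later in this patch. A minimal sketch of the same pattern, using made-up my_* names that are not from the kernel:

/* Sketch of the track-caller pattern; the my_* names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *my_alloc_track_caller(size_t size, void *caller)
{
	printf("allocation of %zu bytes attributed to %p\n", size, caller);
	return malloc(size);
}

#define my_alloc(size) \
	my_alloc_track_caller(size, __builtin_return_address(0))

/*
 * Because my_alloc() is a macro, __builtin_return_address(0) expands
 * inside my_zalloc() and records my_zalloc()'s caller as the allocation
 * site -- the same effect ____kmalloc gives kzalloc() and kstrdup().
 */
static void *my_zalloc(size_t size)
{
	void *p = my_alloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	free(my_zalloc(32));	/* the recorded site is this call in main() */
	return 0;
}
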
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f2618e1c2b93..1fe3f897145f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -85,6 +85,10 @@ config DEBUG_SLAB
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
+config DEBUG_SLAB_LEAK
+	bool "Memory leak debugging"
+	depends on DEBUG_SLAB
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -204,7 +204,8 @@
 typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
-#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
+#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
+#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
  * Needed to avoid a possible looping condition in cache_grow().
@@ -2399,7 +2400,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(slabp->nodeid != nodeid);
 
-	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
 		printk(KERN_ERR "slab: double free detected in cache "
 				"'%s', objp %p\n", cachep->name, objp);
 		BUG();
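
The rewritten double-free check relies on where the BUFCTL_* markers sit at the top of the kmem_bufctl_t range (see the first mm/slab.c hunk). A live object is now marked either BUFCTL_FREE or, with CONFIG_DEBUG_SLAB_LEAK, BUFCTL_ACTIVE, so instead of testing for one specific marker the code uses an unsigned wraparound comparison that fires exactly when the bufctl still looks like a free-list link (an index up to SLAB_LIMIT, or BUFCTL_END). A standalone illustration, not part of the patch:

/* Illustration of the wraparound test above; compile and run outside the kernel. */
#include <assert.h>

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/* True when the bufctl says the object is already free (double free). */
static int looks_free(kmem_bufctl_t b)
{
	return b + 1 <= SLAB_LIMIT + 1;	/* BUFCTL_END wraps to 0 */
}

int main(void)
{
	assert(looks_free(0));			/* free-list index */
	assert(looks_free(SLAB_LIMIT));		/* largest valid index */
	assert(looks_free(BUFCTL_END));		/* end-of-free-list marker */
	assert(!looks_free(BUFCTL_FREE));	/* live object, plain DEBUG_SLAB marking */
	assert(!looks_free(BUFCTL_ACTIVE));	/* live object, DEBUG_SLAB_LEAK marking */
	return 0;
}
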
@@ -2605,6 +2606,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		 */
 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2788,6 +2792,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	{
+		struct slab *slabp;
+		unsigned objnr;
+
+		slabp = page_get_slab(virt_to_page(objp));
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+	}
+#endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -3220,22 +3234,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
 	return __do_kmalloc(size, flags, NULL);
+#else
+	return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
 	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -3899,6 +3914,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 	res = count;
 	return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+	struct list_head *p;
+
+	mutex_lock(&cache_chain_mutex);
+	p = cache_chain.next;
+	while (n--) {
+		p = p->next;
+		if (p == &cache_chain)
+			return NULL;
+	}
+	return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+	unsigned long *p;
+	int l;
+	if (!v)
+		return 1;
+	l = n[1];
+	p = n + 2;
+	while (l) {
+		int i = l/2;
+		unsigned long *q = p + 2 * i;
+		if (*q == v) {
+			q[1]++;
+			return 1;
+		}
+		if (*q > v) {
+			l = i;
+		} else {
+			p = q + 2;
+			l -= i + 1;
+		}
+	}
+	if (++n[1] == n[0])
+		return 0;
+	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+	p[0] = v;
+	p[1] = 1;
+	return 1;
+}
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+			return;
+	}
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+	char *modname;
+	const char *name;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+	if (name) {
+		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+		if (modname)
+			seq_printf(m, " [%s]", modname);
+		return;
+	}
+#endif
+	seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = p;
+	struct list_head *q;
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (!(cachep->flags & SLAB_STORE_USER))
+		return 0;
+	if (!(cachep->flags & SLAB_RED_ZONE))
+		return 0;
+
+	/* OK, we can do it */
+
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each(q, &l3->slabs_full) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		list_for_each(q, &l3->slabs_partial) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations slabstats_op = {
+	.start = leaks_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = leaks_show,
+};
+#endif
 #endif
 
 /**
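
A note on the data structure driving the report: the buffer handed over by slabstats_open() is a flat array of unsigned longs in which n[0] is the capacity in (call-site, count) pairs, n[1] is the number of pairs in use, and the pairs themselves follow, kept sorted by call-site address so add_caller() can binary-search and insert in place. When the table fills up, leaks_show() doubles it and sets m->count = m->size so the seq_file core retries the same cache with the larger table. A self-contained sketch of that layout (illustrative only, outside the kernel):

/* Same table layout as the /proc/slab_allocators buffer:
 * buf[0] = capacity in pairs, buf[1] = pairs in use,
 * buf[2..] = (address, count) pairs sorted by address.
 */
#include <stdio.h>
#include <string.h>

static int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p = n + 2;
	int l = n[1];

	if (!v)
		return 1;
	while (l) {
		int i = l / 2;
		unsigned long *q = p + 2 * i;

		if (*q == v) {		/* known call site: bump its count */
			q[1]++;
			return 1;
		}
		if (*q > v)
			l = i;
		else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])		/* table full: caller must grow it */
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

int main(void)
{
	unsigned long buf[2 + 2 * 8] = { 8, 0 };	/* room for 8 call sites */
	unsigned long sites[] = { 0xc0100200, 0xc0100100, 0xc0100200 };	/* made-up addresses */
	unsigned long i;

	for (i = 0; i < 3; i++)
		add_caller(buf, sites[i]);
	for (i = 0; i < buf[1]; i++)
		printf("site %#lx seen %lu times\n", buf[2 + 2*i], buf[2 + 2*i + 1]);
	return 0;
}
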
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
  */
 void *kzalloc(size_t size, gfp_t flags)
 {
-	void *ret = kmalloc(size, flags);
+	void *ret = ____kmalloc(size, flags);
 	if (ret)
 		memset(ret, 0, size);
 	return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
 		return NULL;
 
 	len = strlen(s) + 1;
-	buf = kmalloc(len, gfp);
+	buf = ____kmalloc(len, gfp);
 	if (buf)
 		memcpy(buf, s, len);
 	return buf;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c9f878454531..09464fa8d72f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -149,7 +149,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 