| field | value | date |
|---|---|---|
| author | Al Viro <viro@zeniv.linux.org.uk> | 2006-03-25 06:06:39 -0500 |
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-25 11:22:49 -0500 |
| commit | 871751e25d956ad24f129ca972b7851feaa61d53 (patch) | |
| tree | c3213a17481f601339ce0c81a22eebca0946c2c7 /mm | |
| parent | f52ac8fec8a13e207f675b0c16e0d5f800c1c204 (diff) | |
[PATCH] slab: implement /proc/slab_allocators
Implement /proc/slab_allocators. It produces output like:
```
idr_layer_cache: 80 idr_pre_get+0x33/0x4e
buffer_head: 2555 alloc_buffer_head+0x20/0x75
mm_struct: 9 mm_alloc+0x1e/0x42
mm_struct: 20 dup_mm+0x36/0x370
vm_area_struct: 384 dup_mm+0x18f/0x370
vm_area_struct: 151 do_mmap_pgoff+0x2e0/0x7c3
vm_area_struct: 1 split_vma+0x5a/0x10e
vm_area_struct: 11 do_brk+0x206/0x2e2
vm_area_struct: 2 copy_vma+0xda/0x142
vm_area_struct: 9 setup_arg_pages+0x99/0x214
fs_cache: 8 copy_fs_struct+0x21/0x133
fs_cache: 29 copy_process+0xf38/0x10e3
files_cache: 30 alloc_files+0x1b/0xcf
signal_cache: 81 copy_process+0xbaa/0x10e3
sighand_cache: 77 copy_process+0xe65/0x10e3
sighand_cache: 1 de_thread+0x4d/0x5f8
anon_vma: 241 anon_vma_prepare+0xd9/0xf3
size-2048: 1 add_sect_attrs+0x5f/0x145
size-2048: 2 journal_init_revoke+0x99/0x302
size-2048: 2 journal_init_revoke+0x137/0x302
size-2048: 2 journal_init_inode+0xf9/0x1c4
```
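Each line is `cache_name: object_count call_site`, where the count is the number of currently allocated objects recorded for that call site and the site is printed as symbol+offset/function-length (falling back to a raw pointer without CONFIG_KALLSYMS; see show_symbol() in the diff below). As a quick illustration, a minimal user-space parser for one such line — the format string here is an assumption derived from the seq_printf() calls in this patch:

```c
#include <stdio.h>

int main(void)
{
	const char *line = "vm_area_struct: 384 dup_mm+0x18f/0x370";
	char cache[64], site[64];
	unsigned long count;

	/* "%63[^:]" reads the cache name up to the colon */
	if (sscanf(line, "%63[^:]: %lu %63s", cache, &count, site) == 3)
		printf("cache=%s live=%lu site=%s\n", cache, count, site);
	return 0;
}
```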
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Alexander Nyberg <alexn@telia.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
DESC
slab-leaks3-locking-fix
EDESC
From: Andrew Morton <akpm@osdl.org>
Update for slab-remove-cachep-spinlock.patch
Cc: Al Viro <viro@ftp.linux.org.uk>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Alexander Nyberg <alexn@telia.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')

```
 mm/slab.c | 180
 mm/util.c |   4
 2 files changed, 176 insertions(+), 8 deletions(-)
```
```diff
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -204,7 +204,8 @@
 typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
-#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
+#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
+#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
  * Needed to avoid a possible looping condition in cache_grow().
```
```diff
@@ -2399,7 +2400,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(slabp->nodeid != nodeid);
 
-	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
 		printk(KERN_ERR "slab: double free detected in cache "
 				"'%s', objp %p\n", cachep->name, objp);
 		BUG();
```
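The rewritten double-free test relies on unsigned wraparound. Under the new sentinel layout, an object that is already on the free list has a bufctl entry holding either a free-list link (a valid object index, at most SLAB_LIMIT) or BUFCTL_END; BUFCTL_FREE and BUFCTL_ACTIVE mark a legitimately freed or live object. Since BUFCTL_END is ~0U, `BUFCTL_END + 1` wraps to 0, so `x + 1 <= SLAB_LIMIT + 1` is true exactly for the "already free" values. A minimal user-space sketch of the check:

```c
#include <stdio.h>

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

static int already_free(kmem_bufctl_t x)
{
	return x + 1 <= SLAB_LIMIT + 1;	/* BUFCTL_END + 1 wraps to 0 */
}

int main(void)
{
	printf("index 42:      %d\n", already_free(42));            /* 1: double free */
	printf("BUFCTL_END:    %d\n", already_free(BUFCTL_END));    /* 1: double free */
	printf("BUFCTL_FREE:   %d\n", already_free(BUFCTL_FREE));   /* 0: ok */
	printf("BUFCTL_ACTIVE: %d\n", already_free(BUFCTL_ACTIVE)); /* 0: ok */
	return 0;
}
```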
```diff
@@ -2605,6 +2606,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		 */
 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
```
```diff
@@ -2788,6 +2792,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	{
+		struct slab *slabp;
+		unsigned objnr;
+
+		slabp = page_get_slab(virt_to_page(objp));
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+	}
+#endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
```
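The allocation-side hook recovers the object's bufctl index from pointer arithmetic alone: objects start at s_mem and are laid out buffer_size bytes apart. A tiny user-space sketch of that computation (the sizes here are made up for illustration):

```c
#include <stdio.h>

int main(void)
{
	char slab_mem[8 * 128];			/* pretend slab: 8 objects, 128 bytes each */
	char *s_mem = slab_mem;
	unsigned buffer_size = 128;
	char *objp = s_mem + 3 * buffer_size;	/* pretend this object was allocated */
	unsigned objnr = (unsigned)(objp - s_mem) / buffer_size;

	printf("objnr = %u\n", objnr);		/* prints 3 */
	return 0;
}
```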
```diff
@@ -3220,22 +3234,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
 	return __do_kmalloc(size, flags, NULL);
+#else
+	return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
 	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
```
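With this restructuring __kmalloc is defined in both configurations; under CONFIG_DEBUG_SLAB it records __builtin_return_address(0) — the address its caller will resume at — which later surfaces as the call-site column of /proc/slab_allocators. A user-space GCC sketch of that builtin (the function name here is hypothetical):

```c
#include <stdio.h>

/* noinline so the call from main() is a real call with a return address */
static __attribute__((noinline)) void *where_was_i_called_from(void)
{
	return __builtin_return_address(0);
}

int main(void)
{
	/* prints an address just past the call instruction inside main() */
	printf("called from %p\n", where_was_i_called_from());
	return 0;
}
```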
```diff
@@ -3899,6 +3914,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 	res = count;
 	return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+	struct list_head *p;
+
+	mutex_lock(&cache_chain_mutex);
+	p = cache_chain.next;
+	while (n--) {
+		p = p->next;
+		if (p == &cache_chain)
+			return NULL;
+	}
+	return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+	unsigned long *p;
+	int l;
+	if (!v)
+		return 1;
+	l = n[1];
+	p = n + 2;
+	while (l) {
+		int i = l/2;
+		unsigned long *q = p + 2 * i;
+		if (*q == v) {
+			q[1]++;
+			return 1;
+		}
+		if (*q > v) {
+			l = i;
+		} else {
+			p = q + 2;
+			l -= i + 1;
+		}
+	}
+	if (++n[1] == n[0])
+		return 0;
+	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+	p[0] = v;
+	p[1] = 1;
+	return 1;
+}
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+			return;
+	}
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+	char *modname;
+	const char *name;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+	if (name) {
+		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+		if (modname)
+			seq_printf(m, " [%s]", modname);
+		return;
+	}
+#endif
+	seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = p;
+	struct list_head *q;
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (!(cachep->flags & SLAB_STORE_USER))
+		return 0;
+	if (!(cachep->flags & SLAB_RED_ZONE))
+		return 0;
+
+	/* OK, we can do it */
+
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each(q, &l3->slabs_full) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		list_for_each(q, &l3->slabs_partial) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations slabstats_op = {
+	.start = leaks_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = leaks_show,
+};
+#endif
 #endif
 
 /**
```
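add_caller() above keeps, per cache, a flat array n[] in which n[0] is the capacity in (address, count) pairs, n[1] is the number of pairs in use, and the pairs themselves follow, sorted by caller address; lookups are a binary search, inserts shift the tail with memmove(), and when the table fills it returns 0 so leaks_show() can double the buffer and retry the entry. A self-contained user-space re-implementation of the same accounting scheme (illustrative only, not kernel code):

```c
#include <stdio.h>
#include <string.h>

/* tab[0] = capacity in pairs, tab[1] = pairs used, then sorted {addr, count} pairs */
static int add_caller(unsigned long *tab, unsigned long addr)
{
	unsigned long *p = tab + 2;	/* first pair */
	unsigned long n = tab[1];	/* pairs currently in use */

	if (!addr)
		return 1;		/* ignore NULL call sites, as the kernel code does */

	/* binary search for addr among the sorted pairs */
	while (n) {
		unsigned long *q = p + 2 * (n / 2);
		if (*q == addr) {
			q[1]++;		/* known call site: bump its count */
			return 1;
		}
		if (*q > addr) {
			n /= 2;		/* descend left */
		} else {
			p = q + 2;	/* descend right */
			n -= n / 2 + 1;
		}
	}
	if (tab[1] == tab[0])
		return 0;		/* table full: caller must grow it and retry */
	/* shift the tail right by one pair and insert the new entry at p */
	memmove(p + 2, p, (tab + 2 + 2 * tab[1] - p) * sizeof(*p));
	tab[1]++;
	p[0] = addr;
	p[1] = 1;
	return 1;
}

int main(void)
{
	unsigned long tab[2 + 2 * 8] = { 8, 0 };	/* room for 8 call sites */
	unsigned long sites[] = { 0xc01, 0xa05, 0xc01, 0xb33, 0xa05, 0xc01 };

	for (unsigned i = 0; i < sizeof(sites) / sizeof(sites[0]); i++)
		add_caller(tab, sites[i]);
	for (unsigned long i = 0; i < tab[1]; i++)	/* a05:2 b33:1 c01:3 */
		printf("%#lx: %lu\n", tab[2 + 2 * i], tab[2 + 2 * i + 1]);
	return 0;
}
```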
```diff
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
  */
 void *kzalloc(size_t size, gfp_t flags)
 {
-	void *ret = kmalloc(size, flags);
+	void *ret = ____kmalloc(size, flags);
 	if (ret)
 		memset(ret, 0, size);
 	return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
 		return NULL;
 
 	len = strlen(s) + 1;
-	buf = kmalloc(len, gfp);
+	buf = ____kmalloc(len, gfp);
 	if (buf)
 		memcpy(buf, s, len);
 	return buf;
```
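The mm/util.c change makes kzalloc() and kstrdup() allocate through ____kmalloc() instead of kmalloc(). The diffstat here is limited to mm/, so the slab.h side is not shown, but presumably ____kmalloc expands, under CONFIG_DEBUG_SLAB, to a call that passes __builtin_return_address(0) from within the wrapper's own body, so the recorded allocation site is the caller of kzalloc()/kstrdup() rather than a single anonymous entry inside mm/util.c. A user-space analogy of that attribution trick (all names hypothetical, not the kernel's actual slab.h):

```c
#include <stdio.h>

static void *alloc_and_record(unsigned long size, void *site)
{
	printf("%lu bytes attributed to call site %p\n", size, site);
	return NULL;	/* a real allocator would return memory here */
}

/* Because this is a macro, the builtin is evaluated in the frame of
 * whatever function uses it, not inside alloc_and_record(). */
#define track_alloc(size) alloc_and_record(size, __builtin_return_address(0))

static __attribute__((noinline)) void *my_kzalloc(unsigned long size)
{
	return track_alloc(size);	/* attributed to my_kzalloc's caller */
}

int main(void)
{
	my_kzalloc(32);		/* recorded site is in main(), not my_kzalloc() */
	return 0;
}
```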
