-rw-r--r--  include/linux/slub_def.h |  2
-rw-r--r--  lib/Kconfig.debug        |  2
-rw-r--r--  mm/slub.c                | 40
3 files changed, 37 insertions, 7 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b33c0f2e61dc..e4f5ed180b9b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -87,7 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b4afd2e6ca0..b6263651a955 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -198,7 +198,7 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
@@ -1102,7 +1102,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 static inline void slab_free_hook_irq(struct kmem_cache *s,
 						void *object) {}
 
-#endif
+#endif /* CONFIG_SLUB_DEBUG */
 
 /*
  * Slab allocation and freeing
@@ -3373,7 +3373,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 #endif
 
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3383,7 +3383,9 @@ static int count_total(struct page *page)
 {
 	return page->objects;
 }
+#endif
 
+#ifdef CONFIG_SLUB_DEBUG
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
@@ -3474,6 +3476,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 	kfree(map);
 	return count;
 }
+#endif
 
 #ifdef SLUB_RESILIENCY_TEST
 static void resiliency_test(void)
@@ -3532,9 +3535,12 @@ static void resiliency_test(void)
 	validate_slab_cache(kmalloc_caches[9]);
 }
 #else
+#ifdef CONFIG_SYSFS
 static void resiliency_test(void) {};
 #endif
+#endif
 
+#ifdef CONFIG_SLUB_DEBUG
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
@@ -3763,7 +3769,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		len += sprintf(buf, "No data\n");
 	return len;
 }
+#endif
 
+#ifdef CONFIG_SYSFS
 enum slab_stat_type {
 	SL_ALL,			/* All slabs */
 	SL_PARTIAL,		/* Only partially allocated slabs */
@@ -3816,6 +3824,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
+	down_read(&slub_lock);
+#ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
@@ -3832,7 +3842,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			nodes[node] += x;
 		}
 
-	} else if (flags & SO_PARTIAL) {
+	} else
+#endif
+		if (flags & SO_PARTIAL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
@@ -3857,6 +3869,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	return x + sprintf(buf + x, "\n");
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 static int any_slab_objects(struct kmem_cache *s)
 {
 	int node;
@@ -3872,6 +3885,7 @@ static int any_slab_objects(struct kmem_cache *s)
 	}
 	return 0;
 }
+#endif
 
 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
 #define to_slab(n) container_of(n, struct kmem_cache, kobj);
@@ -3973,11 +3987,13 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(aliases);
 
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
 	return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
+#endif
 
 static ssize_t partial_show(struct kmem_cache *s, char *buf)
 {
@@ -4003,6 +4019,7 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objects_partial);
 
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
 {
 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
@@ -4055,6 +4072,7 @@ static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(failslab);
 #endif
+#endif
 
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
@@ -4091,6 +4109,7 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
@@ -4166,6 +4185,7 @@ static ssize_t validate_store(struct kmem_cache *s,
 	return ret;
 }
 SLAB_ATTR(validate);
+#endif
 
 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
 {
@@ -4186,6 +4206,7 @@ static ssize_t shrink_store(struct kmem_cache *s,
 }
 SLAB_ATTR(shrink);
 
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
 {
 	if (!(s->flags & SLAB_STORE_USER))
@@ -4201,6 +4222,7 @@ static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
 	return list_locations(s, buf, TRACK_FREE);
 }
 SLAB_ATTR_RO(free_calls);
+#endif
 
 #ifdef CONFIG_NUMA
 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
@@ -4307,25 +4329,33 @@ static struct attribute *slab_attrs[] = {
 	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&total_objects_attr.attr,
 	&slabs_attr.attr,
+#endif
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
 	&ctor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&sanity_checks_attr.attr,
 	&trace_attr.attr,
+#endif
 	&hwcache_align_attr.attr,
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&red_zone_attr.attr,
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
+#endif
 	&shrink_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&alloc_calls_attr.attr,
 	&free_calls_attr.attr,
+#endif
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
@@ -4608,7 +4638,7 @@ static int __init slab_sysfs_init(void)
 }
 
 __initcall(slab_sysfs_init);
-#endif
+#endif /* CONFIG_SYSFS */
 
 /*
  * The /proc/slabinfo ABI
