Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	297
1 file changed, 141 insertions(+), 156 deletions(-)
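
For orientation before the diff: each show/store pair below backs one file under /sys/kernel/slab/<cache>/, wired up by the SLAB_ATTR and SLAB_ATTR_RO macros defined elsewhere in mm/slub.c. A minimal sketch of that pattern, with the expansion approximated rather than quoted from the file:

struct slab_attribute {
        struct attribute attr;
        ssize_t (*show)(struct kmem_cache *s, char *buf);
        ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

/* Read-only sysfs file: a _show handler only. */
#define SLAB_ATTR_RO(_name) \
        static struct slab_attribute _name##_attr = __ATTR_RO(_name)

/* Read-write sysfs file: _show plus _store, mode 0644. */
#define SLAB_ATTR(_name) \
        static struct slab_attribute _name##_attr = \
        __ATTR(_name, 0644, _name##_show, _name##_store)

Because the binding is by name, moving a handler around in the file changes nothing visible in sysfs, which is what lets this patch reorder definitions purely to consolidate #ifdef blocks.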
diff --git a/mm/slub.c b/mm/slub.c
index be4d66231c6f..fe536d3474d6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3476,71 +3476,6 @@ static long validate_slab_cache(struct kmem_cache *s)
 	kfree(map);
 	return count;
 }
-#endif
-
-#ifdef SLUB_RESILIENCY_TEST
-static void resiliency_test(void)
-{
-	u8 *p;
-
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
-
-	printk(KERN_ERR "SLUB resiliency testing\n");
-	printk(KERN_ERR "-----------------------\n");
-	printk(KERN_ERR "A. Corruption after allocation\n");
-
-	p = kzalloc(16, GFP_KERNEL);
-	p[16] = 0x12;
-	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
-			" 0x12->0x%p\n\n", p + 16);
-
-	validate_slab_cache(kmalloc_caches[4]);
-
-	/* Hmmm... The next two are dangerous */
-	p = kzalloc(32, GFP_KERNEL);
-	p[32 + sizeof(void *)] = 0x34;
-	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-			" 0x34 -> -0x%p\n", p);
-	printk(KERN_ERR
-		"If allocated object is overwritten then not detectable\n\n");
-
-	validate_slab_cache(kmalloc_caches[5]);
-	p = kzalloc(64, GFP_KERNEL);
-	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
-	*p = 0x56;
-	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
-									p);
-	printk(KERN_ERR
-		"If allocated object is overwritten then not detectable\n\n");
-	validate_slab_cache(kmalloc_caches[6]);
-
-	printk(KERN_ERR "\nB. Corruption after free\n");
-	p = kzalloc(128, GFP_KERNEL);
-	kfree(p);
-	*p = 0x78;
-	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches[7]);
-
-	p = kzalloc(256, GFP_KERNEL);
-	kfree(p);
-	p[50] = 0x9a;
-	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
-									p);
-	validate_slab_cache(kmalloc_caches[8]);
-
-	p = kzalloc(512, GFP_KERNEL);
-	kfree(p);
-	p[512] = 0xab;
-	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches[9]);
-}
-#else
-#ifdef CONFIG_SYSFS
-static void resiliency_test(void) {};
-#endif
-#endif
-
-#ifdef CONFIG_SLUB_DEBUG
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
@@ -3771,6 +3706,68 @@ static int list_locations(struct kmem_cache *s, char *buf,
 }
 #endif
 
+#ifdef SLUB_RESILIENCY_TEST
+static void resiliency_test(void)
+{
+	u8 *p;
+
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+
+	printk(KERN_ERR "SLUB resiliency testing\n");
+	printk(KERN_ERR "-----------------------\n");
+	printk(KERN_ERR "A. Corruption after allocation\n");
+
+	p = kzalloc(16, GFP_KERNEL);
+	p[16] = 0x12;
+	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
+			" 0x12->0x%p\n\n", p + 16);
+
+	validate_slab_cache(kmalloc_caches[4]);
+
+	/* Hmmm... The next two are dangerous */
+	p = kzalloc(32, GFP_KERNEL);
+	p[32 + sizeof(void *)] = 0x34;
+	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
+			" 0x34 -> -0x%p\n", p);
+	printk(KERN_ERR
+		"If allocated object is overwritten then not detectable\n\n");
+
+	validate_slab_cache(kmalloc_caches[5]);
+	p = kzalloc(64, GFP_KERNEL);
+	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
+	*p = 0x56;
+	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+									p);
+	printk(KERN_ERR
+		"If allocated object is overwritten then not detectable\n\n");
+	validate_slab_cache(kmalloc_caches[6]);
+
+	printk(KERN_ERR "\nB. Corruption after free\n");
+	p = kzalloc(128, GFP_KERNEL);
+	kfree(p);
+	*p = 0x78;
+	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches[7]);
+
+	p = kzalloc(256, GFP_KERNEL);
+	kfree(p);
+	p[50] = 0x9a;
+	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
+									p);
+	validate_slab_cache(kmalloc_caches[8]);
+
+	p = kzalloc(512, GFP_KERNEL);
+	kfree(p);
+	p[512] = 0xab;
+	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches[9]);
+}
+#else
+#ifdef CONFIG_SYSFS
+static void resiliency_test(void) {};
+#endif
+#endif
+
 #ifdef CONFIG_SYSFS
 enum slab_stat_type {
 	SL_ALL,			/* All slabs */
@@ -3987,14 +3984,6 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(aliases);
 
-#ifdef CONFIG_SLUB_DEBUG
-static ssize_t slabs_show(struct kmem_cache *s, char *buf)
-{
-	return show_slab_objects(s, buf, SO_ALL);
-}
-SLAB_ATTR_RO(slabs);
-#endif
-
 static ssize_t partial_show(struct kmem_cache *s, char *buf)
 {
 	return show_slab_objects(s, buf, SO_PARTIAL);
@@ -4019,7 +4008,48 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objects_partial);
 
+static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
+}
+
+static ssize_t reclaim_account_store(struct kmem_cache *s,
+				const char *buf, size_t length)
+{
+	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
+	if (buf[0] == '1')
+		s->flags |= SLAB_RECLAIM_ACCOUNT;
+	return length;
+}
+SLAB_ATTR(reclaim_account);
+
+static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
+}
+SLAB_ATTR_RO(hwcache_align);
+
+#ifdef CONFIG_ZONE_DMA
+static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
+}
+SLAB_ATTR_RO(cache_dma);
+#endif
+
+static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+}
+SLAB_ATTR_RO(destroy_by_rcu);
+
 #ifdef CONFIG_SLUB_DEBUG
+static ssize_t slabs_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_ALL);
+}
+SLAB_ATTR_RO(slabs);
+
 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
 {
 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
@@ -4056,60 +4086,6 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(trace);
 
-#ifdef CONFIG_FAILSLAB
-static ssize_t failslab_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
-}
-
-static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
-							size_t length)
-{
-	s->flags &= ~SLAB_FAILSLAB;
-	if (buf[0] == '1')
-		s->flags |= SLAB_FAILSLAB;
-	return length;
-}
-SLAB_ATTR(failslab);
-#endif
-#endif
-
-static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
-}
-
-static ssize_t reclaim_account_store(struct kmem_cache *s,
-				const char *buf, size_t length)
-{
-	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
-	if (buf[0] == '1')
-		s->flags |= SLAB_RECLAIM_ACCOUNT;
-	return length;
-}
-SLAB_ATTR(reclaim_account);
-
-static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
-}
-SLAB_ATTR_RO(hwcache_align);
-
-#ifdef CONFIG_ZONE_DMA
-static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
-}
-SLAB_ATTR_RO(cache_dma);
-#endif
-
-static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-}
-SLAB_ATTR_RO(destroy_by_rcu);
-
-#ifdef CONFIG_SLUB_DEBUG
 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
@@ -4185,6 +4161,39 @@ static ssize_t validate_store(struct kmem_cache *s,
 	return ret;
 }
 SLAB_ATTR(validate);
+
+static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return -ENOSYS;
+	return list_locations(s, buf, TRACK_ALLOC);
+}
+SLAB_ATTR_RO(alloc_calls);
+
+static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return -ENOSYS;
+	return list_locations(s, buf, TRACK_FREE);
+}
+SLAB_ATTR_RO(free_calls);
+#endif /* CONFIG_SLUB_DEBUG */
+
+#ifdef CONFIG_FAILSLAB
+static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+}
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+							size_t length)
+{
+	s->flags &= ~SLAB_FAILSLAB;
+	if (buf[0] == '1')
+		s->flags |= SLAB_FAILSLAB;
+	return length;
+}
+SLAB_ATTR(failslab);
 #endif
 
 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
@@ -4206,24 +4215,6 @@ static ssize_t shrink_store(struct kmem_cache *s,
 }
 SLAB_ATTR(shrink);
 
-#ifdef CONFIG_SLUB_DEBUG
-static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
-{
-	if (!(s->flags & SLAB_STORE_USER))
-		return -ENOSYS;
-	return list_locations(s, buf, TRACK_ALLOC);
-}
-SLAB_ATTR_RO(alloc_calls);
-
-static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
-{
-	if (!(s->flags & SLAB_STORE_USER))
-		return -ENOSYS;
-	return list_locations(s, buf, TRACK_FREE);
-}
-SLAB_ATTR_RO(free_calls);
-#endif
-
 #ifdef CONFIG_NUMA
 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
@@ -4329,30 +4320,24 @@ static struct attribute *slab_attrs[] = {
 	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
-#ifdef CONFIG_SLUB_DEBUG
-	&total_objects_attr.attr,
-	&slabs_attr.attr,
-#endif
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
 	&ctor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
-#ifdef CONFIG_SLUB_DEBUG
-	&sanity_checks_attr.attr,
-	&trace_attr.attr,
-#endif
 	&hwcache_align_attr.attr,
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
+	&shrink_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
+	&total_objects_attr.attr,
+	&slabs_attr.attr,
+	&sanity_checks_attr.attr,
+	&trace_attr.attr,
 	&red_zone_attr.attr,
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
-#endif
-	&shrink_attr.attr,
-#ifdef CONFIG_SLUB_DEBUG
 	&alloc_calls_attr.attr,
 	&free_calls_attr.attr,
 #endif
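
Taken together, the hunks are pure code motion: the unconditional attributes (reclaim_account, hwcache_align, cache_dma, destroy_by_rcu, shrink) are grouped ahead of a single CONFIG_SLUB_DEBUG block holding slabs, total_objects, sanity_checks, trace, red_zone, poison, store_user, validate, alloc_calls and free_calls, and failslab now depends only on CONFIG_FAILSLAB instead of being nested inside the debug block. A hypothetical userspace probe of the resulting files (helper name, cache name and attribute choice are illustrative, not from the patch; per alloc_calls_show above, reading alloc_calls also needs SLAB_STORE_USER, e.g. booting with slub_debug=U, or the read fails with -ENOSYS):

/* Hypothetical probe of /sys/kernel/slab/<cache>/<attr>. */
#include <stdio.h>

static void show_attr(const char *cache, const char *attr)
{
        char path[256], line[512];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
        f = fopen(path, "r");
        if (!f) {
                /* attribute not compiled in, e.g. debug files on !CONFIG_SLUB_DEBUG */
                printf("%-16s: <no such attribute>\n", attr);
                return;
        }
        if (fgets(line, sizeof(line), f))
                printf("%-16s: %s", attr, line);
        else
                /* e.g. alloc_calls without SLAB_STORE_USER: read returns -ENOSYS */
                printf("%-16s: <read failed>\n", attr);
        fclose(f);
}

int main(void)
{
        const char *cache = "kmalloc-64";       /* any cache under /sys/kernel/slab */

        show_attr(cache, "reclaim_account");    /* unconditional */
        show_attr(cache, "destroy_by_rcu");     /* unconditional */
        show_attr(cache, "alloc_calls");        /* CONFIG_SLUB_DEBUG only */
        return 0;
}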