author     Christoph Lameter <clameter@sgi.com>                  2007-05-09 05:32:41 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 15:30:45 -0400
commit     b345970905e34c1b632fe4d80e2af14c7de99b45 (patch)
tree       0e495a2b5cbbfdbaf7dfd126bb6a5a2060a23ae4 /mm
parent     7656c72b5a631452ace361037ccf8384454d0f72 (diff)
SLUB: move resiliency check into SYSFS section
Move the resiliency check into the SYSFS section, after validate_slab_cache(), which the
resiliency check uses. This avoids the forward declaration of validate_slab_cache().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   112
1 file changed, 55 insertions(+), 57 deletions(-)
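
A short aside on the forward-declaration point (a standalone sketch with hypothetical names, not code from this patch): in C, a call site must be preceded either by the callee's definition or by a prototype. By defining validate_slab_cache() before resiliency_test(), the patch makes the separate prototype unnecessary; the toy program below shows the same effect.

    #include <stdio.h>

    /* Callee defined first: the caller below needs no forward declaration. */
    static unsigned long validate(int n)
    {
            return (unsigned long)n * 2;
    }

    static void run_check(void)
    {
            /*
             * If run_check() were placed above validate(), a prototype such as
             * "static unsigned long validate(int n);" would have to appear
             * first -- the same kind of forward declaration this patch drops
             * for validate_slab_cache().
             */
            printf("validated: %lu\n", validate(21));
    }

    int main(void)
    {
            run_check();
            return 0;
    }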
@@ -2512,63 +2512,6 @@ static int __init cpucache_init(void)
 __initcall(cpucache_init);
 #endif
 
-#ifdef SLUB_RESILIENCY_TEST
-static unsigned long validate_slab_cache(struct kmem_cache *s);
-
-static void resiliency_test(void)
-{
-	u8 *p;
-
-	printk(KERN_ERR "SLUB resiliency testing\n");
-	printk(KERN_ERR "-----------------------\n");
-	printk(KERN_ERR "A. Corruption after allocation\n");
-
-	p = kzalloc(16, GFP_KERNEL);
-	p[16] = 0x12;
-	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
-			" 0x12->0x%p\n\n", p + 16);
-
-	validate_slab_cache(kmalloc_caches + 4);
-
-	/* Hmmm... The next two are dangerous */
-	p = kzalloc(32, GFP_KERNEL);
-	p[32 + sizeof(void *)] = 0x34;
-	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-			" 0x34 -> -0x%p\n", p);
-	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
-
-	validate_slab_cache(kmalloc_caches + 5);
-	p = kzalloc(64, GFP_KERNEL);
-	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
-	*p = 0x56;
-	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
-			p);
-	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
-	validate_slab_cache(kmalloc_caches + 6);
-
-	printk(KERN_ERR "\nB. Corruption after free\n");
-	p = kzalloc(128, GFP_KERNEL);
-	kfree(p);
-	*p = 0x78;
-	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 7);
-
-	p = kzalloc(256, GFP_KERNEL);
-	kfree(p);
-	p[50] = 0x9a;
-	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 8);
-
-	p = kzalloc(512, GFP_KERNEL);
-	kfree(p);
-	p[512] = 0xab;
-	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches + 9);
-}
-#else
-static void resiliency_test(void) {};
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
@@ -2685,6 +2628,61 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
 	return count;
 }
 
+#ifdef SLUB_RESILIENCY_TEST
+static void resiliency_test(void)
+{
+	u8 *p;
+
+	printk(KERN_ERR "SLUB resiliency testing\n");
+	printk(KERN_ERR "-----------------------\n");
+	printk(KERN_ERR "A. Corruption after allocation\n");
+
+	p = kzalloc(16, GFP_KERNEL);
+	p[16] = 0x12;
+	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
+			" 0x12->0x%p\n\n", p + 16);
+
+	validate_slab_cache(kmalloc_caches + 4);
+
+	/* Hmmm... The next two are dangerous */
+	p = kzalloc(32, GFP_KERNEL);
+	p[32 + sizeof(void *)] = 0x34;
+	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
+			" 0x34 -> -0x%p\n", p);
+	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+
+	validate_slab_cache(kmalloc_caches + 5);
+	p = kzalloc(64, GFP_KERNEL);
+	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
+	*p = 0x56;
+	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+			p);
+	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+	validate_slab_cache(kmalloc_caches + 6);
+
+	printk(KERN_ERR "\nB. Corruption after free\n");
+	p = kzalloc(128, GFP_KERNEL);
+	kfree(p);
+	*p = 0x78;
+	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 7);
+
+	p = kzalloc(256, GFP_KERNEL);
+	kfree(p);
+	p[50] = 0x9a;
+	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 8);
+
+	p = kzalloc(512, GFP_KERNEL);
+	kfree(p);
+	p[512] = 0xab;
+	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+	validate_slab_cache(kmalloc_caches + 9);
+}
+#else
+static void resiliency_test(void) {};
+#endif
+
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.