 mm/slub.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 110 insertions(+), 3 deletions(-)
@@ -670,8 +670,6 @@ static void add_full(struct kmem_cache *s, struct page *page)
 
 	VM_BUG_ON(!irqs_disabled());
 
-	VM_BUG_ON(!irqs_disabled());
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -2551,6 +2549,99 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
 #ifdef CONFIG_SYSFS
 
+static int validate_slab(struct kmem_cache *s, struct page *page)
+{
+	void *p;
+	void *addr = page_address(page);
+	unsigned long map[BITS_TO_LONGS(s->objects)];
+
+	if (!check_slab(s, page) ||
+			!on_freelist(s, page, NULL))
+		return 0;
+
+	/* Now we know that a valid freelist exists */
+	bitmap_zero(map, s->objects);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p)) {
+		set_bit((p - addr) / s->size, map);
+		if (!check_object(s, page, p, 0))
+			return 0;
+	}
+
+	for (p = addr; p < addr + s->objects * s->size; p += s->size)
+		if (!test_bit((p - addr) / s->size, map))
+			if (!check_object(s, page, p, 1))
+				return 0;
+	return 1;
+}
+
+static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+{
+	if (slab_trylock(page)) {
+		validate_slab(s, page);
+		slab_unlock(page);
+	} else
+		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
+			s->name, page);
+
+	if (s->flags & DEBUG_DEFAULT_FLAGS) {
+		if (!PageError(page))
+			printk(KERN_ERR "SLUB %s: PageError not set "
+				"on slab 0x%p\n", s->name, page);
+	} else {
+		if (PageError(page))
+			printk(KERN_ERR "SLUB %s: PageError set on "
+				"slab 0x%p\n", s->name, page);
+	}
+}
+
+static unsigned long validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+{
+	unsigned long count = 0;
+	struct page *page;
+	unsigned long flags;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+
+	list_for_each_entry(page, &n->partial, lru) {
+		validate_slab_slab(s, page);
+		count++;
+	}
+	if (count != n->nr_partial)
+		printk(KERN_ERR "SLUB %s: %lu partial slabs counted but "
+			"counter=%lu\n", s->name, count, n->nr_partial);
+
+	if (!(s->flags & SLAB_STORE_USER))
+		goto out;
+
+	list_for_each_entry(page, &n->full, lru) {
+		validate_slab_slab(s, page);
+		count++;
+	}
+	if (count != atomic_long_read(&n->nr_slabs))
+		printk(KERN_ERR "SLUB %s: %lu slabs counted but "
+			"counter=%ld\n", s->name, count,
+			atomic_long_read(&n->nr_slabs));
+
+out:
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return count;
+}
+
+static unsigned long validate_slab_cache(struct kmem_cache *s)
+{
+	int node;
+	unsigned long count = 0;
+
+	flush_all(s);
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		count += validate_slab_node(s, n);
+	}
+	return count;
+}
+
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
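
The core of the hunk above is validate_slab()'s two-phase walk: every object reachable from page->freelist is marked in an on-stack bitmap and checked as a free object, then every slot left unmarked must be an allocated object and is checked as such, so each slot is verified exactly once against the state the freelist claims for it. A minimal stand-alone C sketch of that classification idea (slab_info, N_OBJECTS and the printouts are illustrative inventions, not kernel code):

/* Two-phase free/allocated classification, as in validate_slab(). */
#include <stdio.h>
#include <string.h>

#define N_OBJECTS 8			/* hypothetical objects per slab */

struct slab_info {
	int free_idx[N_OBJECTS + 1];	/* freelist as slot indices, -1 ends */
};

int main(void)
{
	struct slab_info slab = { .free_idx = { 1, 4, 6, -1 } };
	unsigned char map[N_OBJECTS];	/* stands in for the on-stack bitmap */

	memset(map, 0, sizeof(map));	/* bitmap_zero() analogue */

	/* Phase 1: walk the freelist, mark and check free objects. */
	for (int *i = slab.free_idx; *i >= 0; i++) {
		map[*i] = 1;		/* set_bit() analogue */
		printf("slot %d: check free-object poison/redzone\n", *i);
	}

	/* Phase 2: every unmarked slot must be a live allocation. */
	for (int i = 0; i < N_OBJECTS; i++)
		if (!map[i])		/* test_bit() analogue */
			printf("slot %d: check allocated-object redzone\n", i);

	return 0;
}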
@@ -2680,7 +2771,6 @@ struct slab_attribute {
 static struct slab_attribute _name##_attr = \
 	__ATTR(_name, 0644, _name##_show, _name##_store)
 
-
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->size);
@@ -2886,6 +2976,22 @@ static ssize_t store_user_store(struct kmem_cache *s,
 }
 SLAB_ATTR(store_user);
 
+static ssize_t validate_show(struct kmem_cache *s, char *buf)
+{
+	return 0;
+}
+
+static ssize_t validate_store(struct kmem_cache *s,
+				const char *buf, size_t length)
+{
+	if (buf[0] == '1')
+		validate_slab_cache(s);
+	else
+		return -EINVAL;
+	return length;
+}
+SLAB_ATTR(validate);
+
 #ifdef CONFIG_NUMA
 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
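
For reference, SLAB_ATTR(validate) expands, per the slab_attribute macro visible in the previous hunk, to roughly the following (illustrative expansion, not a line of the patch):

static struct slab_attribute validate_attr =
	__ATTR(validate, 0644, validate_show, validate_store);

That validate_attr is what the final hunk below adds to slab_attrs[].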
@@ -2925,6 +3031,7 @@ static struct attribute * slab_attrs[] = {
 	&red_zone_attr.attr,
 	&poison_attr.attr,
 	&store_user_attr.attr,
+	&validate_attr.attr,
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
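
With validate_attr registered in slab_attrs[], every cache exposes a writable validate file in sysfs, and validate_store() runs a full validation pass when "1" is written (anything else returns -EINVAL). A user-space sketch of triggering a pass, assuming the /sys/slab/<cache>/ hierarchy SLUB exported at the time and a hypothetical cache name kmalloc-64; any inconsistencies found are reported via printk, so check dmesg afterwards:

/* Trigger a SLUB validation pass from user space. */
#include <stdio.h>

int main(void)
{
	/* Path and cache name are assumptions for illustration. */
	FILE *f = fopen("/sys/slab/kmalloc-64/validate", "w");

	if (!f) {
		perror("fopen validate attribute");
		return 1;
	}
	fputc('1', f);	/* validate_store() accepts only '1' */
	fclose(f);	/* flush; results land in the kernel log */
	return 0;
}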