Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	59
1 file changed, 10 insertions(+), 49 deletions(-)
@@ -409,9 +409,6 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
 	/* 5) cache creation/removal */
 	const char *name;
 	struct list_head next;
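This hunk removes the destructor callback from struct kmem_cache. The dtor parameter of kmem_cache_create() survives for the moment (see further down), but callers must now pass NULL for it. A minimal caller sketch against this interface; struct bar, bar_ctor, and bar_init are hypothetical names, not from this patch:

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

struct bar {
	int refcount;
};

static struct kmem_cache *bar_cachep;

/* One-time object initialization, run when a new slab page is populated. */
static void bar_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct bar *b = obj;

	b->refcount = 0;
}

static int __init bar_init(void)
{
	/* dtor must now be NULL; anything else trips the BUG() added below. */
	bar_cachep = kmem_cache_create("bar_cache", sizeof(struct bar), 0,
				       SLAB_HWCACHE_ALIGN, bar_ctor, NULL);
	return bar_cachep ? 0 : -ENOMEM;
}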
@@ -572,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #endif
 
 /*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
-#define	MAX_GFP_ORDER	13	/* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define	MAX_OBJ_ORDER	5	/* 32 pages */
-#define	MAX_GFP_ORDER	5	/* 32 pages */
-#else
-#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
-#define	MAX_GFP_ORDER	8	/* up to 1Mb */
-#endif
-
-/*
  * Do not go above this order unless 0 objects fit into the slab.
  */
 #define	BREAK_GFP_ORDER_HI	1
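The per-config MAX_OBJ_ORDER/MAX_GFP_ORDER block gives way to a single limit shared by all slab allocators. Quoting the new definitions from include/linux/slab.h in this series from memory, so treat them as an approximation rather than the authoritative text:

/* kmalloc is capped at 32MB (2^25), or at MAX_ORDER pages if that is less. */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)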
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	WARN_ON_ONCE(size == 0);
 	while (size > csizep->cs_size)
 		csizep++;
 
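The WARN_ON_ONCE catches zero-size allocations: non-constant kmalloc() sizes funnel through __find_general_cachep(), where a runtime size of zero would otherwise be quietly rounded up to the smallest size class. An illustrative caller, not from this patch:

#include <linux/slab.h>

static void *grab_buffer(size_t len)
{
	/* If len is 0 at runtime, the lookup above now warns once instead
	 * of silently handing back an object from the smallest size class. */
	return kmalloc(len, GFP_KERNEL);
}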
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 				slab_error(cachep, "end of a freed object "
 					   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
-		}
-	}
 }
 #endif
 
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	size_t left_over = 0;
 	int gfporder;
 
-	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
@@ -2063,7 +2037,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 {
 	if (g_cpucache_up == FULL)
 		return enable_cpucache(cachep);
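setup_cpu_cache() runs while the caches are still being bootstrapped (g_cpucache_up below FULL) and on that path references functions living in .init.text; __init_refok marks the reference as intentional so modpost does not flag a section mismatch. A generic sketch of the pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/kernel.h>

static int __init early_setup(void)	/* lives in .init.text */
{
	return 0;
}

/* __init_refok: calling into .init.text is safe here because this branch
 * is only taken before free_initmem() discards that section. */
static int __init_refok setup(void)
{
	if (system_state == SYSTEM_BOOTING)
		return early_setup();
	return 0;
}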
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    size > KMALLOC_MAX_SIZE || dtor) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
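The sanity check tightens from "a dtor without a ctor is a bug" to "any dtor at all is a bug", and the open-coded (1 << MAX_OBJ_ORDER) * PAGE_SIZE arithmetic becomes KMALLOC_MAX_SIZE. A hypothetical caller that now dies at creation time; bar_dtor and bad_init are made-up names for illustration:

#include <linux/slab.h>

/* A stale destructor left over from a pre-2.6.22 driver. */
static void bar_dtor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
}

static struct kmem_cache *bad_init(void)
{
	/* Any non-NULL dtor now hits the BUG() above immediately, rather
	 * than registering a callback that was never reliably called. */
	return kmem_cache_create("bad_cache", 64, 0, 0, NULL, bar_dtor);
}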
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
-
 	/*
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(!cachep->slabp_cache);
 	}
 	cachep->ctor = ctor;
-	cachep->dtor = dtor;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
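With ctor_flags gone, both the debug and non-debug paths invoke the constructor with a hard-coded 0 as its third argument, so a constructor that still gates on the old SLAB_CTOR_CONSTRUCTOR convention would now skip its initialization entirely. Hypothetical before/after, not from this patch:

#include <linux/slab.h>
#include <linux/string.h>

struct bar { int refcount; };

/* Broken under this series: flags is always 0 here, so init never runs. */
static void old_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	if (flags & SLAB_CTOR_CONSTRUCTOR)
		memset(obj, 0, sizeof(struct bar));
}

/* Correct: initialize unconditionally and ignore the flags argument. */
static void new_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	memset(obj, 0, sizeof(struct bar));
}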
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -2835,7 +2803,6 @@ failed:
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
  */
 static void kfree_debugcheck(const void *objp)
 {
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
-		/* we want to cache poison the object,
-		 * call the destruction callback
-		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
-	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
 #endif
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",