Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 35 ++++++++++++++---------------------
 1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 06236e4ddc1b..e76eee466886 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -406,7 +406,7 @@ struct kmem_cache {
 	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
-	void (*ctor)(struct kmem_cache *, void *);
+	void (*ctor)(void *obj);
 
 /* 5) cache creation/removal */
 	const char *name;
@@ -1901,15 +1901,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 #endif
 
 #if DEBUG
-/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
- */
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 	int i;
 	for (i = 0; i < cachep->num; i++) {
@@ -1938,7 +1930,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 #else
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 }
 #endif
@@ -1956,7 +1948,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
 	void *addr = slabp->s_mem - slabp->colouroff;
 
-	slab_destroy_objs(cachep, slabp);
+	slab_destroy_debugcheck(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slab_rcu *slab_rcu;
 
@@ -2145,8 +2137,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  */
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags,
-	void (*ctor)(struct kmem_cache *, void *))
+	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2454,7 +2445,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	struct kmem_list3 *l3;
 	int node;
 
-	on_each_cpu(do_drain, cachep, 1, 1);
+	on_each_cpu(do_drain, cachep, 1);
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
@@ -2661,7 +2652,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 * They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(cachep, objp + obj_offset(cachep));
+			cachep->ctor(objp + obj_offset(cachep));
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2677,7 +2668,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					    cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(cachep, objp);
+			cachep->ctor(objp);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -3101,7 +3092,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(cachep, objp);
+		cachep->ctor(objp);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
@@ -3263,9 +3254,12 @@ retry:
 
 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
 			cache->nodelists[nid] &&
-			cache->nodelists[nid]->free_objects)
+			cache->nodelists[nid]->free_objects) {
 				obj = ____cache_alloc_node(cache,
 					flags | GFP_THISNODE, nid);
+				if (obj)
+					break;
+		}
 	}
 
 	if (!obj) {
@@ -3936,7 +3930,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	}
 	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
@@ -4478,4 +4472,3 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);
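
Note for callers (not part of the patch above): after this change, slab constructors receive only the object pointer, and kmem_cache_create() takes the shortened one-argument ctor type. A minimal usage sketch under the new signature follows; the names my_obj, my_obj_ctor, my_cache and "my_obj_cache" are hypothetical and used only for illustration.

#include <linux/init.h>
#include <linux/slab.h>

struct my_obj {
	int a;
	int b;
};

/* New-style constructor: gets just the object pointer, no kmem_cache *. */
static void my_obj_ctor(void *obj)
{
	struct my_obj *p = obj;

	p->a = 0;
	p->b = 0;
}

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/* The ctor is now the last argument of the shortened prototype. */
	my_cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				     0, SLAB_HWCACHE_ALIGN, my_obj_ctor);
	return my_cache ? 0 : -ENOMEM;
}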