author		Christoph Lameter <cl@linux.com>	2012-06-13 11:24:58 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-06-14 02:20:19 -0400
commit		8c138bc00925521c4e764269db3a903bd2a51592 (patch)
tree		8c62aed20b2061867c1e6366c587bb4cea89bebd /mm/slab.c
parent		3b0efdfa1e719303536c04d9abca43abeb40f80a (diff)
slab: Get rid of obj_size macro
The size of the slab object is frequently needed. Since we now have a size
field directly in the kmem_cache structure, there is no longer any need for
the obj_size macro/function.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
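The conversion is mechanical; a minimal before/after sketch of the pattern applied throughout mm/slab.c (illustrative only, not part of the patch):

	/* Before: obj_size() was a function under CONFIG_DEBUG_SLAB and a
	 * macro expanding to cachep->size otherwise. */
	int size = obj_size(cachep);

	/* After: read the object_size field of struct kmem_cache directly. */
	int size = cachep->object_size;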
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	47
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e2b3907b7b0c..fc4a77446700 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -433,11 +433,6 @@ static int obj_offset(struct kmem_cache *cachep)
 	return cachep->obj_offset;
 }
 
-static int obj_size(struct kmem_cache *cachep)
-{
-	return cachep->object_size;
-}
-
 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
@@ -465,7 +460,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #else
 
 #define obj_offset(x)			0
-#define obj_size(cachep)		(cachep->size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -1853,7 +1847,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
-	int size = obj_size(cachep);
+	int size = cachep->object_size;
 
 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
@@ -1885,7 +1879,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
-	int size = obj_size(cachep);
+	int size = cachep->object_size;
 	addr = &((char *)addr)[obj_offset(cachep)];
 
 	memset(addr, val, size);
@@ -1945,7 +1939,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 		printk("\n");
 	}
 	realobj = (char *)objp + obj_offset(cachep);
-	size = obj_size(cachep);
+	size = cachep->object_size;
 	for (i = 0; i < size && lines; i += 16, lines--) {
 		int limit;
 		limit = 16;
@@ -1962,7 +1956,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 	int lines = 0;
 
 	realobj = (char *)objp + obj_offset(cachep);
-	size = obj_size(cachep);
+	size = cachep->object_size;
 
 	for (i = 0; i < size; i++) {
 		char exp = POISON_FREE;
@@ -3265,7 +3259,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 	if (cachep == &cache_cache)
 		return false;
 
-	return should_failslab(obj_size(cachep), flags, cachep->flags);
+	return should_failslab(cachep->object_size, flags, cachep->flags);
 }
 
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
@@ -3525,14 +3519,14 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
 				 flags);
 
 	if (likely(ptr))
-		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
 
 	if (unlikely((flags & __GFP_ZERO) && ptr))
-		memset(ptr, 0, obj_size(cachep));
+		memset(ptr, 0, cachep->object_size);
 
 	return ptr;
 }
@@ -3587,15 +3581,15 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
-	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
 				 flags);
 	prefetchw(objp);
 
 	if (likely(objp))
-		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
 
 	if (unlikely((flags & __GFP_ZERO) && objp))
-		memset(objp, 0, obj_size(cachep));
+		memset(objp, 0, cachep->object_size);
 
 	return objp;
 }
@@ -3711,7 +3705,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 
-	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+	kmemcheck_slab_free(cachep, objp, cachep->object_size);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3746,7 +3740,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
-			       obj_size(cachep), cachep->size, flags);
+			       cachep->object_size, cachep->size, flags);
 
 	return ret;
 }
@@ -3774,7 +3768,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 				       __builtin_return_address(0));
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    obj_size(cachep), cachep->size,
+				    cachep->object_size, cachep->size,
 				    flags, nodeid);
 
 	return ret;
@@ -3896,9 +3890,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	debug_check_no_locks_freed(objp, obj_size(cachep));
+	debug_check_no_locks_freed(objp, cachep->size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(objp, obj_size(cachep));
+		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 
@@ -3927,8 +3921,9 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
-	debug_check_no_locks_freed(objp, obj_size(c));
-	debug_check_no_obj_freed(objp, obj_size(c));
+	debug_check_no_locks_freed(objp, c->object_size);
+
+	debug_check_no_obj_freed(objp, c->object_size);
 	__cache_free(c, (void *)objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 }
@@ -3936,7 +3931,7 @@ EXPORT_SYMBOL(kfree);
 
 unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
-	return obj_size(cachep);
+	return cachep->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -4657,6 +4652,6 @@ size_t ksize(const void *objp)
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	return obj_size(virt_to_cache(objp));
+	return virt_to_cache(objp)->object_size;
 }
 EXPORT_SYMBOL(ksize);