aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorEzequiel Garcia <elezegarcia@gmail.com>2012-09-08 16:47:57 -0400
committerPekka Enberg <penberg@kernel.org>2012-09-25 03:18:34 -0400
commit48356303ff8cce7036f13a23df9119d3f47461ce (patch)
tree3e3eeb7d4daf12e7d68037d6c80518e929221aa4 /mm
parent4052147c0afa1cf05780ed846f37e87cdde9f628 (diff)
mm, slab: Rename __cache_alloc() -> slab_alloc()
This patch does not fix anything and its only goal is to produce common code between SLAB and SLUB.

Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.c14
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 4c54a2357937..d011030e9613 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3561,7 +3561,7 @@ done:
3561 * Fallback to other node is possible if __GFP_THISNODE is not set. 3561 * Fallback to other node is possible if __GFP_THISNODE is not set.
3562 */ 3562 */
3563static __always_inline void * 3563static __always_inline void *
3564__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3564slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3565 unsigned long caller) 3565 unsigned long caller)
3566{ 3566{
3567 unsigned long save_flags; 3567 unsigned long save_flags;
@@ -3648,7 +3648,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3648#endif /* CONFIG_NUMA */ 3648#endif /* CONFIG_NUMA */
3649 3649
3650static __always_inline void * 3650static __always_inline void *
3651__cache_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3651slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3652{ 3652{
3653 unsigned long save_flags; 3653 unsigned long save_flags;
3654 void *objp; 3654 void *objp;
@@ -3824,7 +3824,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3824 */ 3824 */
3825void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3825void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3826{ 3826{
3827 void *ret = __cache_alloc(cachep, flags, _RET_IP_); 3827 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3828 3828
3829 trace_kmem_cache_alloc(_RET_IP_, ret, 3829 trace_kmem_cache_alloc(_RET_IP_, ret,
3830 cachep->object_size, cachep->size, flags); 3830 cachep->object_size, cachep->size, flags);
@@ -3839,7 +3839,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3839{ 3839{
3840 void *ret; 3840 void *ret;
3841 3841
3842 ret = __cache_alloc(cachep, flags, _RET_IP_); 3842 ret = slab_alloc(cachep, flags, _RET_IP_);
3843 3843
3844 trace_kmalloc(_RET_IP_, ret, 3844 trace_kmalloc(_RET_IP_, ret,
3845 size, cachep->size, flags); 3845 size, cachep->size, flags);
@@ -3851,7 +3851,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
3851#ifdef CONFIG_NUMA 3851#ifdef CONFIG_NUMA
3852void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3852void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3853{ 3853{
3854 void *ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_); 3854 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3855 3855
3856 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3856 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3857 cachep->object_size, cachep->size, 3857 cachep->object_size, cachep->size,
@@ -3869,7 +3869,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3869{ 3869{
3870 void *ret; 3870 void *ret;
3871 3871
3872 ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_); 3872 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3873 3873
3874 trace_kmalloc_node(_RET_IP_, ret, 3874 trace_kmalloc_node(_RET_IP_, ret,
3875 size, cachep->size, 3875 size, cachep->size,
@@ -3932,7 +3932,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3932 cachep = __find_general_cachep(size, flags); 3932 cachep = __find_general_cachep(size, flags);
3933 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3933 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3934 return cachep; 3934 return cachep;
3935 ret = __cache_alloc(cachep, flags, caller); 3935 ret = slab_alloc(cachep, flags, caller);
3936 3936
3937 trace_kmalloc(caller, ret, 3937 trace_kmalloc(caller, ret,
3938 size, cachep->size, flags); 3938 size, cachep->size, flags);