author    | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2014-10-09 18:26:09 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:51 -0400
commit    | 25c4f304be8cd6831105d3a2876028e4ecd254a1 (patch)
tree      | 53f8a895514bd821561463b0eb65850f869f3a6d /mm
parent    | d3aec34466d9d6c8ceaa7f95088ced5705823735 (diff)
mm/slab: factor out unlikely part of cache_free_alien()
cache_free_alien() is a rarely used function, called only when an object is
freed on a node other than the one its page belongs to. But it is defined
with the inline attribute, so it gets inlined into __cache_free(), the core
free function of the slab allocator, and uselessly bloats
kmem_cache_free()/kfree(). What we really need inlined is just the
node-match check, so this patch factors the rest of cache_free_alien() out
into a separate __cache_free_alien() to reduce the code size of
kmem_cache_free()/kfree().
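The pattern is worth spelling out. Below is a minimal standalone sketch of
it, not the kernel code itself: the helper names are illustrative, and
likely() is defined via GCC's __builtin_expect() so the snippet compiles
outside the kernel.

#define likely(x)	__builtin_expect(!!(x), 1)	/* kernel-style branch hint */

/* Cold path: deliberately not inline; emitted once per object file
 * rather than once per call site. */
static int free_to_remote_node(void *objp, int node, int page_node)
{
	/* ... the expensive cross-node bookkeeping would live here ... */
	(void)objp; (void)node; (void)page_node;
	return 1;
}

/* Hot path: only this cheap node-match check gets inlined into callers. */
static inline int maybe_free_alien(void *objp, int node, int page_node)
{
	if (likely(node == page_node))
		return 0;
	return free_to_remote_node(objp, node, page_node);
}

With the split, the compiler duplicates only the compare-and-branch at each
call site while the cold body is emitted once, which is what the nm numbers
below show.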
<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free
<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free
You can see the slightly reduced text size: kfree shrinks from 0x228 to
0x1b5 bytes (552 -> 437) and kmem_cache_free from 0x216 to 0x181 (534 -> 385).
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab.c | 38
1 file changed, 21 insertions, 17 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*