Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	38
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*
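The shape of this change is a common kernel pattern: keep the common case (the object is freed on its own NUMA node) in a small inline function so the likely() branch can be folded into the caller, and move the rarely taken remote-node work out of line into __cache_free_alien(). A minimal standalone sketch of the same fast-path/slow-path split, using hypothetical names rather than the mm/slab.c functions, might look like this:

#include <stdio.h>

/*
 * Hypothetical slow path (not from mm/slab.c): handles the rare case
 * where the object belongs to a different node than the freeing CPU.
 */
static int remote_free_slow(void *obj, int node, int obj_node)
{
	printf("slow path: obj %p from node %d freed on node %d\n",
	       obj, obj_node, node);
	return 1;
}

/*
 * Hypothetical fast path: an inline wrapper whose expected branch lets
 * the local-node case return immediately, mirroring how the reworked
 * cache_free_alien() only compares node against page_node before
 * deferring everything else to __cache_free_alien().
 */
static inline int maybe_remote_free(void *obj, int node, int obj_node)
{
	if (__builtin_expect(node == obj_node, 1))
		return 0;	/* local free: nothing extra to do */

	return remote_free_slow(obj, node, obj_node);
}

int main(void)
{
	int dummy;

	maybe_remote_free(&dummy, 0, 0);	/* fast path, no output */
	maybe_remote_free(&dummy, 0, 1);	/* takes the slow path */
	return 0;
}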