-rw-r--r--  mm/slab.c | 77 ++++++++++++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 42 insertions(+), 35 deletions(-)
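The patch factors the NUMA alien-cache handling out of __cache_free() in mm/slab.c into a new helper, cache_free_alien(), and adds a static inline stub that returns 0 so non-NUMA builds keep a single unconditional call site with no #ifdef in the function body.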
diff --git a/mm/slab.c b/mm/slab.c
@@ -1024,6 +1024,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 		}
 	}
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	struct slab *slabp = virt_to_slab(objp);
+	int nodeid = slabp->nodeid;
+	struct kmem_list3 *l3;
+	struct array_cache *alien = NULL;
+
+	/*
+	 * Make sure we are not freeing an object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(slabp->nodeid == numa_node_id()))
+		return 0;
+
+	l3 = cachep->nodelists[numa_node_id()];
+	STATS_INC_NODEFREES(cachep);
+	if (l3->alien && l3->alien[nodeid]) {
+		alien = l3->alien[nodeid];
+		spin_lock(&alien->lock);
+		if (unlikely(alien->avail == alien->limit)) {
+			STATS_INC_ACOVERFLOW(cachep);
+			__drain_alien_cache(cachep, alien, nodeid);
+		}
+		alien->entry[alien->avail++] = objp;
+		spin_unlock(&alien->lock);
+	} else {
+		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+		free_block(cachep, &objp, 1, nodeid);
+		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+	}
+	return 1;
+}
+
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
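The helper's logic, visible in the hunk above: an object freed on a CPU that does not belong to the object's home node is queued in a per-node "alien" array cache, and when that cache fills up it is drained back to the owning node; only if no alien cache exists does the free go straight to the remote node's free list under its list lock. Below is a minimal user-space C sketch of that queue-then-drain-on-overflow pattern; the struct alien_cache here and the helpers drain_to_node() and queue_alien_free() are illustrative stand-ins, not the kernel's types or functions.

#include <stdio.h>

#define ALIEN_LIMIT 4

/* One queue of objects owned by a single remote node. */
struct alien_cache {
	int avail;			/* objects currently queued */
	int limit;			/* drain threshold */
	void *entry[ALIEN_LIMIT];	/* queued remote-node objects */
};

/* Stand-in for free_block(): hand everything back to the owning node. */
static void drain_to_node(struct alien_cache *ac, int nodeid)
{
	printf("draining %d object(s) to node %d\n", ac->avail, nodeid);
	ac->avail = 0;
}

/* Returns 1 if the free was handled as a remote (alien) free. */
static int queue_alien_free(struct alien_cache *ac, void *objp,
			    int obj_node, int this_node)
{
	if (obj_node == this_node)
		return 0;		/* local free: caller's fast path */
	if (ac->avail == ac->limit)	/* cache full: flush it first */
		drain_to_node(ac, obj_node);
	ac->entry[ac->avail++] = objp;	/* park the remote free */
	return 1;
}

int main(void)
{
	struct alien_cache ac = { .avail = 0, .limit = ALIEN_LIMIT };
	int obj[6];

	for (int i = 0; i < 6; i++)	/* objects all belong to node 1 */
		queue_alien_free(&ac, &obj[i], 1, 0);
	drain_to_node(&ac, 1);		/* final flush */
	return 0;
}

The point of queueing rather than freeing immediately is batching: the remote node's lock is taken once per drain instead of once per object.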
@@ -1038,6 +1072,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	return 0;
+}
+
 #endif
 
 static int cpuup_callback(struct notifier_block *nfb,
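The !CONFIG_NUMA stub above is the usual kernel pattern for configuring a feature out: a static inline that always returns 0 lets the caller keep a single unconditional call while the compiler discards the branch entirely. A small sketch of the pattern, where HAVE_REMOTE_FREE, remote_free_example() and free_example() are illustrative names, not the kernel's:

#ifdef HAVE_REMOTE_FREE
static inline int remote_free_example(void *objp)
{
	return objp != NULL;	/* real remote handling would live here */
}
#else
static inline int remote_free_example(void *objp)
{
	(void)objp;
	return 0;		/* constant 0: the caller's branch folds away */
}
#endif

void free_example(void *objp)
{
	if (remote_free_example(objp))	/* compiles to nothing when stubbed */
		return;
	/* ... local fast path ... */
}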
@@ -3087,41 +3126,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	/* Make sure we are not freeing a object from another
-	 * node to the array cache on this cpu.
-	 */
-#ifdef CONFIG_NUMA
-	{
-		struct slab *slabp;
-		slabp = virt_to_slab(objp);
-		if (unlikely(slabp->nodeid != numa_node_id())) {
-			struct array_cache *alien = NULL;
-			int nodeid = slabp->nodeid;
-			struct kmem_list3 *l3;
-
-			l3 = cachep->nodelists[numa_node_id()];
-			STATS_INC_NODEFREES(cachep);
-			if (l3->alien && l3->alien[nodeid]) {
-				alien = l3->alien[nodeid];
-				spin_lock(&alien->lock);
-				if (unlikely(alien->avail == alien->limit)) {
-					STATS_INC_ACOVERFLOW(cachep);
-					__drain_alien_cache(cachep,
-							    alien, nodeid);
-				}
-				alien->entry[alien->avail++] = objp;
-				spin_unlock(&alien->lock);
-			} else {
-				spin_lock(&(cachep->nodelists[nodeid])->
-					  list_lock);
-				free_block(cachep, &objp, 1, nodeid);
-				spin_unlock(&(cachep->nodelists[nodeid])->
-					  list_lock);
-			}
-			return;
-		}
-	}
-#endif
+	if (cache_free_alien(cachep, objp))
+		return;
+
 	if (likely(ac->avail < ac->limit)) {
 		STATS_INC_FREEHIT(cachep);
 		ac->entry[ac->avail++] = objp;
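Net effect of the last hunk: the 35-line #ifdef CONFIG_NUMA block inside __cache_free() collapses to a two-line early-return guard, so the function reads straight through to the per-CPU fast path on every configuration, and the remote-free policy now lives next to the rest of the alien-cache code near drain_alien_cache().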