Diffstat (limited to 'mm')

-rw-r--r--  mm/slab.c | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)

@@ -869,6 +869,22 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	dump_stack();
 }
 
+/*
+ * By default on NUMA we use alien caches to stage the freeing of
+ * objects allocated from other nodes. This causes massive memory
+ * inefficiencies when using fake NUMA setup to split memory into a
+ * large number of small nodes, so it can be disabled on the command
+ * line
+ */
+
+static int use_alien_caches __read_mostly = 1;
+static int __init noaliencache_setup(char *s)
+{
+	use_alien_caches = 0;
+	return 1;
+}
+__setup("noaliencache", noaliencache_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
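The hunk above uses the kernel's __setup() macro to hook a keyword on the boot command line: passing "noaliencache" makes noaliencache_setup() run during early init and clear the default-on flag before any caches are created. As a rough illustration of the pattern, here is a minimal userspace sketch; the strstr() scan and the cmdline string are invented stand-ins for the kernel's real early-parameter matching in init/main.c, not actual slab code.

#include <stdio.h>
#include <string.h>

/*
 * Userspace sketch (not kernel code) of the __setup() pattern:
 * a keyword on the boot command line flips a default-on flag.
 */
static int use_alien_caches = 1;

static int noaliencache_setup(char *s)
{
    (void)s;                 /* the real handler ignores its argument too */
    use_alien_caches = 0;
    return 1;                /* 1 == "option consumed", as in the patch */
}

int main(void)
{
    const char *cmdline = "root=/dev/sda1 noaliencache quiet";

    /* Crude stand-in for the kernel's command-line scan. */
    if (strstr(cmdline, "noaliencache"))
        noaliencache_setup("");

    printf("use_alien_caches = %d\n", use_alien_caches);   /* prints 0 */
    return 0;
}
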
@@ -1117,7 +1133,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == node))
+	if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
 		return 0;
 
 	l3 = cachep->nodelists[node];
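In cache_free_alien(), likely() and unlikely() are branch-prediction hints that expand to __builtin_expect(), and the added || clause makes every free take the cheap local-node path once alien caches are disabled. A standalone sketch of that fast-path check, assuming simplified stand-ins for the slab internals (fake_slab and cache_free_alien_sketch are invented names for illustration):

#include <stdio.h>

/* Same expansions the kernel uses for its branch hints. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int use_alien_caches = 1;   /* cleared by "noaliencache" */

struct fake_slab { int nodeid; };

/* Returns 0 when the free can stay on the local node's fast path. */
static int cache_free_alien_sketch(struct fake_slab *slabp, int node)
{
    /* Same-node frees are the common case; with alien caches
     * disabled, every free short-circuits onto the local path too. */
    if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
        return 0;

    return 1;   /* would stage the object in an alien cache */
}

int main(void)
{
    struct fake_slab s = { .nodeid = 3 };
    printf("%d\n", cache_free_alien_sketch(&s, 3));   /* 0: same node */
    use_alien_caches = 0;
    printf("%d\n", cache_free_alien_sketch(&s, 0));   /* 0: aliens off */
    return 0;
}
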
@@ -1195,7 +1211,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
+			struct array_cache **alien = NULL;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
@@ -1207,9 +1223,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 			if (!shared)
 				goto bad;
 
-			alien = alloc_alien_cache(node, cachep->limit);
-			if (!alien)
-				goto bad;
+			if (use_alien_caches) {
+				alien = alloc_alien_cache(node, cachep->limit);
+				if (!alien)
+					goto bad;
+			}
 			cachep->array[cpu] = nc;
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
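Note how this hunk relies on the earlier `struct array_cache **alien = NULL;` change: the allocation can now be skipped, yet the pointer still reaches the shared cleanup code, which is safe because kfree(NULL) is a no-op and free_alien_cache() returns early on a NULL table. The final hunk applies the same guard in alloc_kmemlist(). A userspace sketch of the idiom, with malloc()/free() and the invented setup_node() standing in for the slab helpers:

#include <stdlib.h>

/*
 * Userspace sketch (not slab code) of the idiom above: an optional
 * allocation starts out NULL so one shared error/teardown path can
 * release it unconditionally.
 */
static int use_alien_caches = 1;    /* cleared by "noaliencache" */

static int setup_node(size_t limit)
{
    int *shared = NULL;
    int *alien = NULL;              /* may legitimately stay NULL */

    shared = malloc(limit * sizeof(*shared));
    if (!shared)
        goto bad;

    if (use_alien_caches) {         /* the allocation is optional now */
        alien = malloc(limit * sizeof(*alien));
        if (!alien)
            goto bad;
    }

    /* ...real code would install shared/alien into per-node state... */
    free(alien);                    /* demo only: release and succeed */
    free(shared);
    return 0;

bad:
    free(alien);                    /* safe even if never allocated:  */
    free(shared);                   /* free(NULL) is a no-op          */
    return -1;
}

int main(void)
{
    use_alien_caches = 0;           /* simulate booting with noaliencache */
    return setup_node(16) ? 1 : 0;
}
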
@@ -3590,13 +3608,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	int node;
 	struct kmem_list3 *l3;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien;
+	struct array_cache **new_alien = NULL;
 
 	for_each_online_node(node) {
 
-		new_alien = alloc_alien_cache(node, cachep->limit);
-		if (!new_alien)
-			goto fail;
+		if (use_alien_caches) {
+			new_alien = alloc_alien_cache(node, cachep->limit);
+			if (!new_alien)
+				goto fail;
+		}
 
 		new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,