author     Eric Dumazet <dada1@cosmosbay.com>                    2007-05-06 17:49:28 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:53 -0400
commit     6310984694c8204ad16a2414cd58808fae68e02b (patch)
tree       9579700c6b8b99e873462018cf536c00ab76dfb6 /mm
parent     364fbb29a0105863d76a1f7bbc01783a4af30a75 (diff)
SLAB: don't allocate empty shared caches
We can avoid allocating empty shared caches and avoid an unnecessary check
of cache->limit.  We save some memory.  We avoid bringing into CPU cache
unnecessary cache lines.

All accesses to l3->shared already check for NULL pointers, so this patch
is safe.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
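To make the reasoning concrete, here is a minimal user-space C sketch of the pattern the patch applies, not the kernel code itself: allocate the shared array only when the configured shared count is non-zero, and rely on the NULL checks that every consumer already performs. The toy_cache/toy_array types and helper names below are invented for illustration.

/*
 * Minimal sketch (assumed names, not kernel APIs): skip the allocation
 * entirely when the configured "shared" count is zero, and let every
 * consumer tolerate a NULL pointer.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_array {
        unsigned int avail;     /* objects currently cached */
        unsigned int limit;     /* capacity */
        void *entry[];          /* flexible array of object pointers */
};

struct toy_cache {
        unsigned int shared;    /* 0 means "no shared cache wanted" */
        unsigned int batchcount;
        struct toy_array *shared_array;
};

static struct toy_array *alloc_toy_array(unsigned int entries)
{
        struct toy_array *ac;

        ac = calloc(1, sizeof(*ac) + entries * sizeof(void *));
        if (ac)
                ac->limit = entries;
        return ac;
}

static int toy_cache_init(struct toy_cache *c)
{
        c->shared_array = NULL;
        if (c->shared) {        /* only allocate when it will actually be used */
                c->shared_array = alloc_toy_array(c->shared * c->batchcount);
                if (!c->shared_array)
                        return -1;
        }
        return 0;
}

static void toy_cache_drain(struct toy_cache *c)
{
        /* callers already check for NULL, so skipping the allocation is safe */
        if (c->shared_array) {
                printf("draining %u cached objects\n", c->shared_array->avail);
                free(c->shared_array);
                c->shared_array = NULL;
        }
}

int main(void)
{
        struct toy_cache c = { .shared = 0, .batchcount = 16 };

        if (toy_cache_init(&c) == 0)
                toy_cache_drain(&c);    /* no-op: nothing was allocated */
        return 0;
}

With shared set to 0, the drain step is simply a no-op because nothing was allocated, which is why guarding the allocation saves memory without changing behaviour.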
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  26
1 file changed, 15 insertions, 11 deletions
diff --git a/mm/slab.c b/mm/slab.c
index ae440b5ba60e..00f98b9f6df1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1223,19 +1223,20 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                  */
                 list_for_each_entry(cachep, &cache_chain, next) {
                         struct array_cache *nc;
-                        struct array_cache *shared;
+                        struct array_cache *shared = NULL;
                         struct array_cache **alien = NULL;
 
                         nc = alloc_arraycache(node, cachep->limit,
                                                 cachep->batchcount);
                         if (!nc)
                                 goto bad;
-                        shared = alloc_arraycache(node,
+                        if (cachep->shared) {
+                                shared = alloc_arraycache(node,
                                         cachep->shared * cachep->batchcount,
                                         0xbaadf00d);
-                        if (!shared)
-                                goto bad;
-
+                                if (!shared)
+                                        goto bad;
+                        }
                         if (use_alien_caches) {
                                 alien = alloc_alien_cache(node, cachep->limit);
                                 if (!alien)
@@ -1317,8 +1318,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 
                         shared = l3->shared;
                         if (shared) {
-                                free_block(cachep, l3->shared->entry,
-                                                l3->shared->avail, node);
+                                free_block(cachep, shared->entry,
+                                                shared->avail, node);
                                 l3->shared = NULL;
                         }
 
@@ -3870,12 +3871,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                                 goto fail;
                 }
 
-                new_shared = alloc_arraycache(node,
+                new_shared = NULL;
+                if (cachep->shared) {
+                        new_shared = alloc_arraycache(node,
                                 cachep->shared*cachep->batchcount,
                                 0xbaadf00d);
-                if (!new_shared) {
-                        free_alien_cache(new_alien);
-                        goto fail;
+                        if (!new_shared) {
+                                free_alien_cache(new_alien);
+                                goto fail;
+                        }
                 }
 
                 l3 = cachep->nodelists[node];