author    Ravikiran G Thirumalai <kiran@scalex86.org>  2006-02-05 02:27:56 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>        2006-02-05 14:06:53 -0500
commit    2e1217cf96b54d3b2d0162930608159e73507fbf
tree      ac4eba17cc9cec38e003a4bbabb5dae63934f4ac /mm/slab.c
parent    64b4a954b03a1153fb8ae38d6ffbd991e01a1e80
[PATCH] NUMA slab locking fixes: move colour_next to l3

colour_next is used as an index to add a colouring offset to a new slab in the
cache (colour_off * colour_next). With the NUMA-aware slab allocator it makes
sense to colour slabs added on the same node sequentially with colour_next.
This patch moves the colouring index "colour_next" per node by placing it on
kmem_list3 rather than on kmem_cache. This also helps simplify locking for the
CPU up and down paths.

Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
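As a reader's aid, here is a minimal userspace sketch of the colouring
arithmetic the patch moves per node. The struct and helper names below
(node_list, cache, next_colour_offset) are simplified stand-ins invented for
illustration, not the kernel's definitions: each node's list carries its own
colour_next, a new slab's offset is colour_next * colour_off, and the index
wraps at the cache's colour count, mirroring the cycle cache_grow() performs
in the diff below.

#include <stdio.h>

/* Simplified stand-ins for kmem_list3 and kmem_cache (illustration only). */
struct node_list {
	unsigned int colour_next;   /* per-node colouring index, as in this patch */
};

struct cache {
	size_t colour;              /* number of distinct colours */
	unsigned int colour_off;    /* bytes per colour step */
	struct node_list nodes[2];  /* one list per NUMA node */
};

/* Return the colouring offset for a new slab on @nodeid and advance that
 * node's colour_next, wrapping at cachep->colour -- the same cycle
 * cache_grow() now performs under l3->list_lock. */
static unsigned int next_colour_offset(struct cache *cachep, int nodeid)
{
	struct node_list *l3 = &cachep->nodes[nodeid];
	unsigned int offset = l3->colour_next;

	if (++l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	return offset * cachep->colour_off;
}

int main(void)
{
	struct cache c = { .colour = 3, .colour_off = 64 };

	/* Node 0 colours its slabs 0, 64, 128, 0, ... while node 1
	 * independently starts its own sequence at 0. */
	for (int i = 0; i < 4; i++)
		printf("node0 slab%d offset=%u\n", i, next_colour_offset(&c, 0));
	printf("node1 slab0 offset=%u\n", next_colour_offset(&c, 1));
	return 0;
}

Slabs grown back-to-back on the same node thus start on different cache
lines, while another node's sequence is unaffected.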
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 71370256a7eb..2317096166dd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
 	unsigned long next_reap;
 	int free_touched;
 	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
 	spinlock_t list_lock;
 	struct array_cache *shared;	/* shared per node */
 	struct array_cache **alien;	/* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	INIT_LIST_HEAD(&parent->slabs_free);
 	parent->shared = NULL;
 	parent->alien = NULL;
+	parent->colour_next = 0;
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
 
 	size_t colour;		/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	unsigned int colour_next;	/* cache colouring */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;	/* dynamic flags */
@@ -1119,7 +1120,6 @@ void __init kmem_cache_init(void)
 		BUG();
 
 	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.colour_next = 0;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
@@ -2324,18 +2324,19 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 */
 	ctor_flags |= SLAB_CTOR_ATOMIC;
 
-	/* About to mess with non-constant members - lock. */
+	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
-	spin_lock(&cachep->spinlock);
+	l3 = cachep->nodelists[nodeid];
+	spin_lock(&l3->list_lock);
 
 	/* Get colour for the slab, and cal the next value. */
-	offset = cachep->colour_next;
-	cachep->colour_next++;
-	if (cachep->colour_next >= cachep->colour)
-		cachep->colour_next = 0;
-	offset *= cachep->colour_off;
+	offset = l3->colour_next;
+	l3->colour_next++;
+	if (l3->colour_next >= cachep->colour)
+		l3->colour_next = 0;
+	spin_unlock(&l3->list_lock);
 
-	spin_unlock(&cachep->spinlock);
+	offset *= cachep->colour_off;
 
 	check_irq_off();
 	if (local_flags & __GFP_WAIT)
@@ -2367,7 +2368,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	check_irq_off();
-	l3 = cachep->nodelists[nodeid];
 	spin_lock(&l3->list_lock);
 
 	/* Make slab active. */
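The locking side of the change can be illustrated the same way: below is a
hedged pthread sketch (userspace C with invented names, not kernel code) in
which the colour_next update is guarded only by the growing node's lock, the
analogue of l3->list_lock, so concurrent growth on different nodes no longer
serialises on a single cache-wide lock the way the old cachep->spinlock did.

#include <pthread.h>
#include <stdio.h>

/* Per-node state: a stand-in for kmem_list3 with its list_lock. */
struct node {
	pthread_mutex_t list_lock;
	unsigned int colour_next;
};

static struct node nodes[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

static const unsigned int colour = 3, colour_off = 64;

static void *grow_on_node(void *arg)
{
	struct node *n = arg;

	for (int i = 0; i < 4; i++) {
		/* Before the patch this critical section took one cache-wide
		 * lock; now only the growing node's lock is held, so the
		 * other node's thread never blocks here. */
		pthread_mutex_lock(&n->list_lock);
		unsigned int offset = n->colour_next * colour_off;
		if (++n->colour_next >= colour)
			n->colour_next = 0;
		pthread_mutex_unlock(&n->list_lock);

		printf("node%td slab offset=%u\n", n - nodes, offset);
	}
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, grow_on_node, &nodes[0]);
	pthread_create(&t1, NULL, grow_on_node, &nodes[1]);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}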