author	Ravikiran G Thirumalai <kiran@scalex86.org>	2006-09-26 02:31:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 11:48:49 -0400
commit	e5ac9c5aec7c4bc57fa93f2d37d760a22cb7bd33 (patch)
tree	3f8824da788608592a06cd888eca220c297eb901
parent	dfd54cbcc0b834652389ce99b5e656ea5f44a3c1 (diff)
[PATCH] Add some comments to slab.c
Also, check that we get a valid slabp_cache for off-slab slab descriptors.
We should always get this.  If we don't, we will have to disable off-slab
descriptors for this cache and redo the calculations.  This is a rare case,
so add a BUG_ON, for now, just in case.

Signed-off-by: Alok N Kataria <alok.kataria@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
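For context on why the BUG_ON should never fire: cachep->slabp_cache is chosen by kmem_find_general_cachep(), which walks the malloc_sizes table of general kmalloc caches. The sketch below is a simplified, from-memory rendering of that lookup in the slab.c of this era (DEBUG checks, the __find_general_cachep split, and exact declarations omitted), not part of this patch:

struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
	struct kmem_cache	*cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];	/* created in ascending cs_size order */

struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

	/* Walk the ascending table until a general cache is big enough. */
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * The terminating entry has NULL cache pointers; returning it is
	 * the situation the new BUG_ON catches.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

Because the general caches are created in ascending size order and slab descriptors are small, the descriptor-sized lookup should always land on an already-initialized cache, so a NULL slabp_cache would indicate a real bug.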
-rw-r--r--	mm/slab.c	29
1 file changed, 27 insertions, 2 deletions
diff --git a/mm/slab.c b/mm/slab.c
index d47d0e186973..3ad2f64998fd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2206,8 +2206,17 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 
-	if (flags & CFLGS_OFF_SLAB)
+	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+		/*
+		 * This is a possibility for one of the malloc_sizes caches.
+		 * But since we go off slab only for object size greater than
+		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
+		 * this should not happen at all.
+		 * But leave a BUG_ON for some lucky dude.
+		 */
+		BUG_ON(!cachep->slabp_cache);
+	}
 	cachep->ctor = ctor;
 	cachep->dtor = dtor;
 	cachep->name = name;
@@ -2441,7 +2450,17 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-/* Get the memory for a slab management obj. */
+/*
+ * Get the memory for a slab management obj.
+ * For a slab cache when the slab descriptor is off-slab, slab descriptors
+ * always come from malloc_sizes caches.  The slab descriptor cannot
+ * come from the same cache which is getting created because,
+ * when we are searching for an appropriate cache for these
+ * descriptors in kmem_cache_create, we search through the malloc_sizes array.
+ * If we are creating a malloc_sizes cache here it would not be visible to
+ * kmem_find_general_cachep till the initialization is complete.
+ * Hence we cannot have slabp_cache same as the original cache.
+ */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags,
 				   int nodeid)
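For reference, the function this new comment sits above is where the off-slab descriptor actually gets allocated from slabp_cache. A rough, from-memory sketch of its body in the slab.c of this era (simplified; not part of this patch):

static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
				   int colour_off, gfp_t local_flags,
				   int nodeid)
{
	struct slab *slabp;

	if (OFF_SLAB(cachep)) {
		/* Descriptor comes from a separate (malloc_sizes) cache. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		if (!slabp)
			return NULL;
	} else {
		/* Descriptor is carved out of the slab's own pages. */
		slabp = objp + colour_off;
		colour_off += cachep->slab_size;
	}
	slabp->inuse = 0;
	slabp->colouroff = colour_off;
	slabp->s_mem = objp + colour_off;
	return slabp;
}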
@@ -3125,6 +3144,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
+				/* No need to drop any previously held
+				 * lock here, even if we have a off-slab slab
+				 * descriptor it is guaranteed to come from
+				 * a different cache, refer to comments before
+				 * alloc_slabmgmt.
+				 */
 				slab_destroy(cachep, slabp);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
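The locking note above matters because slab_destroy() is reached from free_block() with l3->list_lock held, and for an off-slab cache it frees the descriptor back into slabp_cache. A simplified, from-memory sketch of that path (SLAB_DESTROY_BY_RCU branch omitted; not part of this patch):

static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_objs(cachep, slabp);
	kmem_freepages(cachep, addr);
	if (OFF_SLAB(cachep))
		/*
		 * Freed into cachep->slabp_cache, never into cachep itself,
		 * so the caller can keep holding cachep's l3->list_lock.
		 */
		kmem_cache_free(cachep->slabp_cache, slabp);
}

If slabp_cache could ever be cachep itself, this kmem_cache_free() could recurse into the very cache whose list_lock free_block() is still holding, which is exactly what the comments here and before alloc_slabmgmt rule out.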