author    Benjamin LaHaise <bcrl@linux.intel.com>    2006-02-01 06:05:30 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>     2006-02-01 11:53:15 -0500
commit    9884fd8df195fe48d4e1be2279b419be96127cae (patch)
tree      1438571d206c1d488efb8454c64ae00c51438a2e /mm/slab.c
parent    c84db23c6e587d3ab00a41c51fedf758e1f6ecd4 (diff)
[PATCH] Use 32 bit division in slab_put_obj()
Improve the performance of slab_put_obj(). Without the cast, the pointer
subtraction yields a ptrdiff_t, a 64 bit signed integer on EM64T, and gcc
ends up emitting a full signed 128 bit divide, which is substantially
slower than a 32 bit unsigned divide. I noticed this when looking at the
profile of a case where the slab balance is just on edge and thrashes
back and forth freeing a block.

Signed-off-by: Benjamin LaHaise <benjamin.c.lahaise@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
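To make the codegen point concrete, here is a minimal standalone sketch
(not kernel code; the helper names and signatures are invented for
illustration), assuming an LP64 EM64T target where ptrdiff_t is a 64 bit
signed type:

#include <stddef.h>

/* Uncast: ptrdiff_t / unsigned promotes to a 64 bit signed division,
 * which on EM64T is an idiv with a sign-extended 128 bit dividend. */
static unsigned obj_index_slow(const char *objp, const char *s_mem,
			       unsigned objsize)
{
	return (objp - s_mem) / objsize;
}

/* Cast first: both operands are 32 bit unsigned, so the compiler can
 * emit a plain 32 bit unsigned divide instead. */
static unsigned obj_index_fast(const char *objp, const char *s_mem,
			       unsigned objsize)
{
	return (unsigned)(objp - s_mem) / objsize;
}

The cast is safe in the slab code because an object's byte offset within
its slab is non-negative and small, so it always fits in 32 bits.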
Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    6
1 file changed, 3 insertions, 3 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 6f8495e2185b..88082ae15736 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1398,7 +1398,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		struct slab *slabp = page_get_slab(virt_to_page(objp));
 		int objnr;
 
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 		if (objnr) {
 			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
 			realobj = (char *)objp + obj_dbghead(cachep);
@@ -2341,7 +2341,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;
 
-	objnr = (objp - slabp->s_mem) / cachep->objsize;
+	objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
@@ -2699,7 +2699,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 