Diffstat (limited to 'mm')
 mm/slab.c | 34 +++++++++++++++++++++++-----------
 1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 5988adf010c5..3d18b711ab82 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -609,6 +609,18 @@ static inline struct slab *virt_to_slab(const void *obj)
 	return page_get_slab(page);
 }
 
+static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+				 unsigned int idx)
+{
+	return slab->s_mem + cache->buffer_size * idx;
+}
+
+static inline unsigned int obj_to_index(struct kmem_cache *cache,
+					struct slab *slab, void *obj)
+{
+	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1568,18 +1580,18 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 		 * exist:
 		 */
 		struct slab *slabp = virt_to_slab(objp);
-		int objnr;
+		unsigned int objnr;
 
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		objnr = obj_to_index(cachep, slabp, objp);
 		if (objnr) {
-			objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+			objp = index_to_obj(cachep, slabp, objnr - 1);
 			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
-			objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+			objp = index_to_obj(cachep, slabp, objnr + 1);
 			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
 			       realobj, size);
@@ -1598,7 +1610,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 	int i;
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->buffer_size * i;
+		void *objp = index_to_obj(cachep, slabp, i);
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -1631,7 +1643,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 	if (cachep->dtor) {
 		int i;
 		for (i = 0; i < cachep->num; i++) {
-			void *objp = slabp->s_mem + cachep->buffer_size * i;
+			void *objp = index_to_obj(cachep, slabp, i);
 			(cachep->dtor) (objp, cachep, 0);
 		}
 	}
@@ -2307,7 +2319,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 	int i;
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->buffer_size * i;
+		void *objp = index_to_obj(cachep, slabp, i);
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2363,7 +2375,7 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
 {
-	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+	void *objp = index_to_obj(cachep, slabp, slabp->free);
 	kmem_bufctl_t next;
 
 	slabp->inuse++;
@@ -2380,7 +2392,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nod
 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
 			 int nodeid)
 {
-	unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+	unsigned int objnr = obj_to_index(cachep, slabp, objp);
 
 #if DEBUG
 	/* Verify that the slab belongs to the intended node */
@@ -2565,10 +2577,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;
 
-	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+	objnr = obj_to_index(cachep, slabp, objp);
 
 	BUG_ON(objnr >= cachep->num);
-	BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
+	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
 		/* Need to call the slab's constructor so the
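
For reference, a minimal userspace sketch of what the two new helpers compute. It is not part of the patch: struct kmem_cache and struct slab below are simplified stand-ins modeling only the two fields the helpers touch, and s_mem is a char * here (the kernel version does arithmetic on void *, a GCC extension). Objects sit at a fixed stride of buffer_size from s_mem, so the two functions are exact inverses over valid indices.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins, not the kernel's definitions. */
struct kmem_cache { size_t buffer_size; };	/* object stride within a slab */
struct slab { char *s_mem; };			/* start of the slab's object area */

/* Object i lives at s_mem + i * buffer_size ... */
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

/* ... so dividing the byte offset by the stride recovers the index. */
static inline unsigned int obj_to_index(struct kmem_cache *cache,
					struct slab *slab, void *obj)
{
	return (unsigned)((char *)obj - slab->s_mem) / cache->buffer_size;
}

int main(void)
{
	char backing[4 * 64];			/* pretend slab holding 4 objects */
	struct kmem_cache cache = { .buffer_size = 64 };
	struct slab slab = { .s_mem = backing };
	unsigned int i;

	/* Round trip: the helpers invert each other for every valid index. */
	for (i = 0; i < 4; i++)
		assert(obj_to_index(&cache, &slab,
				    index_to_obj(&cache, &slab, i)) == i);
	return 0;
}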