author	Manfred Spraul <manfred@colorfullife.com>	2006-02-01 06:05:42 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-01 11:53:17 -0500
commit	3dafccf22751429e69b6266636cf3acf45b48075
tree	c65a0217e117155bbfca2bc4c7ad488a42018cc7 /mm/slab.c
parent	e965f9630c651fa4249039fd4b80c9392d07a856
[PATCH] slab: distinguish between object and buffer size
An object cache has two different object lengths:

  - the amount of memory available for the user (object size)
  - the amount of memory allocated internally (buffer size)

This patch does some renames to make the code reflect that better.

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
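For orientation, here is a minimal userspace sketch of the relationship the rename captures. The struct and helper below are illustrative stand-ins, not the kernel's kmem_cache_t; the red-zone arithmetic mirrors what the kmem_cache_create() hunk in this patch does under DEBUG (one word of padding before the object, one red-zone word after):

```c
#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD sizeof(void *)

/* Illustrative stand-in for the two lengths the patch names:
 * obj_size is what the caller asked for; buffer_size is what the
 * allocator actually carves out of the slab, which grows when
 * debugging adds red-zone words around the user object. */
struct cache_sketch {
	size_t obj_size;	/* memory available to the user */
	size_t buffer_size;	/* memory allocated internally */
	size_t obj_offset;	/* where the user object starts */
};

static void cache_sketch_init(struct cache_sketch *c, size_t size,
			      int red_zone)
{
	c->obj_size = size;
	c->obj_offset = red_zone ? BYTES_PER_WORD : 0;
	/* one red-zone word before and one after the user object */
	c->buffer_size = size + (red_zone ? 2 * BYTES_PER_WORD : 0);
}

int main(void)
{
	struct cache_sketch c;

	cache_sketch_init(&c, 100, 1);
	printf("obj_size=%zu buffer_size=%zu obj_offset=%zu\n",
	       c.obj_size, c.buffer_size, c.obj_offset);
	return 0;
}
```

Without DEBUG the two lengths coincide, which is why the old single objsize field was ambiguous.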
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	154
1 file changed, 80 insertions(+), 74 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 88082ae15736..1a014aaf4491 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -375,7 +375,7 @@ struct kmem_cache {
 	unsigned int batchcount;
 	unsigned int limit;
 	unsigned int shared;
-	unsigned int objsize;
+	unsigned int buffer_size;
 /* 2) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 	unsigned int flags;		/* constant flags */
@@ -423,8 +423,14 @@ struct kmem_cache {
 	atomic_t freemiss;
 #endif
 #if DEBUG
-	int dbghead;
-	int reallen;
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
 #endif
 };
 
@@ -495,50 +501,50 @@ struct kmem_cache {
 
 /* memory layout of objects:
  * 0		: objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
  * 		the end of an object is aligned with the end of the real
  * 		allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  * 		redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(kmem_cache_t *cachep)
 {
-	return cachep->dbghead;
+	return cachep->obj_offset;
 }
 
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(kmem_cache_t *cachep)
 {
-	return cachep->reallen;
+	return cachep->obj_size;
 }
 
 static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
 
 static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long *)(objp + cachep->objsize -
+		return (unsigned long *)(objp + cachep->buffer_size -
 					 2 * BYTES_PER_WORD);
-	return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
 #else
 
-#define obj_dbghead(x)			0
-#define obj_reallen(cachep)		(cachep->objsize)
+#define obj_offset(x)			0
+#define obj_size(cachep)		(cachep->buffer_size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
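As a cross-check of the layout comment in the hunk above, here is a small userspace sketch of the same address arithmetic. The struct is an illustrative stand-in for the fields the debug helpers read, and the offsets are byte positions within one buffer_size-sized slot:

```c
#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD sizeof(unsigned long)

/* Stand-in for the cache fields the debug helpers consult. */
struct layout_sketch {
	size_t buffer_size;	/* total per-object slot size */
	size_t obj_offset;	/* start of the user object */
	int store_user;		/* analogue of SLAB_STORE_USER */
};

/* First red-zone word sits in the word just before the user object. */
static size_t redzone1_off(const struct layout_sketch *l)
{
	return l->obj_offset - BYTES_PER_WORD;
}

/* Tail red-zone word; it moves up one word when the last-caller
 * address is also stored at the very end of the slot. */
static size_t redzone2_off(const struct layout_sketch *l)
{
	return l->store_user ? l->buffer_size - 2 * BYTES_PER_WORD
			     : l->buffer_size - BYTES_PER_WORD;
}

int main(void)
{
	struct layout_sketch l = {
		.buffer_size = 64, .obj_offset = 8, .store_user = 1,
	};

	printf("redzone1 at byte %zu, redzone2 at byte %zu, "
	       "caller word at byte %zu\n",
	       redzone1_off(&l), redzone2_off(&l),
	       l.buffer_size - BYTES_PER_WORD);
	return 0;
}
```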
@@ -623,12 +629,12 @@ static kmem_cache_t cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.objsize = sizeof(kmem_cache_t),
+	.buffer_size = sizeof(kmem_cache_t),
 	.flags = SLAB_NO_REAP,
 	.spinlock = SPIN_LOCK_UNLOCKED,
 	.name = "kmem_cache",
 #if DEBUG
-	.reallen = sizeof(kmem_cache_t),
+	.obj_size = sizeof(kmem_cache_t),
 #endif
 };
 
@@ -1057,9 +1063,9 @@ void __init kmem_cache_init(void)
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
 
-	cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-	cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
+	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
 		       &left_over, &cache_cache.num);
 	if (!cache_cache.num)
 		BUG();
@@ -1274,9 +1280,9 @@ static void kmem_rcu_free(struct rcu_head *head)
 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
-	int size = obj_reallen(cachep);
+	int size = obj_size(cachep);
 
-	addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
+	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
 	if (size < 5 * sizeof(unsigned long))
 		return;
@@ -1306,8 +1312,8 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 
 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
 {
-	int size = obj_reallen(cachep);
-	addr = &((char *)addr)[obj_dbghead(cachep)];
+	int size = obj_size(cachep);
+	addr = &((char *)addr)[obj_offset(cachep)];
 
 	memset(addr, val, size);
 	*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1344,8 +1350,8 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 		       (unsigned long)*dbg_userword(cachep, objp));
 		printk("\n");
 	}
-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);
 	for (i = 0; i < size && lines; i += 16, lines--) {
 		int limit;
 		limit = 16;
@@ -1361,8 +1367,8 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 	int size, i;
 	int lines = 0;
 
-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);
 
 	for (i = 0; i < size; i++) {
 		char exp = POISON_FREE;
@@ -1398,17 +1404,17 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		struct slab *slabp = page_get_slab(virt_to_page(objp));
 		int objnr;
 
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		if (objnr) {
-			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
-			objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
@@ -1428,14 +1434,14 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 #if DEBUG
 	int i;
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if ((cachep->objsize % PAGE_SIZE) == 0
+			if ((cachep->buffer_size % PAGE_SIZE) == 0
 			    && OFF_SLAB(cachep))
 				kernel_map_pages(virt_to_page(objp),
-						 cachep->objsize / PAGE_SIZE,
+						 cachep->buffer_size / PAGE_SIZE,
 						 1);
 			else
 				check_poison_obj(cachep, objp);
@@ -1452,13 +1458,13 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
1452 "was overwritten"); 1458 "was overwritten");
1453 } 1459 }
1454 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1460 if (cachep->dtor && !(cachep->flags & SLAB_POISON))
1455 (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0); 1461 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1456 } 1462 }
1457#else 1463#else
1458 if (cachep->dtor) { 1464 if (cachep->dtor) {
1459 int i; 1465 int i;
1460 for (i = 0; i < cachep->num; i++) { 1466 for (i = 0; i < cachep->num; i++) {
1461 void *objp = slabp->s_mem + cachep->objsize * i; 1467 void *objp = slabp->s_mem + cachep->buffer_size * i;
1462 (cachep->dtor) (objp, cachep, 0); 1468 (cachep->dtor) (objp, cachep, 0);
1463 } 1469 }
1464 } 1470 }
@@ -1478,7 +1484,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 	}
 }
 
-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
 static inline void set_up_list3s(kmem_cache_t *cachep, int index)
 {
@@ -1611,7 +1617,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		set_fs(old_fs);
 		if (res) {
 			printk("SLAB: cache with size %d has lost its name\n",
-			       pc->objsize);
+			       pc->buffer_size);
 			continue;
 		}
 
@@ -1702,14 +1708,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	memset(cachep, 0, sizeof(kmem_cache_t));
 
 #if DEBUG
-	cachep->reallen = size;
+	cachep->obj_size = size;
 
 	if (flags & SLAB_RED_ZONE) {
 		/* redzoning only works with word aligned caches */
 		align = BYTES_PER_WORD;
 
 		/* add space for red zone words */
-		cachep->dbghead += BYTES_PER_WORD;
+		cachep->obj_offset += BYTES_PER_WORD;
 		size += 2 * BYTES_PER_WORD;
 	}
 	if (flags & SLAB_STORE_USER) {
@@ -1722,8 +1728,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
-		cachep->dbghead += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - size;
 		size = PAGE_SIZE;
 	}
 #endif
@@ -1786,7 +1792,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	spin_lock_init(&cachep->spinlock);
-	cachep->objsize = size;
+	cachep->buffer_size = size;
 
 	if (flags & CFLGS_OFF_SLAB)
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -2118,7 +2124,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	int i;
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2136,7 +2142,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 		 * Otherwise, deadlock. They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_dbghead(cachep), cachep,
+			cachep->ctor(objp + obj_offset(cachep), cachep,
 				     ctor_flags);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
@@ -2147,10 +2153,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
 		    && cachep->flags & SLAB_POISON)
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp, cachep, ctor_flags);
@@ -2309,7 +2315,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	unsigned int objnr;
 	struct slab *slabp;
 
-	objp -= obj_dbghead(cachep);
+	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
@@ -2341,31 +2347,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;
 
-	objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
+	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 
 	BUG_ON(objnr >= cachep->num);
-	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
+	BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
 
 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
 		/* Need to call the slab's constructor so the
 		 * caller can perform a verify of its state (debugging).
 		 * Called without the cache-lock held.
 		 */
-		cachep->ctor(objp + obj_dbghead(cachep),
+		cachep->ctor(objp + obj_offset(cachep),
 			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
 	}
 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
 		/* we want to cache poison the object,
 		 * call the destruction callback
 		 */
-		cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
+		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, (unsigned long)caller);
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 		} else {
 			poison_obj(cachep, objp, POISON_FREE);
 		}
@@ -2468,7 +2474,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 
 			/* get obj pointer */
 			ac->entry[ac->avail++] = slabp->s_mem +
-				slabp->free * cachep->objsize;
+				slabp->free * cachep->buffer_size;
 
 			slabp->inuse++;
 			next = slab_bufctl(slabp)[slabp->free];
@@ -2526,9 +2532,9 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 1);
+					 cachep->buffer_size / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -2553,7 +2559,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-	objp += obj_dbghead(cachep);
+	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 
@@ -2648,7 +2654,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	BUG_ON(slabp->inuse == cachep->num);
 
 	/* get obj pointer */
-	obj = slabp->s_mem + slabp->free * cachep->objsize;
+	obj = slabp->s_mem + slabp->free * cachep->buffer_size;
 	slabp->inuse++;
 	next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
@@ -2699,7 +2705,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 
@@ -2881,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
 	unsigned long align_mask = BYTES_PER_WORD - 1;
-	unsigned long size = cachep->objsize;
+	unsigned long size = cachep->buffer_size;
 	struct page *page;
 
 	if (unlikely(addr < min_addr))
@@ -3083,7 +3089,7 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = page_get_cache(virt_to_page(objp));
-	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
@@ -3114,7 +3120,7 @@ EXPORT_SYMBOL(free_percpu);
 
 unsigned int kmem_cache_size(kmem_cache_t *cachep)
 {
-	return obj_reallen(cachep);
+	return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -3258,13 +3264,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	 * The numbers are guessed, we should auto-tune as described by
 	 * Bonwick.
 	 */
-	if (cachep->objsize > 131072)
+	if (cachep->buffer_size > 131072)
 		limit = 1;
-	else if (cachep->objsize > PAGE_SIZE)
+	else if (cachep->buffer_size > PAGE_SIZE)
 		limit = 8;
-	else if (cachep->objsize > 1024)
+	else if (cachep->buffer_size > 1024)
 		limit = 24;
-	else if (cachep->objsize > 256)
+	else if (cachep->buffer_size > 256)
 		limit = 54;
 	else
 		limit = 120;
@@ -3279,7 +3285,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	 */
 	shared = 0;
 #ifdef CONFIG_SMP
-	if (cachep->objsize <= PAGE_SIZE)
+	if (cachep->buffer_size <= PAGE_SIZE)
 		shared = 8;
 #endif
 
@@ -3528,7 +3534,7 @@ static int s_show(struct seq_file *m, void *p)
 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->objsize,
+		   name, active_objs, num_objs, cachep->buffer_size,
 		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u",
 		   cachep->limit, cachep->batchcount, cachep->shared);
@@ -3656,5 +3662,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_reallen(page_get_cache(virt_to_page(objp)));
+	return obj_size(page_get_cache(virt_to_page(objp)));
 }