author		Pekka Enberg <penberg@cs.helsinki.fi>	2006-02-01 06:05:50 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-01 11:53:18 -0500
commit		343e0d7a93951e35065fdb5e3dd61aece0ec6b3c (patch)
tree		a9802aac4041b894a80ab6616b532a2fd0b468e6
parent		9a2dba4b4912b493070cbc170629fdbf440b01d7 (diff)
[PATCH] slab: replace kmem_cache_t with struct kmem_cache
Replace uses of kmem_cache_t with proper struct kmem_cache in mm/slab.c.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/slab.c	195
1 files changed, 98 insertions, 97 deletions
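Before the diff itself, a minimal sketch (not part of the patch) of what the typedef removal amounts to for callers. The struct members and the exact historic typedef spelling below are illustrative stand-ins, not the real mm/slab.c definitions:

	#include <stddef.h>

	struct kmem_cache {
		size_t buffer_size;	/* stand-in members only */
		const char *name;
	};

	/* the old alias this patch stops using inside mm/slab.c: */
	typedef struct kmem_cache kmem_cache_t;

	/* after the patch, internal helpers spell the type out directly */
	static size_t cache_obj_size(struct kmem_cache *cachep)
	{
		return cachep->buffer_size;
	}

	int main(void)
	{
		struct kmem_cache c = { .buffer_size = 128, .name = "example" };
		kmem_cache_t *old_style = &c;	/* the typedef is only an alias... */

		/* ...so both spellings interoperate freely */
		return cache_obj_size(old_style) == 128 ? 0 : 1;
	}

Because the typedef was a pure alias for the struct, the diff below is a mechanical spelling change with no functional effect.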
diff --git a/mm/slab.c b/mm/slab.c
index b19093864998..6fbd6a1cdeb4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -55,7 +55,7 @@
  *
  * SMP synchronization:
  * constructors and destructors are called without any locking.
- * Several members in kmem_cache_t and struct slab never change, they
+ * Several members in struct kmem_cache and struct slab never change, they
  * are accessed without any locking.
  * The per-cpu arrays are never accessed from the wrong cpu, no locking,
  * and local interrupts are disabled so slab code is preempt-safe.
@@ -244,7 +244,7 @@ struct slab {
  */
 struct slab_rcu {
 	struct rcu_head head;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	void *addr;
 };
 
@@ -363,7 +363,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	} while (0)
 
 /*
- * kmem_cache_t
+ * struct kmem_cache
  *
  * manages a cache.
  */
@@ -391,15 +391,15 @@ struct kmem_cache {
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
 	unsigned int colour_next;	/* cache colouring */
-	kmem_cache_t *slabp_cache;
+	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
-	void (*ctor) (void *, kmem_cache_t *, unsigned long);
+	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
 	/* de-constructor func */
-	void (*dtor) (void *, kmem_cache_t *, unsigned long);
+	void (*dtor) (void *, struct kmem_cache *, unsigned long);
 
 /* 4) cache creation/removal */
 	const char *name;
@@ -509,23 +509,23 @@ struct kmem_cache {
  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_offset(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
 {
 	return cachep->obj_offset;
 }
 
-static int obj_size(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
 {
 	return cachep->obj_size;
 }
 
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
 
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
@@ -534,7 +534,7 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
@@ -636,16 +636,16 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.buffer_size = sizeof(kmem_cache_t),
+	.buffer_size = sizeof(struct kmem_cache),
 	.flags = SLAB_NO_REAP,
 	.spinlock = SPIN_LOCK_UNLOCKED,
 	.name = "kmem_cache",
 #if DEBUG
-	.obj_size = sizeof(kmem_cache_t),
+	.obj_size = sizeof(struct kmem_cache),
 #endif
 };
 
@@ -674,17 +674,17 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static int __node_shrink(struct kmem_cache *cachep, int node);
 
-static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -708,7 +708,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
@@ -781,7 +781,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
 
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
 {
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
@@ -828,7 +828,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 #ifdef CONFIG_NUMA
-static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -870,7 +870,7 @@ static void free_alien_cache(struct array_cache **ac_ptr)
 	kfree(ac_ptr);
 }
 
-static void __drain_alien_cache(kmem_cache_t *cachep,
+static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -883,7 +883,7 @@ static void __drain_alien_cache(kmem_cache_t *cachep,
 	}
 }
 
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
 	int i = 0;
 	struct array_cache *ac;
@@ -908,7 +908,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	int memsize = sizeof(struct kmem_list3);
@@ -1046,7 +1046,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
 {
 	struct kmem_list3 *ptr;
 
@@ -1086,14 +1086,14 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+	 * 1) initialize the cache_cache cache: it contains the struct kmem_cache
 	 * structures of all caches, except cache_cache itself: cache_cache
 	 * is statically allocated.
 	 * Initially an __init data area is used for the head array and the
 	 * kmem_list3 structures, it's replaced with a kmalloc allocated
 	 * array at the end of the bootstrap.
 	 * 2) Create the first kmalloc cache.
-	 * The kmem_cache_t for the new cache is allocated normally.
+	 * The struct kmem_cache for the new cache is allocated normally.
 	 * An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 * head arrays.
@@ -1224,7 +1224,7 @@ void __init kmem_cache_init(void)
 
 	/* 6) resize the head arrays to their final sizes */
 	{
-		kmem_cache_t *cachep;
+		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 			enable_cpucache(cachep);
@@ -1267,7 +1267,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -1293,7 +1293,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 {
 	unsigned long i = (1 << cachep->gfporder);
 	struct page *page = virt_to_page(addr);
@@ -1315,7 +1315,7 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 static void kmem_rcu_free(struct rcu_head *head)
 {
 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-	kmem_cache_t *cachep = slab_rcu->cachep;
+	struct kmem_cache *cachep = slab_rcu->cachep;
 
 	kmem_freepages(cachep, slab_rcu->addr);
 	if (OFF_SLAB(cachep))
@@ -1325,7 +1325,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
 	int size = obj_size(cachep);
@@ -1358,7 +1358,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 }
 #endif
 
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
 	int size = obj_size(cachep);
 	addr = &((char *)addr)[obj_offset(cachep)];
@@ -1380,7 +1380,7 @@ static void dump_line(char *data, int offset, int limit)
 
 #if DEBUG
 
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 {
 	int i, size;
 	char *realobj;
@@ -1409,7 +1409,7 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 	}
 }
 
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 {
 	char *realobj;
 	int size, i;
@@ -1476,7 +1476,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
  * slab_destroy_objs - call the registered destructor for each object in
  * a slab that is to be destroyed.
  */
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 	int i;
 	for (i = 0; i < cachep->num; i++) {
@@ -1508,7 +1508,7 @@ static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
 	}
 }
 #else
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 	if (cachep->dtor) {
 		int i;
@@ -1525,7 +1525,7 @@ static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
  * Before calling the slab must have been unlinked from the cache.
  * The cache-lock is not held/needed.
  */
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
 	void *addr = slabp->s_mem - slabp->colouroff;
 
@@ -1546,7 +1546,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 
 /* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
 {
 	int node;
 
@@ -1566,7 +1566,7 @@ static void set_up_list3s(kmem_cache_t *cachep, int index)
  * high order pages for slabs. When the gfp() functions are more friendly
  * towards high-order requests, this should be changed.
  */
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size,
 			size_t align, gfp_t flags)
 {
 	size_t left_over = 0;
@@ -1638,13 +1638,13 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-kmem_cache_t *
+struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-	void (*dtor)(void*, kmem_cache_t *, unsigned long))
+	unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+	void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
-	kmem_cache_t *cachep = NULL;
+	struct kmem_cache *cachep = NULL;
 	struct list_head *p;
 
 	/*
@@ -1662,7 +1662,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	mutex_lock(&cache_chain_mutex);
 
 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
 		mm_segment_t old_fs = get_fs();
 		char tmp;
 		int res;
@@ -1762,10 +1762,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	align = ralign;
 
 	/* Get cache's description obj. */
-	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
 		goto oops;
-	memset(cachep, 0, sizeof(kmem_cache_t));
+	memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
 	cachep->obj_size = size;
@@ -1941,7 +1941,7 @@ static void check_irq_on(void)
 	BUG_ON(irqs_disabled());
 }
 
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1949,7 +1949,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
 #endif
 }
 
-static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1982,12 +1982,12 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 	preempt_enable();
 }
 
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 				int force, int node);
 
 static void do_drain(void *arg)
 {
-	kmem_cache_t *cachep = (kmem_cache_t *) arg;
+	struct kmem_cache *cachep = (struct kmem_cache *) arg;
 	struct array_cache *ac;
 	int node = numa_node_id();
 
@@ -1999,7 +1999,7 @@ static void do_drain(void *arg)
 	ac->avail = 0;
 }
 
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
 {
 	struct kmem_list3 *l3;
 	int node;
@@ -2020,7 +2020,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
 	spin_unlock_irq(&cachep->spinlock);
 }
 
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
 {
 	struct slab *slabp;
 	struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -2049,7 +2049,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
 	return ret;
 }
 
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
 	struct kmem_list3 *l3;
@@ -2075,7 +2075,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
  * Releases as many slabs as possible for a cache.
  * To help debugging, a zero exit status indicates all slabs were released.
  */
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	if (!cachep || in_interrupt())
 		BUG();
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
  * Returns 0 on success.
  *
  * It is expected this function will be called by a module when it is
@@ -2101,7 +2101,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2152,7 +2152,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
@@ -2178,7 +2178,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 	return (kmem_bufctl_t *) (slabp + 1);
 }
 
-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
 			struct slab *slabp, unsigned long ctor_flags)
 {
 	int i;
@@ -2227,7 +2227,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2238,7 +2238,7 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 	}
 }
 
-static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
 {
 	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
 	kmem_bufctl_t next;
@@ -2254,7 +2254,7 @@ static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
 	return objp;
 }
 
-static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
 			 int nodeid)
 {
 	unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
@@ -2274,7 +2274,7 @@ static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
 	slabp->inuse--;
 }
 
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
 {
 	int i;
 	struct page *page;
@@ -2293,7 +2293,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2404,7 +2404,7 @@ static void kfree_debugcheck(const void *objp)
 	}
 }
 
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 				   void *caller)
 {
 	struct page *page;
@@ -2478,7 +2478,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	return objp;
 }
 
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 {
 	kmem_bufctl_t i;
 	int entries = 0;
@@ -2511,7 +2511,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2602,7 +2602,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2611,7 +2611,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }
 
 #if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
 					void *objp, void *caller)
 {
 	if (!objp)
@@ -2660,7 +2660,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
@@ -2687,7 +2687,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 	return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -2707,7 +2707,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
@@ -2769,7 +2769,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		       int node)
 {
 	int i;
@@ -2807,7 +2807,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 	}
 }
 
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2866,7 +2866,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
  *
  * Called with disabled ints.
  */
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -2925,7 +2925,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
@@ -2945,7 +2945,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  *
  * Currently only used for dentry validation.
  */
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
@@ -2986,7 +2986,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3010,7 +3010,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
@@ -3043,7 +3043,7 @@ EXPORT_SYMBOL(kmalloc_node);
  */
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3114,7 +3114,7 @@ EXPORT_SYMBOL(__alloc_percpu);
  * Free an object which was previously allocated from this
  * cache.
  */
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
 
@@ -3135,7 +3135,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  */
 void kfree(const void *objp)
 {
-	kmem_cache_t *c;
+	struct kmem_cache *c;
 	unsigned long flags;
 
 	if (unlikely(!objp))
@@ -3172,13 +3172,13 @@ void free_percpu(const void *objp)
 EXPORT_SYMBOL(free_percpu);
 #endif
 
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
 	return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
 {
 	return cachep->name;
 }
@@ -3187,7 +3187,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 for all nodes.
  */
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3243,7 +3243,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 }
 
 struct ccupdate_struct {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct array_cache *new[NR_CPUS];
 };
 
@@ -3259,7 +3259,7 @@ static void do_ccupdate_local(void *info)
 	new->new[smp_processor_id()] = old;
 }
 
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
 			    int shared)
 {
 	struct ccupdate_struct new;
@@ -3305,7 +3305,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 	return 0;
 }
 
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
 {
 	int err;
 	int limit, shared;
@@ -3357,7 +3357,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	       cachep->name, -err);
 }
 
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 				int force, int node)
 {
 	int tofree;
@@ -3402,12 +3402,12 @@ static void cache_reap(void *unused)
 	}
 
 	list_for_each(walk, &cache_chain) {
-		kmem_cache_t *searchp;
+		struct kmem_cache *searchp;
 		struct list_head *p;
 		int tofree;
 		struct slab *slabp;
 
-		searchp = list_entry(walk, kmem_cache_t, next);
+		searchp = list_entry(walk, struct kmem_cache, next);
 
 		if (searchp->flags & SLAB_NO_REAP)
 			goto next;
@@ -3510,15 +3510,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		if (p == &cache_chain)
 			return NULL;
 	}
-	return list_entry(p, kmem_cache_t, next);
+	return list_entry(p, struct kmem_cache, next);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	++*pos;
 	return cachep->next.next == &cache_chain ? NULL
-	    : list_entry(cachep->next.next, kmem_cache_t, next);
+	    : list_entry(cachep->next.next, struct kmem_cache, next);
 }
 
 static void s_stop(struct seq_file *m, void *p)
@@ -3528,7 +3528,7 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	struct list_head *q;
 	struct slab *slabp;
 	unsigned long active_objs;
@@ -3678,7 +3678,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+						       next);
 
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 ||
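A closing note on the list_entry() conversions scattered through the diff (in kmem_cache_create(), cache_reap(), the /proc/slabinfo iterators and slabinfo_write()): list_entry() only needs a type name for its container_of()/offsetof() arithmetic, so the struct tag and the old typedef were interchangeable there. A small self-contained sketch with simplified stand-in definitions, not the kernel's own:

	#include <stddef.h>
	#include <stdio.h>

	/* simplified container_of()/list_entry(), no typeof() checking */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_entry(ptr, type, member) container_of(ptr, type, member)

	struct list_head { struct list_head *next, *prev; };

	struct kmem_cache {		/* stand-in with only the members used here */
		const char *name;
		struct list_head next;
	};

	int main(void)
	{
		struct kmem_cache c = { .name = "demo" };
		struct list_head *walk = &c.next;

		/* both spellings name the same type, so both recover &c */
		typedef struct kmem_cache kmem_cache_t;
		struct kmem_cache *a = list_entry(walk, struct kmem_cache, next);
		kmem_cache_t *b = list_entry(walk, kmem_cache_t, next);

		printf("%s %s\n", a->name, b->name);
		return a == b ? 0 : 1;
	}

Beyond the spelling, one practical upside of using the struct tag is that other headers can forward-declare struct kmem_cache without needing the typedef (and hence slab.h) to be visible.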