 include/linux/slab.h     |  24
 include/linux/slab_def.h |  10
 include/linux/slub_def.h |   2
 mm/slab.c                | 117
 mm/slob.c                |   9
 mm/slub.c                |  80
 6 files changed, 130 insertions, 112 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67d5d94b783a..0dd2dfa7beca 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -93,6 +93,30 @@
 	(unsigned long)ZERO_SIZE_PTR)
 
 /*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put an
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB are no longer needed.
+ */
+#ifdef CONFIG_SLOB
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+#endif
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
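The comment in the new block anticipates a C11 cleanup. A minimal sketch of that idea, with an assumed KMEM_CACHE_COMMON macro that is not part of this patch: once the common fields live in one untagged struct, each allocator can embed them as a C11 anonymous member and keep accessing s->size and friends directly.

#include <linux/list.h>

/* Hypothetical follow-up sketch, not this patch: the shared fields as
 * an untagged struct that C11 lets each allocator embed anonymously. */
#define KMEM_CACHE_COMMON					\
	struct {						\
		unsigned int object_size; /* original size */	\
		unsigned int size;   /* padded/aligned size */	\
		const char *name;				\
		struct list_head list;				\
	}

struct kmem_cache {
	KMEM_CACHE_COMMON;	/* anonymous member: s->size still works */
	unsigned int limit;	/* allocator-private fields follow */
};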
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fbd1117fdfde..1d93f27d81de 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,7 +27,7 @@ struct kmem_cache {
 	unsigned int limit;
 	unsigned int shared;
 
-	unsigned int buffer_size;
+	unsigned int size;
 	u32 reciprocal_buffer_size;
 /* 2) touched by every alloc & free from the backend */
 
@@ -52,7 +52,10 @@ struct kmem_cache {
 
 /* 4) cache creation/removal */
 	const char *name;
-	struct list_head next;
+	struct list_head list;
+	int refcount;
+	int object_size;
+	int align;
 
 /* 5) statistics */
 #ifdef CONFIG_DEBUG_SLAB
@@ -73,12 +76,11 @@ struct kmem_cache {
 
 	/*
 	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
+	 * fields and/or padding to every object. size contains the total
 	 * object size including these internal fields, the following two
 	 * variables contain the offset to the user object and its size.
 	 */
 	int obj_offset;
-	int obj_size;
 #endif /* CONFIG_DEBUG_SLAB */
 
 /* 6) per-cpu/per-node data, touched during every alloc/free */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ebdcf4ba42ee..df448adb7283 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -82,7 +82,7 @@ struct kmem_cache {
 	unsigned long flags;
 	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
-	int objsize;		/* The size of an object without meta data */
+	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 	int cpu_partial;	/* Number of per cpu partial objects to keep around */
 	struct kmem_cache_order_objects oo;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -424,8 +424,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  *		redzone word.
  * cachep->obj_offset: The real object.
- * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
+ * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->size - 1* BYTES_PER_WORD: last caller address
  *					[BYTES_PER_WORD long]
  */
 static int obj_offset(struct kmem_cache *cachep)
@@ -435,7 +435,7 @@ static int obj_offset(struct kmem_cache *cachep)
 
 static int obj_size(struct kmem_cache *cachep)
 {
-	return cachep->obj_size;
+	return cachep->object_size;
 }
 
 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
@@ -449,23 +449,23 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long long *)(objp + cachep->buffer_size -
+		return (unsigned long long *)(objp + cachep->size -
 					      sizeof(unsigned long long) -
 					      REDZONE_ALIGN);
-	return (unsigned long long *) (objp + cachep->buffer_size -
+	return (unsigned long long *) (objp + cachep->size -
 				       sizeof(unsigned long long));
 }
 
 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
+	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 }
 
 #else
 
 #define obj_offset(x)			0
-#define obj_size(cachep)		(cachep->buffer_size)
+#define obj_size(cachep)		(cachep->size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
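A worked example for the accessors above, assuming a 64-bit build (so BYTES_PER_WORD and REDZONE_ALIGN are both 8) and a cache padded to cachep->size == 256 with SLAB_RED_ZONE and SLAB_STORE_USER set:

	dbg_userword(cachep, objp)  ->  objp + 256 - 8      =  objp + 248  (last caller)
	dbg_redzone2(cachep, objp)  ->  objp + 256 - 8 - 8  =  objp + 240  (trailing redzone)
	dbg_redzone1(cachep, objp)  ->  objp + obj_offset(cachep) - 8      (word before the object)

All of this debug metadata is carved out of cachep->size, which is why these helpers switch from buffer_size to size.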
@@ -475,7 +475,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
-	return cachep->buffer_size;
+	return cachep->size;
 }
 EXPORT_SYMBOL(slab_buffer_size);
 #endif
@@ -513,13 +513,13 @@ static inline struct slab *virt_to_slab(const void *obj)
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
 				 unsigned int idx)
 {
-	return slab->s_mem + cache->buffer_size * idx;
+	return slab->s_mem + cache->size * idx;
 }
 
 /*
- * We want to avoid an expensive divide : (offset / cache->buffer_size)
- * Using the fact that buffer_size is a constant for a particular cache,
- * we can replace (offset / cache->buffer_size) by
+ * We want to avoid an expensive divide : (offset / cache->size)
+ * Using the fact that size is a constant for a particular cache,
+ * we can replace (offset / cache->size) by
  * reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
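The reciprocal trick in the comment above trades the per-object divide for a multiply and a shift. A userspace sketch of the scheme, assuming the reciprocal_div helpers of this kernel era (ceil(2^32 / k) precomputed once per cache); the result is exact whenever a * k < 2^32, which comfortably holds for in-slab offsets:

#include <stdint.h>

/* Precomputed once per cache (kernel: reciprocal_value()):
 * R = ceil(2^32 / k) for the fixed divisor k = cache->size.
 * Assumes k > 1 so R fits in 32 bits. */
static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)(0xffffffffULL / k + 1);	/* == ceil(2^32 / k) */
}

/* Hot path (kernel: reciprocal_divide()): a / k without a divide. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

/* obj_to_index() then computes offset / cache->size as
 * reciprocal_divide(offset, cache->reciprocal_buffer_size). */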
@@ -565,7 +565,7 @@ static struct kmem_cache cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.buffer_size = sizeof(struct kmem_cache),
+	.size = sizeof(struct kmem_cache),
 	.name = "kmem_cache",
 };
 
@@ -1134,7 +1134,7 @@ static int init_cache_nodelists_node(int node)
 	struct kmem_list3 *l3;
 	const int memsize = sizeof(struct kmem_list3);
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		/*
 		 * Set up the size64 kmemlist for cpu before we can
 		 * begin anything. Make sure some other cpu on this
@@ -1172,7 +1172,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
@@ -1222,7 +1222,7 @@ free_array_cache:
 	 * the respective cache's slabs, now we can go ahead and
 	 * shrink each nodelist to its limit.
 	 */
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		l3 = cachep->nodelists[node];
 		if (!l3)
 			continue;
@@ -1251,7 +1251,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 	 * Now we can go ahead with allocating the shared arrays and
 	 * array caches
 	 */
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		struct array_cache *nc;
 		struct array_cache *shared = NULL;
 		struct array_cache **alien = NULL;
@@ -1383,7 +1383,7 @@ static int __meminit drain_cache_nodelists_node(int node)
 	struct kmem_cache *cachep;
 	int ret = 0;
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		struct kmem_list3 *l3;
 
 		l3 = cachep->nodelists[node];
@@ -1526,7 +1526,7 @@ void __init kmem_cache_init(void)
 
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
-	list_add(&cache_cache.next, &cache_chain);
+	list_add(&cache_cache.list, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1534,18 +1534,16 @@ void __init kmem_cache_init(void)
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-#if DEBUG
-	cache_cache.obj_size = cache_cache.buffer_size;
-#endif
-	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+	cache_cache.object_size = cache_cache.size;
+	cache_cache.size = ALIGN(cache_cache.size,
 					cache_line_size());
 	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.buffer_size);
+		reciprocal_value(cache_cache.size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.buffer_size,
+		cache_estimate(order, cache_cache.size,
 			cache_line_size(), 0, &left_over, &cache_cache.num);
 		if (cache_cache.num)
 			break;
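The sizing above uses the flexible-trailing-array idiom: struct kmem_cache ends in a per-CPU array[], and the per-node pointer table is laid out immediately behind it in the same allocation. A standalone sketch of the same computation, with illustrative names (cache_like is not the kernel's type); offsetof on a runtime index is the GCC-supported idiom the kernel itself relies on here:

#include <stddef.h>

struct cache_like {
	int tunables;
	void *array[];	/* one slot per possible CPU, sized at boot */
};

/* Bytes to allocate: the struct through array[nr_cpus], then one
 * pointer per node appended directly behind it. */
static size_t cache_like_bytes(int nr_cpus, int nr_nodes)
{
	return offsetof(struct cache_like, array[nr_cpus]) +
	       nr_nodes * sizeof(void *);
}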
@@ -1671,7 +1669,7 @@ void __init kmem_cache_init_late(void)
 
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, next)
+	list_for_each_entry(cachep, &cache_chain, list)
 		if (enable_cpucache(cachep, GFP_NOWAIT))
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
@@ -1724,7 +1722,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nodeid, gfpflags);
 	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
-		cachep->name, cachep->buffer_size, cachep->gfporder);
+		cachep->name, cachep->size, cachep->gfporder);
 
 	for_each_online_node(node) {
 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
@@ -2028,10 +2026,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
 
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if (cachep->buffer_size % PAGE_SIZE == 0 &&
+		if (cachep->size % PAGE_SIZE == 0 &&
 		    OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
-					cachep->buffer_size / PAGE_SIZE, 1);
+					cachep->size / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -2281,7 +2279,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		mutex_lock(&cache_chain_mutex);
 	}
 
-	list_for_each_entry(pc, &cache_chain, next) {
+	list_for_each_entry(pc, &cache_chain, list) {
 		char tmp;
 		int res;
 
@@ -2294,7 +2292,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		if (res) {
 			printk(KERN_ERR
 			       "SLAB: cache with size %d has lost its name\n",
-			       pc->buffer_size);
+			       pc->size);
 			continue;
 		}
 
@@ -2399,8 +2397,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		goto oops;
 
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	cachep->object_size = size;
+	cachep->align = align;
 #if DEBUG
-	cachep->obj_size = size;
 
 	/*
 	 * Both debugging options require word-alignment which is calculated
@@ -2423,7 +2422,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
 		size = PAGE_SIZE;
 	}
@@ -2492,7 +2491,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->gfpflags = 0;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
-	cachep->buffer_size = size;
+	cachep->size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
@@ -2526,7 +2525,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/* cache setup completed, link it into the list */
-	list_add(&cachep->next, &cache_chain);
+	list_add(&cachep->list, &cache_chain);
 oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
@@ -2721,10 +2720,10 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
-	list_del(&cachep->next);
+	list_del(&cachep->list);
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->next, &cache_chain);
+		list_add(&cachep->list, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
 		put_online_cpus();
 		return;
@@ -2821,10 +2820,10 @@ static void cache_init_objs(struct kmem_cache *cachep,
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
+		if ((cachep->size % PAGE_SIZE) == 0 &&
 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 0);
+					 cachep->size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp);
@@ -3058,10 +3057,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 #endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, (unsigned long)caller);
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 0);
+					 cachep->size / PAGE_SIZE, 0);
 		} else {
 			poison_obj(cachep, objp, POISON_FREE);
 		}
@@ -3211,9 +3210,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 1);
+					 cachep->size / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -3243,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		unsigned objnr;
 
 		slabp = virt_to_head_page(objp)->slab_page;
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
 #endif
@@ -3747,7 +3746,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
-			       obj_size(cachep), cachep->buffer_size, flags);
+			       obj_size(cachep), cachep->size, flags);
 
 	return ret;
 }
@@ -3775,7 +3774,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 				       __builtin_return_address(0));
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    obj_size(cachep), cachep->buffer_size,
+				    obj_size(cachep), cachep->size,
 				    flags, nodeid);
 
 	return ret;
@@ -3857,7 +3856,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	ret = __cache_alloc(cachep, flags, caller);
 
 	trace_kmalloc((unsigned long) caller, ret,
-		      size, cachep->buffer_size, flags);
+		      size, cachep->size, flags);
 
 	return ret;
 }
@@ -4011,7 +4010,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 	return 0;
 
 fail:
-	if (!cachep->next.next) {
+	if (!cachep->list.next) {
 		/* Cache is not active yet. Roll back what we did */
 		node--;
 		while (node >= 0) {
@@ -4105,13 +4104,13 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 	 * The numbers are guessed, we should auto-tune as described by
 	 * Bonwick.
 	 */
-	if (cachep->buffer_size > 131072)
+	if (cachep->size > 131072)
 		limit = 1;
-	else if (cachep->buffer_size > PAGE_SIZE)
+	else if (cachep->size > PAGE_SIZE)
 		limit = 8;
-	else if (cachep->buffer_size > 1024)
+	else if (cachep->size > 1024)
 		limit = 24;
-	else if (cachep->buffer_size > 256)
+	else if (cachep->size > 256)
 		limit = 54;
 	else
 		limit = 120;
@@ -4126,7 +4125,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 	 * to a larger limit. Thus disabled by default.
 	 */
 	shared = 0;
-	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
+	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
 		shared = 8;
 
 #if DEBUG
@@ -4196,7 +4195,7 @@ static void cache_reap(struct work_struct *w)
 		/* Give up. Setup the next iteration. */
 		goto out;
 
-	list_for_each_entry(searchp, &cache_chain, next) {
+	list_for_each_entry(searchp, &cache_chain, list) {
 		check_irq_on();
 
 		/*
@@ -4289,7 +4288,7 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
 	struct slab *slabp;
 	unsigned long active_objs;
 	unsigned long num_objs;
@@ -4345,7 +4344,7 @@ static int s_show(struct seq_file *m, void *p)
 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->buffer_size,
+		   name, active_objs, num_objs, cachep->size,
 		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u",
 		   cachep->limit, cachep->batchcount, cachep->shared);
@@ -4437,7 +4436,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 	/* Find the cache in the chain of caches. */
 	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 || batchcount < 1 ||
 					batchcount > limit || shared < 0) {
@@ -4513,7 +4512,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 	int i;
 	if (n[0] == n[1])
 		return;
-	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
 			continue;
 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -506,13 +506,6 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache {
-	unsigned int size, align;
-	unsigned long flags;
-	const char *name;
-	void (*ctor)(void *);
-};
-
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
@@ -523,7 +516,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 
 	if (c) {
 		c->name = name;
-		c->size = size;
+		c->size = c->object_size;
 		if (flags & SLAB_DESTROY_BY_RCU) {
 			/* leave room for rcu footer at the end of object */
 			c->size += sizeof(struct slob_rcu);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -311,7 +311,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	 * and whatever may come after it.
 	 */
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
+		return s->object_size;
 
 #endif
 	/*
@@ -609,11 +609,11 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->objsize,
+	print_section("Object ", p, min_t(unsigned long, s->object_size,
 				PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->objsize,
-			s->inuse - s->objsize);
+		print_section("Redzone ", p + s->object_size,
+			s->inuse - s->object_size);
 
 	if (s->offset)
 		off = s->offset + sizeof(void *);
@@ -655,12 +655,12 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 	u8 *p = object;
 
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize - 1] = POISON_END;
+		memset(p, POISON_FREE, s->object_size - 1);
+		p[s->object_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->objsize, val, s->inuse - s->objsize);
+		memset(p + s->object_size, val, s->inuse - s->object_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -705,10 +705,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
  * 	0xa5 (POISON_END)
  *
- * object + s->objsize
+ * object + s->object_size
  * 	Padding to reach word boundary. This is also used for Redzoning.
  * 	Padding is extended by another word if Redzoning is enabled and
- * 	objsize == inuse.
+ * 	object_size == inuse.
  *
  * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  * 0xcc (RED_ACTIVE) for objects in use.
@@ -727,7 +727,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object + s->size
  * 	Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
  * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
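Pulling the layout comments above together, each SLUB slot has four boundaries; a condensed sketch of the roles they describe:

	object ...................  payload, object_size bytes (poisoned 0x6b/0xa5 when free)
	object + s->object_size ..  padding / redzone word(s) up to a word boundary
	object + s->inuse ........  free pointer and tracking data, when debugging forces
	                            them out of the object proper
	object + s->size .........  start of the next object; nothing is used beyond here

The rename keeps the payload boundary (object_size) visibly distinct from the slot stride (size).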
@@ -787,25 +787,25 @@ static int check_object(struct kmem_cache *s, struct page *page,
 					void *object, u8 val)
 {
 	u8 *p = object;
-	u8 *endobject = object + s->objsize;
+	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, page, object, "Redzone",
-			endobject, val, s->inuse - s->objsize))
+			endobject, val, s->inuse - s->object_size))
 			return 0;
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->objsize);
+				endobject, POISON_INUSE, s->inuse - s->object_size);
 		}
 	}
 
 	if (s->flags & SLAB_POISON) {
 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 			(!check_bytes_and_report(s, page, p, "Poison", p,
-					POISON_FREE, s->objsize - 1) ||
+					POISON_FREE, s->object_size - 1) ||
 			 !check_bytes_and_report(s, page, p, "Poison",
-				p + s->objsize - 1, POISON_END, 1)))
+				p + s->object_size - 1, POISON_END, 1)))
 			return 0;
 		/*
 		 * check_pad_bytes cleans up on its own.
@@ -926,7 +926,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->objsize);
+			print_section("Object ", (void *)object, s->object_size);
 
 		dump_stack();
 	}
@@ -942,14 +942,14 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	lockdep_trace_alloc(flags);
 	might_sleep_if(flags & __GFP_WAIT);
 
-	return should_failslab(s->objsize, flags, s->flags);
+	return should_failslab(s->object_size, flags, s->flags);
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -966,13 +966,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 		unsigned long flags;
 
 		local_irq_save(flags);
-		kmemcheck_slab_free(s, x, s->objsize);
-		debug_check_no_locks_freed(x, s->objsize);
+		kmemcheck_slab_free(s, x, s->object_size);
+		debug_check_no_locks_freed(x, s->object_size);
 		local_irq_restore(flags);
 	}
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(x, s->objsize);
+		debug_check_no_obj_freed(x, s->object_size);
 }
 
 /*
@@ -1207,7 +1207,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1237,7 +1237,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -2098,10 +2098,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
-		"default order: %d, min order: %d\n", s->name, s->objsize,
+		"default order: %d, min order: %d\n", s->name, s->object_size,
 		s->size, oo_order(s->oo), oo_order(s->min));
 
-	if (oo_order(s->min) > get_order(s->objsize))
+	if (oo_order(s->min) > get_order(s->object_size))
 		printk(KERN_WARNING "  %s debugging increased min order, use "
 			"slub_debug=O to disable.\n", s->name);
 
@@ -2374,7 +2374,7 @@ redo:
 	}
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, s->objsize);
+		memset(object, 0, s->object_size);
 
 	slab_post_alloc_hook(s, gfpflags, object);
 
@@ -2385,7 +2385,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
 	return ret;
 }
@@ -2415,7 +2415,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    s->objsize, s->size, gfpflags, node);
+				    s->object_size, s->size, gfpflags, node);
 
 	return ret;
 }
@@ -2910,7 +2910,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->objsize;
+	unsigned long size = s->object_size;
 	unsigned long align = s->align;
 	int order;
 
@@ -2939,7 +2939,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * end of the object and the free pointer. If not then add an
 	 * additional word to have some bytes to store Redzone information.
 	 */
-	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
 		size += sizeof(void *);
 #endif
 
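A quick arithmetic illustration of the branch above, assuming a 64-bit build: for a cache whose object_size is 64 and where no debug metadata has been inserted yet (so size is still 64), there is no spare word after the payload and redzoning appends one:

	size == s->object_size == 64  ->  size += sizeof(void *)  ->  size == 72

If poisoning or a relocated free pointer had already pushed size past object_size, that existing gap doubles as redzone space and no extra word is needed.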
@@ -2987,7 +2987,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * user specified and the dynamic determination of cache line size
 	 * on bootup.
 	 */
-	align = calculate_alignment(flags, align, s->objsize);
+	align = calculate_alignment(flags, align, s->object_size);
 	s->align = align;
 
 	/*
@@ -3035,7 +3035,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->objsize = size;
+	s->object_size = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
@@ -3050,7 +3050,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 		 * Disable debugging flags that store metadata if the min slab
 		 * order increased.
 		 */
-		if (get_order(s->size) > get_order(s->objsize)) {
+		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
 			if (!calculate_sizes(s, -1))
@@ -3124,7 +3124,7 @@ error:
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
 {
-	return s->objsize;
+	return s->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -3853,11 +3853,11 @@ void __init kmem_cache_init(void)
 
 		if (s && s->size) {
 			char *name = kasprintf(GFP_NOWAIT,
-				 "dma-kmalloc-%d", s->objsize);
+				 "dma-kmalloc-%d", s->object_size);
 
 			BUG_ON(!name);
 			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-				s->objsize, SLAB_CACHE_DMA);
+				s->object_size, SLAB_CACHE_DMA);
 		}
 	}
 #endif
@@ -3951,7 +3951,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
-		s->objsize = max(s->objsize, (int)size);
+		s->object_size = max(s->object_size, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
 		if (sysfs_slab_alias(s, name)) {
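An illustration of the widening above, with hypothetical numbers: if an existing 96-byte cache is reused (aliased) for a new 104-byte request, both watermarks grow and never shrink, so kzalloc() through the alias still clears the whole requested object:

	s->object_size = max(96, 104)                  ->  104
	s->inuse       = max(s->inuse, ALIGN(104, 8))  ->  at least 104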
@@ -4634,7 +4634,7 @@ SLAB_ATTR_RO(align);
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->objsize);
+	return sprintf(buf, "%d\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
 
@@ -5438,7 +5438,7 @@ __initcall(slab_sysfs_init);
 static void print_slabinfo_header(struct seq_file *m)
 {
 	seq_puts(m, "slabinfo - version: 2.1\n");
-	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+	seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
 		"<objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");