Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 406 |
1 file changed, 151 insertions, 255 deletions
@@ -68,7 +68,7 @@ | |||
68 | * Further notes from the original documentation: | 68 | * Further notes from the original documentation: |
69 | * | 69 | * |
70 | * 11 April '97. Started multi-threading - markhe | 70 | * 11 April '97. Started multi-threading - markhe |
71 | * The global cache-chain is protected by the mutex 'cache_chain_mutex'. | 71 | * The global cache-chain is protected by the mutex 'slab_mutex'. |
72 | * The sem is only needed when accessing/extending the cache-chain, which | 72 | * The sem is only needed when accessing/extending the cache-chain, which |
73 | * can never happen inside an interrupt (kmem_cache_create(), | 73 | * can never happen inside an interrupt (kmem_cache_create(), |
74 | * kmem_cache_shrink() and kmem_cache_reap()). | 74 | * kmem_cache_shrink() and kmem_cache_reap()). |
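For context, a minimal sketch (not part of the patch) of the locking pattern the renamed comment describes, assuming the slab_mutex and slab_caches declarations that slab.c now picks up from the shared "slab.h" header included in the next hunk:

#include <linux/mutex.h>
#include <linux/list.h>
#include "slab.h"	/* shared slab_mutex and slab_caches */

static void walk_caches_example(void)
{
	struct kmem_cache *cachep;

	mutex_lock(&slab_mutex);	/* never taken from interrupt context */
	list_for_each_entry(cachep, &slab_caches, list) {
		/* inspect or extend the cache chain here */
	}
	mutex_unlock(&slab_mutex);
}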
@@ -87,6 +87,7 @@ | |||
87 | */ | 87 | */ |
88 | 88 | ||
89 | #include <linux/slab.h> | 89 | #include <linux/slab.h> |
90 | #include "slab.h" | ||
90 | #include <linux/mm.h> | 91 | #include <linux/mm.h> |
91 | #include <linux/poison.h> | 92 | #include <linux/poison.h> |
92 | #include <linux/swap.h> | 93 | #include <linux/swap.h> |
@@ -424,8 +425,8 @@ static void kmem_list3_init(struct kmem_list3 *parent) | |||
424 | * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: | 425 | * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: |
425 | * redzone word. | 426 | * redzone word. |
426 | * cachep->obj_offset: The real object. | 427 | * cachep->obj_offset: The real object. |
427 | * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] | 428 | * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] |
428 | * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address | 429 | * cachep->size - 1* BYTES_PER_WORD: last caller address |
429 | * [BYTES_PER_WORD long] | 430 | * [BYTES_PER_WORD long] |
430 | */ | 431 | */ |
431 | static int obj_offset(struct kmem_cache *cachep) | 432 | static int obj_offset(struct kmem_cache *cachep) |
@@ -433,11 +434,6 @@ static int obj_offset(struct kmem_cache *cachep) | |||
433 | return cachep->obj_offset; | 434 | return cachep->obj_offset; |
434 | } | 435 | } |
435 | 436 | ||
436 | static int obj_size(struct kmem_cache *cachep) | ||
437 | { | ||
438 | return cachep->obj_size; | ||
439 | } | ||
440 | |||
441 | static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) | 437 | static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) |
442 | { | 438 | { |
443 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); | 439 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); |
@@ -449,23 +445,22 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) | |||
449 | { | 445 | { |
450 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); | 446 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); |
451 | if (cachep->flags & SLAB_STORE_USER) | 447 | if (cachep->flags & SLAB_STORE_USER) |
452 | return (unsigned long long *)(objp + cachep->buffer_size - | 448 | return (unsigned long long *)(objp + cachep->size - |
453 | sizeof(unsigned long long) - | 449 | sizeof(unsigned long long) - |
454 | REDZONE_ALIGN); | 450 | REDZONE_ALIGN); |
455 | return (unsigned long long *) (objp + cachep->buffer_size - | 451 | return (unsigned long long *) (objp + cachep->size - |
456 | sizeof(unsigned long long)); | 452 | sizeof(unsigned long long)); |
457 | } | 453 | } |
458 | 454 | ||
459 | static void **dbg_userword(struct kmem_cache *cachep, void *objp) | 455 | static void **dbg_userword(struct kmem_cache *cachep, void *objp) |
460 | { | 456 | { |
461 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); | 457 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); |
462 | return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); | 458 | return (void **)(objp + cachep->size - BYTES_PER_WORD); |
463 | } | 459 | } |
464 | 460 | ||
465 | #else | 461 | #else |
466 | 462 | ||
467 | #define obj_offset(x) 0 | 463 | #define obj_offset(x) 0 |
468 | #define obj_size(cachep) (cachep->buffer_size) | ||
469 | #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) | 464 | #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) |
470 | #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) | 465 | #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) |
471 | #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) | 466 | #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) |
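A worked example of the debug layout described by the comment at the top of this hunk, using hypothetical numbers (64-bit build, BYTES_PER_WORD == 8, REDZONE_ALIGN == 8, a cache that ends up with cachep->size == 64 and obj_offset == 8 once SLAB_RED_ZONE and SLAB_STORE_USER are both set):

    bytes  0..7    leading redzone word     (dbg_redzone1)
    bytes  8..47   the real object          (objp + obj_offset)
    bytes 48..55   trailing redzone word    (dbg_redzone2: size - 8 - REDZONE_ALIGN)
    bytes 56..63   last caller address      (dbg_userword: size - BYTES_PER_WORD)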
@@ -475,7 +470,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |||
475 | #ifdef CONFIG_TRACING | 470 | #ifdef CONFIG_TRACING |
476 | size_t slab_buffer_size(struct kmem_cache *cachep) | 471 | size_t slab_buffer_size(struct kmem_cache *cachep) |
477 | { | 472 | { |
478 | return cachep->buffer_size; | 473 | return cachep->size; |
479 | } | 474 | } |
480 | EXPORT_SYMBOL(slab_buffer_size); | 475 | EXPORT_SYMBOL(slab_buffer_size); |
481 | #endif | 476 | #endif |
@@ -489,56 +484,37 @@ EXPORT_SYMBOL(slab_buffer_size); | |||
489 | static int slab_max_order = SLAB_MAX_ORDER_LO; | 484 | static int slab_max_order = SLAB_MAX_ORDER_LO; |
490 | static bool slab_max_order_set __initdata; | 485 | static bool slab_max_order_set __initdata; |
491 | 486 | ||
492 | /* | ||
493 | * Functions for storing/retrieving the cachep and or slab from the page | ||
494 | * allocator. These are used to find the slab an obj belongs to. With kfree(), | ||
495 | * these are used to find the cache which an obj belongs to. | ||
496 | */ | ||
497 | static inline void page_set_cache(struct page *page, struct kmem_cache *cache) | ||
498 | { | ||
499 | page->lru.next = (struct list_head *)cache; | ||
500 | } | ||
501 | |||
502 | static inline struct kmem_cache *page_get_cache(struct page *page) | 487 | static inline struct kmem_cache *page_get_cache(struct page *page) |
503 | { | 488 | { |
504 | page = compound_head(page); | 489 | page = compound_head(page); |
505 | BUG_ON(!PageSlab(page)); | 490 | BUG_ON(!PageSlab(page)); |
506 | return (struct kmem_cache *)page->lru.next; | 491 | return page->slab_cache; |
507 | } | ||
508 | |||
509 | static inline void page_set_slab(struct page *page, struct slab *slab) | ||
510 | { | ||
511 | page->lru.prev = (struct list_head *)slab; | ||
512 | } | ||
513 | |||
514 | static inline struct slab *page_get_slab(struct page *page) | ||
515 | { | ||
516 | BUG_ON(!PageSlab(page)); | ||
517 | return (struct slab *)page->lru.prev; | ||
518 | } | 492 | } |
519 | 493 | ||
520 | static inline struct kmem_cache *virt_to_cache(const void *obj) | 494 | static inline struct kmem_cache *virt_to_cache(const void *obj) |
521 | { | 495 | { |
522 | struct page *page = virt_to_head_page(obj); | 496 | struct page *page = virt_to_head_page(obj); |
523 | return page_get_cache(page); | 497 | return page->slab_cache; |
524 | } | 498 | } |
525 | 499 | ||
526 | static inline struct slab *virt_to_slab(const void *obj) | 500 | static inline struct slab *virt_to_slab(const void *obj) |
527 | { | 501 | { |
528 | struct page *page = virt_to_head_page(obj); | 502 | struct page *page = virt_to_head_page(obj); |
529 | return page_get_slab(page); | 503 | |
504 | VM_BUG_ON(!PageSlab(page)); | ||
505 | return page->slab_page; | ||
530 | } | 506 | } |
531 | 507 | ||
532 | static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, | 508 | static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, |
533 | unsigned int idx) | 509 | unsigned int idx) |
534 | { | 510 | { |
535 | return slab->s_mem + cache->buffer_size * idx; | 511 | return slab->s_mem + cache->size * idx; |
536 | } | 512 | } |
537 | 513 | ||
538 | /* | 514 | /* |
539 | * We want to avoid an expensive divide : (offset / cache->buffer_size) | 515 | * We want to avoid an expensive divide : (offset / cache->size) |
540 | * Using the fact that buffer_size is a constant for a particular cache, | 516 | * Using the fact that size is a constant for a particular cache, |
541 | * we can replace (offset / cache->buffer_size) by | 517 | * we can replace (offset / cache->size) by |
542 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) | 518 | * reciprocal_divide(offset, cache->reciprocal_buffer_size) |
543 | */ | 519 | */ |
544 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, | 520 | static inline unsigned int obj_to_index(const struct kmem_cache *cache, |
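The obj_to_index() helper is cut off by the hunk boundary above; its body presumably continues along these lines (a sketch using the reciprocal_div.h API of this era, with reciprocal_buffer_size precomputed from cachep->size via reciprocal_value() later in this patch):

#include <linux/reciprocal_div.h>

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = obj - slab->s_mem;	/* byte offset of the object inside the slab */

	/* same result as offset / cache->size, without a hardware divide */
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}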
@@ -584,33 +560,12 @@ static struct kmem_cache cache_cache = { | |||
584 | .batchcount = 1, | 560 | .batchcount = 1, |
585 | .limit = BOOT_CPUCACHE_ENTRIES, | 561 | .limit = BOOT_CPUCACHE_ENTRIES, |
586 | .shared = 1, | 562 | .shared = 1, |
587 | .buffer_size = sizeof(struct kmem_cache), | 563 | .size = sizeof(struct kmem_cache), |
588 | .name = "kmem_cache", | 564 | .name = "kmem_cache", |
589 | }; | 565 | }; |
590 | 566 | ||
591 | #define BAD_ALIEN_MAGIC 0x01020304ul | 567 | #define BAD_ALIEN_MAGIC 0x01020304ul |
592 | 568 | ||
593 | /* | ||
594 | * chicken and egg problem: delay the per-cpu array allocation | ||
595 | * until the general caches are up. | ||
596 | */ | ||
597 | static enum { | ||
598 | NONE, | ||
599 | PARTIAL_AC, | ||
600 | PARTIAL_L3, | ||
601 | EARLY, | ||
602 | LATE, | ||
603 | FULL | ||
604 | } g_cpucache_up; | ||
605 | |||
606 | /* | ||
607 | * used by boot code to determine if it can use slab based allocator | ||
608 | */ | ||
609 | int slab_is_available(void) | ||
610 | { | ||
611 | return g_cpucache_up >= EARLY; | ||
612 | } | ||
613 | |||
614 | #ifdef CONFIG_LOCKDEP | 569 | #ifdef CONFIG_LOCKDEP |
615 | 570 | ||
616 | /* | 571 | /* |
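The removed g_cpucache_up enum and slab_is_available() are replaced by state shared across the allocators. A sketch of what the shared mm/slab.h is assumed to provide (spellings taken from how this patch uses them, plus SLUB's PARTIAL; the authoritative definition lives in the common code, not here):

enum slab_state {
	DOWN,			/* no slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node cache available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for the arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for the l3 struct available */
	UP,			/* slab caches usable, final tuning not done */
	FULL			/* everything, including the reaper, is up */
};

extern enum slab_state slab_state;
extern struct mutex slab_mutex;
extern struct list_head slab_caches;

int slab_is_available(void)	/* boot code asks this before using kmalloc() */
{
	return slab_state >= UP;
}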
@@ -676,7 +631,7 @@ static void init_node_lock_keys(int q) | |||
676 | { | 631 | { |
677 | struct cache_sizes *s = malloc_sizes; | 632 | struct cache_sizes *s = malloc_sizes; |
678 | 633 | ||
679 | if (g_cpucache_up < LATE) | 634 | if (slab_state < UP) |
680 | return; | 635 | return; |
681 | 636 | ||
682 | for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { | 637 | for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { |
@@ -716,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) | |||
716 | } | 671 | } |
717 | #endif | 672 | #endif |
718 | 673 | ||
719 | /* | ||
720 | * Guard access to the cache-chain. | ||
721 | */ | ||
722 | static DEFINE_MUTEX(cache_chain_mutex); | ||
723 | static struct list_head cache_chain; | ||
724 | |||
725 | static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); | 674 | static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); |
726 | 675 | ||
727 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | 676 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) |
@@ -1145,7 +1094,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1145 | * When hotplugging memory or a cpu, existing nodelists are not replaced if | 1094 | * When hotplugging memory or a cpu, existing nodelists are not replaced if |
1146 | * already in use. | 1095 | * already in use. |
1147 | * | 1096 | * |
1148 | * Must hold cache_chain_mutex. | 1097 | * Must hold slab_mutex. |
1149 | */ | 1098 | */ |
1150 | static int init_cache_nodelists_node(int node) | 1099 | static int init_cache_nodelists_node(int node) |
1151 | { | 1100 | { |
@@ -1153,7 +1102,7 @@ static int init_cache_nodelists_node(int node) | |||
1153 | struct kmem_list3 *l3; | 1102 | struct kmem_list3 *l3; |
1154 | const int memsize = sizeof(struct kmem_list3); | 1103 | const int memsize = sizeof(struct kmem_list3); |
1155 | 1104 | ||
1156 | list_for_each_entry(cachep, &cache_chain, next) { | 1105 | list_for_each_entry(cachep, &slab_caches, list) { |
1157 | /* | 1106 | /* |
1158 | * Set up the size64 kmemlist for cpu before we can | 1107 | * Set up the size64 kmemlist for cpu before we can |
1159 | * begin anything. Make sure some other cpu on this | 1108 | * begin anything. Make sure some other cpu on this |
@@ -1169,7 +1118,7 @@ static int init_cache_nodelists_node(int node) | |||
1169 | 1118 | ||
1170 | /* | 1119 | /* |
1171 | * The l3s don't come and go as CPUs come and | 1120 | * The l3s don't come and go as CPUs come and |
1172 | * go. cache_chain_mutex is sufficient | 1121 | * go. slab_mutex is sufficient |
1173 | * protection here. | 1122 | * protection here. |
1174 | */ | 1123 | */ |
1175 | cachep->nodelists[node] = l3; | 1124 | cachep->nodelists[node] = l3; |
@@ -1191,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu) | |||
1191 | int node = cpu_to_mem(cpu); | 1140 | int node = cpu_to_mem(cpu); |
1192 | const struct cpumask *mask = cpumask_of_node(node); | 1141 | const struct cpumask *mask = cpumask_of_node(node); |
1193 | 1142 | ||
1194 | list_for_each_entry(cachep, &cache_chain, next) { | 1143 | list_for_each_entry(cachep, &slab_caches, list) { |
1195 | struct array_cache *nc; | 1144 | struct array_cache *nc; |
1196 | struct array_cache *shared; | 1145 | struct array_cache *shared; |
1197 | struct array_cache **alien; | 1146 | struct array_cache **alien; |
@@ -1241,7 +1190,7 @@ free_array_cache: | |||
1241 | * the respective cache's slabs, now we can go ahead and | 1190 | * the respective cache's slabs, now we can go ahead and |
1242 | * shrink each nodelist to its limit. | 1191 | * shrink each nodelist to its limit. |
1243 | */ | 1192 | */ |
1244 | list_for_each_entry(cachep, &cache_chain, next) { | 1193 | list_for_each_entry(cachep, &slab_caches, list) { |
1245 | l3 = cachep->nodelists[node]; | 1194 | l3 = cachep->nodelists[node]; |
1246 | if (!l3) | 1195 | if (!l3) |
1247 | continue; | 1196 | continue; |
@@ -1270,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu) | |||
1270 | * Now we can go ahead with allocating the shared arrays and | 1219 | * Now we can go ahead with allocating the shared arrays and |
1271 | * array caches | 1220 | * array caches |
1272 | */ | 1221 | */ |
1273 | list_for_each_entry(cachep, &cache_chain, next) { | 1222 | list_for_each_entry(cachep, &slab_caches, list) { |
1274 | struct array_cache *nc; | 1223 | struct array_cache *nc; |
1275 | struct array_cache *shared = NULL; | 1224 | struct array_cache *shared = NULL; |
1276 | struct array_cache **alien = NULL; | 1225 | struct array_cache **alien = NULL; |
@@ -1338,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1338 | switch (action) { | 1287 | switch (action) { |
1339 | case CPU_UP_PREPARE: | 1288 | case CPU_UP_PREPARE: |
1340 | case CPU_UP_PREPARE_FROZEN: | 1289 | case CPU_UP_PREPARE_FROZEN: |
1341 | mutex_lock(&cache_chain_mutex); | 1290 | mutex_lock(&slab_mutex); |
1342 | err = cpuup_prepare(cpu); | 1291 | err = cpuup_prepare(cpu); |
1343 | mutex_unlock(&cache_chain_mutex); | 1292 | mutex_unlock(&slab_mutex); |
1344 | break; | 1293 | break; |
1345 | case CPU_ONLINE: | 1294 | case CPU_ONLINE: |
1346 | case CPU_ONLINE_FROZEN: | 1295 | case CPU_ONLINE_FROZEN: |
@@ -1350,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1350 | case CPU_DOWN_PREPARE: | 1299 | case CPU_DOWN_PREPARE: |
1351 | case CPU_DOWN_PREPARE_FROZEN: | 1300 | case CPU_DOWN_PREPARE_FROZEN: |
1352 | /* | 1301 | /* |
1353 | * Shutdown cache reaper. Note that the cache_chain_mutex is | 1302 | * Shutdown cache reaper. Note that the slab_mutex is |
1354 | * held so that if cache_reap() is invoked it cannot do | 1303 | * held so that if cache_reap() is invoked it cannot do |
1355 | * anything expensive but will only modify reap_work | 1304 | * anything expensive but will only modify reap_work |
1356 | * and reschedule the timer. | 1305 | * and reschedule the timer. |
@@ -1377,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1377 | #endif | 1326 | #endif |
1378 | case CPU_UP_CANCELED: | 1327 | case CPU_UP_CANCELED: |
1379 | case CPU_UP_CANCELED_FROZEN: | 1328 | case CPU_UP_CANCELED_FROZEN: |
1380 | mutex_lock(&cache_chain_mutex); | 1329 | mutex_lock(&slab_mutex); |
1381 | cpuup_canceled(cpu); | 1330 | cpuup_canceled(cpu); |
1382 | mutex_unlock(&cache_chain_mutex); | 1331 | mutex_unlock(&slab_mutex); |
1383 | break; | 1332 | break; |
1384 | } | 1333 | } |
1385 | return notifier_from_errno(err); | 1334 | return notifier_from_errno(err); |
@@ -1395,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = { | |||
1395 | * Returns -EBUSY if all objects cannot be drained so that the node is not | 1344 | * Returns -EBUSY if all objects cannot be drained so that the node is not |
1396 | * removed. | 1345 | * removed. |
1397 | * | 1346 | * |
1398 | * Must hold cache_chain_mutex. | 1347 | * Must hold slab_mutex. |
1399 | */ | 1348 | */ |
1400 | static int __meminit drain_cache_nodelists_node(int node) | 1349 | static int __meminit drain_cache_nodelists_node(int node) |
1401 | { | 1350 | { |
1402 | struct kmem_cache *cachep; | 1351 | struct kmem_cache *cachep; |
1403 | int ret = 0; | 1352 | int ret = 0; |
1404 | 1353 | ||
1405 | list_for_each_entry(cachep, &cache_chain, next) { | 1354 | list_for_each_entry(cachep, &slab_caches, list) { |
1406 | struct kmem_list3 *l3; | 1355 | struct kmem_list3 *l3; |
1407 | 1356 | ||
1408 | l3 = cachep->nodelists[node]; | 1357 | l3 = cachep->nodelists[node]; |
@@ -1433,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self, | |||
1433 | 1382 | ||
1434 | switch (action) { | 1383 | switch (action) { |
1435 | case MEM_GOING_ONLINE: | 1384 | case MEM_GOING_ONLINE: |
1436 | mutex_lock(&cache_chain_mutex); | 1385 | mutex_lock(&slab_mutex); |
1437 | ret = init_cache_nodelists_node(nid); | 1386 | ret = init_cache_nodelists_node(nid); |
1438 | mutex_unlock(&cache_chain_mutex); | 1387 | mutex_unlock(&slab_mutex); |
1439 | break; | 1388 | break; |
1440 | case MEM_GOING_OFFLINE: | 1389 | case MEM_GOING_OFFLINE: |
1441 | mutex_lock(&cache_chain_mutex); | 1390 | mutex_lock(&slab_mutex); |
1442 | ret = drain_cache_nodelists_node(nid); | 1391 | ret = drain_cache_nodelists_node(nid); |
1443 | mutex_unlock(&cache_chain_mutex); | 1392 | mutex_unlock(&slab_mutex); |
1444 | break; | 1393 | break; |
1445 | case MEM_ONLINE: | 1394 | case MEM_ONLINE: |
1446 | case MEM_OFFLINE: | 1395 | case MEM_OFFLINE: |
@@ -1544,8 +1493,8 @@ void __init kmem_cache_init(void) | |||
1544 | node = numa_mem_id(); | 1493 | node = numa_mem_id(); |
1545 | 1494 | ||
1546 | /* 1) create the cache_cache */ | 1495 | /* 1) create the cache_cache */ |
1547 | INIT_LIST_HEAD(&cache_chain); | 1496 | INIT_LIST_HEAD(&slab_caches); |
1548 | list_add(&cache_cache.next, &cache_chain); | 1497 | list_add(&cache_cache.list, &slab_caches); |
1549 | cache_cache.colour_off = cache_line_size(); | 1498 | cache_cache.colour_off = cache_line_size(); |
1550 | cache_cache.array[smp_processor_id()] = &initarray_cache.cache; | 1499 | cache_cache.array[smp_processor_id()] = &initarray_cache.cache; |
1551 | cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; | 1500 | cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; |
@@ -1553,18 +1502,16 @@ void __init kmem_cache_init(void) | |||
1553 | /* | 1502 | /* |
1554 | * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids | 1503 | * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids |
1555 | */ | 1504 | */ |
1556 | cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + | 1505 | cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + |
1557 | nr_node_ids * sizeof(struct kmem_list3 *); | 1506 | nr_node_ids * sizeof(struct kmem_list3 *); |
1558 | #if DEBUG | 1507 | cache_cache.object_size = cache_cache.size; |
1559 | cache_cache.obj_size = cache_cache.buffer_size; | 1508 | cache_cache.size = ALIGN(cache_cache.size, |
1560 | #endif | ||
1561 | cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, | ||
1562 | cache_line_size()); | 1509 | cache_line_size()); |
1563 | cache_cache.reciprocal_buffer_size = | 1510 | cache_cache.reciprocal_buffer_size = |
1564 | reciprocal_value(cache_cache.buffer_size); | 1511 | reciprocal_value(cache_cache.size); |
1565 | 1512 | ||
1566 | for (order = 0; order < MAX_ORDER; order++) { | 1513 | for (order = 0; order < MAX_ORDER; order++) { |
1567 | cache_estimate(order, cache_cache.buffer_size, | 1514 | cache_estimate(order, cache_cache.size, |
1568 | cache_line_size(), 0, &left_over, &cache_cache.num); | 1515 | cache_line_size(), 0, &left_over, &cache_cache.num); |
1569 | if (cache_cache.num) | 1516 | if (cache_cache.num) |
1570 | break; | 1517 | break; |
@@ -1585,7 +1532,7 @@ void __init kmem_cache_init(void) | |||
1585 | * bug. | 1532 | * bug. |
1586 | */ | 1533 | */ |
1587 | 1534 | ||
1588 | sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, | 1535 | sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name, |
1589 | sizes[INDEX_AC].cs_size, | 1536 | sizes[INDEX_AC].cs_size, |
1590 | ARCH_KMALLOC_MINALIGN, | 1537 | ARCH_KMALLOC_MINALIGN, |
1591 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 1538 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, |
@@ -1593,7 +1540,7 @@ void __init kmem_cache_init(void) | |||
1593 | 1540 | ||
1594 | if (INDEX_AC != INDEX_L3) { | 1541 | if (INDEX_AC != INDEX_L3) { |
1595 | sizes[INDEX_L3].cs_cachep = | 1542 | sizes[INDEX_L3].cs_cachep = |
1596 | kmem_cache_create(names[INDEX_L3].name, | 1543 | __kmem_cache_create(names[INDEX_L3].name, |
1597 | sizes[INDEX_L3].cs_size, | 1544 | sizes[INDEX_L3].cs_size, |
1598 | ARCH_KMALLOC_MINALIGN, | 1545 | ARCH_KMALLOC_MINALIGN, |
1599 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 1546 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, |
@@ -1611,14 +1558,14 @@ void __init kmem_cache_init(void) | |||
1611 | * allow tighter packing of the smaller caches. | 1558 | * allow tighter packing of the smaller caches. |
1612 | */ | 1559 | */ |
1613 | if (!sizes->cs_cachep) { | 1560 | if (!sizes->cs_cachep) { |
1614 | sizes->cs_cachep = kmem_cache_create(names->name, | 1561 | sizes->cs_cachep = __kmem_cache_create(names->name, |
1615 | sizes->cs_size, | 1562 | sizes->cs_size, |
1616 | ARCH_KMALLOC_MINALIGN, | 1563 | ARCH_KMALLOC_MINALIGN, |
1617 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 1564 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, |
1618 | NULL); | 1565 | NULL); |
1619 | } | 1566 | } |
1620 | #ifdef CONFIG_ZONE_DMA | 1567 | #ifdef CONFIG_ZONE_DMA |
1621 | sizes->cs_dmacachep = kmem_cache_create( | 1568 | sizes->cs_dmacachep = __kmem_cache_create( |
1622 | names->name_dma, | 1569 | names->name_dma, |
1623 | sizes->cs_size, | 1570 | sizes->cs_size, |
1624 | ARCH_KMALLOC_MINALIGN, | 1571 | ARCH_KMALLOC_MINALIGN, |
@@ -1676,27 +1623,27 @@ void __init kmem_cache_init(void) | |||
1676 | } | 1623 | } |
1677 | } | 1624 | } |
1678 | 1625 | ||
1679 | g_cpucache_up = EARLY; | 1626 | slab_state = UP; |
1680 | } | 1627 | } |
1681 | 1628 | ||
1682 | void __init kmem_cache_init_late(void) | 1629 | void __init kmem_cache_init_late(void) |
1683 | { | 1630 | { |
1684 | struct kmem_cache *cachep; | 1631 | struct kmem_cache *cachep; |
1685 | 1632 | ||
1686 | g_cpucache_up = LATE; | 1633 | slab_state = UP; |
1687 | 1634 | ||
1688 | /* Annotate slab for lockdep -- annotate the malloc caches */ | 1635 | /* Annotate slab for lockdep -- annotate the malloc caches */ |
1689 | init_lock_keys(); | 1636 | init_lock_keys(); |
1690 | 1637 | ||
1691 | /* 6) resize the head arrays to their final sizes */ | 1638 | /* 6) resize the head arrays to their final sizes */ |
1692 | mutex_lock(&cache_chain_mutex); | 1639 | mutex_lock(&slab_mutex); |
1693 | list_for_each_entry(cachep, &cache_chain, next) | 1640 | list_for_each_entry(cachep, &slab_caches, list) |
1694 | if (enable_cpucache(cachep, GFP_NOWAIT)) | 1641 | if (enable_cpucache(cachep, GFP_NOWAIT)) |
1695 | BUG(); | 1642 | BUG(); |
1696 | mutex_unlock(&cache_chain_mutex); | 1643 | mutex_unlock(&slab_mutex); |
1697 | 1644 | ||
1698 | /* Done! */ | 1645 | /* Done! */ |
1699 | g_cpucache_up = FULL; | 1646 | slab_state = FULL; |
1700 | 1647 | ||
1701 | /* | 1648 | /* |
1702 | * Register a cpu startup notifier callback that initializes | 1649 | * Register a cpu startup notifier callback that initializes |
@@ -1727,6 +1674,9 @@ static int __init cpucache_init(void) | |||
1727 | */ | 1674 | */ |
1728 | for_each_online_cpu(cpu) | 1675 | for_each_online_cpu(cpu) |
1729 | start_cpu_timer(cpu); | 1676 | start_cpu_timer(cpu); |
1677 | |||
1678 | /* Done! */ | ||
1679 | slab_state = FULL; | ||
1730 | return 0; | 1680 | return 0; |
1731 | } | 1681 | } |
1732 | __initcall(cpucache_init); | 1682 | __initcall(cpucache_init); |
@@ -1743,7 +1693,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
1743 | "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n", | 1693 | "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n", |
1744 | nodeid, gfpflags); | 1694 | nodeid, gfpflags); |
1745 | printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", | 1695 | printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", |
1746 | cachep->name, cachep->buffer_size, cachep->gfporder); | 1696 | cachep->name, cachep->size, cachep->gfporder); |
1747 | 1697 | ||
1748 | for_each_online_node(node) { | 1698 | for_each_online_node(node) { |
1749 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; | 1699 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; |
@@ -1798,7 +1748,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
1798 | flags |= __GFP_COMP; | 1748 | flags |= __GFP_COMP; |
1799 | #endif | 1749 | #endif |
1800 | 1750 | ||
1801 | flags |= cachep->gfpflags; | 1751 | flags |= cachep->allocflags; |
1802 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 1752 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) |
1803 | flags |= __GFP_RECLAIMABLE; | 1753 | flags |= __GFP_RECLAIMABLE; |
1804 | 1754 | ||
@@ -1874,7 +1824,7 @@ static void kmem_rcu_free(struct rcu_head *head) | |||
1874 | static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | 1824 | static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, |
1875 | unsigned long caller) | 1825 | unsigned long caller) |
1876 | { | 1826 | { |
1877 | int size = obj_size(cachep); | 1827 | int size = cachep->object_size; |
1878 | 1828 | ||
1879 | addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; | 1829 | addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; |
1880 | 1830 | ||
@@ -1906,7 +1856,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | |||
1906 | 1856 | ||
1907 | static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) | 1857 | static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) |
1908 | { | 1858 | { |
1909 | int size = obj_size(cachep); | 1859 | int size = cachep->object_size; |
1910 | addr = &((char *)addr)[obj_offset(cachep)]; | 1860 | addr = &((char *)addr)[obj_offset(cachep)]; |
1911 | 1861 | ||
1912 | memset(addr, val, size); | 1862 | memset(addr, val, size); |
@@ -1966,7 +1916,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | |||
1966 | printk("\n"); | 1916 | printk("\n"); |
1967 | } | 1917 | } |
1968 | realobj = (char *)objp + obj_offset(cachep); | 1918 | realobj = (char *)objp + obj_offset(cachep); |
1969 | size = obj_size(cachep); | 1919 | size = cachep->object_size; |
1970 | for (i = 0; i < size && lines; i += 16, lines--) { | 1920 | for (i = 0; i < size && lines; i += 16, lines--) { |
1971 | int limit; | 1921 | int limit; |
1972 | limit = 16; | 1922 | limit = 16; |
@@ -1983,7 +1933,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
1983 | int lines = 0; | 1933 | int lines = 0; |
1984 | 1934 | ||
1985 | realobj = (char *)objp + obj_offset(cachep); | 1935 | realobj = (char *)objp + obj_offset(cachep); |
1986 | size = obj_size(cachep); | 1936 | size = cachep->object_size; |
1987 | 1937 | ||
1988 | for (i = 0; i < size; i++) { | 1938 | for (i = 0; i < size; i++) { |
1989 | char exp = POISON_FREE; | 1939 | char exp = POISON_FREE; |
@@ -2047,10 +1997,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab | |||
2047 | 1997 | ||
2048 | if (cachep->flags & SLAB_POISON) { | 1998 | if (cachep->flags & SLAB_POISON) { |
2049 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1999 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2050 | if (cachep->buffer_size % PAGE_SIZE == 0 && | 2000 | if (cachep->size % PAGE_SIZE == 0 && |
2051 | OFF_SLAB(cachep)) | 2001 | OFF_SLAB(cachep)) |
2052 | kernel_map_pages(virt_to_page(objp), | 2002 | kernel_map_pages(virt_to_page(objp), |
2053 | cachep->buffer_size / PAGE_SIZE, 1); | 2003 | cachep->size / PAGE_SIZE, 1); |
2054 | else | 2004 | else |
2055 | check_poison_obj(cachep, objp); | 2005 | check_poison_obj(cachep, objp); |
2056 | #else | 2006 | #else |
@@ -2194,10 +2144,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, | |||
2194 | 2144 | ||
2195 | static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | 2145 | static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) |
2196 | { | 2146 | { |
2197 | if (g_cpucache_up == FULL) | 2147 | if (slab_state >= FULL) |
2198 | return enable_cpucache(cachep, gfp); | 2148 | return enable_cpucache(cachep, gfp); |
2199 | 2149 | ||
2200 | if (g_cpucache_up == NONE) { | 2150 | if (slab_state == DOWN) { |
2201 | /* | 2151 | /* |
2202 | * Note: the first kmem_cache_create must create the cache | 2152 | * Note: the first kmem_cache_create must create the cache |
2203 | * that's used by kmalloc(24), otherwise the creation of | 2153 | * that's used by kmalloc(24), otherwise the creation of |
@@ -2212,16 +2162,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2212 | */ | 2162 | */ |
2213 | set_up_list3s(cachep, SIZE_AC); | 2163 | set_up_list3s(cachep, SIZE_AC); |
2214 | if (INDEX_AC == INDEX_L3) | 2164 | if (INDEX_AC == INDEX_L3) |
2215 | g_cpucache_up = PARTIAL_L3; | 2165 | slab_state = PARTIAL_L3; |
2216 | else | 2166 | else |
2217 | g_cpucache_up = PARTIAL_AC; | 2167 | slab_state = PARTIAL_ARRAYCACHE; |
2218 | } else { | 2168 | } else { |
2219 | cachep->array[smp_processor_id()] = | 2169 | cachep->array[smp_processor_id()] = |
2220 | kmalloc(sizeof(struct arraycache_init), gfp); | 2170 | kmalloc(sizeof(struct arraycache_init), gfp); |
2221 | 2171 | ||
2222 | if (g_cpucache_up == PARTIAL_AC) { | 2172 | if (slab_state == PARTIAL_ARRAYCACHE) { |
2223 | set_up_list3s(cachep, SIZE_L3); | 2173 | set_up_list3s(cachep, SIZE_L3); |
2224 | g_cpucache_up = PARTIAL_L3; | 2174 | slab_state = PARTIAL_L3; |
2225 | } else { | 2175 | } else { |
2226 | int node; | 2176 | int node; |
2227 | for_each_online_node(node) { | 2177 | for_each_online_node(node) { |
@@ -2247,7 +2197,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2247 | } | 2197 | } |
2248 | 2198 | ||
2249 | /** | 2199 | /** |
2250 | * kmem_cache_create - Create a cache. | 2200 | * __kmem_cache_create - Create a cache. |
2251 | * @name: A string which is used in /proc/slabinfo to identify this cache. | 2201 | * @name: A string which is used in /proc/slabinfo to identify this cache. |
2252 | * @size: The size of objects to be created in this cache. | 2202 | * @size: The size of objects to be created in this cache. |
2253 | * @align: The required alignment for the objects. | 2203 | * @align: The required alignment for the objects. |
@@ -2274,59 +2224,14 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2274 | * as davem. | 2224 | * as davem. |
2275 | */ | 2225 | */ |
2276 | struct kmem_cache * | 2226 | struct kmem_cache * |
2277 | kmem_cache_create (const char *name, size_t size, size_t align, | 2227 | __kmem_cache_create (const char *name, size_t size, size_t align, |
2278 | unsigned long flags, void (*ctor)(void *)) | 2228 | unsigned long flags, void (*ctor)(void *)) |
2279 | { | 2229 | { |
2280 | size_t left_over, slab_size, ralign; | 2230 | size_t left_over, slab_size, ralign; |
2281 | struct kmem_cache *cachep = NULL, *pc; | 2231 | struct kmem_cache *cachep = NULL; |
2282 | gfp_t gfp; | 2232 | gfp_t gfp; |
2283 | 2233 | ||
2284 | /* | ||
2285 | * Sanity checks... these are all serious usage bugs. | ||
2286 | */ | ||
2287 | if (!name || in_interrupt() || (size < BYTES_PER_WORD) || | ||
2288 | size > KMALLOC_MAX_SIZE) { | ||
2289 | printk(KERN_ERR "%s: Early error in slab %s\n", __func__, | ||
2290 | name); | ||
2291 | BUG(); | ||
2292 | } | ||
2293 | |||
2294 | /* | ||
2295 | * We use cache_chain_mutex to ensure a consistent view of | ||
2296 | * cpu_online_mask as well. Please see cpuup_callback | ||
2297 | */ | ||
2298 | if (slab_is_available()) { | ||
2299 | get_online_cpus(); | ||
2300 | mutex_lock(&cache_chain_mutex); | ||
2301 | } | ||
2302 | |||
2303 | list_for_each_entry(pc, &cache_chain, next) { | ||
2304 | char tmp; | ||
2305 | int res; | ||
2306 | |||
2307 | /* | ||
2308 | * This happens when the module gets unloaded and doesn't | ||
2309 | * destroy its slab cache and no-one else reuses the vmalloc | ||
2310 | * area of the module. Print a warning. | ||
2311 | */ | ||
2312 | res = probe_kernel_address(pc->name, tmp); | ||
2313 | if (res) { | ||
2314 | printk(KERN_ERR | ||
2315 | "SLAB: cache with size %d has lost its name\n", | ||
2316 | pc->buffer_size); | ||
2317 | continue; | ||
2318 | } | ||
2319 | |||
2320 | if (!strcmp(pc->name, name)) { | ||
2321 | printk(KERN_ERR | ||
2322 | "kmem_cache_create: duplicate cache %s\n", name); | ||
2323 | dump_stack(); | ||
2324 | goto oops; | ||
2325 | } | ||
2326 | } | ||
2327 | |||
2328 | #if DEBUG | 2234 | #if DEBUG |
2329 | WARN_ON(strchr(name, ' ')); /* It confuses parsers */ | ||
2330 | #if FORCED_DEBUG | 2235 | #if FORCED_DEBUG |
2331 | /* | 2236 | /* |
2332 | * Enable redzoning and last user accounting, except for caches with | 2237 | * Enable redzoning and last user accounting, except for caches with |
@@ -2415,11 +2320,12 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2415 | /* Get cache's description obj. */ | 2320 | /* Get cache's description obj. */ |
2416 | cachep = kmem_cache_zalloc(&cache_cache, gfp); | 2321 | cachep = kmem_cache_zalloc(&cache_cache, gfp); |
2417 | if (!cachep) | 2322 | if (!cachep) |
2418 | goto oops; | 2323 | return NULL; |
2419 | 2324 | ||
2420 | cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; | 2325 | cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; |
2326 | cachep->object_size = size; | ||
2327 | cachep->align = align; | ||
2421 | #if DEBUG | 2328 | #if DEBUG |
2422 | cachep->obj_size = size; | ||
2423 | 2329 | ||
2424 | /* | 2330 | /* |
2425 | * Both debugging options require word-alignment which is calculated | 2331 | * Both debugging options require word-alignment which is calculated |
@@ -2442,7 +2348,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2442 | } | 2348 | } |
2443 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2349 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2444 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 2350 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size |
2445 | && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { | 2351 | && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { |
2446 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); | 2352 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); |
2447 | size = PAGE_SIZE; | 2353 | size = PAGE_SIZE; |
2448 | } | 2354 | } |
@@ -2471,8 +2377,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2471 | printk(KERN_ERR | 2377 | printk(KERN_ERR |
2472 | "kmem_cache_create: couldn't create cache %s.\n", name); | 2378 | "kmem_cache_create: couldn't create cache %s.\n", name); |
2473 | kmem_cache_free(&cache_cache, cachep); | 2379 | kmem_cache_free(&cache_cache, cachep); |
2474 | cachep = NULL; | 2380 | return NULL; |
2475 | goto oops; | ||
2476 | } | 2381 | } |
2477 | slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) | 2382 | slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) |
2478 | + sizeof(struct slab), align); | 2383 | + sizeof(struct slab), align); |
@@ -2508,10 +2413,10 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2508 | cachep->colour = left_over / cachep->colour_off; | 2413 | cachep->colour = left_over / cachep->colour_off; |
2509 | cachep->slab_size = slab_size; | 2414 | cachep->slab_size = slab_size; |
2510 | cachep->flags = flags; | 2415 | cachep->flags = flags; |
2511 | cachep->gfpflags = 0; | 2416 | cachep->allocflags = 0; |
2512 | if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) | 2417 | if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) |
2513 | cachep->gfpflags |= GFP_DMA; | 2418 | cachep->allocflags |= GFP_DMA; |
2514 | cachep->buffer_size = size; | 2419 | cachep->size = size; |
2515 | cachep->reciprocal_buffer_size = reciprocal_value(size); | 2420 | cachep->reciprocal_buffer_size = reciprocal_value(size); |
2516 | 2421 | ||
2517 | if (flags & CFLGS_OFF_SLAB) { | 2422 | if (flags & CFLGS_OFF_SLAB) { |
@@ -2530,8 +2435,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2530 | 2435 | ||
2531 | if (setup_cpu_cache(cachep, gfp)) { | 2436 | if (setup_cpu_cache(cachep, gfp)) { |
2532 | __kmem_cache_destroy(cachep); | 2437 | __kmem_cache_destroy(cachep); |
2533 | cachep = NULL; | 2438 | return NULL; |
2534 | goto oops; | ||
2535 | } | 2439 | } |
2536 | 2440 | ||
2537 | if (flags & SLAB_DEBUG_OBJECTS) { | 2441 | if (flags & SLAB_DEBUG_OBJECTS) { |
@@ -2545,18 +2449,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2545 | } | 2449 | } |
2546 | 2450 | ||
2547 | /* cache setup completed, link it into the list */ | 2451 | /* cache setup completed, link it into the list */ |
2548 | list_add(&cachep->next, &cache_chain); | 2452 | list_add(&cachep->list, &slab_caches); |
2549 | oops: | ||
2550 | if (!cachep && (flags & SLAB_PANIC)) | ||
2551 | panic("kmem_cache_create(): failed to create slab `%s'\n", | ||
2552 | name); | ||
2553 | if (slab_is_available()) { | ||
2554 | mutex_unlock(&cache_chain_mutex); | ||
2555 | put_online_cpus(); | ||
2556 | } | ||
2557 | return cachep; | 2453 | return cachep; |
2558 | } | 2454 | } |
2559 | EXPORT_SYMBOL(kmem_cache_create); | ||
2560 | 2455 | ||
2561 | #if DEBUG | 2456 | #if DEBUG |
2562 | static void check_irq_off(void) | 2457 | static void check_irq_off(void) |
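With the prologue sanity checks, the duplicate-name scan, the SLAB_PANIC handling and the EXPORT_SYMBOL all dropped from this function, the public entry point is expected to live in common code and call __kmem_cache_create() under slab_mutex. A sketch of the assumed wrapper shape (the exact code is in mm/slab_common.c, outside this diff):

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
				     unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	get_online_cpus();		/* stable cpu_online_mask, as before */
	mutex_lock(&slab_mutex);
	/* name/size sanity checks and the duplicate-name scan happen here */
	s = __kmem_cache_create(name, size, align, flags, ctor);
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: failed to create slab '%s'\n", name);

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);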
@@ -2671,7 +2566,7 @@ out: | |||
2671 | return nr_freed; | 2566 | return nr_freed; |
2672 | } | 2567 | } |
2673 | 2568 | ||
2674 | /* Called with cache_chain_mutex held to protect against cpu hotplug */ | 2569 | /* Called with slab_mutex held to protect against cpu hotplug */ |
2675 | static int __cache_shrink(struct kmem_cache *cachep) | 2570 | static int __cache_shrink(struct kmem_cache *cachep) |
2676 | { | 2571 | { |
2677 | int ret = 0, i = 0; | 2572 | int ret = 0, i = 0; |
@@ -2706,9 +2601,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep) | |||
2706 | BUG_ON(!cachep || in_interrupt()); | 2601 | BUG_ON(!cachep || in_interrupt()); |
2707 | 2602 | ||
2708 | get_online_cpus(); | 2603 | get_online_cpus(); |
2709 | mutex_lock(&cache_chain_mutex); | 2604 | mutex_lock(&slab_mutex); |
2710 | ret = __cache_shrink(cachep); | 2605 | ret = __cache_shrink(cachep); |
2711 | mutex_unlock(&cache_chain_mutex); | 2606 | mutex_unlock(&slab_mutex); |
2712 | put_online_cpus(); | 2607 | put_online_cpus(); |
2713 | return ret; | 2608 | return ret; |
2714 | } | 2609 | } |
@@ -2736,15 +2631,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep) | |||
2736 | 2631 | ||
2737 | /* Find the cache in the chain of caches. */ | 2632 | /* Find the cache in the chain of caches. */ |
2738 | get_online_cpus(); | 2633 | get_online_cpus(); |
2739 | mutex_lock(&cache_chain_mutex); | 2634 | mutex_lock(&slab_mutex); |
2740 | /* | 2635 | /* |
2741 | * the chain is never empty, cache_cache is never destroyed | 2636 | * the chain is never empty, cache_cache is never destroyed |
2742 | */ | 2637 | */ |
2743 | list_del(&cachep->next); | 2638 | list_del(&cachep->list); |
2744 | if (__cache_shrink(cachep)) { | 2639 | if (__cache_shrink(cachep)) { |
2745 | slab_error(cachep, "Can't free all objects"); | 2640 | slab_error(cachep, "Can't free all objects"); |
2746 | list_add(&cachep->next, &cache_chain); | 2641 | list_add(&cachep->list, &slab_caches); |
2747 | mutex_unlock(&cache_chain_mutex); | 2642 | mutex_unlock(&slab_mutex); |
2748 | put_online_cpus(); | 2643 | put_online_cpus(); |
2749 | return; | 2644 | return; |
2750 | } | 2645 | } |
@@ -2753,7 +2648,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) | |||
2753 | rcu_barrier(); | 2648 | rcu_barrier(); |
2754 | 2649 | ||
2755 | __kmem_cache_destroy(cachep); | 2650 | __kmem_cache_destroy(cachep); |
2756 | mutex_unlock(&cache_chain_mutex); | 2651 | mutex_unlock(&slab_mutex); |
2757 | put_online_cpus(); | 2652 | put_online_cpus(); |
2758 | } | 2653 | } |
2759 | EXPORT_SYMBOL(kmem_cache_destroy); | 2654 | EXPORT_SYMBOL(kmem_cache_destroy); |
@@ -2840,10 +2735,10 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
2840 | slab_error(cachep, "constructor overwrote the" | 2735 | slab_error(cachep, "constructor overwrote the" |
2841 | " start of an object"); | 2736 | " start of an object"); |
2842 | } | 2737 | } |
2843 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && | 2738 | if ((cachep->size % PAGE_SIZE) == 0 && |
2844 | OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) | 2739 | OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) |
2845 | kernel_map_pages(virt_to_page(objp), | 2740 | kernel_map_pages(virt_to_page(objp), |
2846 | cachep->buffer_size / PAGE_SIZE, 0); | 2741 | cachep->size / PAGE_SIZE, 0); |
2847 | #else | 2742 | #else |
2848 | if (cachep->ctor) | 2743 | if (cachep->ctor) |
2849 | cachep->ctor(objp); | 2744 | cachep->ctor(objp); |
@@ -2857,9 +2752,9 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) | |||
2857 | { | 2752 | { |
2858 | if (CONFIG_ZONE_DMA_FLAG) { | 2753 | if (CONFIG_ZONE_DMA_FLAG) { |
2859 | if (flags & GFP_DMA) | 2754 | if (flags & GFP_DMA) |
2860 | BUG_ON(!(cachep->gfpflags & GFP_DMA)); | 2755 | BUG_ON(!(cachep->allocflags & GFP_DMA)); |
2861 | else | 2756 | else |
2862 | BUG_ON(cachep->gfpflags & GFP_DMA); | 2757 | BUG_ON(cachep->allocflags & GFP_DMA); |
2863 | } | 2758 | } |
2864 | } | 2759 | } |
2865 | 2760 | ||
@@ -2918,8 +2813,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, | |||
2918 | nr_pages <<= cache->gfporder; | 2813 | nr_pages <<= cache->gfporder; |
2919 | 2814 | ||
2920 | do { | 2815 | do { |
2921 | page_set_cache(page, cache); | 2816 | page->slab_cache = cache; |
2922 | page_set_slab(page, slab); | 2817 | page->slab_page = slab; |
2923 | page++; | 2818 | page++; |
2924 | } while (--nr_pages); | 2819 | } while (--nr_pages); |
2925 | } | 2820 | } |
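Storing the back-pointers in dedicated struct page fields pairs with the lookups on the free path; a minimal sketch of both directions (store as in slab_map_pages() above, load as in virt_to_cache()/virt_to_slab() earlier in this patch):

static void backpointer_example(struct kmem_cache *cache, struct slab *slab,
				const void *objp)
{
	struct page *page = virt_to_head_page(objp);

	/* store side, done once per page when the slab is created */
	page->slab_cache = cache;	/* was page->lru.next */
	page->slab_page = slab;		/* was page->lru.prev */

	/* load side, e.g. kfree() recovering its metadata */
	cache = page->slab_cache;
	slab = page->slab_page;
}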
@@ -3057,7 +2952,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
3057 | kfree_debugcheck(objp); | 2952 | kfree_debugcheck(objp); |
3058 | page = virt_to_head_page(objp); | 2953 | page = virt_to_head_page(objp); |
3059 | 2954 | ||
3060 | slabp = page_get_slab(page); | 2955 | slabp = page->slab_page; |
3061 | 2956 | ||
3062 | if (cachep->flags & SLAB_RED_ZONE) { | 2957 | if (cachep->flags & SLAB_RED_ZONE) { |
3063 | verify_redzone_free(cachep, objp); | 2958 | verify_redzone_free(cachep, objp); |
@@ -3077,10 +2972,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
3077 | #endif | 2972 | #endif |
3078 | if (cachep->flags & SLAB_POISON) { | 2973 | if (cachep->flags & SLAB_POISON) { |
3079 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2974 | #ifdef CONFIG_DEBUG_PAGEALLOC |
3080 | if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { | 2975 | if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { |
3081 | store_stackinfo(cachep, objp, (unsigned long)caller); | 2976 | store_stackinfo(cachep, objp, (unsigned long)caller); |
3082 | kernel_map_pages(virt_to_page(objp), | 2977 | kernel_map_pages(virt_to_page(objp), |
3083 | cachep->buffer_size / PAGE_SIZE, 0); | 2978 | cachep->size / PAGE_SIZE, 0); |
3084 | } else { | 2979 | } else { |
3085 | poison_obj(cachep, objp, POISON_FREE); | 2980 | poison_obj(cachep, objp, POISON_FREE); |
3086 | } | 2981 | } |
@@ -3230,9 +3125,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
3230 | return objp; | 3125 | return objp; |
3231 | if (cachep->flags & SLAB_POISON) { | 3126 | if (cachep->flags & SLAB_POISON) { |
3232 | #ifdef CONFIG_DEBUG_PAGEALLOC | 3127 | #ifdef CONFIG_DEBUG_PAGEALLOC |
3233 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) | 3128 | if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) |
3234 | kernel_map_pages(virt_to_page(objp), | 3129 | kernel_map_pages(virt_to_page(objp), |
3235 | cachep->buffer_size / PAGE_SIZE, 1); | 3130 | cachep->size / PAGE_SIZE, 1); |
3236 | else | 3131 | else |
3237 | check_poison_obj(cachep, objp); | 3132 | check_poison_obj(cachep, objp); |
3238 | #else | 3133 | #else |
@@ -3261,8 +3156,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
3261 | struct slab *slabp; | 3156 | struct slab *slabp; |
3262 | unsigned objnr; | 3157 | unsigned objnr; |
3263 | 3158 | ||
3264 | slabp = page_get_slab(virt_to_head_page(objp)); | 3159 | slabp = virt_to_head_page(objp)->slab_page; |
3265 | objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; | 3160 | objnr = (unsigned)(objp - slabp->s_mem) / cachep->size; |
3266 | slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; | 3161 | slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; |
3267 | } | 3162 | } |
3268 | #endif | 3163 | #endif |
@@ -3285,7 +3180,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) | |||
3285 | if (cachep == &cache_cache) | 3180 | if (cachep == &cache_cache) |
3286 | return false; | 3181 | return false; |
3287 | 3182 | ||
3288 | return should_failslab(obj_size(cachep), flags, cachep->flags); | 3183 | return should_failslab(cachep->object_size, flags, cachep->flags); |
3289 | } | 3184 | } |
3290 | 3185 | ||
3291 | static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 3186 | static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) |
@@ -3336,7 +3231,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
3336 | if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) | 3231 | if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) |
3337 | nid_alloc = cpuset_slab_spread_node(); | 3232 | nid_alloc = cpuset_slab_spread_node(); |
3338 | else if (current->mempolicy) | 3233 | else if (current->mempolicy) |
3339 | nid_alloc = slab_node(current->mempolicy); | 3234 | nid_alloc = slab_node(); |
3340 | if (nid_alloc != nid_here) | 3235 | if (nid_alloc != nid_here) |
3341 | return ____cache_alloc_node(cachep, flags, nid_alloc); | 3236 | return ____cache_alloc_node(cachep, flags, nid_alloc); |
3342 | return NULL; | 3237 | return NULL; |
@@ -3368,7 +3263,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) | |||
3368 | 3263 | ||
3369 | retry_cpuset: | 3264 | retry_cpuset: |
3370 | cpuset_mems_cookie = get_mems_allowed(); | 3265 | cpuset_mems_cookie = get_mems_allowed(); |
3371 | zonelist = node_zonelist(slab_node(current->mempolicy), flags); | 3266 | zonelist = node_zonelist(slab_node(), flags); |
3372 | 3267 | ||
3373 | retry: | 3268 | retry: |
3374 | /* | 3269 | /* |
@@ -3545,14 +3440,14 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
3545 | out: | 3440 | out: |
3546 | local_irq_restore(save_flags); | 3441 | local_irq_restore(save_flags); |
3547 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); | 3442 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); |
3548 | kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, | 3443 | kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, |
3549 | flags); | 3444 | flags); |
3550 | 3445 | ||
3551 | if (likely(ptr)) | 3446 | if (likely(ptr)) |
3552 | kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep)); | 3447 | kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); |
3553 | 3448 | ||
3554 | if (unlikely((flags & __GFP_ZERO) && ptr)) | 3449 | if (unlikely((flags & __GFP_ZERO) && ptr)) |
3555 | memset(ptr, 0, obj_size(cachep)); | 3450 | memset(ptr, 0, cachep->object_size); |
3556 | 3451 | ||
3557 | return ptr; | 3452 | return ptr; |
3558 | } | 3453 | } |
@@ -3607,15 +3502,15 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | |||
3607 | objp = __do_cache_alloc(cachep, flags); | 3502 | objp = __do_cache_alloc(cachep, flags); |
3608 | local_irq_restore(save_flags); | 3503 | local_irq_restore(save_flags); |
3609 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); | 3504 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); |
3610 | kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, | 3505 | kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, |
3611 | flags); | 3506 | flags); |
3612 | prefetchw(objp); | 3507 | prefetchw(objp); |
3613 | 3508 | ||
3614 | if (likely(objp)) | 3509 | if (likely(objp)) |
3615 | kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep)); | 3510 | kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); |
3616 | 3511 | ||
3617 | if (unlikely((flags & __GFP_ZERO) && objp)) | 3512 | if (unlikely((flags & __GFP_ZERO) && objp)) |
3618 | memset(objp, 0, obj_size(cachep)); | 3513 | memset(objp, 0, cachep->object_size); |
3619 | 3514 | ||
3620 | return objp; | 3515 | return objp; |
3621 | } | 3516 | } |
@@ -3731,7 +3626,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, | |||
3731 | kmemleak_free_recursive(objp, cachep->flags); | 3626 | kmemleak_free_recursive(objp, cachep->flags); |
3732 | objp = cache_free_debugcheck(cachep, objp, caller); | 3627 | objp = cache_free_debugcheck(cachep, objp, caller); |
3733 | 3628 | ||
3734 | kmemcheck_slab_free(cachep, objp, obj_size(cachep)); | 3629 | kmemcheck_slab_free(cachep, objp, cachep->object_size); |
3735 | 3630 | ||
3736 | /* | 3631 | /* |
3737 | * Skip calling cache_free_alien() when the platform is not numa. | 3632 | * Skip calling cache_free_alien() when the platform is not numa. |
@@ -3766,7 +3661,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
3766 | void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); | 3661 | void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); |
3767 | 3662 | ||
3768 | trace_kmem_cache_alloc(_RET_IP_, ret, | 3663 | trace_kmem_cache_alloc(_RET_IP_, ret, |
3769 | obj_size(cachep), cachep->buffer_size, flags); | 3664 | cachep->object_size, cachep->size, flags); |
3770 | 3665 | ||
3771 | return ret; | 3666 | return ret; |
3772 | } | 3667 | } |
@@ -3794,7 +3689,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
3794 | __builtin_return_address(0)); | 3689 | __builtin_return_address(0)); |
3795 | 3690 | ||
3796 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 3691 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
3797 | obj_size(cachep), cachep->buffer_size, | 3692 | cachep->object_size, cachep->size, |
3798 | flags, nodeid); | 3693 | flags, nodeid); |
3799 | 3694 | ||
3800 | return ret; | 3695 | return ret; |
@@ -3876,7 +3771,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
3876 | ret = __cache_alloc(cachep, flags, caller); | 3771 | ret = __cache_alloc(cachep, flags, caller); |
3877 | 3772 | ||
3878 | trace_kmalloc((unsigned long) caller, ret, | 3773 | trace_kmalloc((unsigned long) caller, ret, |
3879 | size, cachep->buffer_size, flags); | 3774 | size, cachep->size, flags); |
3880 | 3775 | ||
3881 | return ret; | 3776 | return ret; |
3882 | } | 3777 | } |
@@ -3916,9 +3811,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
3916 | unsigned long flags; | 3811 | unsigned long flags; |
3917 | 3812 | ||
3918 | local_irq_save(flags); | 3813 | local_irq_save(flags); |
3919 | debug_check_no_locks_freed(objp, obj_size(cachep)); | 3814 | debug_check_no_locks_freed(objp, cachep->object_size); |
3920 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) | 3815 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) |
3921 | debug_check_no_obj_freed(objp, obj_size(cachep)); | 3816 | debug_check_no_obj_freed(objp, cachep->object_size); |
3922 | __cache_free(cachep, objp, __builtin_return_address(0)); | 3817 | __cache_free(cachep, objp, __builtin_return_address(0)); |
3923 | local_irq_restore(flags); | 3818 | local_irq_restore(flags); |
3924 | 3819 | ||
@@ -3947,8 +3842,9 @@ void kfree(const void *objp) | |||
3947 | local_irq_save(flags); | 3842 | local_irq_save(flags); |
3948 | kfree_debugcheck(objp); | 3843 | kfree_debugcheck(objp); |
3949 | c = virt_to_cache(objp); | 3844 | c = virt_to_cache(objp); |
3950 | debug_check_no_locks_freed(objp, obj_size(c)); | 3845 | debug_check_no_locks_freed(objp, c->object_size); |
3951 | debug_check_no_obj_freed(objp, obj_size(c)); | 3846 | |
3847 | debug_check_no_obj_freed(objp, c->object_size); | ||
3952 | __cache_free(c, (void *)objp, __builtin_return_address(0)); | 3848 | __cache_free(c, (void *)objp, __builtin_return_address(0)); |
3953 | local_irq_restore(flags); | 3849 | local_irq_restore(flags); |
3954 | } | 3850 | } |
@@ -3956,7 +3852,7 @@ EXPORT_SYMBOL(kfree); | |||
3956 | 3852 | ||
3957 | unsigned int kmem_cache_size(struct kmem_cache *cachep) | 3853 | unsigned int kmem_cache_size(struct kmem_cache *cachep) |
3958 | { | 3854 | { |
3959 | return obj_size(cachep); | 3855 | return cachep->object_size; |
3960 | } | 3856 | } |
3961 | EXPORT_SYMBOL(kmem_cache_size); | 3857 | EXPORT_SYMBOL(kmem_cache_size); |
3962 | 3858 | ||
@@ -4030,7 +3926,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) | |||
4030 | return 0; | 3926 | return 0; |
4031 | 3927 | ||
4032 | fail: | 3928 | fail: |
4033 | if (!cachep->next.next) { | 3929 | if (!cachep->list.next) { |
4034 | /* Cache is not active yet. Roll back what we did */ | 3930 | /* Cache is not active yet. Roll back what we did */ |
4035 | node--; | 3931 | node--; |
4036 | while (node >= 0) { | 3932 | while (node >= 0) { |
@@ -4065,7 +3961,7 @@ static void do_ccupdate_local(void *info) | |||
4065 | new->new[smp_processor_id()] = old; | 3961 | new->new[smp_processor_id()] = old; |
4066 | } | 3962 | } |
4067 | 3963 | ||
4068 | /* Always called with the cache_chain_mutex held */ | 3964 | /* Always called with the slab_mutex held */ |
4069 | static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | 3965 | static int do_tune_cpucache(struct kmem_cache *cachep, int limit, |
4070 | int batchcount, int shared, gfp_t gfp) | 3966 | int batchcount, int shared, gfp_t gfp) |
4071 | { | 3967 | { |
@@ -4109,7 +4005,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | |||
4109 | return alloc_kmemlist(cachep, gfp); | 4005 | return alloc_kmemlist(cachep, gfp); |
4110 | } | 4006 | } |
4111 | 4007 | ||
4112 | /* Called with cache_chain_mutex held always */ | 4008 | /* Called with slab_mutex held always */ |
4113 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) | 4009 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) |
4114 | { | 4010 | { |
4115 | int err; | 4011 | int err; |
@@ -4124,13 +4020,13 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) | |||
4124 | * The numbers are guessed, we should auto-tune as described by | 4020 | * The numbers are guessed, we should auto-tune as described by |
4125 | * Bonwick. | 4021 | * Bonwick. |
4126 | */ | 4022 | */ |
4127 | if (cachep->buffer_size > 131072) | 4023 | if (cachep->size > 131072) |
4128 | limit = 1; | 4024 | limit = 1; |
4129 | else if (cachep->buffer_size > PAGE_SIZE) | 4025 | else if (cachep->size > PAGE_SIZE) |
4130 | limit = 8; | 4026 | limit = 8; |
4131 | else if (cachep->buffer_size > 1024) | 4027 | else if (cachep->size > 1024) |
4132 | limit = 24; | 4028 | limit = 24; |
4133 | else if (cachep->buffer_size > 256) | 4029 | else if (cachep->size > 256) |
4134 | limit = 54; | 4030 | limit = 54; |
4135 | else | 4031 | else |
4136 | limit = 120; | 4032 | limit = 120; |
@@ -4145,7 +4041,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) | |||
4145 | * to a larger limit. Thus disabled by default. | 4041 | * to a larger limit. Thus disabled by default. |
4146 | */ | 4042 | */ |
4147 | shared = 0; | 4043 | shared = 0; |
4148 | if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1) | 4044 | if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) |
4149 | shared = 8; | 4045 | shared = 8; |
4150 | 4046 | ||
4151 | #if DEBUG | 4047 | #if DEBUG |
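Apart from the buffer_size to size rename, the tuning table itself is unchanged: larger objects get a smaller per-CPU array limit, and a shared array is only enabled for objects up to a page on machines with more than one possible CPU. A runnable transcription of just that decision logic; PAGE_SIZE and the CPU count are assumed values here:

    #include <stdio.h>

    #define PAGE_SIZE 4096  /* assumption for the example */

    /* Stand-alone copy of the heuristics visible in enable_cpucache(). */
    static void tune(unsigned int size, unsigned int num_cpus,
                     unsigned int *limit, unsigned int *shared)
    {
            if (size > 131072)
                    *limit = 1;
            else if (size > PAGE_SIZE)
                    *limit = 8;
            else if (size > 1024)
                    *limit = 24;
            else if (size > 256)
                    *limit = 54;
            else
                    *limit = 120;

            *shared = 0;
            if (size <= PAGE_SIZE && num_cpus > 1)
                    *shared = 8;
    }

    int main(void)
    {
            unsigned int limit, shared;

            tune(192, 4, &limit, &shared);
            printf("size=192:  limit=%u shared=%u\n", limit, shared);
            tune(8192, 4, &limit, &shared);
            printf("size=8192: limit=%u shared=%u\n", limit, shared);
            return 0;
    }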
@@ -4211,11 +4107,11 @@ static void cache_reap(struct work_struct *w) | |||
4211 | int node = numa_mem_id(); | 4107 | int node = numa_mem_id(); |
4212 | struct delayed_work *work = to_delayed_work(w); | 4108 | struct delayed_work *work = to_delayed_work(w); |
4213 | 4109 | ||
4214 | if (!mutex_trylock(&cache_chain_mutex)) | 4110 | if (!mutex_trylock(&slab_mutex)) |
4215 | /* Give up. Setup the next iteration. */ | 4111 | /* Give up. Setup the next iteration. */ |
4216 | goto out; | 4112 | goto out; |
4217 | 4113 | ||
4218 | list_for_each_entry(searchp, &cache_chain, next) { | 4114 | list_for_each_entry(searchp, &slab_caches, list) { |
4219 | check_irq_on(); | 4115 | check_irq_on(); |
4220 | 4116 | ||
4221 | /* | 4117 | /* |
@@ -4253,7 +4149,7 @@ next: | |||
4253 | cond_resched(); | 4149 | cond_resched(); |
4254 | } | 4150 | } |
4255 | check_irq_on(); | 4151 | check_irq_on(); |
4256 | mutex_unlock(&cache_chain_mutex); | 4152 | mutex_unlock(&slab_mutex); |
4257 | next_reap_node(); | 4153 | next_reap_node(); |
4258 | out: | 4154 | out: |
4259 | /* Set up the next iteration */ | 4155 | /* Set up the next iteration */ |
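cache_reap() never blocks on the renamed mutex: if slab_mutex is contended it gives up at once and only arms the next iteration of the delayed work. A userspace pthread sketch of that try-or-back-off shape, with the periodic rescheduling reduced to a loop for the sake of a runnable example:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* A periodic job that must not block on the global lock: trylock,
     * and when the lock is busy, simply wait for the next tick. */
    static void periodic_reap(void)
    {
            if (pthread_mutex_trylock(&big_lock) != 0)
                    return;                 /* give up; retry next time */

            /* ... walk the global list and do a bounded amount of work ... */

            pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 3; i++) {
                    periodic_reap();
                    usleep(1000);           /* stand-in for re-arming the work */
            }
            return 0;
    }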
@@ -4289,26 +4185,26 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
4289 | { | 4185 | { |
4290 | loff_t n = *pos; | 4186 | loff_t n = *pos; |
4291 | 4187 | ||
4292 | mutex_lock(&cache_chain_mutex); | 4188 | mutex_lock(&slab_mutex); |
4293 | if (!n) | 4189 | if (!n) |
4294 | print_slabinfo_header(m); | 4190 | print_slabinfo_header(m); |
4295 | 4191 | ||
4296 | return seq_list_start(&cache_chain, *pos); | 4192 | return seq_list_start(&slab_caches, *pos); |
4297 | } | 4193 | } |
4298 | 4194 | ||
4299 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) | 4195 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) |
4300 | { | 4196 | { |
4301 | return seq_list_next(p, &cache_chain, pos); | 4197 | return seq_list_next(p, &slab_caches, pos); |
4302 | } | 4198 | } |
4303 | 4199 | ||
4304 | static void s_stop(struct seq_file *m, void *p) | 4200 | static void s_stop(struct seq_file *m, void *p) |
4305 | { | 4201 | { |
4306 | mutex_unlock(&cache_chain_mutex); | 4202 | mutex_unlock(&slab_mutex); |
4307 | } | 4203 | } |
4308 | 4204 | ||
4309 | static int s_show(struct seq_file *m, void *p) | 4205 | static int s_show(struct seq_file *m, void *p) |
4310 | { | 4206 | { |
4311 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); | 4207 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); |
4312 | struct slab *slabp; | 4208 | struct slab *slabp; |
4313 | unsigned long active_objs; | 4209 | unsigned long active_objs; |
4314 | unsigned long num_objs; | 4210 | unsigned long num_objs; |
@@ -4364,7 +4260,7 @@ static int s_show(struct seq_file *m, void *p) | |||
4364 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); | 4260 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); |
4365 | 4261 | ||
4366 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", | 4262 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", |
4367 | name, active_objs, num_objs, cachep->buffer_size, | 4263 | name, active_objs, num_objs, cachep->size, |
4368 | cachep->num, (1 << cachep->gfporder)); | 4264 | cachep->num, (1 << cachep->gfporder)); |
4369 | seq_printf(m, " : tunables %4u %4u %4u", | 4265 | seq_printf(m, " : tunables %4u %4u %4u", |
4370 | cachep->limit, cachep->batchcount, cachep->shared); | 4266 | cachep->limit, cachep->batchcount, cachep->shared); |
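The seq_printf() calls above fix the column layout of each /proc/slabinfo line: cache name, active and total objects, the (renamed) per-object size, objects per slab, pages per slab, then the tunables triple. A small reader for the leading columns; the field order is taken from this output code, reading the file usually needs root, and error handling is minimal:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/slabinfo", "r");
            char line[512];

            if (!f) {
                    perror("/proc/slabinfo");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    char name[64];
                    unsigned long active, total;
                    unsigned int objsize, objperslab, pagesperslab;

                    /* Header lines fail to parse and are skipped. */
                    if (sscanf(line, "%63s %lu %lu %u %u %u",
                               name, &active, &total,
                               &objsize, &objperslab, &pagesperslab) == 6)
                            printf("%-20s %6lu/%-6lu objs, %u bytes each\n",
                                   name, active, total, objsize);
            }
            fclose(f);
            return 0;
    }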
@@ -4454,9 +4350,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
4454 | return -EINVAL; | 4350 | return -EINVAL; |
4455 | 4351 | ||
4456 | /* Find the cache in the chain of caches. */ | 4352 | /* Find the cache in the chain of caches. */ |
4457 | mutex_lock(&cache_chain_mutex); | 4353 | mutex_lock(&slab_mutex); |
4458 | res = -EINVAL; | 4354 | res = -EINVAL; |
4459 | list_for_each_entry(cachep, &cache_chain, next) { | 4355 | list_for_each_entry(cachep, &slab_caches, list) { |
4460 | if (!strcmp(cachep->name, kbuf)) { | 4356 | if (!strcmp(cachep->name, kbuf)) { |
4461 | if (limit < 1 || batchcount < 1 || | 4357 | if (limit < 1 || batchcount < 1 || |
4462 | batchcount > limit || shared < 0) { | 4358 | batchcount > limit || shared < 0) { |
@@ -4469,7 +4365,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
4469 | break; | 4365 | break; |
4470 | } | 4366 | } |
4471 | } | 4367 | } |
4472 | mutex_unlock(&cache_chain_mutex); | 4368 | mutex_unlock(&slab_mutex); |
4473 | if (res >= 0) | 4369 | if (res >= 0) |
4474 | res = count; | 4370 | res = count; |
4475 | return res; | 4371 | return res; |
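slabinfo_write() is the writable side of the same file: a line of the form "cache-name limit batchcount shared" is parsed, the named cache is looked up under slab_mutex, and values passing the sanity check above are applied via do_tune_cpucache(). A hedged example of issuing such a write; the cache name and numbers are only examples, the write requires root, and a combination failing the visible check (for instance batchcount > limit) is not applied:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *line = "dentry 120 60 8\n";  /* name limit batchcount shared */
            FILE *f = fopen("/proc/slabinfo", "w");

            if (!f) {
                    perror("open /proc/slabinfo for writing");
                    return 1;
            }
            if (fwrite(line, 1, strlen(line), f) != strlen(line))
                    perror("write");
            fclose(f);
            return 0;
    }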
@@ -4492,8 +4388,8 @@ static const struct file_operations proc_slabinfo_operations = { | |||
4492 | 4388 | ||
4493 | static void *leaks_start(struct seq_file *m, loff_t *pos) | 4389 | static void *leaks_start(struct seq_file *m, loff_t *pos) |
4494 | { | 4390 | { |
4495 | mutex_lock(&cache_chain_mutex); | 4391 | mutex_lock(&slab_mutex); |
4496 | return seq_list_start(&cache_chain, *pos); | 4392 | return seq_list_start(&slab_caches, *pos); |
4497 | } | 4393 | } |
4498 | 4394 | ||
4499 | static inline int add_caller(unsigned long *n, unsigned long v) | 4395 | static inline int add_caller(unsigned long *n, unsigned long v) |
@@ -4532,7 +4428,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) | |||
4532 | int i; | 4428 | int i; |
4533 | if (n[0] == n[1]) | 4429 | if (n[0] == n[1]) |
4534 | return; | 4430 | return; |
4535 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { | 4431 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { |
4536 | if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) | 4432 | if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) |
4537 | continue; | 4433 | continue; |
4538 | if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) | 4434 | if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) |
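handle_slab() walks a slab's objects by raw byte stride: start at s_mem and advance by the full per-object footprint c->size (not object_size, which excludes the debug padding). The same walk over a flat buffer, with an arbitrary stride and object count chosen for the example:

    #include <stdio.h>
    #include <string.h>

    #define STRIDE  48      /* per-object footprint, example value */
    #define NUM     4

    int main(void)
    {
            unsigned char mem[STRIDE * NUM];
            unsigned char *p;
            int i;

            memset(mem, 0, sizeof(mem));
            for (i = 0, p = mem; i < NUM; i++, p += STRIDE)
                    printf("object %d starts at offset %td\n", i, p - mem);
            return 0;
    }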
@@ -4558,7 +4454,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) | |||
4558 | 4454 | ||
4559 | static int leaks_show(struct seq_file *m, void *p) | 4455 | static int leaks_show(struct seq_file *m, void *p) |
4560 | { | 4456 | { |
4561 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); | 4457 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); |
4562 | struct slab *slabp; | 4458 | struct slab *slabp; |
4563 | struct kmem_list3 *l3; | 4459 | struct kmem_list3 *l3; |
4564 | const char *name; | 4460 | const char *name; |
@@ -4592,17 +4488,17 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4592 | name = cachep->name; | 4488 | name = cachep->name; |
4593 | if (n[0] == n[1]) { | 4489 | if (n[0] == n[1]) { |
4594 | /* Increase the buffer size */ | 4490 | /* Increase the buffer size */ |
4595 | mutex_unlock(&cache_chain_mutex); | 4491 | mutex_unlock(&slab_mutex); |
4596 | m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); | 4492 | m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); |
4597 | if (!m->private) { | 4493 | if (!m->private) { |
4598 | /* Too bad, we are really out */ | 4494 | /* Too bad, we are really out */ |
4599 | m->private = n; | 4495 | m->private = n; |
4600 | mutex_lock(&cache_chain_mutex); | 4496 | mutex_lock(&slab_mutex); |
4601 | return -ENOMEM; | 4497 | return -ENOMEM; |
4602 | } | 4498 | } |
4603 | *(unsigned long *)m->private = n[0] * 2; | 4499 | *(unsigned long *)m->private = n[0] * 2; |
4604 | kfree(n); | 4500 | kfree(n); |
4605 | mutex_lock(&cache_chain_mutex); | 4501 | mutex_lock(&slab_mutex); |
4606 | /* Now make sure this entry will be retried */ | 4502 | /* Now make sure this entry will be retried */ |
4607 | m->count = m->size; | 4503 | m->count = m->size; |
4608 | return 0; | 4504 | return 0; |
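When its record array fills up (n[0] == n[1]), leaks_show() drops slab_mutex, allocates a buffer holding twice as many entries, retakes the lock and forces the seq_file core to retry the same cache by setting m->count = m->size. A plain C sketch of that double-and-retry pattern; the helper and the buffer layout here are only an approximation of the kernel's:

    #include <stdlib.h>

    /* Slot 0 holds the capacity, slot 1 the number of used entries,
     * followed by two words per entry, roughly as in leaks_show(). */
    static unsigned long *double_records(unsigned long *old)
    {
            unsigned long cap = old[0] * 2;
            unsigned long *new = calloc(2 + 2 * cap, sizeof(unsigned long));

            if (!new)
                    return old;     /* out of memory: keep what we had */
            new[0] = cap;
            free(old);
            return new;             /* caller must redo the scan */
    }

    int main(void)
    {
            unsigned long *n = calloc(2 + 2 * 1, sizeof(unsigned long));

            if (!n)
                    return 1;
            n[0] = 1;
            n[1] = 1;               /* full: used == capacity */
            if (n[1] == n[0])
                    n = double_records(n);
            free(n);
            return 0;
    }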
@@ -4677,6 +4573,6 @@ size_t ksize(const void *objp) | |||
4677 | if (unlikely(objp == ZERO_SIZE_PTR)) | 4573 | if (unlikely(objp == ZERO_SIZE_PTR)) |
4678 | return 0; | 4574 | return 0; |
4679 | 4575 | ||
4680 | return obj_size(virt_to_cache(objp)); | 4576 | return virt_to_cache(objp)->object_size; |
4681 | } | 4577 | } |
4682 | EXPORT_SYMBOL(ksize); | 4578 | EXPORT_SYMBOL(ksize); |
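ksize() now reads object_size straight from the owning cache, so it reports how many bytes are actually usable behind the pointer, which for kmalloc() memory can be more than was requested because allocations are rounded up to a size class. Userspace allocators offer a comparable query; a short glibc example for contrast (malloc_usable_size() is glibc-specific):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *p = malloc(100);

            if (!p)
                    return 1;
            /* Usable size may exceed the 100 bytes requested. */
            printf("requested 100, usable %zu\n", malloc_usable_size(p));
            free(p);
            return 0;
    }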