author		Christoph Lameter <cl@linux.com>	2012-09-04 20:20:33 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 05:00:36 -0400
commit		9b030cb865f137e1574596983face2a07e41e8b2 (patch)
tree		51caca89688beefd5ba910069d5c5754140906ae /mm/slab.c
parent		945cf2b6199be70ff03102b9e642c3bb05d01de9 (diff)
mm/sl[aou]b: Use "kmem_cache" name for slab cache with kmem_cache struct
Make all allocators use the "kmem_cache" slabname for the "kmem_cache"
structure.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
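[Editor's note] The patch settles on one bootstrap pattern across SLAB, SLUB
and SLOB: the cache that holds all struct kmem_cache descriptors is
statically allocated under a boot-only name, and a shared "kmem_cache"
pointer is set to it at the very start of kmem_cache_init(). A minimal
sketch of the pattern as it lands in SLAB below; the extern declaration of
the shared pointer lives in the common slab code, and its exact location is
assumed here, with initializer fields trimmed:

	/* Statically allocated, so it is usable before any cache exists. */
	static struct kmem_cache kmem_cache_boot = {
		.batchcount = 1,
		.limit = BOOT_CPUCACHE_ENTRIES,
		.shared = 1,
		/* ... remaining fields as in the hunk below ... */
	};

	/* Common name across sl[aou]b for the cache of cache descriptions. */
	struct kmem_cache *kmem_cache;

	void __init kmem_cache_init(void)
	{
		kmem_cache = &kmem_cache_boot;
		/* ... the rest of the bootstrap goes through kmem_cache ... */
	}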
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	72
1 file changed, 37 insertions(+), 35 deletions(-)
@@ -578,9 +578,9 @@ static struct arraycache_init initarray_generic =
 	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1594,15 +1594,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1614,9 +1616,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1625,43 +1627,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
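[Editor's note] The descriptor sizing in step 1 above is worth unpacking:
struct kmem_cache ends with a per-CPU array whose useful length (nr_cpu_ids)
is only known at boot, and the per-node list pointers are stored directly
after that array, so the size is derived with offsetof rather than sizeof.
A hedged sketch of the arithmetic, using a simplified stand-in struct
(kmem_cache_like and descriptor_size are illustrative names, not kernel
API):

	#include <stddef.h>

	struct array_cache;	/* per-CPU head array, opaque here */
	struct kmem_list3;	/* per-node slab lists, opaque here */

	struct kmem_cache_like {
		unsigned int num;
		/* ... other descriptor fields ... */
		struct array_cache *array[1];	/* really nr_cpu_ids slots */
	};

	static size_t descriptor_size(int nr_cpu_ids, int nr_node_ids)
	{
		/*
		 * Bytes up to the end of the per-CPU pointer array; the
		 * variable index inside offsetof is a GCC extension the
		 * kernel relies on, equivalent to
		 * offsetof(..., array) + nr_cpu_ids * sizeof(void *).
		 */
		size_t sz = offsetof(struct kmem_cache_like, array[nr_cpu_ids]);

		/* ... plus the nodelist pointers appended right after it. */
		return sz + nr_node_ids * sizeof(struct kmem_list3 *);
	}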
@@ -1728,15 +1730,15 @@ void __init kmem_cache_init(void)
 
 	ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-	BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-	memcpy(ptr, cpu_cache_get(&cache_cache),
+	BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+	memcpy(ptr, cpu_cache_get(kmem_cache),
 	       sizeof(struct arraycache_init));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
 	spin_lock_init(&ptr->lock);
 
-	cache_cache.array[smp_processor_id()] = ptr;
+	kmem_cache->array[smp_processor_id()] = ptr;
 
 	ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
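[Editor's note] One subtlety this hunk preserves: the boot-time head array
is carried over into its kmalloc'd replacement with memcpy, yet the embedded
spinlock is reinitialized explicitly afterwards, because a byte-copied lock
is not a validly initialized one (debug builds, e.g. with lockdep, attach
per-lock state that must not be duplicated). A minimal illustration of the
rule, with a hypothetical struct:

	#include <linux/spinlock.h>
	#include <linux/string.h>

	struct head_array_like {	/* illustrative only */
		spinlock_t lock;
		unsigned int avail;
	};

	static void replace_head_array(struct head_array_like *dst,
				       const struct head_array_like *src)
	{
		memcpy(dst, src, sizeof(*dst));
		/* Never trust the copied lock image; reinitialize it. */
		spin_lock_init(&dst->lock);
	}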
@@ -1757,7 +1759,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -2223,7 +2225,7 @@ void __kmem_cache_destroy(struct kmem_cache *cachep)
 			kfree(l3);
 		}
 	}
-	kmem_cache_free(&cache_cache, cachep);
+	kmem_cache_free(kmem_cache, cachep);
 }
 
 
@@ -2473,7 +2475,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		gfp = GFP_NOWAIT;
 
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
+	cachep = kmem_cache_zalloc(kmem_cache, gfp);
 	if (!cachep)
 		return NULL;
 
@@ -2531,7 +2533,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep->num) {
 		printk(KERN_ERR
 		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
+		kmem_cache_free(kmem_cache, cachep);
 		return NULL;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
@@ -3299,7 +3301,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);
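[Editor's note] A closing remark on the last hunk: the fault-injection check
keeps exempting the cache of caches, and the test stays a cheap pointer
comparison. Because kmem_cache points at the single statically allocated
kmem_cache_boot, "cachep == kmem_cache" identifies exactly the same object
that "cachep == &cache_cache" did before the rename, while the condition now
reads identically across the sl[aou]b allocators.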