diff options
author | Christoph Lameter <cl@linux.com> | 2012-11-28 11:23:09 -0500 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2012-12-11 05:14:27 -0500 |
commit | 2f9baa9fcf8d0a204ca129a671d6086cc100faab (patch) | |
tree | 2f5623db840042f0deffac46c15affa9825bc0b1 /mm/slab.c | |
parent | dffb4d605c23110e3ad54b8c9f244a8235c013c2 (diff) |
slab: Use the new create_boot_cache function to simplify bootstrap
Simplify setup and reduce code in kmem_cache_init(). This allows us to
get rid of initarray_cache as well as the manual setup code for
the kmem_cache and kmem_cache_node arrays during bootstrap.
We introduce a new bootstrap state "PARTIAL" for slab that signals the
creation of a kmem_cache boot cache.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 49 |
1 file changed, 16 insertions(+), 33 deletions(-)
@@ -547,8 +547,6 @@ static struct cache_names __initdata cache_names[] = { | |||
547 | #undef CACHE | 547 | #undef CACHE |
548 | }; | 548 | }; |
549 | 549 | ||
550 | static struct arraycache_init initarray_cache __initdata = | ||
551 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; | ||
552 | static struct arraycache_init initarray_generic = | 550 | static struct arraycache_init initarray_generic = |
553 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; | 551 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; |
554 | 552 | ||
@@ -1572,12 +1570,9 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep) | |||
1572 | */ | 1570 | */ |
1573 | void __init kmem_cache_init(void) | 1571 | void __init kmem_cache_init(void) |
1574 | { | 1572 | { |
1575 | size_t left_over; | ||
1576 | struct cache_sizes *sizes; | 1573 | struct cache_sizes *sizes; |
1577 | struct cache_names *names; | 1574 | struct cache_names *names; |
1578 | int i; | 1575 | int i; |
1579 | int order; | ||
1580 | int node; | ||
1581 | 1576 | ||
1582 | kmem_cache = &kmem_cache_boot; | 1577 | kmem_cache = &kmem_cache_boot; |
1583 | setup_nodelists_pointer(kmem_cache); | 1578 | setup_nodelists_pointer(kmem_cache); |
@@ -1618,36 +1613,16 @@ void __init kmem_cache_init(void) | |||
1618 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. | 1613 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. |
1619 | */ | 1614 | */ |
1620 | 1615 | ||
1621 | node = numa_mem_id(); | ||
1622 | |||
1623 | /* 1) create the kmem_cache */ | 1616 | /* 1) create the kmem_cache */ |
1624 | INIT_LIST_HEAD(&slab_caches); | ||
1625 | list_add(&kmem_cache->list, &slab_caches); | ||
1626 | kmem_cache->colour_off = cache_line_size(); | ||
1627 | kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; | ||
1628 | 1617 | ||
1629 | /* | 1618 | /* |
1630 | * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids | 1619 | * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids |
1631 | */ | 1620 | */ |
1632 | kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + | 1621 | create_boot_cache(kmem_cache, "kmem_cache", |
1633 | nr_node_ids * sizeof(struct kmem_list3 *); | 1622 | offsetof(struct kmem_cache, array[nr_cpu_ids]) + |
1634 | kmem_cache->object_size = kmem_cache->size; | 1623 | nr_node_ids * sizeof(struct kmem_list3 *), |
1635 | kmem_cache->size = ALIGN(kmem_cache->object_size, | 1624 | SLAB_HWCACHE_ALIGN); |
1636 | cache_line_size()); | 1625 | list_add(&kmem_cache->list, &slab_caches); |
1637 | kmem_cache->reciprocal_buffer_size = | ||
1638 | reciprocal_value(kmem_cache->size); | ||
1639 | |||
1640 | for (order = 0; order < MAX_ORDER; order++) { | ||
1641 | cache_estimate(order, kmem_cache->size, | ||
1642 | cache_line_size(), 0, &left_over, &kmem_cache->num); | ||
1643 | if (kmem_cache->num) | ||
1644 | break; | ||
1645 | } | ||
1646 | BUG_ON(!kmem_cache->num); | ||
1647 | kmem_cache->gfporder = order; | ||
1648 | kmem_cache->colour = left_over / kmem_cache->colour_off; | ||
1649 | kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) + | ||
1650 | sizeof(struct slab), cache_line_size()); | ||
1651 | 1626 | ||
1652 | /* 2+3) create the kmalloc caches */ | 1627 | /* 2+3) create the kmalloc caches */ |
1653 | sizes = malloc_sizes; | 1628 | sizes = malloc_sizes; |
@@ -1695,7 +1670,6 @@ void __init kmem_cache_init(void) | |||
1695 | 1670 | ||
1696 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); | 1671 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); |
1697 | 1672 | ||
1698 | BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache); | ||
1699 | memcpy(ptr, cpu_cache_get(kmem_cache), | 1673 | memcpy(ptr, cpu_cache_get(kmem_cache), |
1700 | sizeof(struct arraycache_init)); | 1674 | sizeof(struct arraycache_init)); |
1701 | /* | 1675 | /* |
@@ -2250,7 +2224,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2250 | 2224 | ||
2251 | if (slab_state == DOWN) { | 2225 | if (slab_state == DOWN) { |
2252 | /* | 2226 | /* |
2253 | * Note: the first kmem_cache_create must create the cache | 2227 | * Note: Creation of first cache (kmem_cache). |
2228 | * The setup_list3s is taken care | ||
2229 | * of by the caller of __kmem_cache_create | ||
2230 | */ | ||
2231 | cachep->array[smp_processor_id()] = &initarray_generic.cache; | ||
2232 | slab_state = PARTIAL; | ||
2233 | } else if (slab_state == PARTIAL) { | ||
2234 | /* | ||
2235 | * Note: the second kmem_cache_create must create the cache | ||
2254 | * that's used by kmalloc(24), otherwise the creation of | 2236 | * that's used by kmalloc(24), otherwise the creation of |
2255 | * further caches will BUG(). | 2237 | * further caches will BUG(). |
2256 | */ | 2238 | */ |
@@ -2258,7 +2240,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2258 | 2240 | ||
2259 | /* | 2241 | /* |
2260 | * If the cache that's used by kmalloc(sizeof(kmem_list3)) is | 2242 | * If the cache that's used by kmalloc(sizeof(kmem_list3)) is |
2261 | * the first cache, then we need to set up all its list3s, | 2243 | * the second cache, then we need to set up all its list3s, |
2262 | * otherwise the creation of further caches will BUG(). | 2244 | * otherwise the creation of further caches will BUG(). |
2263 | */ | 2245 | */ |
2264 | set_up_list3s(cachep, SIZE_AC); | 2246 | set_up_list3s(cachep, SIZE_AC); |
@@ -2267,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2267 | else | 2249 | else |
2268 | slab_state = PARTIAL_ARRAYCACHE; | 2250 | slab_state = PARTIAL_ARRAYCACHE; |
2269 | } else { | 2251 | } else { |
2252 | /* Remaining boot caches */ | ||
2270 | cachep->array[smp_processor_id()] = | 2253 | cachep->array[smp_processor_id()] = |
2271 | kmalloc(sizeof(struct arraycache_init), gfp); | 2254 | kmalloc(sizeof(struct arraycache_init), gfp); |
2272 | 2255 | ||