about summary refs log tree commit diff stats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorPekka Enberg <penberg@cs.helsinki.fi>2006-03-22 03:08:11 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-22 10:53:58 -0500
commitf30cf7d13eee420f5249b4d7709b46570098ab92 (patch)
tree825ad85c9c517efb0be41bf114ceb73a15f5c914 /mm/slab.c
parent8fea4e96a8f29ccc34c244f54574680ce9b43631 (diff)
[PATCH] slab: extract setup_cpu_cache
Extract setup_cpu_cache() function from kmem_cache_create() to make the latter a little less complex.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--mm/slab.c109
1 file changed, 55 insertions(+), 54 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 3d18b711ab82..4d5c4b93e0eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1748,6 +1748,60 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
1748 return left_over; 1748 return left_over;
1749} 1749}
1750 1750
/*
 * NOTE(review): the lines below are a cgit diff rendering — the leading
 * "1751"-style numbers are the new file's line numbers, not part of the
 * code itself.
 *
 * setup_cpu_cache() initializes the per-CPU array cache and the per-node
 * kmem_list3 structures of a freshly created cache.  It was extracted
 * from kmem_cache_create() (see the removed hunk further down) and encodes
 * the slab bootstrap state machine driven by g_cpucache_up:
 *   FULL      -> allocator fully up: just enable_cpucache() and return.
 *   NONE      -> very first cache: point its per-CPU array at the static
 *                initarray_generic, set up the AC list3s, then advance to
 *                PARTIAL_AC (or PARTIAL_L3 when INDEX_AC == INDEX_L3).
 *   otherwise -> kmalloc() a real arraycache; if still PARTIAL_AC, set up
 *                the L3 list3s via set_up_list3s(), else kmalloc_node() a
 *                kmem_list3 for every online node.
 * Finally it staggers next_reap (cachep modulo REAPTIMEOUT_LIST3, so reap
 * timers for different caches don't all fire together) and seeds the boot
 * arraycache/cache limits.
 */
1751static void setup_cpu_cache(struct kmem_cache *cachep)
1752{
1753 if (g_cpucache_up == FULL) {
1754 enable_cpucache(cachep);
1755 return;
1756 }
1757 if (g_cpucache_up == NONE) {
1758 /*
1759 * Note: the first kmem_cache_create must create the cache
1760 * that's used by kmalloc(24), otherwise the creation of
1761 * further caches will BUG().
1762 */
1763 cachep->array[smp_processor_id()] = &initarray_generic.cache;
1764
1765 /*
1766 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
1767 * the first cache, then we need to set up all its list3s,
1768 * otherwise the creation of further caches will BUG().
1769 */
1770 set_up_list3s(cachep, SIZE_AC);
1771 if (INDEX_AC == INDEX_L3)
1772 g_cpucache_up = PARTIAL_L3;
1773 else
1774 g_cpucache_up = PARTIAL_AC;
1775 } else {
/*
 * NOTE(review): kmalloc() may fail here; the NULL case is only caught
 * later — see the dropped BUG_ON note below.
 */
1776 cachep->array[smp_processor_id()] =
1777 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1778
1779 if (g_cpucache_up == PARTIAL_AC) {
1780 set_up_list3s(cachep, SIZE_L3);
1781 g_cpucache_up = PARTIAL_L3;
1782 } else {
1783 int node;
1784 for_each_online_node(node) {
1785 cachep->nodelists[node] =
1786 kmalloc_node(sizeof(struct kmem_list3),
1787 GFP_KERNEL, node);
1788 BUG_ON(!cachep->nodelists[node]);
1789 kmem_list3_init(cachep->nodelists[node]);
1790 }
1791 }
1792 }
1793 cachep->nodelists[numa_node_id()]->next_reap =
1794 jiffies + REAPTIMEOUT_LIST3 +
1795 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1796
/*
 * NOTE(review): the pre-refactor inline code had
 * BUG_ON(!cpu_cache_get(cachep)) here (old line 2049); the extracted
 * function omits that check, so a failed kmalloc() above would now be a
 * NULL dereference rather than an explicit BUG — confirm this drop was
 * intentional.
 */
1797 cpu_cache_get(cachep)->avail = 0;
1798 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1799 cpu_cache_get(cachep)->batchcount = 1;
1800 cpu_cache_get(cachep)->touched = 0;
1801 cachep->batchcount = 1;
1802 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1803}
1804
1751/** 1805/**
1752 * kmem_cache_create - Create a cache. 1806 * kmem_cache_create - Create a cache.
1753 * @name: A string which is used in /proc/slabinfo to identify this cache. 1807 * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -2000,60 +2054,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2000 cachep->name = name; 2054 cachep->name = name;
2001 2055
2002 2056
2003 if (g_cpucache_up == FULL) { 2057 setup_cpu_cache(cachep);
2004 enable_cpucache(cachep);
2005 } else {
2006 if (g_cpucache_up == NONE) {
2007 /* Note: the first kmem_cache_create must create
2008 * the cache that's used by kmalloc(24), otherwise
2009 * the creation of further caches will BUG().
2010 */
2011 cachep->array[smp_processor_id()] =
2012 &initarray_generic.cache;
2013
2014 /* If the cache that's used by
2015 * kmalloc(sizeof(kmem_list3)) is the first cache,
2016 * then we need to set up all its list3s, otherwise
2017 * the creation of further caches will BUG().
2018 */
2019 set_up_list3s(cachep, SIZE_AC);
2020 if (INDEX_AC == INDEX_L3)
2021 g_cpucache_up = PARTIAL_L3;
2022 else
2023 g_cpucache_up = PARTIAL_AC;
2024 } else {
2025 cachep->array[smp_processor_id()] =
2026 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2027
2028 if (g_cpucache_up == PARTIAL_AC) {
2029 set_up_list3s(cachep, SIZE_L3);
2030 g_cpucache_up = PARTIAL_L3;
2031 } else {
2032 int node;
2033 for_each_online_node(node) {
2034
2035 cachep->nodelists[node] =
2036 kmalloc_node(sizeof
2037 (struct kmem_list3),
2038 GFP_KERNEL, node);
2039 BUG_ON(!cachep->nodelists[node]);
2040 kmem_list3_init(cachep->
2041 nodelists[node]);
2042 }
2043 }
2044 }
2045 cachep->nodelists[numa_node_id()]->next_reap =
2046 jiffies + REAPTIMEOUT_LIST3 +
2047 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2048
2049 BUG_ON(!cpu_cache_get(cachep));
2050 cpu_cache_get(cachep)->avail = 0;
2051 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2052 cpu_cache_get(cachep)->batchcount = 1;
2053 cpu_cache_get(cachep)->touched = 0;
2054 cachep->batchcount = 1;
2055 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2056 }
2057 2058
2058 /* cache setup completed, link it into the list */ 2059 /* cache setup completed, link it into the list */
2059 list_add(&cachep->next, &cache_chain); 2060 list_add(&cachep->next, &cache_chain);