author    Joonsoo Kim <iamjoonsoo.kim@lge.com>           2016-03-15 17:54:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-03-15 19:55:16 -0400
commit    158e319bba59e890c3920ce6d827c188287bae84 (patch)
tree      3380225c70f1621a86fd469b23687f4758d8559a /mm/slab.c
parent    832a15d209cd260180407bde1af18965b21623f3 (diff)
mm/slab: clean up cache type determination
The current cache type determination code is open-coded and hard to understand. A following patch will introduce one more cache type, which would make the code even more complex. So, before that happens, abstract this code into helper functions.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
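To make the refactored control flow easier to follow before reading the diff, here is a minimal user-space sketch of the decision logic the patch factors out of __kmem_cache_create(). The helper names mirror the patch, but everything else is a simplification for illustration: the struct fields, the OFF_SLAB_MIN_SIZE value, the single-page calculate_slab_order() stub, and the omitted SLAB_NOLEAKTRACE check are stand-ins, not the kernel's real implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned short freelist_idx_t;

/* Simplified stand-in for the kernel's cache descriptor. */
struct kmem_cache {
        unsigned int num;        /* objects per slab */
        size_t colour;           /* number of usable colour offsets */
        size_t colour_off;       /* colour granularity (cache line size) */
        size_t freelist_size;
};

#define OFF_SLAB_MIN_SIZE 128U   /* placeholder threshold, not the kernel value */
#define CFLGS_OFF_SLAB    0x1UL

static bool slab_early_init;     /* false once bootstrap has finished */

/*
 * Toy order calculation: pretend every slab is a single 4 KiB page and an
 * on-slab freelist costs one index per object. Returns leftover bytes.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
                                   size_t size, unsigned long flags)
{
        size_t slab_size = 4096;
        size_t mgmt = (flags & CFLGS_OFF_SLAB) ?
                        0 : sizeof(freelist_idx_t) * (slab_size / size);

        cachep->num = (slab_size - mgmt) / size;
        return slab_size - mgmt - cachep->num * size;
}

/* Try off-slab management first; refuse when on-slab would be free anyway. */
static bool set_off_slab_cache(struct kmem_cache *cachep,
                               size_t size, unsigned long flags)
{
        size_t left;

        cachep->num = 0;
        if (size < OFF_SLAB_MIN_SIZE || slab_early_init)
                return false;

        left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
        if (!cachep->num)
                return false;

        /* Enough leftover to host the freelist on-slab? Then stay on-slab. */
        if (left >= cachep->num * sizeof(freelist_idx_t))
                return false;

        cachep->colour = left / cachep->colour_off;
        return true;
}

/* Fallback: plain on-slab management. */
static bool set_on_slab_cache(struct kmem_cache *cachep,
                              size_t size, unsigned long flags)
{
        size_t left;

        cachep->num = 0;
        left = calculate_slab_order(cachep, size, flags);
        if (!cachep->num)
                return false;

        cachep->colour = left / cachep->colour_off;
        return true;
}

int main(void)
{
        struct kmem_cache c = { .colour_off = 64 };
        size_t size = 256;
        unsigned long flags = 0;

        /* The refactored flow: try off-slab, fall back to on-slab. */
        if (set_off_slab_cache(&c, size, flags))
                flags |= CFLGS_OFF_SLAB;
        else if (!set_on_slab_cache(&c, size, flags))
                return 1;        /* the kernel returns -E2BIG here */

        c.freelist_size = c.num * sizeof(freelist_idx_t);
        printf("num=%u colour=%zu off-slab=%d\n",
               c.num, c.colour, !!(flags & CFLGS_OFF_SLAB));
        return 0;
}

With size = 256, the off-slab attempt leaves 0 bytes over, less than the 32-byte freelist, so the sketch settles on off-slab management. This mirrors the trade made by set_off_slab_cache() in the patch: off-slab is only worth it when the leftover space could not have hosted the freelist for free.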
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 105
1 file changed, 71 insertions(+), 34 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d5dffc806f82..9b56685fb79b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2023,6 +2023,64 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
         return cachep;
 }
 
+static bool set_off_slab_cache(struct kmem_cache *cachep,
+                        size_t size, unsigned long flags)
+{
+        size_t left;
+
+        cachep->num = 0;
+
+        /*
+         * Determine if the slab management is 'on' or 'off' slab.
+         * (bootstrapping cannot cope with offslab caches so don't do
+         * it too early on. Always use on-slab management when
+         * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
+         */
+        if (size < OFF_SLAB_MIN_SIZE)
+                return false;
+
+        if (slab_early_init)
+                return false;
+
+        if (flags & SLAB_NOLEAKTRACE)
+                return false;
+
+        /*
+         * Size is large, assume best to place the slab management obj
+         * off-slab (should allow better packing of objs).
+         */
+        left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
+        if (!cachep->num)
+                return false;
+
+        /*
+         * If the slab has been placed off-slab, and we have enough space then
+         * move it on-slab. This is at the expense of any extra colouring.
+         */
+        if (left >= cachep->num * sizeof(freelist_idx_t))
+                return false;
+
+        cachep->colour = left / cachep->colour_off;
+
+        return true;
+}
+
+static bool set_on_slab_cache(struct kmem_cache *cachep,
+                        size_t size, unsigned long flags)
+{
+        size_t left;
+
+        cachep->num = 0;
+
+        left = calculate_slab_order(cachep, size, flags);
+        if (!cachep->num)
+                return false;
+
+        cachep->colour = left / cachep->colour_off;
+
+        return true;
+}
+
 /**
  * __kmem_cache_create - Create a cache.
  * @cachep: cache management descriptor
@@ -2047,7 +2105,6 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-        size_t left_over, freelist_size;
         size_t ralign = BYTES_PER_WORD;
         gfp_t gfp;
         int err;
@@ -2098,6 +2155,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
          * 4) Store it.
          */
         cachep->align = ralign;
+        cachep->colour_off = cache_line_size();
+        /* Offset must be a multiple of the alignment. */
+        if (cachep->colour_off < cachep->align)
+                cachep->colour_off = cachep->align;
 
         if (slab_is_available())
                 gfp = GFP_KERNEL;
@@ -2152,43 +2213,18 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         }
 #endif
 
-        /*
-         * Determine if the slab management is 'on' or 'off' slab.
-         * (bootstrapping cannot cope with offslab caches so don't do
-         * it too early on. Always use on-slab management when
-         * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
-         */
-        if (size >= OFF_SLAB_MIN_SIZE && !slab_early_init &&
-            !(flags & SLAB_NOLEAKTRACE)) {
-                /*
-                 * Size is large, assume best to place the slab management obj
-                 * off-slab (should allow better packing of objs).
-                 */
+        if (set_off_slab_cache(cachep, size, flags)) {
                 flags |= CFLGS_OFF_SLAB;
+                goto done;
         }
 
-        left_over = calculate_slab_order(cachep, size, flags);
-
-        if (!cachep->num)
-                return -E2BIG;
-
-        freelist_size = cachep->num * sizeof(freelist_idx_t);
+        if (set_on_slab_cache(cachep, size, flags))
+                goto done;
 
-        /*
-         * If the slab has been placed off-slab, and we have enough space then
-         * move it on-slab. This is at the expense of any extra colouring.
-         */
-        if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
-                flags &= ~CFLGS_OFF_SLAB;
-                left_over -= freelist_size;
-        }
+        return -E2BIG;
 
-        cachep->colour_off = cache_line_size();
-        /* Offset must be a multiple of the alignment. */
-        if (cachep->colour_off < cachep->align)
-                cachep->colour_off = cachep->align;
-        cachep->colour = left_over / cachep->colour_off;
-        cachep->freelist_size = freelist_size;
+done:
+        cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
         cachep->flags = flags;
         cachep->allocflags = __GFP_COMP;
         if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
@@ -2209,7 +2245,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 #endif
 
         if (OFF_SLAB(cachep)) {
-                cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
+                cachep->freelist_cache =
+                        kmalloc_slab(cachep->freelist_size, 0u);
                 /*
                  * This is a possibility for one of the kmalloc_{dma,}_caches.
                  * But since we go off slab only for object size greater than