author     Aaro Koskinen <aaro.koskinen@nokia.com>	2009-08-28 07:28:54 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>	2009-08-30 07:56:48 -0400
commit     acdfcd04d9df7d084ff752f82afad6ed4ad5f363 (patch)
tree       edee979b012067348978d7ea23a58acd4b532a7d
parent     cf5d11317e8f2671d3115622aec76274a40f4fc2 (diff)
SLUB: fix ARCH_KMALLOC_MINALIGN cases 64 and 256
If the minalign is 64 bytes, then the 96 byte cache should not be created
because it would conflict with the 128 byte cache. If the minalign is 256
bytes, patching the size_index table should not result in a buffer overrun.

The calculation "(i - 1) / 8" used to access size_index[] is moved to a
separate function as suggested by Christoph Lameter.

Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  include/linux/slub_def.h  |  6
-rw-r--r--  mm/slub.c                 | 30
2 files changed, 26 insertions(+), 10 deletions(-)
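For context, here is a minimal userspace sketch (not the kernel code itself; KMALLOC_MIN_SIZE, KMALLOC_SHIFT_LOW and the 24-entry table are hard-coded purely for illustration) of the size_index setup that the mm/slub.c part of this patch hardens. With a hypothetical 256-byte minimum allocation size, the old loop bound would index past the 24-entry table; the bound check below mirrors the fix.

/*
 * Standalone sketch (userspace, not kernel code) of the size_index
 * fix-up loop.  The 24-entry table covers kmalloc requests of 1..192
 * bytes; entry i holds the cache index for sizes (8*i + 1)..(8*i + 8).
 */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical values for illustration; real kernels derive these
 * from ARCH_KMALLOC_MINALIGN. */
#define KMALLOC_MIN_SIZE 256
#define KMALLOC_SHIFT_LOW 8	/* 2^8 = 256 byte minimum cache */

static signed char size_index[24];	/* one entry per 8-byte step up to 192 */

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	size_t i;

	/*
	 * Without the bound check, i would run up to KMALLOC_MIN_SIZE - 8
	 * (248 here), and size_index_elem(248) == 30 would write past the
	 * 24-entry table.  The patched loop breaks out instead.
	 */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= (int)ARRAY_SIZE(size_index)) {
			printf("stopping at i=%zu (elem=%d would overrun)\n",
			       i, elem);
			break;
		}
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}
	return 0;
}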
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4dcbc2c71491..aa5d4a69d461 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -152,12 +152,10 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
-#if KMALLOC_MIN_SIZE <= 64
-	if (size > 64 && size <= 96)
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
 		return 1;
-	if (size > 128 && size <= 192)
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
 		return 2;
-#endif
 	if (size <= 8) return 3;
 	if (size <= 16) return 4;
 	if (size <= 32) return 5;
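For illustration only: a compile-and-run userspace sketch of the bucket selection above, assuming a hypothetical KMALLOC_MIN_SIZE of 64 (so KMALLOC_SHIFT_LOW is 6) and covering only sizes up to 256; the real kmalloc_index() in slub_def.h handles the full size range.

/*
 * Userspace sketch of the bucket selection after this patch
 * (simplified; larger sizes are omitted).
 */
#include <stdio.h>
#include <stddef.h>

#define KMALLOC_MIN_SIZE 64		/* hypothetical minalign of 64 */
#define KMALLOC_SHIFT_LOW 6		/* 2^6 = 64 byte minimum cache */

static int kmalloc_index(size_t size)
{
	if (!size)
		return 0;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	/* the 96 byte cache exists only when the minimum size is <= 32 */
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	/* the 192 byte cache exists only when the minimum size is <= 64 */
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8)   return 3;
	if (size <= 16)  return 4;
	if (size <= 32)  return 5;
	if (size <= 64)  return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	return -1;	/* larger sizes omitted in this sketch */
}

int main(void)
{
	/* With a 64-byte minimum, a 96-byte request falls back to the
	 * 128-byte cache (index 7) instead of the removed 96-byte one. */
	printf("kmalloc_index(96)  = %d\n", kmalloc_index(96));   /* 7 */
	printf("kmalloc_index(192) = %d\n", kmalloc_index(192));  /* 2 */
	return 0;
}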
diff --git a/mm/slub.c b/mm/slub.c
index e16c9fb1f48b..be493bd63c31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2825,6 +2825,11 @@ static s8 size_index[24] = {
 	2	/* 192 */
 };
 
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index;
@@ -2833,7 +2838,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-		index = size_index[(size - 1) / 8];
+		index = size_index[size_index_elem(size)];
 	} else
 		index = fls(size - 1);
 
@@ -3188,10 +3193,12 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 64) {
+	if (KMALLOC_MIN_SIZE <= 32) {
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
@@ -3218,17 +3225,28 @@ void __init kmem_cache_init(void)
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
 
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
-		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
 
-	if (KMALLOC_MIN_SIZE == 128) {
+	if (KMALLOC_MIN_SIZE == 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+	} else if (KMALLOC_MIN_SIZE == 128) {
 		/*
 		 * The 192 byte sized cache is not used if the alignment
 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
 		 * instead.
 		 */
 		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[(i - 1) / 8] = 8;
+			size_index[size_index_elem(i)] = 8;
 	}
 
 	slab_state = UP;
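And a similar sketch of the kmem_cache_init() fix-ups above for the minalign-64 case: the initial table values are copied from the size_index[] array in mm/slub.c of this era (an assumption worth checking against the tree), and the two loops mirror the patched code, so kmalloc sizes 65..96 end up in the 128-byte cache (index 7).

/*
 * Userspace sketch of the size_index fix-ups for a hypothetical
 * KMALLOC_MIN_SIZE of 64.  Table contents mirror the size_index[]
 * array in mm/slub.c of this kernel version.
 */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define KMALLOC_MIN_SIZE 64
#define KMALLOC_SHIFT_LOW 6

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/*   8 ..  64 */
	1, 1, 1, 1, 7, 7, 7, 7,		/*  72 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2,		/* 136 .. 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	size_t i;

	/* Sizes below the minimum all round up to the 64-byte cache. */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= (int)ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	/* The 96-byte cache is never created with 64-byte alignment,
	 * so redirect 65..96 byte requests to the 128-byte cache. */
	for (i = 64 + 8; i <= 96; i += 8)
		size_index[size_index_elem(i)] = 7;

	for (i = 8; i <= 192; i += 8)
		printf("size %3zu -> kmalloc cache index %d\n",
		       i, size_index[size_index_elem(i)]);
	return 0;
}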