author		Alexey Dobriyan <adobriyan@gmail.com>	2018-04-05 19:21:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:36:24 -0400
commit		be4a7988b35db9e6f95dca818d5e94785840fb58 (patch)
tree		d6e19b6f6472fa8ccb6359bf04ea3f729d83c90c
parent		0293d1fdd677a09b816df0c7bfe8f60d1b9b956f (diff)
kasan: make kasan_cache_create() work with 32-bit slab cache sizes
If SLAB doesn't support 4GB+ kmem caches (it never did), KASAN
shouldn't either.

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
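To make the bound concrete: slab cache sizes are clamped at KMALLOC_MAX_SIZE, which is far below 4GB, so a 32-bit unsigned int can hold any legal cache size. A minimal standalone sketch of that invariant, under an assumed illustrative cap (KMALLOC_MAX_SIZE_DEMO is a stand-in, not the kernel's arch-dependent constant):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's arch-dependent KMALLOC_MAX_SIZE. */
#define KMALLOC_MAX_SIZE_DEMO	(1UL << 25)

int main(void)
{
	/* Every legal slab cache size fits in unsigned int, so the
	 * 64-bit reach of size_t is never used for cache sizes. */
	assert(KMALLOC_MAX_SIZE_DEMO <= UINT_MAX);
	printf("cap %lu fits in unsigned int (max %u)\n",
	       KMALLOC_MAX_SIZE_DEMO, UINT_MAX);
	return 0;
}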
-rw-r--r--	include/linux/kasan.h	4
-rw-r--r--	mm/kasan/kasan.c	12
-rw-r--r--	mm/slab.c	2
-rw-r--r--	mm/slub.c	2
4 files changed, 10 insertions, 10 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d6459bd1376d..de784fd11d12 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index e13d911251e7..f7a5e1d1ba87 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-	int rz =
+	return
 		object_size <= 64 - 16   ? 16 :
 		object_size <= 128 - 32  ? 32 :
 		object_size <= 512 - 64  ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t object_size)
 		object_size <= (1 << 14) - 256  ? 256 :
 		object_size <= (1 << 15) - 512  ? 512 :
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
-	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
-					optimal_redzone(cache->object_size)));
+	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+			max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
 
 	/*
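To see how the rewritten helpers behave, here is a hedged userspace sketch of the adaptive redzone policy and the final min_t()/max() clamp, with every size held in unsigned int as in the patch. The 4096 - 128 tier sits in the context elided between the two hunks and is reconstructed here as an assumption; KMALLOC_MAX_SIZE_DEMO and clamp_cache_size() are illustrative names, not kernel API:

#include <stdio.h>

#define KMALLOC_MAX_SIZE_DEMO	(1U << 22)	/* assumed cap for the demo */

/* Adaptive redzone policy from the hunk above: larger objects get
 * larger redzones.  Returns the redzone size in bytes. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16   ? 16 :
		object_size <= 128 - 32  ? 32 :
		object_size <= 512 - 64  ? 64 :
		object_size <= 4096 - 128 ? 128 :	/* assumed elided tier */
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

/* Open-coded equivalent of the min_t()/max() clamp in the last hunk:
 * grow size to fit the object plus its redzone, then cap at the
 * kmalloc maximum. */
static unsigned int clamp_cache_size(unsigned int size,
				     unsigned int object_size)
{
	unsigned int want = object_size + optimal_redzone(object_size);

	if (size < want)
		size = want;
	if (size > KMALLOC_MAX_SIZE_DEMO)
		size = KMALLOC_MAX_SIZE_DEMO;
	return size;
}

int main(void)
{
	for (unsigned int sz = 16; sz <= (1U << 16); sz <<= 2)
		printf("object %6u: redzone %4u, padded size %6u\n",
		       sz, optimal_redzone(sz), clamp_cache_size(sz, sz));
	return 0;
}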
diff --git a/mm/slab.c b/mm/slab.c
index 063a02d79c8e..fb106e8277b7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
diff --git a/mm/slub.c b/mm/slub.c
index b4a739f8f84d..dfead847961c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_cache *s)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
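A note on the min() -> min_t() switch in kasan_cache_create() above: KMALLOC_MAX_SIZE is defined with a 1UL shift and so has type unsigned long, and the kernel's min() macro deliberately refuses to compare operands of mismatched types, so once *size becomes unsigned int the clamp needs min_t(), which casts both sides to the named type first. The macros below are simplified GCC statement-expression sketches of the real ones in include/linux/kernel.h, kept only to show that build-time type check:

#include <stdio.h>

/* Simplified sketches; the kernel's versions use stricter machinery. */
#define min(x, y) ({					\
	typeof(x) _x = (x);				\
	typeof(y) _y = (y);				\
	(void)(&_x == &_y);	/* warns if the two types differ */ \
	_x < _y ? _x : _y; })

#define min_t(type, x, y) ({				\
	type _tx = (x);					\
	type _ty = (y);					\
	_tx < _ty ? _tx : _ty; })

int main(void)
{
	unsigned long cap = 1UL << 22;	/* stands in for KMALLOC_MAX_SIZE */
	unsigned int size = 5000000;

	/* min(cap, size) would trigger a comparison-of-distinct-pointer-
	 * types warning; min_t() casts both operands to unsigned int. */
	printf("clamped: %u\n", min_t(unsigned int, cap, size));
	return 0;
}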