author	Alexander Potapenko <glider@google.com>	2016-07-28 18:49:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit	80a9201a5965f4715d5c09790862e0df84ce0614 (patch)
tree	e05d44a7f9ef0d81328dc4bd70f23656fc279bc9
parent	c146a2b98eb5898eb0fab15a332257a4102ecae9 (diff)
mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - change the freelist hook so that parts of the freelist can be put
   into the quarantine.

[aryabinin@virtuozzo.com: fixes]
Link: http://lkml.kernel.org/r/1468601423-28676-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1468347165-41906-3-git-send-email-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Kuthonuzo Luruo <kuthonuzo.luruo@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/kasan.h	2
-rw-r--r--	include/linux/slab_def.h	3
-rw-r--r--	include/linux/slub_def.h	4
-rw-r--r--	lib/Kconfig.kasan	4
-rw-r--r--	mm/kasan/Makefile	3
-rw-r--r--	mm/kasan/kasan.c	63
-rw-r--r--	mm/kasan/kasan.h	3
-rw-r--r--	mm/kasan/report.c	8
-rw-r--r--	mm/slab.h	2
-rw-r--r--	mm/slub.c	57
10 files changed, 93 insertions, 56 deletions
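
Before the per-file diff, a minimal sketch of the quarantine idea the commit message describes: on free, an object is diverted into a FIFO quarantine and only released for reuse later, so stale pointers keep hitting "freed" memory for a while. This is a self-contained userspace toy model, not the kernel code; the names quarantine_put()/quarantine_reduce() echo mm/kasan/quarantine.c, but QUARANTINE_MAX and demo_free_hook() are made up for illustration.

/* quarantine-demo.c: toy model of a free-path quarantine (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

#define QUARANTINE_MAX 4		/* arbitrary flush threshold for the demo */

struct qnode {
	void *object;
	struct qnode *next;
};

static struct qnode *q_head, *q_tail;
static int q_count;

/* Divert a freed object into the quarantine instead of releasing it. */
static void quarantine_put(void *object)
{
	struct qnode *n = malloc(sizeof(*n));

	if (!n) {
		free(object);		/* fall back to an immediate free */
		return;
	}
	n->object = object;
	n->next = NULL;
	if (q_tail)
		q_tail->next = n;
	else
		q_head = n;
	q_tail = n;
	q_count++;
}

/* Release the oldest quarantined objects once the limit is exceeded. */
static void quarantine_reduce(void)
{
	while (q_count > QUARANTINE_MAX) {
		struct qnode *n = q_head;

		q_head = n->next;
		if (!q_head)
			q_tail = NULL;
		q_count--;
		free(n->object);	/* the object only becomes reusable here */
		free(n);
	}
}

/* Free-path hook: every free goes through the quarantine first. */
static void demo_free_hook(void *object)
{
	quarantine_put(object);
	quarantine_reduce();
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		demo_free_hook(malloc(32));
	printf("%d object(s) still held in quarantine\n", q_count);
	return 0;
}

In the patch itself the same ordering concern shows up in slab_free_hook(), which caches get_freepointer() before kasan_slab_free() can reroute the object into the quarantine and change its freelist pointer.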
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ac4b3c46a84d..c9cf374445d8 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -77,6 +77,7 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);
 
 #else /* CONFIG_KASAN */
 
@@ -121,6 +122,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #endif /* CONFIG_KASAN */
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 339ba027ade9..4ad2c5a26399 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -88,7 +88,8 @@ struct kmem_cache {
 };
 
 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
-				void *x) {
+				void *x)
+{
 	void *object = x - (x - page->s_mem) % cache->size;
 	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
 
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cf501cf8e6db..75f56c2ef2d4 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -104,6 +104,10 @@ struct kmem_cache {
 	unsigned int *random_seq;
 #endif
 
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c6838ba9..bd38aab05929 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 1548749a3d45..2976a9ee104f 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6845f9294696..b6f99e81bfeb 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 						KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
-	*flags |= SLAB_KASAN;
+	int orig_size = *size;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
-		max(*size,
-			cache->object_size +
-			optimal_redzone(cache->object_size)));
+
+	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
+
+	/*
+	 * If the metadata doesn't fit, don't enable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+	    *size <= cache->kasan_info.free_meta_offset) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		cache->kasan_info.free_meta_offset = 0;
+		*size = orig_size;
+		return;
+	}
+
+	*flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
 	quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		(cache->kasan_info.free_meta_offset ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -431,16 +442,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +509,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +529,16 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +557,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -576,7 +579,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
@@ -585,7 +587,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb87923552ef..31972cdba433 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -95,7 +95,6 @@ struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 					const void *object);
 
-
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@@ -110,7 +109,7 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b3c122ddd454..861b9776841a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -116,7 +116,6 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
-#ifdef CONFIG_SLAB
 static void print_track(struct kasan_track *track)
 {
 	pr_err("PID = %u\n", track->pid);
@@ -130,8 +129,8 @@ static void print_track(struct kasan_track *track)
 	}
 }
 
-static void object_err(struct kmem_cache *cache, struct page *page,
+static void kasan_object_err(struct kmem_cache *cache, struct page *page,
 			void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	struct kasan_free_meta *free_info;
@@ -162,7 +161,6 @@ static void object_err(struct kmem_cache *cache, struct page *page,
 		break;
 	}
 }
-#endif
 
 static void print_address_description(struct kasan_access_info *info)
 {
@@ -177,7 +175,7 @@ static void print_address_description(struct kasan_access_info *info)
 		struct kmem_cache *cache = page->slab_cache;
 		object = nearest_obj(cache, page,
 				(void *)info->access_addr);
-		object_err(cache, page, object,
+		kasan_object_err(cache, page, object,
 			"kasan: bad access detected");
 		return;
 	}
diff --git a/mm/slab.h b/mm/slab.h
index f33980ab0406..9653f2e2591a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -369,6 +369,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
 # endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
diff --git a/mm/slub.c b/mm/slub.c
index 1cdde1a5ba5f..74e7c8c30db8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -454,8 +454,6 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
 static int slub_debug;
 #endif
@@ -660,6 +658,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
 		print_section("Padding ", p + off, size_from_object(s) - off);
@@ -787,6 +787,8 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (size_from_object(s) == off)
 		return 1;
 
@@ -1322,8 +1324,10 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline void *slab_free_hook(struct kmem_cache *s, void *x)
 {
+	void *freeptr;
+
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1344,7 +1348,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
+	freeptr = get_freepointer(s, x);
+	/*
+	 * kasan_slab_free() may put x into memory quarantine, delaying its
+	 * reuse. In this case the object's freelist pointer is changed.
+	 */
 	kasan_slab_free(s, x);
+	return freeptr;
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
@@ -1362,11 +1372,11 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 
 	void *object = head;
 	void *tail_obj = tail ? : head;
+	void *freeptr;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		freeptr = slab_free_hook(s, object);
+	} while ((object != tail_obj) && (object = freeptr));
 #endif
 }
 
@@ -2878,16 +2888,13 @@ slab_empty:
  * same page) possible by specifying head and tail ptr, plus objects
  * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
-				      void *head, void *tail, int cnt,
-				      unsigned long addr)
+static __always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
 {
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2921,6 +2928,27 @@ redo:
 
 }
 
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				      void *head, void *tail, int cnt,
+				      unsigned long addr)
+{
+	slab_free_freelist_hook(s, head, tail);
+	/*
+	 * slab_free_freelist_hook() could have put the items into quarantine.
+	 * If so, no need to free them.
+	 */
+	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+		return;
+	do_slab_free(s, page, head, tail, cnt, addr);
+}
+
+#ifdef CONFIG_KASAN
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
+{
+	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+}
+#endif
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
@@ -3363,7 +3391,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
@@ -3422,7 +3450,10 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * the object.
 	 */
 	size += 2 * sizeof(struct track);
+#endif
 
+	kasan_cache_create(s, &size, &s->flags);
+#ifdef CONFIG_SLUB_DEBUG
 	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch