Diffstat:

 Documentation/kasan.txt  |   5
 include/linux/kasan.h    |  12
 include/linux/slab.h     |   6
 include/linux/slab_def.h |  14
 include/linux/slub_def.h |  11
 lib/Kconfig.kasan        |   4
 mm/Makefile              |   1
 mm/kasan/kasan.c         | 102
 mm/kasan/kasan.h         |  34
 mm/kasan/report.c        |  54
 mm/slab.c                |  43
 mm/slab_common.c         |   2
 12 files changed, 266 insertions(+), 22 deletions(-)
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index aa1e0c91e368..7dd95b35cd7c 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -12,8 +12,7 @@ KASAN uses compile-time instrumentation for checking every memory access,
 therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
 required for detection of out-of-bounds accesses to stack or global variables.
 
-Currently KASAN is supported only for x86_64 architecture and requires the
-kernel to be built with the SLUB allocator.
+Currently KASAN is supported only for x86_64 architecture.
 
 1. Usage
 ========
@@ -27,7 +26,7 @@ inline are compiler instrumentation types. The former produces smaller binary
 the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
 version 5.0 or later.
 
-Currently KASAN works only with the SLUB memory allocator.
+KASAN works with both SLUB and SLAB memory allocators.
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
 
 To disable instrumentation for specific files or directories, add a line
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0fdc798e3ff7..839f2007a0f9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -48,6 +48,9 @@ void kasan_unpoison_task_stack(struct task_struct *task);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags);
+
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
@@ -61,6 +64,11 @@ void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+};
+
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
@@ -76,6 +84,10 @@ static inline void kasan_disable_current(void) {}
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      size_t *size,
+				      unsigned long *flags) {}
+
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e4b568738ca3..aa61595a1482 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -92,6 +92,12 @@
 # define SLAB_ACCOUNT		0x00000000UL
 #endif
 
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN		0x08000000UL
+#else
+#define SLAB_KASAN		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e878ba35ae91..9edbbf352340 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -76,8 +76,22 @@ struct kmem_cache {
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page->s_mem) % cache->size;
+	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif	/* _LINUX_SLAB_DEF_H */
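
The arithmetic in nearest_obj() above is worth unpacking: subtracting (x - page->s_mem) % cache->size rounds an arbitrary interior pointer down to the start of the object containing it, and the last_object clamp catches pointers that land in the slab's trailing padding. A minimal standalone sketch of the same rounding (userspace C with made-up slab geometry; s_mem, size and num stand in for the page and kmem_cache fields):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	char slab[1024];		/* pretend slab memory */
	char *s_mem = slab;		/* first object, as in page->s_mem */
	size_t size = 256;		/* cache->size: object stride incl. metadata */
	unsigned int num = 3;		/* cache->num: objects per slab */

	char *x = s_mem + 700;		/* interior pointer, 188 bytes into object 2 */
	char *object = x - (x - s_mem) % size;
	char *last_object = s_mem + (num - 1) * size;

	if (object > last_object)	/* x pointed past the last object */
		object = last_object;

	printf("object index = %zu\n", (size_t)(object - s_mem) / size); /* prints 2 */
	return 0;
}
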
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ac5143f95ee6..665cd0cd18b8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -130,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page_address(page)) % cache->size;
+	void *last_object = page_address(page) +
+		(page->objects - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 0fee5acd5aa0..0e4d2b3b0aee 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG
+	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
@@ -16,6 +16,8 @@ config KASAN
 	  This feature consumes about 1/8 of available memory and brings about
 	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE.
+	  Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
+	  (the resulting kernel does not boot).
 
 choice
 	prompt "Instrumentation type"
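
Since KASAN can now be selected alongside SLAB, a hypothetical .config fragment for that combination would look like this (CONFIG_KASAN_OUTLINE/CONFIG_KASAN_INLINE come from the existing "Instrumentation type" choice shown above; DEBUG_SLAB must stay off per the new help text):

# Outline instrumentation works with GCC 4.9.2+; inline needs GCC 5.0+.
CONFIG_SLAB=y
CONFIG_KASAN=y
CONFIG_KASAN_OUTLINE=y
# CONFIG_DEBUG_SLAB must stay unset: the combination does not boot.
# CONFIG_DEBUG_SLAB is not set
CONFIG_STACKTRACE=y
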
diff --git a/mm/Makefile b/mm/Makefile
index f5e797cbd128..deb467edca2d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -3,6 +3,7 @@
 #
 
 KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slab.o := n
 KASAN_SANITIZE_slub.o := n
 
 # These files are disabled because they produce non-interesting and/or
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 1ad20ade8c91..7c82509ef169 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -334,6 +334,59 @@ void kasan_free_pages(struct page *page, unsigned int order)
 		     KASAN_FREE_PAGE);
 }
 
+#ifdef CONFIG_SLAB
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static size_t optimal_redzone(size_t object_size)
+{
+	int rz =
+		object_size <= 64        - 16   ? 16 :
+		object_size <= 128       - 32   ? 32 :
+		object_size <= 512       - 64   ? 64 :
+		object_size <= 4096      - 128  ? 128 :
+		object_size <= (1 << 14) - 256  ? 256 :
+		object_size <= (1 << 15) - 512  ? 512 :
+		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+	return rz;
+}
+
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags)
+{
+	int redzone_adjust;
+	/* Make sure the adjusted size is still less than
+	 * KMALLOC_MAX_CACHE_SIZE.
+	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
+	 * to skip it for SLUB when it starts using kasan_cache_create().
+	 */
+	if (*size > KMALLOC_MAX_CACHE_SIZE -
+	    sizeof(struct kasan_alloc_meta) -
+	    sizeof(struct kasan_free_meta))
+		return;
+	*flags |= SLAB_KASAN;
+	/* Add alloc meta. */
+	cache->kasan_info.alloc_meta_offset = *size;
+	*size += sizeof(struct kasan_alloc_meta);
+
+	/* Add free meta. */
+	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+	    cache->object_size < sizeof(struct kasan_free_meta)) {
+		cache->kasan_info.free_meta_offset = *size;
+		*size += sizeof(struct kasan_free_meta);
+	}
+	redzone_adjust = optimal_redzone(cache->object_size) -
+			 (*size - cache->object_size);
+	if (redzone_adjust > 0)
+		*size += redzone_adjust;
+	*size = min(KMALLOC_MAX_CACHE_SIZE,
+		    max(*size,
+			cache->object_size +
+			optimal_redzone(cache->object_size)));
+}
+#endif
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -351,8 +404,36 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+		alloc_info->state = KASAN_STATE_INIT;
+	}
+#endif
+}
+
+static inline void set_track(struct kasan_track *track)
+{
+	track->cpu = raw_smp_processor_id();
+	track->pid = current->pid;
+	track->when = jiffies;
 }
 
+#ifdef CONFIG_SLAB
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object)
+{
+	return (void *)object + cache->kasan_info.alloc_meta_offset;
+}
+
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+				      const void *object)
+{
+	return (void *)object + cache->kasan_info.free_meta_offset;
+}
+#endif
+
 void kasan_slab_alloc(struct kmem_cache *cache, void *object)
 {
 	kasan_kmalloc(cache, object, cache->object_size);
@@ -367,6 +448,17 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return;
 
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_free_meta *free_info =
+			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+		alloc_info->state = KASAN_STATE_FREE;
+		set_track(&free_info->track);
+	}
+#endif
+
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
@@ -386,6 +478,16 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+
+		alloc_info->state = KASAN_STATE_ALLOC;
+		alloc_info->alloc_size = size;
+		set_track(&alloc_info->track);
+	}
+#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
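
To make the adaptive redzone policy concrete, here is a standalone userspace sketch (a hypothetical test harness; the ladder is copied verbatim from optimal_redzone() above) showing what a few object sizes get. A 100-byte object, for instance, no longer fits the 128 - 32 bucket, so it is given a 64-byte redzone budget:

#include <stdio.h>
#include <stddef.h>

static size_t optimal_redzone(size_t object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	size_t sizes[] = { 16, 48, 100, 1000, 5000, 70000 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object_size=%6zu -> redzone=%4zu\n",
		       sizes[i], optimal_redzone(sizes[i]));
	/* prints 16, 16, 64, 128, 256 and 2048 respectively */
	return 0;
}
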
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 4f6c62e5c21e..7b9e4ab9b66b 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -54,6 +54,40 @@ struct kasan_global {
 #endif
 };
 
+/**
+ * Structures to keep alloc and free tracks *
+ */
+
+enum kasan_state {
+	KASAN_STATE_INIT,
+	KASAN_STATE_ALLOC,
+	KASAN_STATE_FREE
+};
+
+struct kasan_track {
+	u64 cpu : 6;			/* for NR_CPUS = 64 */
+	u64 pid : 16;			/* 65536 processes */
+	u64 when : 42;			/* ~140 years */
+};
+
+struct kasan_alloc_meta {
+	u32 state : 2;	/* enum kasan_state */
+	u32 alloc_size : 30;
+	struct kasan_track track;
+};
+
+struct kasan_free_meta {
+	/* Allocator freelist pointer, unused by KASAN. */
+	void **freelist;
+	struct kasan_track track;
+};
+
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object);
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+					const void *object);
+
+
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
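
The bitfields above are sized so the tracking metadata stays cheap: kasan_track packs CPU, PID and a jiffies timestamp into a single u64, and kasan_alloc_meta folds the state and a 30-bit allocation size into one u32. A standalone sketch checking the resulting layout (userspace C; assumes an LP64 target with GCC-style bitfield packing, as the kernel uses):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

struct kasan_track {
	u64 cpu : 6;		/* for NR_CPUS = 64 */
	u64 pid : 16;		/* 65536 processes */
	u64 when : 42;		/* jiffies, ~140 years */
};

struct kasan_alloc_meta {
	u32 state : 2;		/* enum kasan_state */
	u32 alloc_size : 30;
	struct kasan_track track;
};

struct kasan_free_meta {
	void **freelist;	/* allocator freelist pointer, unused by KASAN */
	struct kasan_track track;
};

int main(void)
{
	/* 6 + 16 + 42 = 64 bits exactly, so no padding inside the track. */
	printf("kasan_track:      %zu\n", sizeof(struct kasan_track));	    /* 8  */
	printf("kasan_alloc_meta: %zu\n", sizeof(struct kasan_alloc_meta)); /* 16 */
	printf("kasan_free_meta:  %zu\n", sizeof(struct kasan_free_meta));  /* 16 */
	return 0;
}
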
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 745aa8f36028..3e3385cc97ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -115,6 +115,46 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
+#ifdef CONFIG_SLAB
+static void print_track(struct kasan_track *track)
+{
+	pr_err("PID = %u, CPU = %u, timestamp = %lu\n", track->pid,
+	       track->cpu, (unsigned long)track->when);
+}
+
+static void object_err(struct kmem_cache *cache, struct page *page,
+			void *object, char *unused_reason)
+{
+	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
+	struct kasan_free_meta *free_info;
+
+	dump_stack();
+	pr_err("Object at %p, in cache %s\n", object, cache->name);
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+	switch (alloc_info->state) {
+	case KASAN_STATE_INIT:
+		pr_err("Object not allocated yet\n");
+		break;
+	case KASAN_STATE_ALLOC:
+		pr_err("Object allocated with size %u bytes.\n",
+		       alloc_info->alloc_size);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		break;
+	case KASAN_STATE_FREE:
+		pr_err("Object freed, allocated with size %u bytes\n",
+		       alloc_info->alloc_size);
+		free_info = get_free_info(cache, object);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		pr_err("Deallocation:\n");
+		print_track(&free_info->track);
+		break;
+	}
+}
+#endif
+
 static void print_address_description(struct kasan_access_info *info)
 {
 	const void *addr = info->access_addr;
@@ -126,17 +166,10 @@ static void print_address_description(struct kasan_access_info *info)
 	if (PageSlab(page)) {
 		void *object;
 		struct kmem_cache *cache = page->slab_cache;
-		void *last_object;
-
-		object = virt_to_obj(cache, page_address(page), addr);
-		last_object = page_address(page) +
-			page->objects * cache->size;
-
-		if (unlikely(object > last_object))
-			object = last_object; /* we hit into padding */
-
+		object = nearest_obj(cache, page,
+				(void *)info->access_addr);
 		object_err(cache, page, object,
 			"kasan: bad access detected");
 		return;
 	}
 	dump_page(page, "kasan: bad access detected");
@@ -146,7 +179,6 @@ static void print_address_description(struct kasan_access_info *info)
 		if (!init_task_stack_addr(addr))
 			pr_err("Address belongs to variable %pS\n", addr);
 	}
-
 	dump_stack();
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index e719a5cb3396..7515578471d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2086,6 +2086,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	}
 #endif
 
+	kasan_cache_create(cachep, &size, &flags);
+
 	size = ALIGN(size, cachep->align);
 	/*
 	 * We should restrict the number of objects in a slab to implement
@@ -2387,8 +2389,13 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 	 * cache which they are a constructor for. Otherwise, deadlock.
 	 * They must also be threaded.
 	 */
-	if (cachep->ctor && !(cachep->flags & SLAB_POISON))
+	if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
+		kasan_unpoison_object_data(cachep,
+					   objp + obj_offset(cachep));
 		cachep->ctor(objp + obj_offset(cachep));
+		kasan_poison_object_data(
+			cachep, objp + obj_offset(cachep));
+	}
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2409,6 +2416,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 			struct page *page)
 {
 	int i;
+	void *objp;
 
 	cache_init_objs_debug(cachep, page);
 
@@ -2419,8 +2427,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 	for (i = 0; i < cachep->num; i++) {
 		/* constructor could break poison info */
-		if (DEBUG == 0 && cachep->ctor)
-			cachep->ctor(index_to_obj(cachep, page, i));
+		if (DEBUG == 0 && cachep->ctor) {
+			objp = index_to_obj(cachep, page, i);
+			kasan_unpoison_object_data(cachep, objp);
+			cachep->ctor(objp);
+			kasan_poison_object_data(cachep, objp);
+		}
 
 		set_free_obj(page, i, i);
 	}
@@ -2550,6 +2562,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	slab_map_pages(cachep, page, freelist);
 
+	kasan_poison_slab(page);
 	cache_init_objs(cachep, page);
 
 	if (gfpflags_allow_blocking(local_flags))
@@ -3316,6 +3329,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
+	kasan_slab_free(cachep, objp);
+
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
@@ -3363,6 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
+	kasan_slab_alloc(cachep, ret);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3428,6 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3451,6 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
+	kasan_slab_alloc(cachep, ret);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3468,7 +3486,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3481,11 +3499,15 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	kasan_kmalloc(cachep, ret, size);
+
+	return ret;
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3519,6 +3541,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4290,10 +4313,18 @@ module_init(slab_proc_init);
  */
 size_t ksize(const void *objp)
 {
+	size_t size;
+
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	return virt_to_cache(objp)->object_size;
+	size = virt_to_cache(objp)->object_size;
+	/* We assume that ksize callers could use the whole allocated area,
+	 * so we need to unpoison this area.
+	 */
+	kasan_krealloc(objp, size);
+
+	return size;
 }
 EXPORT_SYMBOL(ksize);
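
The ksize() hunk encodes a subtle contract: a kmalloc() object is carved from the next-larger kmalloc cache, kasan_kmalloc() poisons the slack beyond the requested size, and a caller that asks ksize() is entitled to use the full object, so the function now unpoisons up to object_size via kasan_krealloc(). A hypothetical caller illustrating the point (kernel-style snippet, not from the patch; the 32-byte cache is an assumption about the smallest SLAB kmalloc size):

#include <linux/slab.h>
#include <linux/string.h>

static void ksize_example(void)
{
	char *p = kmalloc(5, GFP_KERNEL);	/* served from, e.g., the 32-byte cache */
	size_t n;

	if (!p)
		return;
	/* Bytes 5..31 are redzone here; touching them would trigger a report. */
	n = ksize(p);		/* returns 32 and unpoisons bytes 5..31 */
	memset(p, 0xff, n);	/* now fine: the whole area is usable */
	kfree(p);
}
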
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b2e379639a5b..4de72e220c82 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -35,7 +35,7 @@ struct kmem_cache *kmem_cache;
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
+		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 		SLAB_NOTRACK | SLAB_ACCOUNT)