author    Alexander Potapenko <glider@google.com>  2016-03-25 17:21:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-03-25 19:37:42 -0400
commit    7ed2f9e663854db313f177a511145630e398b402 (patch)
tree      f9dfba81a688864a4d78689470f624b0a482f545 /mm/kasan
parent    e6e8379c876de16c6b78f83b15d5ac32c79cb440 (diff)
mm, kasan: SLAB support
Add KASAN hooks to SLAB allocator.

This patch is based on the "mm: kasan: unified support for SLUB and
SLAB allocators" patch originally prepared by Dmitry Chernenkov.

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/kasan.c  | 102
-rw-r--r--  mm/kasan/kasan.h  |  34
-rw-r--r--  mm/kasan/report.c |  54
3 files changed, 179 insertions, 11 deletions
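
For orientation before the diff: a rough sketch (not part of the patch) of the slab object layout that kasan_cache_create() below sets up. The offsets and the condition for the optional free meta come from the hunks that follow; the drawing itself is illustrative.

/*
 * [ object payload          ]  <- object_size bytes
 * [ struct kasan_alloc_meta ]  <- kasan_info.alloc_meta_offset
 * [ struct kasan_free_meta  ]  <- kasan_info.free_meta_offset; appended only
 *                                 when the freed object cannot hold it
 *                                 in-place (RCU cache, ctor, or object
 *                                 smaller than the struct)
 * [ redzone padding         ]  <- grown until the total redzone reaches
 *                                 optimal_redzone(object_size)
 */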
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 1ad20ade8c91..7c82509ef169 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -334,6 +334,59 @@ void kasan_free_pages(struct page *page, unsigned int order)
 					KASAN_FREE_PAGE);
 }
 
+#ifdef CONFIG_SLAB
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static size_t optimal_redzone(size_t object_size)
+{
+	int rz =
+		object_size <= 64        - 16   ? 16 :
+		object_size <= 128       - 32   ? 32 :
+		object_size <= 512       - 64   ? 64 :
+		object_size <= 4096      - 128  ? 128 :
+		object_size <= (1 << 14) - 256  ? 256 :
+		object_size <= (1 << 15) - 512  ? 512 :
+		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+	return rz;
+}
+
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags)
+{
+	int redzone_adjust;
+	/* Make sure the adjusted size is still less than
+	 * KMALLOC_MAX_CACHE_SIZE.
+	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
+	 * to skip it for SLUB when it starts using kasan_cache_create().
+	 */
+	if (*size > KMALLOC_MAX_CACHE_SIZE -
+	    sizeof(struct kasan_alloc_meta) -
+	    sizeof(struct kasan_free_meta))
+		return;
+	*flags |= SLAB_KASAN;
+	/* Add alloc meta. */
+	cache->kasan_info.alloc_meta_offset = *size;
+	*size += sizeof(struct kasan_alloc_meta);
+
+	/* Add free meta. */
+	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+	    cache->object_size < sizeof(struct kasan_free_meta)) {
+		cache->kasan_info.free_meta_offset = *size;
+		*size += sizeof(struct kasan_free_meta);
+	}
+	redzone_adjust = optimal_redzone(cache->object_size) -
+		(*size - cache->object_size);
+	if (redzone_adjust > 0)
+		*size += redzone_adjust;
+	*size = min(KMALLOC_MAX_CACHE_SIZE,
+		    max(*size,
+			cache->object_size +
+			optimal_redzone(cache->object_size)));
+}
+#endif
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -351,8 +404,36 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+		alloc_info->state = KASAN_STATE_INIT;
+	}
+#endif
+}
+
+static inline void set_track(struct kasan_track *track)
+{
+	track->cpu = raw_smp_processor_id();
+	track->pid = current->pid;
+	track->when = jiffies;
 }
 
+#ifdef CONFIG_SLAB
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object)
+{
+	return (void *)object + cache->kasan_info.alloc_meta_offset;
+}
+
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+				      const void *object)
+{
+	return (void *)object + cache->kasan_info.free_meta_offset;
+}
+#endif
+
 void kasan_slab_alloc(struct kmem_cache *cache, void *object)
 {
 	kasan_kmalloc(cache, object, cache->object_size);
@@ -367,6 +448,17 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return;
 
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_free_meta *free_info =
+			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+		alloc_info->state = KASAN_STATE_FREE;
+		set_track(&free_info->track);
+	}
+#endif
+
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
@@ -386,6 +478,16 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+
+		alloc_info->state = KASAN_STATE_ALLOC;
+		alloc_info->alloc_size = size;
+		set_track(&alloc_info->track);
+	}
+#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
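To see the adaptive redzone policy in action, here is a small userspace harness (an illustration, not part of the patch or the kernel) that replicates optimal_redzone() from the hunk above and prints the redzone chosen for a few object sizes:

/* Illustration only: userspace copy of optimal_redzone() so the
 * adaptive policy can be inspected outside the kernel. */
#include <stdio.h>
#include <stddef.h>

static size_t optimal_redzone(size_t object_size)
{
	return object_size <= 64        - 16   ? 16 :
	       object_size <= 128       - 32   ? 32 :
	       object_size <= 512       - 64   ? 64 :
	       object_size <= 4096      - 128  ? 128 :
	       object_size <= (1 << 14) - 256  ? 256 :
	       object_size <= (1 << 15) - 512  ? 512 :
	       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	size_t sizes[] = { 16, 32, 100, 512, 4096, 1 << 16 };
	size_t i;

	/* Each threshold already accounts for the redzone itself, so an
	 * object plus its redzone stays within the next power-of-two-ish
	 * bucket (e.g. 48-byte objects get 16 bytes, filling 64). */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object %6zu -> redzone %4zu\n",
		       sizes[i], optimal_redzone(sizes[i]));
	return 0;
}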
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 4f6c62e5c21e..7b9e4ab9b66b 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -54,6 +54,40 @@ struct kasan_global {
 #endif
 };
 
+/**
+ * Structures to keep alloc and free tracks.
+ */
+
+enum kasan_state {
+	KASAN_STATE_INIT,
+	KASAN_STATE_ALLOC,
+	KASAN_STATE_FREE
+};
+
+struct kasan_track {
+	u64 cpu : 6;		/* for NR_CPUS = 64 */
+	u64 pid : 16;		/* 65536 processes */
+	u64 when : 42;		/* ~140 years */
+};
+
+struct kasan_alloc_meta {
+	u32 state : 2;		/* enum kasan_state */
+	u32 alloc_size : 30;
+	struct kasan_track track;
+};
+
+struct kasan_free_meta {
+	/* Allocator freelist pointer, unused by KASAN. */
+	void **freelist;
+	struct kasan_track track;
+};
+
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object);
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+				      const void *object);
+
+
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
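The bitfields above are sized so the tracking metadata stays compact. A quick userspace check (illustration only; the typedefs are stand-ins for kernel types, and the expected sizes assume an LP64 target with GCC-style bitfield packing, as the kernel uses):

/* Illustration only: verify the metadata structs pack as intended. */
#include <stdint.h>
typedef uint64_t u64;
typedef uint32_t u32;

struct kasan_track {
	u64 cpu : 6;
	u64 pid : 16;
	u64 when : 42;	/* 6 + 16 + 42 = 64 bits, one word */
};

struct kasan_alloc_meta {
	u32 state : 2;
	u32 alloc_size : 30;
	struct kasan_track track;	/* 8-byte aligned -> 4 bytes padding */
};

struct kasan_free_meta {
	void **freelist;
	struct kasan_track track;
};

_Static_assert(sizeof(struct kasan_track) == 8, "track fits in one word");
_Static_assert(sizeof(struct kasan_alloc_meta) == 16, "alloc meta: 16 bytes");
_Static_assert(sizeof(struct kasan_free_meta) == 16, "free meta: 16 bytes");

int main(void) { return 0; }

The "~140 years" comment is consistent with a 42-bit jiffies counter at HZ=1000: 2^42 ms is roughly 139 years.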
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 745aa8f36028..3e3385cc97ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -115,6 +115,46 @@ static inline bool init_task_stack_addr(const void *addr)
 		sizeof(init_thread_union.stack));
 }
 
+#ifdef CONFIG_SLAB
+static void print_track(struct kasan_track *track)
+{
+	pr_err("PID = %u, CPU = %u, timestamp = %lu\n", track->pid,
+	       track->cpu, (unsigned long)track->when);
+}
+
+static void object_err(struct kmem_cache *cache, struct page *page,
+			void *object, char *unused_reason)
+{
+	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
+	struct kasan_free_meta *free_info;
+
+	dump_stack();
+	pr_err("Object at %p, in cache %s\n", object, cache->name);
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+	switch (alloc_info->state) {
+	case KASAN_STATE_INIT:
+		pr_err("Object not allocated yet\n");
+		break;
+	case KASAN_STATE_ALLOC:
+		pr_err("Object allocated with size %u bytes.\n",
+		       alloc_info->alloc_size);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		break;
+	case KASAN_STATE_FREE:
+		pr_err("Object freed, allocated with size %u bytes\n",
+		       alloc_info->alloc_size);
+		free_info = get_free_info(cache, object);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		pr_err("Deallocation:\n");
+		print_track(&free_info->track);
+		break;
+	}
+}
+#endif
+
 static void print_address_description(struct kasan_access_info *info)
 {
 	const void *addr = info->access_addr;
@@ -126,17 +166,10 @@ static void print_address_description(struct kasan_access_info *info)
 	if (PageSlab(page)) {
 		void *object;
 		struct kmem_cache *cache = page->slab_cache;
-		void *last_object;
-
-		object = virt_to_obj(cache, page_address(page), addr);
-		last_object = page_address(page) +
-			page->objects * cache->size;
-
-		if (unlikely(object > last_object))
-			object = last_object; /* we hit into padding */
-
+		object = nearest_obj(cache, page,
+				(void *)info->access_addr);
 		object_err(cache, page, object,
 			"kasan: bad access detected");
 		return;
 	}
 	dump_page(page, "kasan: bad access detected");
@@ -146,7 +179,6 @@ static void print_address_description(struct kasan_access_info *info)
 		if (!init_task_stack_addr(addr))
 			pr_err("Address belongs to variable %pS\n", addr);
 	}
-
 	dump_stack();
 }
 
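The last two hunks replace the hand-rolled clamping in print_address_description() with nearest_obj(), which is defined outside mm/kasan (the diffstat above is limited to this directory). As a hedged, standalone model of what that helper has to provide, the struct and names below are stand-ins, not kernel API:

/* Illustration only: userspace model of nearest_obj() semantics --
 * clamp an arbitrary address inside a slab page to the start of the
 * nearest object, including the "hit into padding" case the removed
 * lines handled by hand. */
#include <stddef.h>

struct fake_slab {
	char *base;		/* page_address(page) */
	size_t obj_size;	/* cache->size: object stride incl. metadata */
	unsigned int nr_objs;	/* page->objects */
};

static void *model_nearest_obj(struct fake_slab *s, void *addr)
{
	ptrdiff_t off = (char *)addr - s->base;
	size_t idx;

	if (off < 0)
		return s->base;		/* before the first object */
	idx = (size_t)off / s->obj_size;
	if (idx >= s->nr_objs)
		idx = s->nr_objs - 1;	/* we hit into padding */
	return s->base + idx * s->obj_size;
}

Folding this into one helper keeps the report path short and lets SLAB and SLUB each supply their own definition.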