author	Andrey Ryabinin <a.ryabinin@samsung.com>	2015-02-13 17:39:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-14 00:21:41 -0500
commit	0316bec22ec95ea2faca6406437b0b5950553b7c (patch)
tree	6a278e1515188e738df2b04e9ada375215b3df22 /mm/kasan
parent	a79316c6178ca419e35feef47d47f50b4e0ee9f2 (diff)
mm: slub: add kernel address sanitizer support for slub allocator
With this patch kasan will be able to catch bugs in memory allocated by
slub. Initially, all objects in a newly allocated slab page are marked
as redzone. Later, when a slub object is allocated, the number of bytes
requested by the caller is marked as accessible, and the rest of the
object (including slub's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible if ksize() was called for it.
There are some places in the kernel where ksize() is called to inquire
about the size of the really allocated area. Such callers may validly
access the whole allocated memory, so it has to be marked as
accessible.

Code in slub.c and slab_common.c may validly access an object's
metadata, so instrumentation for these files is disabled.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
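[Editor's note: the poisoning scheme described above can be modeled in a
few lines of userspace C. This is a toy sketch with made-up names
(SCALE, poison(), unpoison(); none of these exist in the kernel): one
shadow byte tracks each 8-byte granule of memory, a freshly allocated
slab starts out fully poisoned as redzone, and each allocation unpoisons
only the bytes the caller asked for, recording a trailing partial
granule as the count of its valid bytes.

    /* Toy model of the shadow encoding; all names are hypothetical. */
    #include <stdint.h>
    #include <string.h>

    #define SCALE   8    /* bytes of memory covered by one shadow byte */
    #define REDZONE 0xFC /* stands in for KASAN_KMALLOC_REDZONE */

    static uint8_t shadow[1024];    /* shadow for 8 KB of "memory" */

    /* Mark size bytes at addr inaccessible (size assumed SCALE-aligned). */
    static void poison(unsigned long addr, unsigned long size, uint8_t val)
    {
            memset(&shadow[addr / SCALE], val, size / SCALE);
    }

    /* Mark size bytes at addr accessible; a trailing partial granule
     * stores how many of its 8 bytes are valid (1..7). */
    static void unpoison(unsigned long addr, unsigned long size)
    {
            memset(&shadow[addr / SCALE], 0, size / SCALE);
            if (size % SCALE)
                    shadow[(addr + size) / SCALE] = size % SCALE;
    }

In this model, kasan_poison_slab() corresponds to poison() over the
whole page with the redzone value, and kasan_kmalloc() to unpoison()
over the requested size followed by poison() over the rest of the
object.]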
Diffstat (limited to 'mm/kasan')
 mm/kasan/kasan.c  | 98
 mm/kasan/kasan.h  |  5
 mm/kasan/report.c | 21
 3 files changed, 124 insertions(+), 0 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index b516eb8632b9..dc83f070edb6 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -31,6 +31,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
@@ -268,6 +269,103 @@ void kasan_free_pages(struct page *page, unsigned int order)
 			KASAN_FREE_PAGE);
 }
 
+void kasan_poison_slab(struct page *page)
+{
+	kasan_poison_shadow(page_address(page),
+			PAGE_SIZE << compound_order(page),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_poison_shadow(object,
+			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+	kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+	unsigned long size = cache->object_size;
+	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+		return;
+
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(object == NULL))
+		return;
+
+	redzone_start = round_up((unsigned long)(object + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = round_up((unsigned long)object + cache->object_size,
+				KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(object, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+	struct page *page;
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(ptr == NULL))
+		return;
+
+	page = virt_to_page(ptr);
+	redzone_start = round_up((unsigned long)(ptr + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+	kasan_unpoison_shadow(ptr, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+	struct page *page;
+
+	if (unlikely(object == ZERO_SIZE_PTR))
+		return;
+
+	page = virt_to_head_page(object);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_kmalloc_large(object, size);
+	else
+		kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+	struct page *page = virt_to_page(ptr);
+
+	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+			KASAN_FREE_PAGE);
+}
+
 #define DEFINE_ASAN_LOAD_STORE(size) \
 	void __asan_load##size(unsigned long addr) \
 	{ \
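[Editor's note: to see what the round_up() arithmetic in kasan_kmalloc()
above does, here is a standalone sketch with invented numbers (an
object at 0x1000, 60 bytes requested from a cache whose object_size is
100); it only illustrates the computation and is not kernel code:

    #include <stdio.h>

    #define SCALE 8 /* KASAN_SHADOW_SCALE_SIZE */
    /* power-of-two round up, matching the kernel macro's result */
    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~(unsigned long)((a) - 1))

    int main(void)
    {
            unsigned long object = 0x1000, size = 60, object_size = 100;
            unsigned long redzone_start = ROUND_UP(object + size, SCALE);
            unsigned long redzone_end   = ROUND_UP(object + object_size, SCALE);

            /* Prints "redzone: [0x1040, 0x1068)". Bytes 0x1000-0x103b stay
             * accessible (the partial granule's shadow byte holds 4) and
             * [0x1040, 0x1068) becomes KASAN_KMALLOC_REDZONE. */
            printf("redzone: [0x%lx, 0x%lx)\n", redzone_start, redzone_end);
            return 0;
    }
]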
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d3c90d5dd97a..5b052ab40cf9 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -7,6 +7,11 @@
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
+
 
 struct kasan_access_info {
 	const void *access_addr;
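[Editor's note: the new constants slot into KASAN's shadow encoding: 0
means the whole 8-byte granule is accessible, 1..7 means only the first
N bytes of it are, and the high poison values distinguish freed memory
from the two redzone kinds so reports can name the bug class. A sketch
of how a one-byte access could be classified under this encoding (an
illustrative helper, not code from this patch; the real checks live in
mm/kasan/kasan.c):

    #include <stdbool.h>
    #include <stdint.h>

    static bool byte_accessible(uint8_t shadow_val, unsigned long addr)
    {
            if (shadow_val == 0)
                    return true;                    /* whole granule valid */
            if (shadow_val < 8)                     /* KASAN_SHADOW_SCALE_SIZE */
                    return (addr & 7) < shadow_val; /* partial granule */
            return false; /* 0xFF/0xFE/0xFC/0xFB: freed memory or redzone */
    }
]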
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index fab8e7882ff1..2760edb4d0a8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -24,6 +24,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /* Shadow layout customization. */
 #define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct kasan_access_info *info)
 
 	switch (shadow_val) {
 	case KASAN_FREE_PAGE:
+	case KASAN_KMALLOC_FREE:
 		bug_type = "use after free";
 		break;
+	case KASAN_PAGE_REDZONE:
+	case KASAN_KMALLOC_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -77,6 +81,23 @@ static void print_address_description(struct kasan_access_info *info)
 	if ((addr >= (void *)PAGE_OFFSET) &&
 	    (addr < high_memory)) {
 		struct page *page = virt_to_head_page(addr);
+
+		if (PageSlab(page)) {
+			void *object;
+			struct kmem_cache *cache = page->slab_cache;
+			void *last_object;
+
+			object = virt_to_obj(cache, page_address(page), addr);
+			last_object = page_address(page) +
+				page->objects * cache->size;
+
+			if (unlikely(object > last_object))
+				object = last_object; /* we hit into padding */
+
+			object_err(cache, page, object,
+				"kasan: bad access detected");
+			return;
+		}
 		dump_page(page, "kasan: bad access detected");
 	}
 
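[Editor's note: the report path above maps the faulting address back to
its slub object with virt_to_obj() from mm/slab.h, which rounds the
address down to the start of its slot; roughly as in this sketch (my
paraphrase of its effect, see mm/slab.h for the authoritative
definition):

    /* Round addr down to the start of the object containing it;
     * cache->size is the per-object stride within the slab page. */
    static inline void *obj_start(struct kmem_cache *cache,
                                  void *slab_base, void *addr)
    {
            return (char *)addr -
                   (((char *)addr - (char *)slab_base) % cache->size);
    }

If the result lands past last_object, the access hit the slab's trailing
padding rather than a real object, which is why the code clamps to
last_object before calling object_err().]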