author     Andrey Ryabinin <a.ryabinin@samsung.com>        2015-02-13 17:40:17 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-14 00:21:42 -0500
commit     bebf56a1b176c2e1c9efe44e7e6915532cc682cf (patch)
tree       4b967827878142197f2b62cd0b89652873631192
parent     6301939d97d079f0d3dbe71e750f4daf5d39fc33 (diff)
kasan: enable instrumentation of global variables
This feature lets us detect out-of-bounds accesses to global variables. It works both for globals in the kernel image and for globals in modules. Currently it does not cover symbols placed in user-specified sections (e.g. __init, __read_mostly, ...).

The idea is simple: the compiler grows each global variable by the redzone size and adds constructors that invoke the __asan_register_globals() function. Information about each global variable (address, size, size with redzone, ...) is passed to __asan_register_globals() so that we can poison the variable's redzone.

This patch also forces module_alloc() to return addresses aligned to 8*PAGE_SIZE, which makes shadow memory handling (kasan_module_alloc()/kasan_module_free()) simpler. Such alignment guarantees that each shadow page backing the module address space corresponds to only one module_alloc() allocation.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
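As a rough illustration of the mechanism described above (not part of this patch): for a hypothetical global such as "int foo_table[16];", the compiler-emitted constructor conceptually boils down to the sketch below. The descriptor layout follows struct kasan_global as added in mm/kasan/kasan.h by this patch, but the names foo_global_desc and kasan_ctor_example, the redzone size, and the use of __attribute__((constructor)) are illustrative assumptions about what GCC generates, not literal compiler output.

/*
 * Illustrative sketch only; assumes the struct kasan_global and
 * __asan_register_globals() declarations added by this patch.
 */
int foo_table[16];				/* the global being instrumented */

static struct kasan_global foo_global_desc = {	/* hypothetical descriptor name */
	.beg  = foo_table,
	.size = sizeof(foo_table),		/* 64 bytes of real data */
	.size_with_redzone = sizeof(foo_table) + 32,	/* data + trailing redzone (example size) */
	.name = "foo_table",
	.module_name = "example.c",
	.has_dynamic_init = 0,
};

/* Hypothetical emitted constructor; runs via .ctors at boot or module load. */
static void __attribute__((constructor)) kasan_ctor_example(void)
{
	__asan_register_globals(&foo_global_desc, 1);	/* 1 = number of descriptors */
}

With CONSTRUCTORS selected (see the Kconfig hunk below), these constructors run at boot or module load, and __asan_register_globals() poisons each variable's redzone in shadow memory.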
-rw-r--r--  Documentation/kasan.txt          2
-rw-r--r--  arch/x86/kernel/module.c        12
-rw-r--r--  arch/x86/mm/kasan_init_64.c      2
-rw-r--r--  include/linux/compiler-gcc4.h    4
-rw-r--r--  include/linux/compiler-gcc5.h    2
-rw-r--r--  include/linux/kasan.h           10
-rw-r--r--  kernel/module.c                  2
-rw-r--r--  lib/Kconfig.kasan                1
-rw-r--r--  mm/kasan/kasan.c                52
-rw-r--r--  mm/kasan/kasan.h                25
-rw-r--r--  mm/kasan/report.c               22
-rw-r--r--  scripts/Makefile.kasan           2
12 files changed, 132 insertions, 4 deletions
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index f0645a8a992f..092fc10961fe 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -9,7 +9,7 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds
 bugs.
 
 KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC >= 4.9.2
+therefore you will need a certain version of GCC > 4.9.2
 
 Currently KASan is supported only for x86_64 architecture and requires that the
 kernel be built with the SLUB allocator.
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e830e61aae05..d1ac80b72c72 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kasan.h>
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -83,13 +84,22 @@ static unsigned long int get_module_load_offset(void)
 
 void *module_alloc(unsigned long size)
 {
+	void *p;
+
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1,
+
+	p = __vmalloc_node_range(size, MODULE_ALIGN,
 				    MODULES_VADDR + get_module_load_offset(),
 				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
 				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
+	if (p && (kasan_module_alloc(p, size) < 0)) {
+		vfree(p);
+		return NULL;
+	}
+
+	return p;
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 53508708b7aa..4860906c6b9f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -196,7 +196,7 @@ void __init kasan_init(void)
 			(unsigned long)kasan_mem_to_shadow(_end),
 			NUMA_NO_NODE);
 
-	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_VADDR),
+	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index d1a558239b1a..769e19864632 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -85,3 +85,7 @@
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index c8c565952548..efee493714eb 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -63,3 +63,5 @@
 #define __HAVE_BUILTIN_BSWAP64__
 #define __HAVE_BUILTIN_BSWAP16__
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d5310eef3e38..72ba725ddf9c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -49,8 +49,15 @@ void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_module_free(void *addr);
+
 #else /* CONFIG_KASAN */
 
+#define MODULE_ALIGN 1
+
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_enable_current(void) {}
@@ -74,6 +81,9 @@ static inline void kasan_krealloc(const void *object, size_t new_size) {}
 static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_module_free(void *addr) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
diff --git a/kernel/module.c b/kernel/module.c
index 82dc1f899e6d..8426ad48362c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,6 +56,7 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
+#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1813,6 +1814,7 @@ static void unset_module_init_ro_nx(struct module *mod) { }
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
+	kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4d47d874335c..4fecaedc80a2 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
 config KASAN
 	bool "KASan: runtime memory debugger"
 	depends on SLUB_DEBUG
+	select CONSTRUCTORS
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 799c52b9826c..78fee632a7ee 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -395,6 +396,57 @@ void kasan_kfree_large(const void *ptr)
 			KASAN_FREE_PAGE);
 }
 
+int kasan_module_alloc(void *addr, size_t size)
+{
+	void *ret;
+	size_t shadow_size;
+	unsigned long shadow_start;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+			PAGE_SIZE);
+
+	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+		return -EINVAL;
+
+	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+			shadow_start + shadow_size,
+			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+			__builtin_return_address(0));
+	return ret ? 0 : -ENOMEM;
+}
+
+void kasan_module_free(void *addr)
+{
+	vfree(kasan_mem_to_shadow(addr));
+}
+
+static void register_global(struct kasan_global *global)
+{
+	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(global->beg, global->size);
+
+	kasan_poison_shadow(global->beg + aligned_size,
+		global->size_with_redzone - aligned_size,
+		KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1fcc1d81a9cf..4986b0acab21 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -11,6 +11,7 @@
 #define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
 #define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
 #define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE    0xFA  /* redzone for global variable */
 
 /*
  * Stack redzone shadow values
@@ -21,6 +22,10 @@
 #define KASAN_STACK_RIGHT       0xF3
 #define KASAN_STACK_PARTIAL     0xF4
 
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
 
 struct kasan_access_info {
 	const void *access_addr;
@@ -30,6 +35,26 @@ struct kasan_access_info {
 	unsigned long ip;
 };
 
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+	const char *filename;
+	int line_no;
+	int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+	const void *beg;		/* Address of the beginning of the global variable. */
+	size_t size;			/* Size of the global variable. */
+	size_t size_with_redzone;	/* Size of the variable + size of the red zone. 32 bytes aligned */
+	const void *name;
+	const void *module_name;	/* Name of the module where the global variable is declared. */
+	unsigned long has_dynamic_init;	/* This needed for C++ */
+#if KASAN_ABI_VERSION >= 4
+	struct kasan_source_location *location;
+#endif
+};
+
 void kasan_report_error(struct kasan_access_info *info);
 void kasan_report_user_access(struct kasan_access_info *info);
 
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 866732ef3db3..680ceedf810a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -23,6 +23,8 @@
 #include <linux/types.h>
 #include <linux/kasan.h>
 
+#include <asm/sections.h>
+
 #include "kasan.h"
 #include "../slab.h"
 
@@ -61,6 +63,7 @@ static void print_error_description(struct kasan_access_info *info)
 		break;
 	case KASAN_PAGE_REDZONE:
 	case KASAN_KMALLOC_REDZONE:
+	case KASAN_GLOBAL_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -80,6 +83,20 @@ static void print_error_description(struct kasan_access_info *info)
 		info->access_size, current->comm, task_pid_nr(current));
 }
 
+static inline bool kernel_or_module_addr(const void *addr)
+{
+	return (addr >= (void *)_stext && addr < (void *)_end)
+		|| (addr >= (void *)MODULES_VADDR
+			&& addr < (void *)MODULES_END);
+}
+
+static inline bool init_task_stack_addr(const void *addr)
+{
+	return addr >= (void *)&init_thread_union.stack &&
+		(addr <= (void *)&init_thread_union.stack +
+			sizeof(init_thread_union.stack));
+}
+
 static void print_address_description(struct kasan_access_info *info)
 {
 	const void *addr = info->access_addr;
@@ -107,6 +124,11 @@ static void print_address_description(struct kasan_access_info *info)
 		dump_page(page, "kasan: bad access detected");
 	}
 
+	if (kernel_or_module_addr(addr)) {
+		if (!init_task_stack_addr(addr))
+			pr_err("Address belongs to variable %pS\n", addr);
+	}
+
 	dump_stack();
 }
 
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 2163b8cc446e..631619b2b118 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -9,7 +9,7 @@ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
 CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
 		-fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
-		--param asan-stack=1 \
+		--param asan-stack=1 --param asan-globals=1 \
 		--param asan-instrumentation-with-call-threshold=$(call_threshold))
 
 ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)