author     Andrey Ryabinin <a.ryabinin@samsung.com>            2015-02-13 17:39:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>      2015-02-14 00:21:41 -0500
commit     b8c73fc2493d42517be95cf2c89659fc6c6f4d02
tree       81e44f58a903a8093617f5db7806f0bfa4cf5691 /mm
parent     ef7f0d6a6ca8c9e4b27d78895af86c2fbfaeedb2
mm: page_alloc: add kasan hooks on alloc and free paths
Add kernel address sanitizer hooks to mark an allocated page's addresses as
accessible in the corresponding shadow region, and to mark freed pages as
inaccessible.
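
For illustration only, not part of the patch: a minimal user-space sketch of the
shadow bookkeeping these hooks perform. KASAN_SHADOW_SCALE_SHIFT (one shadow byte
per eight bytes of memory) and the 0xFF KASAN_FREE_PAGE pattern match the code
below; PAGE_SHIFT, the mem[]/shadow[] arrays, the toy_* helper names and the
direct array indexing are simplifications invented for this sketch. The real
kasan_mem_to_shadow() shifts the address and adds KASAN_SHADOW_OFFSET rather than
indexing a local array.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT		12		/* assumes 4K pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define KASAN_SHADOW_SCALE_SHIFT 3		/* 1 shadow byte covers 8 bytes */
#define KASAN_FREE_PAGE		0xFF		/* same pattern as mm/kasan/kasan.h below */

/* toy "memory": 16 pages plus their shadow, both plain user-space arrays */
static unsigned char mem[16 * PAGE_SIZE];
static unsigned char shadow[(16 * PAGE_SIZE) >> KASAN_SHADOW_SCALE_SHIFT];

/* map an address in mem[] to its shadow byte (the kernel adds
 * KASAN_SHADOW_OFFSET to the shifted address instead of indexing) */
static unsigned char *mem_to_shadow(const void *addr)
{
	size_t off = (size_t)((const unsigned char *)addr - mem);

	return &shadow[off >> KASAN_SHADOW_SCALE_SHIFT];
}

/* analogue of kasan_alloc_pages(): mark 2^order pages accessible (shadow 0) */
static void toy_alloc_pages(void *page, unsigned int order)
{
	memset(mem_to_shadow(page), 0,
	       (PAGE_SIZE << order) >> KASAN_SHADOW_SCALE_SHIFT);
}

/* analogue of kasan_free_pages(): poison the whole range as freed */
static void toy_free_pages(void *page, unsigned int order)
{
	memset(mem_to_shadow(page), KASAN_FREE_PAGE,
	       (PAGE_SIZE << order) >> KASAN_SHADOW_SCALE_SHIFT);
}

int main(void)
{
	void *page = mem;		/* pretend this came from the page allocator */

	toy_alloc_pages(page, 1);	/* order-1 block: two pages become accessible */
	toy_free_pages(page, 1);	/* and are poisoned again on free */

	/* any later check of this range now sees KASAN_FREE_PAGE, which
	 * mm/kasan/report.c below turns into a "use after free" report */
	printf("shadow of first byte: 0x%02x\n", *mem_to_shadow(page));
	return 0;
}

In the patch itself the same transition is wired into prep_new_page() and
free_pages_prepare(), so it happens for every page the buddy allocator hands out
or takes back.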
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		 2 ++
-rw-r--r--	mm/kasan/kasan.c	14 ++++++++++++++
-rw-r--r--	mm/kasan/kasan.h	 2 ++
-rw-r--r--	mm/kasan/report.c	11 +++++++++++
-rw-r--r--	mm/page_alloc.c		 3 +++
5 files changed, 32 insertions, 0 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index d50d6de6f1b6..8c0d9459b54a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,7 @@
 #include <linux/sysfs.h>
 #include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
+#include <linux/kasan.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -72,6 +73,7 @@ static void map_pages(struct list_head *list)
 	list_for_each_entry(page, list, lru) {
 		arch_alloc_page(page, 0);
 		kernel_map_pages(page, 1, 1);
+		kasan_alloc_pages(page, 0);
 	}
 }
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index def81104772f..b516eb8632b9 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -254,6 +254,20 @@ static __always_inline void check_memory_region(unsigned long addr,
 	kasan_report(addr, size, write, _RET_IP_);
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_poison_shadow(page_address(page),
+				PAGE_SIZE << order,
+				KASAN_FREE_PAGE);
+}
+
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 648b9c006f3f..d3c90d5dd97a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,6 +6,8 @@
 #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
+
 struct kasan_access_info {
 	const void *access_addr;
 	const void *first_bad_addr;
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 5835d69563f5..fab8e7882ff1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -54,6 +54,9 @@ static void print_error_description(struct kasan_access_info *info)
 	shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
 
 	switch (shadow_val) {
+	case KASAN_FREE_PAGE:
+		bug_type = "use after free";
+		break;
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -69,6 +72,14 @@ static void print_error_description(struct kasan_access_info *info)
 
 static void print_address_description(struct kasan_access_info *info)
 {
+	const void *addr = info->access_addr;
+
+	if ((addr >= (void *)PAGE_OFFSET) &&
+		(addr < high_memory)) {
+		struct page *page = virt_to_head_page(addr);
+		dump_page(page, "kasan: bad access detected");
+	}
+
 	dump_stack();
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb4758263f6b..a47f0b229a1a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -25,6 +25,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/kmemcheck.h>
+#include <linux/kasan.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -787,6 +788,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
+	kasan_free_pages(page, order);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -970,6 +972,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
 
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);