author	Andrey Konovalov <andreyknvl@google.com>	2018-12-28 03:30:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:44 -0500
commit	2813b9c0296259fb11e75c839bab2d958ba4f96c (patch)
tree	4f174c60d6f189977d74053b118f8f820ab689f1
parent	41eea9cd239c5b3fff726894f85c97f60e5799a3 (diff)
kasan, mm, arm64: tag non slab memory allocated via pagealloc
Tag-based KASAN doesn't check memory accesses through pointers tagged with 0xff. When page_address is used to get a pointer to memory that corresponds to some page, the tag of the resulting pointer gets set to 0xff, even though the allocated memory might have been tagged differently.

For slab pages it's impossible to recover the correct tag to return from page_address, since the page might contain multiple slab objects tagged with different values, and we can't know in advance which one of them is going to get accessed. For non-slab pages, however, we can recover the tag in page_address, since the whole page was marked with the same tag.

This patch adds tagging to non-slab memory allocated with pagealloc. To set the tag of the pointer returned from page_address, the tag gets stored to page->flags when the memory gets allocated.

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
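As a rough illustration of the mechanism the message describes (a self-contained sketch, not the kernel implementation: the tag field position, the fake_page struct and the fixed top-byte placement are illustrative assumptions for a 64-bit layout), this is the idea of storing a tag in a per-page flags word at allocation time and folding it back into the address when the page is converted to a pointer:

/*
 * Standalone sketch: an 8-bit tag is packed into a per-page flags word
 * when the page is allocated, and later folded back into the top byte
 * of the virtual address derived from that page. TAG_SHIFT and the
 * struct below are assumptions made for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define TAG_WIDTH	8
#define TAG_SHIFT	48	/* assumed position of the tag field in flags */
#define TAG_MASK	((UINT64_C(1) << TAG_WIDTH) - 1)

struct fake_page {
	uint64_t flags;		/* stands in for struct page::flags */
	uint64_t vaddr;		/* stands in for the page's linear address */
};

/* Store a tag in the flags word (done once per page at allocation time). */
static void tag_set(struct fake_page *p, uint8_t tag)
{
	p->flags &= ~(TAG_MASK << TAG_SHIFT);
	p->flags |= ((uint64_t)tag & TAG_MASK) << TAG_SHIFT;
}

/* Read the stored tag back out of the flags word. */
static uint8_t tag_get(const struct fake_page *p)
{
	return (uint8_t)((p->flags >> TAG_SHIFT) & TAG_MASK);
}

/* Recover a tagged address: place the stored tag in the top byte. */
static uint64_t page_to_virt_sketch(const struct fake_page *p)
{
	uint64_t addr = p->vaddr & ~((uint64_t)0xff << 56);

	return addr | ((uint64_t)tag_get(p) << 56);
}

int main(void)
{
	struct fake_page page = { .flags = 0, .vaddr = 0xffff000012345000ULL };

	tag_set(&page, 0xab);	/* allocation-time tagging */
	printf("tagged address: 0x%llx\n",
	       (unsigned long long)page_to_virt_sketch(&page));
	return 0;
}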
-rw-r--r--	arch/arm64/include/asm/memory.h	8
-rw-r--r--	include/linux/mm.h	29
-rw-r--r--	include/linux/page-flags-layout.h	10
-rw-r--r--	mm/cma.c	11
-rw-r--r--	mm/kasan/common.c	15
-rw-r--r--	mm/page_alloc.c	1
-rw-r--r--	mm/slab.c	2
7 files changed, 72 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 907946cc767c..2bb8721da7ef 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -321,7 +321,13 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);			\
+	__addr = __tag_set(__addr, page_kasan_tag(page));		\
+	((void *)__addr);						\
+})
+
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
 #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5411de93a363..b4d01969e700 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -804,6 +804,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
 
 /*
  * Define the bit shifts to access each section.  For non-existent
@@ -814,6 +815,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 #define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -836,6 +838,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -1101,6 +1104,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_KASAN_SW_TAGS
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+	page_kasan_tag_set(page, 0xff);
+}
+#else
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 7ec86bf31ce4..1dda31825ec4 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -82,6 +82,16 @@
 #define LAST_CPUPID_WIDTH 0
 #endif
 
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+	> BITS_PER_LONG - NR_PAGEFLAGS
+#error "KASAN: not enough bits in page flags for tag"
+#endif
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
 /*
  * We are going to use the flags for the page to node mapping if its in
  * there.  This includes the case where there is no node, so it is implicit.
diff --git a/mm/cma.c b/mm/cma.c
index 4cb76121a3ab..c7b39dd3b4f6 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
 
@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
 	trace_cma_alloc(pfn, page, count, align);
 
+	/*
+	 * CMA can allocate multiple page blocks, which results in different
+	 * blocks being marked with different tags. Reset the tags to ignore
+	 * those page blocks.
+	 */
+	if (page) {
+		for (i = 0; i < count; i++)
+			page_kasan_tag_reset(page + i);
+	}
+
 	if (ret && !no_warn) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 27f0cae336c9..195ca385cf7a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -220,8 +220,15 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
+	u8 tag;
+	unsigned long i;
+
 	if (unlikely(PageHighMem(page)))
 		return;
+
+	tag = random_tag();
+	for (i = 0; i < (1 << order); i++)
+		page_kasan_tag_set(page + i, tag);
 	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
227 234
@@ -319,6 +326,10 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 
 void kasan_poison_slab(struct page *page)
 {
+	unsigned long i;
+
+	for (i = 0; i < (1 << compound_order(page)); i++)
+		page_kasan_tag_reset(page + i);
 	kasan_poison_shadow(page_address(page),
 			PAGE_SIZE << compound_order(page),
 			KASAN_KMALLOC_REDZONE);
@@ -517,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (reset_tag(ptr) != page_address(page)) {
+		if (ptr != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -530,7 +541,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
+	if (ptr != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e95b5b7c9c3d..d245de2124e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1183,6 +1183,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
+	page_kasan_tag_reset(page);
 
 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
diff --git a/mm/slab.c b/mm/slab.c
index a80beb543678..01991060714c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2357,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	void *freelist;
 	void *addr = page_address(page);
 
-	page->s_mem = addr + colour_off;
+	page->s_mem = kasan_reset_tag(addr) + colour_off;
 	page->active = 0;
 
 	if (OBJFREELIST_SLAB(cachep))