author    Andrey Ryabinin <aryabinin@virtuozzo.com>    2016-08-02 17:02:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-08-02 17:31:41 -0400
commit    b3cbd9bf77cd1888114dbee1653e79aa23fd4068 (patch)
tree      f9a10e2c14b336f276805f8a4714d0bd437d1646
parent    47b5c2a0f021e90a79845d1a1353780e5edd0bce (diff)
mm/kasan: get rid of ->state in struct kasan_alloc_meta
The state of an object is currently tracked in two places - shadow memory, and the ->state field in struct kasan_alloc_meta. We can get rid of the latter, which saves us a little bit of memory. It also allows us to move the free stack into struct kasan_alloc_meta without increasing memory consumption, so we now always know when the object was last freed. This may be useful for long-delayed use-after-free bugs.

As a side effect this fixes the following UBSAN warning:

	UBSAN: Undefined behaviour in mm/kasan/quarantine.c:102:13
	member access within misaligned address ffff88000d1efebc
	for type 'struct qlist_node' which requires 8 byte alignment

Link: http://lkml.kernel.org/r/1470062715-14077-5-git-send-email-aryabinin@virtuozzo.com
Reported-by: kernel test robot <xiaolong.ye@intel.com>
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
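As an illustration of why the shadow byte alone is enough, below is a minimal standalone sketch (userspace, not kernel code) of the predicate the patched kasan_slab_free() relies on. The constants mirror mm/kasan/kasan.h; shadow_byte_says_free() is a hypothetical helper invented for this sketch, not a kernel function.

	#include <stdbool.h>
	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SIZE 8	/* one shadow byte covers 8 object bytes */
	#define KASAN_KMALLOC_FREE ((signed char)0xFB)	/* poison value for freed objects */

	/*
	 * Hypothetical helper: same check as the new kasan_slab_free().
	 * Shadow values 0..KASAN_SHADOW_SCALE_SIZE-1 mean the object (or at
	 * least its first bytes) is accessible; anything else is a poison
	 * marker, so freeing the object again is a double free.
	 */
	static bool shadow_byte_says_free(signed char shadow_byte)
	{
		return shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	}

	int main(void)
	{
		printf("live object  -> double free? %d\n", shadow_byte_says_free(0));
		printf("freed object -> double free? %d\n",
		       shadow_byte_says_free(KASAN_KMALLOC_FREE));
		return 0;
	}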
-rw-r--r--  include/linux/kasan.h    3
-rw-r--r--  mm/kasan/kasan.c        61
-rw-r--r--  mm/kasan/kasan.h        12
-rw-r--r--  mm/kasan/quarantine.c    2
-rw-r--r--  mm/kasan/report.c       23
-rw-r--r--  mm/slab.c                4
-rw-r--r--  mm/slub.c                1
7 files changed, 42 insertions(+), 64 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index c9cf374445d8..d600303306eb 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -56,6 +56,7 @@ void kasan_cache_destroy(struct kmem_cache *cache);
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
@@ -102,6 +103,8 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 					void *object) {}
+static inline void kasan_init_slab_obj(struct kmem_cache *cache,
+				const void *object) {}
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 388e812ccaca..92750e3b0083 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -442,11 +442,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-	if (cache->flags & SLAB_KASAN) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		alloc_info->state = KASAN_STATE_INIT;
-	}
 }
 
 static inline int in_irqentry_text(unsigned long ptr)
@@ -510,6 +505,17 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
 
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
+{
+	struct kasan_alloc_meta *alloc_info;
+
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+
+	alloc_info = get_alloc_info(cache, object);
+	__memset(alloc_info, 0, sizeof(*alloc_info));
+}
+
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	kasan_kmalloc(cache, object, cache->object_size, flags);
@@ -529,34 +535,27 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
+	s8 shadow_byte;
+
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
-	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info;
-		struct kasan_free_meta *free_info;
+	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
+	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
+		pr_err("Double free");
+		dump_stack();
+		return true;
+	}
 
-		alloc_info = get_alloc_info(cache, object);
-		free_info = get_free_info(cache, object);
+	kasan_poison_slab_free(cache, object);
 
-		switch (alloc_info->state) {
-		case KASAN_STATE_ALLOC:
-			alloc_info->state = KASAN_STATE_QUARANTINE;
-			set_track(&free_info->track, GFP_NOWAIT);
-			kasan_poison_slab_free(cache, object);
-			quarantine_put(free_info, cache);
-			return true;
-		case KASAN_STATE_QUARANTINE:
-		case KASAN_STATE_FREE:
-			pr_err("Double free");
-			dump_stack();
-			break;
-		default:
-			break;
-		}
-	}
-	return false;
+	if (unlikely(!(cache->flags & SLAB_KASAN)))
+		return false;
+
+	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
+	quarantine_put(get_free_info(cache, object), cache);
+	return true;
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -579,13 +578,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-	if (cache->flags & SLAB_KASAN) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
 
-		alloc_info->state = KASAN_STATE_ALLOC;
-		set_track(&alloc_info->track, flags);
-	}
+	if (cache->flags & SLAB_KASAN)
+		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index aa175460c8f9..9b7b31e25fd2 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -59,13 +59,6 @@ struct kasan_global {
  * Structures to keep alloc and free tracks *
  */
 
-enum kasan_state {
-	KASAN_STATE_INIT,
-	KASAN_STATE_ALLOC,
-	KASAN_STATE_QUARANTINE,
-	KASAN_STATE_FREE
-};
-
 #define KASAN_STACK_DEPTH 64
 
 struct kasan_track {
@@ -74,8 +67,8 @@ struct kasan_track {
 };
 
 struct kasan_alloc_meta {
-	struct kasan_track track;
-	u32 state;
+	struct kasan_track alloc_track;
+	struct kasan_track free_track;
 };
 
 struct qlist_node {
@@ -86,7 +79,6 @@ struct kasan_free_meta {
 	 * Otherwise it might be used for the allocator freelist.
 	 */
 	struct qlist_node quarantine_link;
-	struct kasan_track track;
 };
 
 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 4852625ff851..7fd121d13b88 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -144,13 +144,11 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
 static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 {
 	void *object = qlink_to_object(qlink, cache);
-	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	unsigned long flags;
 
 	if (IS_ENABLED(CONFIG_SLAB))
 		local_irq_save(flags);
 
-	alloc_info->state = KASAN_STATE_FREE;
 	___cache_free(cache, object, _THIS_IP_);
 
 	if (IS_ENABLED(CONFIG_SLAB))
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index d67a7e020905..f437398b685a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -133,7 +133,6 @@ static void kasan_object_err(struct kmem_cache *cache, struct page *page,
 			void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
-	struct kasan_free_meta *free_info;
 
 	dump_stack();
 	pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
@@ -141,23 +140,11 @@ static void kasan_object_err(struct kmem_cache *cache, struct page *page,
 
 	if (!(cache->flags & SLAB_KASAN))
 		return;
-	switch (alloc_info->state) {
-	case KASAN_STATE_INIT:
-		pr_err("Object not allocated yet\n");
-		break;
-	case KASAN_STATE_ALLOC:
-		pr_err("Allocation:\n");
-		print_track(&alloc_info->track);
-		break;
-	case KASAN_STATE_FREE:
-	case KASAN_STATE_QUARANTINE:
-		free_info = get_free_info(cache, object);
-		pr_err("Allocation:\n");
-		print_track(&alloc_info->track);
-		pr_err("Deallocation:\n");
-		print_track(&free_info->track);
-		break;
-	}
+
+	pr_err("Allocated:\n");
+	print_track(&alloc_info->alloc_track);
+	pr_err("Freed:\n");
+	print_track(&alloc_info->free_track);
 }
 
 static void print_address_description(struct kasan_access_info *info)
diff --git a/mm/slab.c b/mm/slab.c
index 09771ed3e693..ca135bd47c35 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2604,9 +2604,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
 	}
 
 	for (i = 0; i < cachep->num; i++) {
+		objp = index_to_obj(cachep, page, i);
+		kasan_init_slab_obj(cachep, objp);
+
 		/* constructor could break poison info */
 		if (DEBUG == 0 && cachep->ctor) {
-			objp = index_to_obj(cachep, page, i);
 			kasan_unpoison_object_data(cachep, objp);
 			cachep->ctor(objp);
 			kasan_poison_object_data(cachep, objp);
diff --git a/mm/slub.c b/mm/slub.c
index 74e7c8c30db8..26eb6a99540e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1384,6 +1384,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
+	kasan_init_slab_obj(s, object);
 	if (unlikely(s->ctor)) {
 		kasan_unpoison_object_data(s, object);
 		s->ctor(object);