author     Andrey Ryabinin <a.ryabinin@samsung.com>       2015-03-12 19:26:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-03-12 21:46:08 -0400
commit     a5af5aa8b67dfdba36c853b70564fd2dfe73d478 (patch)
tree       d898e560802ef68b21280539040ccd19dc62678b /mm
parent     b3c1030d50bad39383fcfa6721bd3c35463b3f3f (diff)
kasan, module, vmalloc: rework shadow allocation for modules
The current approach to handling shadow memory for modules is broken.

Shadow memory can only be freed after the memory it corresponds to is no
longer in use.  vfree() called from interrupt context uses the memory it
is freeing to store a 'struct llist_node':

	void vfree(const void *addr)
	{
	...
		if (unlikely(in_interrupt())) {
			struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
			if (llist_add((struct llist_node *)addr, &p->list))
				schedule_work(&p->wq);

That list node is later consumed by free_work(), which actually frees the
memory.  Currently module_memfree(), when called from interrupt context,
frees the shadow before freeing the module's memory, which can provoke a
kernel crash.

So shadow memory should be freed after the module's memory.  However, such
a deallocation order could race with kasan_module_alloc() in module_alloc().

Instead, free the shadow right before releasing the vm area.  At this point
the vfree()'d memory is no longer in use, yet not available for other
allocations.  A new VM_KASAN flag marks vm areas that have dynamically
allocated shadow memory, so KASAN frees the shadow only if it was previously
allocated.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
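To make the changelog concrete: the "shadow" that has to outlive the module
mapping is found by a fixed scale-and-offset translation of the module's
address.  The following is a small userspace sketch of that arithmetic,
assuming the x86_64 KASAN constants of this kernel generation (shadow scale
shift 3, shadow offset 0xdffffc0000000000); the module address used is a
made-up example, not something taken from this patch.

	#include <inttypes.h>
	#include <stdio.h>

	/* Constants as assumed for the x86_64 KASAN layout of this era:
	 * one shadow byte covers 8 bytes of address space, and the shadow
	 * region starts at KASAN_SHADOW_OFFSET. */
	#define KASAN_SHADOW_SCALE_SHIFT 3
	#define KASAN_SHADOW_OFFSET UINT64_C(0xdffffc0000000000)

	/* Mirrors kasan_mem_to_shadow(): shadow = (addr >> 3) + offset */
	static uint64_t mem_to_shadow(uint64_t addr)
	{
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}

	int main(void)
	{
		/* Hypothetical module mapping; the address is illustrative only. */
		uint64_t module_addr = UINT64_C(0xffffffffa0000000);
		uint64_t module_size = 0x4000;

		printf("module: %#" PRIx64 "..%#" PRIx64 "\n",
		       module_addr, module_addr + module_size);
		printf("shadow: %#" PRIx64 "..%#" PRIx64 "\n",
		       mem_to_shadow(module_addr),
		       mem_to_shadow(module_addr + module_size));
		return 0;
	}

The shadow range printed here is what kasan_module_alloc() vmalloc()s for a
module and what kasan_free_shadow() must release once the module's own
mapping is being torn down.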
Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/kasan.c  14
-rw-r--r--  mm/vmalloc.c       1
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 
 #include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
 			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
 			__builtin_return_address(0));
-	return ret ? 0 : -ENOMEM;
+
+	if (ret) {
+		find_vm_area(addr)->flags |= VM_KASAN;
+		return 0;
+	}
+
+	return -ENOMEM;
 }
 
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
 {
-	vfree(kasan_mem_to_shadow(addr));
+	if (vm->flags & VM_KASAN)
+		vfree(kasan_mem_to_shadow(vm->addr));
 }
 
 static void register_global(struct kasan_global *global)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	spin_unlock(&vmap_area_lock);
 
 	vmap_debug_free_range(va->va_start, va->va_end);
+	kasan_free_shadow(vm);
 	free_unmap_vmap_area(va);
 	vm->size -= PAGE_SIZE;
 
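As a closing note on the vmalloc.c hook: remove_vm_area() runs after any
deferred-free bookkeeping has already touched the region (the llist_node
write mentioned in the changelog) but before the area becomes available for
other allocations, which is why the shadow can be released there without
racing with kasan_module_alloc().  The sketch below is a userspace model,
not kernel code; struct vm_struct is simplified and the VM_KASAN value is
illustrative.  It only shows the gating behaviour: the shadow is vfree()'d
exactly for areas that kasan_module_alloc() marked VM_KASAN.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel structures; the real flag and
	 * struct vm_struct live in <linux/vmalloc.h>, and the flag value
	 * here is only illustrative. */
	#define VM_KASAN 0x00000080UL

	struct vm_struct {
		void *addr;
		unsigned long flags;
	};

	/* Models the intent of kasan_free_shadow(): release the shadow only
	 * for areas whose shadow was dynamically allocated (marked VM_KASAN
	 * by kasan_module_alloc()); for every other area there is nothing
	 * to free. */
	static void free_shadow(const struct vm_struct *vm)
	{
		if (vm->flags & VM_KASAN)
			printf("vfree(shadow of %p)\n", vm->addr);
	}

	int main(void)
	{
		struct vm_struct module_area = { (void *)0x1000, VM_KASAN };
		struct vm_struct plain_area  = { (void *)0x2000, 0 };

		free_shadow(&module_area); /* shadow released */
		free_shadow(&plain_area);  /* no dynamic shadow: no-op */
		return 0;
	}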