author	Kees Cook <keescook@chromium.org>	2016-06-26 11:46:23 -0400
committer	Kees Cook <keescook@chromium.org>	2016-07-07 14:09:20 -0400
commit	a3dff71c1c88fc184a1ae5e425ba621d547d16ec (patch)
tree	7d6fe391cedf8905280eb9038844bba06276142d
parent	0edca7b5afb4b0909eecd2ede9a6736ef7cafc42 (diff)
lkdtm: split usercopy tests to separate file
This splits the USERCOPY_* tests out into the new lkdtm_usercopy.c file,
keeping them better separated from the core and easier to read.
Signed-off-by: Kees Cook <keescook@chromium.org>
 drivers/misc/Makefile         |   1
 drivers/misc/lkdtm.h          |  13
 drivers/misc/lkdtm_core.c     | 279
 drivers/misc/lkdtm_usercopy.c | 315
 4 files changed, 342 insertions(+), 266 deletions(-)
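For readers following the mechanics of the split: each test area now lives in its own lkdtm_*.c file that exposes plain void lkdtm_<TESTNAME>(void) entry points plus optional __init/__exit hooks, all declared in drivers/misc/lkdtm.h and dispatched from lkdtm_core.c. As a rough sketch of what a further test file would look like under this convention — lkdtm_foo.c, lkdtm_FOO_TEST, and foo_state are illustrative names only, not part of this patch:

/*
 * Hypothetical drivers/misc/lkdtm_foo.c, following the layout this
 * patch introduces for lkdtm_usercopy.c.
 */
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>

/* Per-file state, prepared at module init (cf. bad_cache below). */
static void *foo_state;

void __init lkdtm_foo_init(void)
{
	foo_state = kmalloc(1024, GFP_KERNEL);
}

void __exit lkdtm_foo_exit(void)
{
	kfree(foo_state);
}

/*
 * Callable test: declared in lkdtm.h and wired to a CT_FOO_TEST case
 * in lkdtm_core.c's lkdtm_do_action().
 */
void lkdtm_FOO_TEST(void)
{
	if (!foo_state) {
		pr_warn("Failed to allocate foo state\n");
		return;
	}
	pr_info("attempting foo test\n");
}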
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7d45ed4a1549..e6b2778731ff 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_PANEL)		+= panel.o
 
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM)		+= lkdtm_usercopy.o
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index 9531fa3be4c3..ef290a2c8816 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -1,6 +1,19 @@
 #ifndef __LKDTM_H
 #define __LKDTM_H
 
+/* lkdtm_rodata.c */
 void lkdtm_rodata_do_nothing(void);
 
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
 #endif
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 1454d58aa278..74376920ed55 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -193,10 +193,6 @@ static DEFINE_SPINLOCK(lock_me_up);
 
 static u8 data_area[EXEC_SIZE];
 
-static size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
-
-static const unsigned char test_text[] = "This is a test.\n";
 static const unsigned long rodata = 0xAA55AA55;
 static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
 
@@ -403,255 +399,6 @@ static void execute_user_location(void *dst)
 	func();
 }
 
-/*
- * Instead of adding -Wno-return-local-addr, just pass the stack address
- * through a function to obfuscate it from the compiler.
- */
-static noinline unsigned char *trick_compiler(unsigned char *stack)
-{
-	return stack + 0;
-}
-
-static noinline unsigned char *do_usercopy_stack_callee(int value)
-{
-	unsigned char buf[32];
-	int i;
-
-	/* Exercise stack to avoid everything living in registers. */
-	for (i = 0; i < sizeof(buf); i++) {
-		buf[i] = value & 0xff;
-	}
-
-	return trick_compiler(buf);
-}
-
-static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
-{
-	unsigned long user_addr;
-	unsigned char good_stack[32];
-	unsigned char *bad_stack;
-	int i;
-
-	/* Exercise stack to avoid everything living in registers. */
-	for (i = 0; i < sizeof(good_stack); i++)
-		good_stack[i] = test_text[i % sizeof(test_text)];
-
-	/* This is a pointer to outside our current stack frame. */
-	if (bad_frame) {
-		bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
-	} else {
-		/* Put start address just inside stack. */
-		bad_stack = task_stack_page(current) + THREAD_SIZE;
-		bad_stack -= sizeof(unsigned long);
-	}
-
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-			    PROT_READ | PROT_WRITE | PROT_EXEC,
-			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
-		pr_warn("Failed to allocate user memory\n");
-		return;
-	}
-
-	if (to_user) {
-		pr_info("attempting good copy_to_user of local stack\n");
-		if (copy_to_user((void __user *)user_addr, good_stack,
-				 sizeof(good_stack))) {
-			pr_warn("copy_to_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_to_user of distant stack\n");
-		if (copy_to_user((void __user *)user_addr, bad_stack,
-				 sizeof(good_stack))) {
-			pr_warn("copy_to_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	} else {
-		/*
-		 * There isn't a safe way to not be protected by usercopy
-		 * if we're going to write to another thread's stack.
-		 */
-		if (!bad_frame)
-			goto free_user;
-
-		pr_info("attempting good copy_from_user of local stack\n");
-		if (copy_from_user(good_stack, (void __user *)user_addr,
-				   sizeof(good_stack))) {
-			pr_warn("copy_from_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_from_user of distant stack\n");
-		if (copy_from_user(bad_stack, (void __user *)user_addr,
-				   sizeof(good_stack))) {
-			pr_warn("copy_from_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	}
-
-free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
-}
-
-static void do_usercopy_kernel(void)
-{
-	unsigned long user_addr;
-
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-			    PROT_READ | PROT_WRITE | PROT_EXEC,
-			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
-		pr_warn("Failed to allocate user memory\n");
-		return;
-	}
-
-	pr_info("attempting good copy_to_user from kernel rodata\n");
-	if (copy_to_user((void __user *)user_addr, test_text,
-			 sizeof(test_text))) {
-		pr_warn("copy_to_user failed unexpectedly?!\n");
-		goto free_user;
-	}
-
-	pr_info("attempting bad copy_to_user from kernel text\n");
-	if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
-		pr_warn("copy_to_user failed, but lacked Oops\n");
-		goto free_user;
-	}
-
-free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
-}
-
-static void do_usercopy_heap_size(bool to_user)
-{
-	unsigned long user_addr;
-	unsigned char *one, *two;
-	size_t size = 1024;
-
-	one = kmalloc(size, GFP_KERNEL);
-	two = kmalloc(size, GFP_KERNEL);
-	if (!one || !two) {
-		pr_warn("Failed to allocate kernel memory\n");
-		goto free_kernel;
-	}
-
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-			    PROT_READ | PROT_WRITE | PROT_EXEC,
-			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
-		pr_warn("Failed to allocate user memory\n");
-		goto free_kernel;
-	}
-
-	memset(one, 'A', size);
-	memset(two, 'B', size);
-
-	if (to_user) {
-		pr_info("attempting good copy_to_user of correct size\n");
-		if (copy_to_user((void __user *)user_addr, one, size)) {
-			pr_warn("copy_to_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_to_user of too large size\n");
-		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
-			pr_warn("copy_to_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	} else {
-		pr_info("attempting good copy_from_user of correct size\n");
-		if (copy_from_user(one, (void __user *)user_addr, size)) {
-			pr_warn("copy_from_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_from_user of too large size\n");
-		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
-			pr_warn("copy_from_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	}
-
-free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
-free_kernel:
-	kfree(one);
-	kfree(two);
-}
-
-static void do_usercopy_heap_flag(bool to_user)
-{
-	unsigned long user_addr;
-	unsigned char *good_buf = NULL;
-	unsigned char *bad_buf = NULL;
-
-	/* Make sure cache was prepared. */
-	if (!bad_cache) {
-		pr_warn("Failed to allocate kernel cache\n");
-		return;
-	}
-
-	/*
-	 * Allocate one buffer from each cache (kmalloc will have the
-	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
-	 */
-	good_buf = kmalloc(cache_size, GFP_KERNEL);
-	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
-	if (!good_buf || !bad_buf) {
-		pr_warn("Failed to allocate buffers from caches\n");
-		goto free_alloc;
-	}
-
-	/* Allocate user memory we'll poke at. */
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-			    PROT_READ | PROT_WRITE | PROT_EXEC,
-			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
-		pr_warn("Failed to allocate user memory\n");
-		goto free_alloc;
-	}
-
-	memset(good_buf, 'A', cache_size);
-	memset(bad_buf, 'B', cache_size);
-
-	if (to_user) {
-		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, good_buf,
-				 cache_size)) {
-			pr_warn("copy_to_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, bad_buf,
-				 cache_size)) {
-			pr_warn("copy_to_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	} else {
-		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
-		if (copy_from_user(good_buf, (void __user *)user_addr,
-				   cache_size)) {
-			pr_warn("copy_from_user failed unexpectedly?!\n");
-			goto free_user;
-		}
-
-		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
-		if (copy_from_user(bad_buf, (void __user *)user_addr,
-				   cache_size)) {
-			pr_warn("copy_from_user failed, but lacked Oops\n");
-			goto free_user;
-		}
-	}
-
-free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
-free_alloc:
-	if (bad_buf)
-		kmem_cache_free(bad_cache, bad_buf);
-	kfree(good_buf);
-}
 
 static void lkdtm_do_action(enum ctype which)
 {
@@ -964,28 +711,28 @@ static void lkdtm_do_action(enum ctype which)
 		return;
 	}
 	case CT_USERCOPY_HEAP_SIZE_TO:
-		do_usercopy_heap_size(true);
+		lkdtm_USERCOPY_HEAP_SIZE_TO();
 		break;
 	case CT_USERCOPY_HEAP_SIZE_FROM:
-		do_usercopy_heap_size(false);
+		lkdtm_USERCOPY_HEAP_SIZE_FROM();
 		break;
 	case CT_USERCOPY_HEAP_FLAG_TO:
-		do_usercopy_heap_flag(true);
+		lkdtm_USERCOPY_HEAP_FLAG_TO();
 		break;
 	case CT_USERCOPY_HEAP_FLAG_FROM:
-		do_usercopy_heap_flag(false);
+		lkdtm_USERCOPY_HEAP_FLAG_FROM();
 		break;
 	case CT_USERCOPY_STACK_FRAME_TO:
-		do_usercopy_stack(true, true);
+		lkdtm_USERCOPY_STACK_FRAME_TO();
 		break;
 	case CT_USERCOPY_STACK_FRAME_FROM:
-		do_usercopy_stack(false, true);
+		lkdtm_USERCOPY_STACK_FRAME_FROM();
 		break;
 	case CT_USERCOPY_STACK_BEYOND:
-		do_usercopy_stack(true, false);
+		lkdtm_USERCOPY_STACK_BEYOND();
 		break;
 	case CT_USERCOPY_KERNEL:
-		do_usercopy_kernel();
+		lkdtm_USERCOPY_KERNEL();
 		break;
 	case CT_NONE:
 	default:
@@ -1276,13 +1023,12 @@ static int __init lkdtm_module_init(void)
 	int n_debugfs_entries = 1; /* Assume only the direct entry */
 	int i;
 
+	/* Handle test-specific initialization. */
+	lkdtm_usercopy_init();
+
 	/* Make sure we can write to __ro_after_init values during __init */
 	ro_after_init |= 0xAA;
 
-	/* Prepare cache that lacks SLAB_USERCOPY flag. */
-	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-				      0, NULL);
-
 	/* Register debugfs interface */
 	lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
 	if (!lkdtm_debugfs_root) {
@@ -1334,7 +1080,8 @@ static void __exit lkdtm_module_exit(void)
 {
 	debugfs_remove_recursive(lkdtm_debugfs_root);
 
-	kmem_cache_destroy(bad_cache);
+	/* Handle test-specific clean-up. */
+	lkdtm_usercopy_exit();
 
 	unregister_jprobe(&lkdtm);
 	pr_info("Crash point unregistered\n");
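One detail worth noting in the core changes above is the ordering of the new hooks: lkdtm_usercopy_init() runs before the debugfs interface is registered, so a test can never fire against unprepared state, and lkdtm_usercopy_exit() runs after the interface is removed. A minimal sketch of that lifecycle convention — lkdtm_foo_init/lkdtm_foo_exit are hypothetical names for a possible second split-out area, and only the usercopy hooks are real in this patch:

/* Hypothetical illustration of the init/exit hook ordering convention. */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
void __init lkdtm_foo_init(void);	/* hypothetical */
void __exit lkdtm_foo_exit(void);	/* hypothetical */

static int __init lkdtm_module_init(void)
{
	/* Per-area setup first, before any trigger interface exists. */
	lkdtm_usercopy_init();
	lkdtm_foo_init();

	/* ... "provoke-crash" debugfs registration happens here ... */
	return 0;
}

static void __exit lkdtm_module_exit(void)
{
	/* ... debugfs interface is removed here, then per-area state. */
	lkdtm_usercopy_exit();
	lkdtm_foo_exit();
}

The exit hooks also tolerate partial initialization: kmem_cache_destroy() is a no-op when passed NULL, so lkdtm_usercopy_exit() is safe even if the cache creation in lkdtm_usercopy_init() failed.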
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
new file mode 100644
index 000000000000..9c748e819a35
--- /dev/null
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -0,0 +1,315 @@
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+static size_t cache_size = 1024;
+static struct kmem_cache *bad_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+	return stack + 0;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+	unsigned char buf[32];
+	int i;
+
+	/* Exercise stack to avoid everything living in registers. */
+	for (i = 0; i < sizeof(buf); i++) {
+		buf[i] = value & 0xff;
+	}
+
+	return trick_compiler(buf);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+	unsigned long user_addr;
+	unsigned char good_stack[32];
+	unsigned char *bad_stack;
+	int i;
+
+	/* Exercise stack to avoid everything living in registers. */
+	for (i = 0; i < sizeof(good_stack); i++)
+		good_stack[i] = test_text[i % sizeof(test_text)];
+
+	/* This is a pointer to outside our current stack frame. */
+	if (bad_frame) {
+		bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+	} else {
+		/* Put start address just inside stack. */
+		bad_stack = task_stack_page(current) + THREAD_SIZE;
+		bad_stack -= sizeof(unsigned long);
+	}
+
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	if (to_user) {
+		pr_info("attempting good copy_to_user of local stack\n");
+		if (copy_to_user((void __user *)user_addr, good_stack,
+				 sizeof(good_stack))) {
+			pr_warn("copy_to_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_to_user of distant stack\n");
+		if (copy_to_user((void __user *)user_addr, bad_stack,
+				 sizeof(good_stack))) {
+			pr_warn("copy_to_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	} else {
+		/*
+		 * There isn't a safe way to not be protected by usercopy
+		 * if we're going to write to another thread's stack.
+		 */
+		if (!bad_frame)
+			goto free_user;
+
+		pr_info("attempting good copy_from_user of local stack\n");
+		if (copy_from_user(good_stack, (void __user *)user_addr,
+				   sizeof(good_stack))) {
+			pr_warn("copy_from_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_from_user of distant stack\n");
+		if (copy_from_user(bad_stack, (void __user *)user_addr,
+				   sizeof(good_stack))) {
+			pr_warn("copy_from_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	}
+
+free_user:
+	vm_munmap(user_addr, PAGE_SIZE);
+}
+
+static void do_usercopy_heap_size(bool to_user)
+{
+	unsigned long user_addr;
+	unsigned char *one, *two;
+	const size_t size = 1024;
+
+	one = kmalloc(size, GFP_KERNEL);
+	two = kmalloc(size, GFP_KERNEL);
+	if (!one || !two) {
+		pr_warn("Failed to allocate kernel memory\n");
+		goto free_kernel;
+	}
+
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		goto free_kernel;
+	}
+
+	memset(one, 'A', size);
+	memset(two, 'B', size);
+
+	if (to_user) {
+		pr_info("attempting good copy_to_user of correct size\n");
+		if (copy_to_user((void __user *)user_addr, one, size)) {
+			pr_warn("copy_to_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_to_user of too large size\n");
+		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+			pr_warn("copy_to_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	} else {
+		pr_info("attempting good copy_from_user of correct size\n");
+		if (copy_from_user(one, (void __user *)user_addr, size)) {
+			pr_warn("copy_from_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_from_user of too large size\n");
+		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+			pr_warn("copy_from_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	}
+
+free_user:
+	vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+	kfree(one);
+	kfree(two);
+}
+
+static void do_usercopy_heap_flag(bool to_user)
+{
+	unsigned long user_addr;
+	unsigned char *good_buf = NULL;
+	unsigned char *bad_buf = NULL;
+
+	/* Make sure cache was prepared. */
+	if (!bad_cache) {
+		pr_warn("Failed to allocate kernel cache\n");
+		return;
+	}
+
+	/*
+	 * Allocate one buffer from each cache (kmalloc will have the
+	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
+	 */
+	good_buf = kmalloc(cache_size, GFP_KERNEL);
+	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
+	if (!good_buf || !bad_buf) {
+		pr_warn("Failed to allocate buffers from caches\n");
+		goto free_alloc;
+	}
+
+	/* Allocate user memory we'll poke at. */
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		goto free_alloc;
+	}
+
+	memset(good_buf, 'A', cache_size);
+	memset(bad_buf, 'B', cache_size);
+
+	if (to_user) {
+		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
+		if (copy_to_user((void __user *)user_addr, good_buf,
+				 cache_size)) {
+			pr_warn("copy_to_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
+		if (copy_to_user((void __user *)user_addr, bad_buf,
+				 cache_size)) {
+			pr_warn("copy_to_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	} else {
+		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
+		if (copy_from_user(good_buf, (void __user *)user_addr,
+				   cache_size)) {
+			pr_warn("copy_from_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
+		if (copy_from_user(bad_buf, (void __user *)user_addr,
+				   cache_size)) {
+			pr_warn("copy_from_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	}
+
+free_user:
+	vm_munmap(user_addr, PAGE_SIZE);
+free_alloc:
+	if (bad_buf)
+		kmem_cache_free(bad_cache, bad_buf);
+	kfree(good_buf);
+}
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+	do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+	do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+{
+	do_usercopy_heap_flag(true);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+{
+	do_usercopy_heap_flag(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+	do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+	do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+	do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+	unsigned long user_addr;
+
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	pr_info("attempting good copy_to_user from kernel rodata\n");
+	if (copy_to_user((void __user *)user_addr, test_text,
+			 sizeof(test_text))) {
+		pr_warn("copy_to_user failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user from kernel text\n");
+	if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+		pr_warn("copy_to_user failed, but lacked Oops\n");
+		goto free_user;
+	}
+
+free_user:
+	vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+	/* Prepare cache that lacks SLAB_USERCOPY flag. */
+	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
+				      0, NULL);
+}
+
+void __exit lkdtm_usercopy_exit(void)
+{
+	kmem_cache_destroy(bad_cache);
+}
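As a usage note, nothing in the trigger path changes with this patch: the relocated tests are still fired through the debugfs directory registered in lkdtm_core.c. Assuming debugfs is mounted at /sys/kernel/debug and the direct trigger entry keeps its usual name (DIRECT, per the "Assume only the direct entry" comment above), a hypothetical userspace helper to fire one of these tests might look like:

/*
 * Hypothetical userspace trigger for an LKDTM crash type via debugfs.
 * Assumes a kernel built with CONFIG_LKDTM and debugfs mounted at the
 * usual path; the bad copy is expected to Oops the writing task when
 * hardened usercopy catches it, so run this on a disposable machine.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "USERCOPY_HEAP_SIZE_TO";
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing a crash type name runs the corresponding test. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}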