author		Mike Snitzer <snitzer@redhat.com>	2017-03-17 14:56:17 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2017-03-24 15:54:22 -0400
commit		1aa0efd4210df1c57764b77040a6615bc9b3ac0f
tree		5b3033a4fa126bf1ebc17a7d0e5f7172f1317d01
parent		8f0009a225171cc1b76a6b443de5137b26e1374b
dm integrity: factor out create_journal() from dm_integrity_ctr()
Preparation for next commit that makes call to create_journal()
optional.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
 drivers/md/dm-integrity.c | 379
 1 file changed, 196 insertions(+), 183 deletions(-)
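
The hunks below move the journal-setup code out of dm_integrity_ctr() essentially verbatim; the only interface change is that the helper reports failures through a caller-supplied char **error and a 0/-errno return, which is what makes it easy for a later commit to skip the call. The snippet below is a minimal standalone sketch of that calling convention only; the toy_* names, simplified types, and the journal_disabled flag are assumptions made for illustration and are not part of dm-integrity.

/*
 * Standalone sketch, not kernel code: it mirrors only the calling
 * convention the patch introduces -- a helper returning 0 or a negative
 * errno and reporting failures through a caller-supplied error string.
 * The toy_* names and the journal_disabled knob are illustrative
 * placeholders, not part of dm-integrity.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_integrity_c {
	unsigned int journal_sections;
	void *journal;		/* stands in for the real journal page lists */
	int journal_disabled;	/* hypothetical flag a later change might test */
};

static int toy_create_journal(struct toy_integrity_c *ic, char **error)
{
	ic->journal = calloc(ic->journal_sections, 64);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		return -ENOMEM;
	}
	return 0;
}

static int toy_ctr(struct toy_integrity_c *ic, char **error)
{
	int r;

	/* The factored-out call is trivial to guard once it is a single line. */
	if (!ic->journal_disabled) {
		r = toy_create_journal(ic, error);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct toy_integrity_c ic = { .journal_sections = 4 };
	char *error = NULL;
	int r = toy_ctr(&ic, &error);

	if (r)
		fprintf(stderr, "ctr failed: %s (%d)\n", error, r);
	else
		printf("journal created\n");
	free(ic.journal);
	return r ? 1 : 0;
}
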
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ea779cca8b45..ecb0b592f5a0 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2429,6 +2429,200 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
 	return 0;
 }

+static int create_journal(struct dm_integrity_c *ic, char **error)
+{
+	int r = 0;
+	unsigned i;
+	__u64 journal_pages, journal_desc_size, journal_tree_size;
+
+	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
+				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
+	journal_desc_size = journal_pages * sizeof(struct page_list);
+	if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
+		*error = "Journal doesn't fit into memory";
+		r = -ENOMEM;
+		goto bad;
+	}
+	ic->journal_pages = journal_pages;
+
+	ic->journal = dm_integrity_alloc_page_list(ic);
+	if (!ic->journal) {
+		*error = "Could not allocate memory for journal";
+		r = -ENOMEM;
+		goto bad;
+	}
+	if (ic->journal_crypt_alg.alg_string) {
+		unsigned ivsize, blocksize;
+		struct journal_completion comp;
+
+		comp.ic = ic;
+		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
+		if (IS_ERR(ic->journal_crypt)) {
+			*error = "Invalid journal cipher";
+			r = PTR_ERR(ic->journal_crypt);
+			ic->journal_crypt = NULL;
+			goto bad;
+		}
+		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
+		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
+
+		if (ic->journal_crypt_alg.key) {
+			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
+						   ic->journal_crypt_alg.key_size);
+			if (r) {
+				*error = "Error setting encryption key";
+				goto bad;
+			}
+		}
+		DEBUG_print("cipher %s, block size %u iv size %u\n",
+			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
+
+		ic->journal_io = dm_integrity_alloc_page_list(ic);
+		if (!ic->journal_io) {
+			*error = "Could not allocate memory for journal io";
+			r = -ENOMEM;
+			goto bad;
+		}
+
+		if (blocksize == 1) {
+			struct scatterlist *sg;
+			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
+			unsigned char iv[ivsize];
+			skcipher_request_set_tfm(req, ic->journal_crypt);
+
+			ic->journal_xor = dm_integrity_alloc_page_list(ic);
+			if (!ic->journal_xor) {
+				*error = "Could not allocate memory for journal xor";
+				r = -ENOMEM;
+				goto bad;
+			}
+
+			sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+			if (!sg) {
+				*error = "Unable to allocate sg list";
+				r = -ENOMEM;
+				goto bad;
+			}
+			sg_init_table(sg, ic->journal_pages + 1);
+			for (i = 0; i < ic->journal_pages; i++) {
+				char *va = lowmem_page_address(ic->journal_xor[i].page);
+				clear_page(va);
+				sg_set_buf(&sg[i], va, PAGE_SIZE);
+			}
+			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
+			memset(iv, 0x00, ivsize);
+
+			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+			comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
+			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
+			if (do_crypt(true, req, &comp))
+				wait_for_completion(&comp.comp);
+			kvfree(sg);
+			r = dm_integrity_failed(ic);
+			if (r) {
+				*error = "Unable to encrypt journal";
+				goto bad;
+			}
+			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
+
+			crypto_free_skcipher(ic->journal_crypt);
+			ic->journal_crypt = NULL;
+		} else {
+			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
+			unsigned char iv[ivsize];
+			unsigned crypt_len = roundup(ivsize, blocksize);
+			unsigned char crypt_data[crypt_len];
+
+			skcipher_request_set_tfm(req, ic->journal_crypt);
+
+			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
+			if (!ic->journal_scatterlist) {
+				*error = "Unable to allocate sg list";
+				r = -ENOMEM;
+				goto bad;
+			}
+			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
+			if (!ic->journal_io_scatterlist) {
+				*error = "Unable to allocate sg list";
+				r = -ENOMEM;
+				goto bad;
+			}
+			ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+			if (!ic->sk_requests) {
+				*error = "Unable to allocate sk requests";
+				r = -ENOMEM;
+				goto bad;
+			}
+			for (i = 0; i < ic->journal_sections; i++) {
+				struct scatterlist sg;
+				struct skcipher_request *section_req;
+				__u32 section_le = cpu_to_le32(i);
+
+				memset(iv, 0x00, ivsize);
+				memset(crypt_data, 0x00, crypt_len);
+				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
+
+				sg_init_one(&sg, crypt_data, crypt_len);
+				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+				comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
+				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
+				if (do_crypt(true, req, &comp))
+					wait_for_completion(&comp.comp);
+
+				r = dm_integrity_failed(ic);
+				if (r) {
+					*error = "Unable to generate iv";
+					goto bad;
+				}
+
+				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+				if (!section_req) {
+					*error = "Unable to allocate crypt request";
+					r = -ENOMEM;
+					goto bad;
+				}
+				section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
+				if (!section_req->iv) {
+					skcipher_request_free(section_req);
+					*error = "Unable to allocate iv";
+					r = -ENOMEM;
+					goto bad;
+				}
+				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
+				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
+				ic->sk_requests[i] = section_req;
+				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
+			}
+		}
+	}
+
+	for (i = 0; i < N_COMMIT_IDS; i++) {
+		unsigned j;
+retest_commit_id:
+		for (j = 0; j < i; j++) {
+			if (ic->commit_ids[j] == ic->commit_ids[i]) {
+				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
+				goto retest_commit_id;
+			}
+		}
+		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
+	}
+
+	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
+	if (journal_tree_size > ULONG_MAX) {
+		*error = "Journal doesn't fit into memory";
+		r = -ENOMEM;
+		goto bad;
+	}
+	ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+	if (!ic->journal_tree) {
+		*error = "Could not allocate memory for journal tree";
+		r = -ENOMEM;
+	}
+bad:
+	return r;
+}
+
 /*
  * Construct a integrity mapping: <dev_path> <offset> <tag_size>
  *
@@ -2461,7 +2655,6 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	};
 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
 	bool should_write_sb;
-	__u64 journal_pages, journal_desc_size, journal_tree_size;
 	__u64 threshold;
 	unsigned long long start;

@@ -2761,189 +2954,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

-	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
-				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
-	journal_desc_size = journal_pages * sizeof(struct page_list);
-	if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
-		ti->error = "Journal doesn't fit into memory";
-		r = -ENOMEM;
-		goto bad;
-	}
-	ic->journal_pages = journal_pages;
-
-	ic->journal = dm_integrity_alloc_page_list(ic);
-	if (!ic->journal) {
-		ti->error = "Could not allocate memory for journal";
-		r = -ENOMEM;
-		goto bad;
-	}
-	if (ic->journal_crypt_alg.alg_string) {
-		unsigned ivsize, blocksize;
-		struct journal_completion comp;
-		comp.ic = ic;
-
-		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
-		if (IS_ERR(ic->journal_crypt)) {
-			ti->error = "Invalid journal cipher";
-			r = PTR_ERR(ic->journal_crypt);
-			ic->journal_crypt = NULL;
-			goto bad;
-		}
-		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
-		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
-
-		if (ic->journal_crypt_alg.key) {
-			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
-						   ic->journal_crypt_alg.key_size);
-			if (r) {
-				ti->error = "Error setting encryption key";
-				goto bad;
-			}
-		}
-		DEBUG_print("cipher %s, block size %u iv size %u\n",
-			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
-
-		ic->journal_io = dm_integrity_alloc_page_list(ic);
-		if (!ic->journal_io) {
-			ti->error = "Could not allocate memory for journal io";
-			r = -ENOMEM;
-			goto bad;
-		}
-
-		if (blocksize == 1) {
-			struct scatterlist *sg;
-			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-			unsigned char iv[ivsize];
-			skcipher_request_set_tfm(req, ic->journal_crypt);
-
-			ic->journal_xor = dm_integrity_alloc_page_list(ic);
-			if (!ic->journal_xor) {
-				ti->error = "Could not allocate memory for journal xor";
-				r = -ENOMEM;
-				goto bad;
-			}
-
-			sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
-			if (!sg) {
-				ti->error = "Unable to allocate sg list";
-				r = -ENOMEM;
-				goto bad;
-			}
-			sg_init_table(sg, ic->journal_pages + 1);
-			for (i = 0; i < ic->journal_pages; i++) {
-				char *va = lowmem_page_address(ic->journal_xor[i].page);
-				clear_page(va);
-				sg_set_buf(&sg[i], va, PAGE_SIZE);
-			}
-			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-			memset(iv, 0x00, ivsize);
-
-			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
-			comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
-			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
-			if (do_crypt(true, req, &comp))
-				wait_for_completion(&comp.comp);
-			kvfree(sg);
-			if ((r = dm_integrity_failed(ic))) {
-				ti->error = "Unable to encrypt journal";
-				goto bad;
-			}
-			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
-
-			crypto_free_skcipher(ic->journal_crypt);
-			ic->journal_crypt = NULL;
-		} else {
-			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-			unsigned char iv[ivsize];
-			unsigned crypt_len = roundup(ivsize, blocksize);
-			unsigned char crypt_data[crypt_len];
-
-			skcipher_request_set_tfm(req, ic->journal_crypt);
-
-			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
-			if (!ic->journal_scatterlist) {
-				ti->error = "Unable to allocate sg list";
-				r = -ENOMEM;
-				goto bad;
-			}
-			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
-			if (!ic->journal_io_scatterlist) {
-				ti->error = "Unable to allocate sg list";
-				r = -ENOMEM;
-				goto bad;
-			}
-			ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
-			if (!ic->sk_requests) {
-				ti->error = "Unable to allocate sk requests";
-				r = -ENOMEM;
-				goto bad;
-			}
-			for (i = 0; i < ic->journal_sections; i++) {
-				struct scatterlist sg;
-				struct skcipher_request *section_req;
-				__u32 section_le = cpu_to_le32(i);
-
-				memset(iv, 0x00, ivsize);
-				memset(crypt_data, 0x00, crypt_len);
-				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
-
-				sg_init_one(&sg, crypt_data, crypt_len);
-				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
-				comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
-				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
-				if (do_crypt(true, req, &comp))
-					wait_for_completion(&comp.comp);
-
-				if ((r = dm_integrity_failed(ic))) {
-					ti->error = "Unable to generate iv";
-					goto bad;
-				}
-
-				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
-				if (!section_req) {
-					ti->error = "Unable to allocate crypt request";
-					r = -ENOMEM;
-					goto bad;
-				}
-				section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
-				if (!section_req->iv) {
-					skcipher_request_free(section_req);
-					ti->error = "Unable to allocate iv";
-					r = -ENOMEM;
-					goto bad;
-				}
-				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
-				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
-				ic->sk_requests[i] = section_req;
-				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
-			}
-		}
-	}
-
-	for (i = 0; i < N_COMMIT_IDS; i++) {
-		unsigned j;
-retest_commit_id:
-		for (j = 0; j < i; j++) {
-			if (ic->commit_ids[j] == ic->commit_ids[i]) {
-				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
-				goto retest_commit_id;
-			}
-		}
-		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
-	}
-
-	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
-	if (journal_tree_size > ULONG_MAX) {
-		ti->error = "Journal doesn't fit into memory";
-		r = -ENOMEM;
-		goto bad;
-	}
-	ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
-	if (!ic->journal_tree) {
-		ti->error = "Could not allocate memory for journal tree";
-		r = -ENOMEM;
+	r = create_journal(ic, &ti->error);
+	if (r)
 		goto bad;
-	}

 	if (should_write_sb) {
 		int r;