author		Milan Broz <gmazyland@gmail.com>	2017-03-16 10:39:44 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2017-03-24 15:54:21 -0400
commit		8f0009a225171cc1b76a6b443de5137b26e1374b (patch)
tree		4bb6cf315a9756411856c978eced0cc9a4fa1d2d /drivers/md/dm-crypt.c
parent		33d2f09fcb357fd1861c4959d1d3505492bf91f8 (diff)
dm crypt: optionally support larger encryption sector size
Add an optional "sector_size" parameter that specifies the encryption sector
size (the atomic unit of block device encryption).

The parameter can be in the range 512 - 4096 bytes and must be a power of two.
For compatibility reasons, the maximal IO must fit into the page limit, so the
limit is set to the minimal page size possible (4096 bytes).

NOTE: this device cannot yet be handled by cryptsetup if this parameter is set.

The IV for a sector is calculated from the 512-byte sector offset unless the
iv_large_sectors option is used.

Test script using dmsetup:

  DEV="/dev/sdb"
  DEV_SIZE=$(blockdev --getsz $DEV)
  KEY="9c1185a5c5e9fc54612808977ee8f548b2258d31ddadef707ba62c166051b9e3cd0294c27515f2bccee924e8823ca6e124b8fc3167ed478bca702babe4e130ac"
  BLOCK_SIZE=4096

  # dmsetup create test_crypt --table "0 $DEV_SIZE crypt aes-xts-plain64 $KEY 0 $DEV 0 1 sector_size:$BLOCK_SIZE"
  # dmsetup table --showkeys test_crypt

Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
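As an editorial aside, the user-space sketch below restates the two rules this patch introduces: the sector_size value must be 512..4096 bytes and a power of two, and the per-block IV index is the 512-byte sector number by default, or that number divided by (sector_size / 512) when iv_large_sectors is set. The helper names (sector_size_valid, iv_sector) and the program itself are illustrative only and are not part of the patch; the power-of-two test uses the usual bit trick rather than the kernel's ilog2() expression, and sector_div() is modeled with plain division.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

  /* Illustrative restatement of the sector_size checks in crypt_ctr_optional():
   * 512..4096 bytes and a power of two. */
  static int sector_size_valid(unsigned int sector_size)
  {
      return sector_size >= (1u << SECTOR_SHIFT) &&
             sector_size <= 4096 &&
             (sector_size & (sector_size - 1)) == 0;
  }

  /* IV index for the encryption block starting at 512-byte sector 'sector':
   * by default the 512-byte sector number is used directly; with
   * iv_large_sectors it is scaled down to sector_size units, mirroring the
   * sector_div() call added in crypt_convert_block_*(). */
  static uint64_t iv_sector(uint64_t sector, unsigned int sector_size,
                            int iv_large_sectors)
  {
      if (iv_large_sectors)
          return sector / (sector_size >> SECTOR_SHIFT);
      return sector;
  }

  int main(void)
  {
      assert(sector_size_valid(4096));
      assert(!sector_size_valid(3072));   /* not a power of two */

      /* With 4096-byte blocks, the ninth 512-byte sector lies in block 1. */
      printf("default IV index:          %llu\n",
             (unsigned long long)iv_sector(8, 4096, 0));   /* prints 8 */
      printf("iv_large_sectors IV index: %llu\n",
             (unsigned long long)iv_sector(8, 4096, 1));   /* prints 1 */
      return 0;
  }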
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	105
1 file changed, 82 insertions, 23 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b2e48b26fd40..05acc42bdd38 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -129,6 +129,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
 
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
+	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
 };
 
 /*
@@ -171,6 +172,7 @@ struct crypt_config {
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
+	unsigned int sector_size;
 
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
@@ -524,6 +526,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
 
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		ti->error = "Unsupported sector size for LMK";
+		return -EINVAL;
+	}
+
 	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(lmk->hash_tfm)) {
 		ti->error = "Error initializing LMK hash";
@@ -677,6 +684,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
 
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		ti->error = "Unsupported sector size for TCW";
+		return -EINVAL;
+	}
+
 	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
 		ti->error = "Wrong key size for TCW";
 		return -EINVAL;
@@ -1037,15 +1049,20 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
-	unsigned int data_len = 1 << SECTOR_SHIFT;
 	u8 *iv, *org_iv, *tag_iv, *tag;
 	uint64_t *sector;
 	int r = 0;
 
 	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
 
+	/* Reject unexpected unaligned bio. */
+	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+		return -EIO;
+
 	dmreq = dmreq_of_req(cc, req);
 	dmreq->iv_sector = ctx->cc_sector;
+	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+		sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
 	dmreq->ctx = ctx;
 
 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1066,13 +1083,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	sg_init_table(dmreq->sg_in, 4);
 	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
 	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
-	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, data_len, bv_in.bv_offset);
+	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
 	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
 
 	sg_init_table(dmreq->sg_out, 4);
 	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
 	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
-	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, data_len, bv_out.bv_offset);
+	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
 	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
 
 	if (cc->iv_gen_ops) {
@@ -1094,14 +1111,14 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
 	if (bio_data_dir(ctx->bio_in) == WRITE) {
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-				       data_len, iv);
+				       cc->sector_size, iv);
 		r = crypto_aead_encrypt(req);
 		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
 			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
 			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
 	} else {
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-				       data_len + cc->integrity_tag_size, iv);
+				       cc->sector_size + cc->integrity_tag_size, iv);
 		r = crypto_aead_decrypt(req);
 	}
 
@@ -1112,8 +1129,8 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-	bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-	bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
 	return r;
 }
@@ -1127,13 +1144,18 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct scatterlist *sg_in, *sg_out;
 	struct dm_crypt_request *dmreq;
-	unsigned int data_len = 1 << SECTOR_SHIFT;
 	u8 *iv, *org_iv, *tag_iv;
 	uint64_t *sector;
 	int r = 0;
 
+	/* Reject unexpected unaligned bio. */
+	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+		return -EIO;
+
 	dmreq = dmreq_of_req(cc, req);
 	dmreq->iv_sector = ctx->cc_sector;
+	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+		sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
 	dmreq->ctx = ctx;
 
 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1150,10 +1172,10 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	sg_out = &dmreq->sg_out[0];
 
 	sg_init_table(sg_in, 1);
-	sg_set_page(sg_in, bv_in.bv_page, data_len, bv_in.bv_offset);
+	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
 
 	sg_init_table(sg_out, 1);
-	sg_set_page(sg_out, bv_out.bv_page, data_len, bv_out.bv_offset);
+	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
 
 	if (cc->iv_gen_ops) {
 		/* For READs use IV stored in integrity metadata */
@@ -1171,7 +1193,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 		memcpy(iv, org_iv, cc->iv_size);
 	}
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, data_len, iv);
+	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
 
 	if (bio_data_dir(ctx->bio_in) == WRITE)
 		r = crypto_skcipher_encrypt(req);
@@ -1181,8 +1203,8 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-	bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-	bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
 	return r;
 }
@@ -1268,6 +1290,7 @@ static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
 	unsigned int tag_offset = 0;
+	unsigned int sector_step = cc->sector_size / (1 << SECTOR_SHIFT);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -1275,7 +1298,6 @@ static int crypt_convert(struct crypt_config *cc,
 	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
 		crypt_alloc_req(cc, ctx);
-
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
@@ -1298,16 +1320,16 @@ static int crypt_convert(struct crypt_config *cc,
 		 */
 		case -EINPROGRESS:
 			ctx->r.req = NULL;
-			ctx->cc_sector++;
-			tag_offset++;
+			ctx->cc_sector += sector_step;
+			tag_offset += sector_step;
 			continue;
 		/*
 		 * The request was already processed (synchronously).
 		 */
 		case 0:
 			atomic_dec(&ctx->cc_pending);
-			ctx->cc_sector++;
-			tag_offset++;
+			ctx->cc_sector += sector_step;
+			tag_offset += sector_step;
 			cond_resched();
 			continue;
 		/*
@@ -2506,10 +2528,11 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 	struct crypt_config *cc = ti->private;
 	struct dm_arg_set as;
 	static struct dm_arg _args[] = {
-		{0, 3, "Invalid number of feature args"},
+		{0, 6, "Invalid number of feature args"},
 	};
 	unsigned int opt_params, val;
 	const char *opt_string, *sval;
+	char dummy;
 	int ret;
 
 	/* Optional parameters */
@@ -2552,7 +2575,16 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
 			if (!cc->cipher_auth)
 				return -ENOMEM;
-		} else {
+		} else if (sscanf(opt_string, "sector_size:%u%c", &cc->sector_size, &dummy) == 1) {
+			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
+			    cc->sector_size > 4096 ||
+			    (1 << ilog2(cc->sector_size) != cc->sector_size)) {
+				ti->error = "Invalid feature value for sector_size";
+				return -EINVAL;
+			}
+		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
+			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+		else {
 			ti->error = "Invalid feature arguments";
 			return -EINVAL;
 		}
@@ -2592,6 +2624,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -ENOMEM;
 	}
 	cc->key_size = key_size;
+	cc->sector_size = (1 << SECTOR_SHIFT);
 
 	ti->private = cc;
 
@@ -2664,7 +2697,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	mutex_init(&cc->bio_alloc_lock);
 
 	ret = -EINVAL;
-	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
+	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
+	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
 		ti->error = "Invalid iv_offset sector";
 		goto bad;
 	}
@@ -2765,6 +2799,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
 		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
 
+	/*
+	 * Ensure that bio is a multiple of internal sector encryption size
+	 * and is aligned to this size as defined in IO hints.
+	 */
+	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
+		return -EIO;
+
+	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
+		return -EIO;
+
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
@@ -2772,12 +2816,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 		unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
 
 		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
-		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
+		    unlikely(!(io->integrity_metadata = kzalloc(tag_len,
 				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
 			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
 				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
 			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
 			io->integrity_metadata_from_pool = true;
+			memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
 		}
 	}
 
@@ -2825,6 +2870,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		num_feature_args += !!ti->num_discard_bios;
 		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		num_feature_args += (cc->sector_size != (1 << SECTOR_SHIFT)) ? 1 : 0;
+		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
 		if (cc->on_disk_tag_size)
 			num_feature_args++;
 		if (num_feature_args) {
@@ -2837,6 +2884,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 				DMEMIT(" submit_from_crypt_cpus");
 			if (cc->on_disk_tag_size)
 				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
+			if (cc->sector_size != (1 << SECTOR_SHIFT))
+				DMEMIT(" sector_size:%d", cc->sector_size);
+			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+				DMEMIT(" iv_large_sectors");
 		}
 
 		break;
@@ -2926,6 +2977,8 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
+	struct crypt_config *cc = ti->private;
+
 	/*
 	 * Unfortunate constraint that is required to avoid the potential
 	 * for exceeding underlying device's max_segments limits -- due to
@@ -2933,11 +2986,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	 * bio that are not as physically contiguous as the original bio.
 	 */
 	limits->max_segment_size = PAGE_SIZE;
+
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		limits->logical_block_size = cc->sector_size;
+		limits->physical_block_size = cc->sector_size;
+		blk_limits_io_min(limits, cc->sector_size);
+	}
 }
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 16, 0},
+	.version = {1, 17, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,