Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f4b51809db21..586cef085c6a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -71,7 +71,7 @@ struct dm_crypt_io {
 	struct convert_context ctx;
 
 	atomic_t io_pending;
-	int error;
+	blk_status_t error;
 	sector_t sector;
 
 	struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
 	unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
 		 */
 		case -EBADMSG:
 			atomic_dec(&ctx->cc_pending);
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		/*
 		 * There was an error while processing the request.
 		 */
 		default:
 			atomic_dec(&ctx->cc_pending);
-			return -EIO;
+			return BLK_STS_IOERR;
 		}
 	}
 
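The two hunks above change crypt_convert() from returning a negative errno to returning a block-layer status code (blk_status_t, defined in include/linux/blk_types.h): an authentication failure (-EBADMSG from the crypto layer) becomes BLK_STS_PROTECTION, any other failure becomes BLK_STS_IOERR, and success stays 0 (BLK_STS_OK). A minimal userspace sketch of that mapping follows; the enum only mirrors the handful of BLK_STS_* values touched by this patch, and crypt_convert_status() is a made-up name used for illustration, not a function in dm-crypt.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's blk_status_t values used in this
 * patch; the real definitions live in include/linux/blk_types.h and the
 * numeric values here are not meaningful. */
enum blk_status_sketch {
	SKETCH_BLK_STS_OK = 0,
	SKETCH_BLK_STS_IOERR,
	SKETCH_BLK_STS_PROTECTION,
	SKETCH_BLK_STS_RESOURCE,	/* used elsewhere in the patch for -ENOMEM */
};

/* Hypothetical helper mirroring the switch in crypt_convert(): map a
 * crypto-layer errno to a block status. */
static enum blk_status_sketch crypt_convert_status(int crypto_err)
{
	switch (crypto_err) {
	case 0:
		return SKETCH_BLK_STS_OK;		/* request completed */
	case -EBADMSG:
		return SKETCH_BLK_STS_PROTECTION;	/* integrity/auth failure */
	default:
		return SKETCH_BLK_STS_IOERR;		/* any other error */
	}
}

int main(void)
{
	printf("-EBADMSG -> %d\n", crypt_convert_status(-EBADMSG));
	printf("-EIO     -> %d\n", crypt_convert_status(-EIO));
	return 0;
}

Because BLK_STS_OK is zero and the failure statuses are nonzero (never negative), callers can no longer test for r < 0; that is why the later hunks in kcryptd_crypt_write_convert(), kcryptd_crypt_read_convert() and kcryptd_crypt_write_io_submit() switch to a plain "if (r)" / "if (io->error)" check.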
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	int error = io->error;
+	blk_status_t error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	else
 		kfree(io->integrity_metadata);
 
-	base_bio->bi_error = error;
+	base_bio->bi_status = error;
 	bio_endio(base_bio);
 }
 
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
 	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
-	int error;
+	blk_status_t error;
 
 	/*
 	 * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
 	if (rw == WRITE)
 		crypt_free_buffer_pages(cc, clone);
 
-	error = clone->bi_error;
+	error = clone->bi_status;
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
 
 	crypt_inc_pending(io);
 	if (kcryptd_io_read(io, GFP_NOIO))
-		io->error = -ENOMEM;
+		io->error = BLK_STS_RESOURCE;
 	crypt_dec_pending(io);
 }
 
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	sector_t sector;
 	struct rb_node **rbp, *parent;
 
-	if (unlikely(io->error < 0)) {
+	if (unlikely(io->error)) {
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct bio *clone;
 	int crypt_finished;
 	sector_t sector = io->sector;
-	int r;
+	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 		goto dec;
 	}
 
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
-	int r = 0;
+	blk_status_t r;
 
 	crypt_inc_pending(io);
 
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 			   io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 
 	if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error == -EBADMSG) {
 		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
 			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-		io->error = -EILSEQ;
+		io->error = BLK_STS_PROTECTION;
 	} else if (error < 0)
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 
 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
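On the completion side (crypt_dec_pending() and kcryptd_async_done() above), the accumulated status is kept in io->error and copied straight into bio->bi_status before bio_endio(); no errno translation happens at completion time. The sketch below mirrors that flow with stand-in types, since struct bio and bio_endio() come from the kernel's block layer and are only imitated here. For code that still receives negative errnos from lower layers, the block layer also provides errno_to_blk_status() and blk_status_to_errno() helpers (added alongside blk_status_t); this patch maps dm-crypt's few cases by hand instead.

#include <stdio.h>

/* Stand-in for the kernel's struct bio: only the status field matters for
 * this sketch. In the kernel, bi_status is a blk_status_t and replaced the
 * errno-valued bi_error field touched in the crypt_dec_pending() hunk. */
struct bio_sketch {
	unsigned int bi_status;	/* 0 == BLK_STS_OK, nonzero == failure */
};

/* Stand-in for bio_endio(): the real function completes the bio and
 * propagates bi_status up the stack. */
static void bio_endio_sketch(struct bio_sketch *bio)
{
	printf("bio completed, status %u (%s)\n", bio->bi_status,
	       bio->bi_status ? "failed" : "ok");
}

/* Mirrors the tail of crypt_dec_pending(): once the last pending reference
 * is dropped, io->error is handed to the block layer unchanged. */
static void complete_io_sketch(struct bio_sketch *base_bio, unsigned int error)
{
	base_bio->bi_status = error;	/* was: base_bio->bi_error = errno */
	bio_endio_sketch(base_bio);
}

int main(void)
{
	struct bio_sketch bio = { 0 };

	complete_io_sketch(&bio, 0);	/* success path */
	complete_io_sketch(&bio, 1);	/* any nonzero status is a failure */
	return 0;
}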