-rw-r--r--  drivers/md/dm-crypt.c          |  4
-rw-r--r--  drivers/md/dm-flakey.c         |  4
-rw-r--r--  drivers/md/dm-integrity.c      | 12
-rw-r--r--  drivers/md/dm-log-writes.c     |  4
-rw-r--r--  drivers/md/dm-mpath.c          | 13
-rw-r--r--  drivers/md/dm-raid1.c          |  6
-rw-r--r--  drivers/md/dm-snap.c           |  8
-rw-r--r--  drivers/md/dm-target.c         |  2
-rw-r--r--  drivers/md/dm-verity-target.c  |  6
-rw-r--r--  drivers/md/dm-zero.c           |  4
-rw-r--r--  drivers/md/dm.c                | 16
11 files changed, 46 insertions, 33 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ebf9e72d479b..f4b51809db21 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2795,10 +2795,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
          * and is aligned to this size as defined in IO hints.
          */
         if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         io = dm_per_bio_data(bio, cc->per_bio_data_size);
         crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 13305a182611..e8f093b323ce 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -321,7 +321,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                 if (bio_data_dir(bio) == READ) {
                         if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
                             !test_bit(ERROR_WRITES, &fc->flags))
-                                return -EIO;
+                                return DM_MAPIO_KILL;
                         goto map_bio;
                 }
 
@@ -349,7 +349,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                 /*
                  * By default, error all I/O.
                  */
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
 map_bio:
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c7f7c8d76576..ee78fb471229 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1352,13 +1352,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
                       (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
                       (unsigned long long)ic->provided_data_sectors);
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
         if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
                 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
                       ic->sectors_per_block,
                       (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
         if (ic->sectors_per_block > 1) {
@@ -1368,7 +1368,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                         if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
                                 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
                                         bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-                                return -EIO;
+                                return DM_MAPIO_KILL;
                         }
                 }
         }
@@ -1383,18 +1383,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                                 wanted_tag_size *= ic->tag_size;
                         if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
                                 DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-                                return -EIO;
+                                return DM_MAPIO_KILL;
                         }
                 }
         } else {
                 if (unlikely(bip != NULL)) {
                         DMERR("Unexpected integrity data when using internal hash");
-                        return -EIO;
+                        return DM_MAPIO_KILL;
                 }
         }
 
         if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
         dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 4dfe38655a49..e42264706c59 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -586,7 +586,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                 spin_lock_irq(&lc->blocks_lock);
                 lc->logging_enabled = false;
                 spin_unlock_irq(&lc->blocks_lock);
-                return -ENOMEM;
+                return DM_MAPIO_KILL;
         }
         INIT_LIST_HEAD(&block->list);
         pb->block = block;
@@ -639,7 +639,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                         spin_lock_irq(&lc->blocks_lock);
                         lc->logging_enabled = false;
                         spin_unlock_irq(&lc->blocks_lock);
-                        return -ENOMEM;
+                        return DM_MAPIO_KILL;
                 }
 
                 src = kmap_atomic(bv.bv_page);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6d5ebb76149d..bf6e49c780d5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -559,7 +559,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                         return DM_MAPIO_REQUEUE;
                 dm_report_EIO(m);
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
         mpio->pgpath = pgpath;
@@ -621,11 +621,18 @@ static void process_queued_bios(struct work_struct *work)
         blk_start_plug(&plug);
         while ((bio = bio_list_pop(&bios))) {
                 r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-                if (r < 0 || r == DM_MAPIO_REQUEUE) {
+                switch (r) {
+                case DM_MAPIO_KILL:
+                        r = -EIO;
+                        /*FALLTHRU*/
+                case DM_MAPIO_REQUEUE:
                         bio->bi_error = r;
                         bio_endio(bio);
-                } else if (r == DM_MAPIO_REMAPPED)
+                        break;
+                case DM_MAPIO_REMAPPED:
                         generic_make_request(bio);
+                        break;
+                }
         }
         blk_finish_plug(&plug);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 5e30b08b91d9..d9c0c6a77eb5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1207,14 +1207,14 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
         r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
         if (r < 0 && r != -EWOULDBLOCK)
-                return r;
+                return DM_MAPIO_KILL;
 
         /*
          * If region is not in-sync queue the bio.
          */
         if (!r || (r == -EWOULDBLOCK)) {
                 if (bio->bi_opf & REQ_RAHEAD)
-                        return -EIO;
+                        return DM_MAPIO_KILL;
 
                 queue_bio(ms, bio, rw);
                 return DM_MAPIO_SUBMITTED;
@@ -1226,7 +1226,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
          */
         m = choose_mirror(ms, bio->bi_iter.bi_sector);
         if (unlikely(!m))
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         dm_bio_record(&bio_record->details, bio);
         bio_record->m = m;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e152d9817c81..5a7f73f9a6fb 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1690,7 +1690,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
         /* Full snapshots are not usable */
         /* To get here the table must be live so s->active is always set. */
         if (!s->valid)
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         /* FIXME: should only take write lock if we need
          * to copy an exception */
@@ -1698,7 +1698,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
         if (!s->valid || (unlikely(s->snapshot_overflowed) &&
             bio_data_dir(bio) == WRITE)) {
-                r = -EIO;
+                r = DM_MAPIO_KILL;
                 goto out_unlock;
         }
 
@@ -1723,7 +1723,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
                         if (!s->valid || s->snapshot_overflowed) {
                                 free_pending_exception(pe);
-                                r = -EIO;
+                                r = DM_MAPIO_KILL;
                                 goto out_unlock;
                         }
 
@@ -1741,7 +1741,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                                         DMERR("Snapshot overflowed: Unable to allocate exception.");
                                 } else
                                         __invalidate_snapshot(s, -ENOMEM);
-                                r = -EIO;
+                                r = DM_MAPIO_KILL;
                                 goto out_unlock;
                         }
                 }
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index b242b750542f..c0d7e60820c4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -128,7 +128,7 @@ static void io_err_dtr(struct dm_target *tt)
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-        return -EIO;
+        return DM_MAPIO_KILL;
 }
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 97de961a3bfc..9ed55468b98b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -643,17 +643,17 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
         if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
             ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                 DMERR_LIMIT("unaligned io");
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
         if (bio_end_sector(bio) >>
             (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
                 DMERR_LIMIT("io out of range");
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
         if (bio_data_dir(bio) == WRITE)
-                return -EIO;
+                return DM_MAPIO_KILL;
 
         io = dm_per_bio_data(bio, ti->per_io_data_size);
         io->v = v;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b616f11d8473..b65ca8dcfbdc 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -39,7 +39,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
         case REQ_OP_READ:
                 if (bio->bi_opf & REQ_RAHEAD) {
                         /* readahead of null bytes only wastes buffer cache */
-                        return -EIO;
+                        return DM_MAPIO_KILL;
                 }
                 zero_fill_bio(bio);
                 break;
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
                 /* writes get silently dropped */
                 break;
         default:
-                return -EIO;
+                return DM_MAPIO_KILL;
         }
 
         bio_endio(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ef9500226c0..499f8209bacf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1084,18 +1084,24 @@ static void __map_bio(struct dm_target_io *tio)
         r = ti->type->map(ti, clone);
         dm_offload_end(&o);
 
-        if (r == DM_MAPIO_REMAPPED) {
+        switch (r) {
+        case DM_MAPIO_SUBMITTED:
+                break;
+        case DM_MAPIO_REMAPPED:
                 /* the bio has been remapped so dispatch it */
-
                 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                       tio->io->bio->bi_bdev->bd_dev, sector);
-
                 generic_make_request(clone);
-        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
+                break;
+        case DM_MAPIO_KILL:
+                r = -EIO;
+                /*FALLTHRU*/
+        case DM_MAPIO_REQUEUE:
                 /* error the io and bail out, or requeue it if needed */
                 dec_pending(tio->io, r);
                 free_tio(tio);
-        } else if (r != DM_MAPIO_SUBMITTED) {
+                break;
+        default:
                 DMWARN("unimplemented target map return value: %d", r);
                 BUG();
         }