Diffstat (limited to 'drivers/md/dm-zoned-target.c')
-rw-r--r--  drivers/md/dm-zoned-target.c  68
1 file changed, 60 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
  *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
 
 	refcount_inc(&bioctx->ref);
 	generic_make_request(clone);
+	if (clone->bi_status == BLK_STS_IOERR)
+		return -EIO;
 
 	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
 		zone->wp_block += nr_blocks;
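Note: generic_make_request() normally completes the clone asynchronously, so this added check only catches failures the block layer reports synchronously at submission time, such as a backing queue that is already dying; later errors still arrive through the clone's endio path. A minimal sketch of how a caller of dmz_submit_bio() would propagate the new -EIO return (the caller fragment below is assumed for illustration, it is not part of this hunk):

	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;	/* submission failed; error flows back up */
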
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 
 	/* Get the buffer zone. One will be allocated if needed */
 	bzone = dmz_get_chunk_buffer(zmd, zone);
-	if (!bzone)
-		return -ENOSPC;
+	if (IS_ERR(bzone))
+		return PTR_ERR(bzone);
 
 	if (dmz_is_readonly(bzone))
 		return -EROFS;
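Note: this hunk moves dmz_get_chunk_buffer() from a NULL-on-failure contract to an ERR_PTR()-encoded errno, so the caller can forward the real reason (-ENOSPC, -EIO) instead of always reporting -ENOSPC; the matching change on the dm-zoned-metadata.c side is assumed and not visible in this file. A self-contained userspace mimic of the <linux/err.h> convention, simplified for illustration:

#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/err.h> macros */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static char buffer[64];

/* Toy allocator: encodes a negative errno in the pointer on failure */
static void *get_buffer(int err)
{
	if (err)
		return ERR_PTR(err);	/* e.g. -5 for -EIO */
	return buffer;
}

int main(void)
{
	void *b = get_buffer(-5);	/* simulate an I/O error */

	if (IS_ERR(b))
		printf("get_buffer failed: %ld\n", PTR_ERR(b));
	return 0;
}
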
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 
 	dmz_lock_metadata(zmd);
 
+	if (dmz->dev->flags & DMZ_BDEV_DYING) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/*
 	 * Get the data zone mapping the chunk. There may be no
 	 * mapping for read and discard. If a mapping is obtained,
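Note: testing DMZ_BDEV_DYING right after taking the metadata lock stops dmz_handle_bio() from setting up a new chunk mapping against a device that is going away; the error then reaches the BIO through the out: label. The presumed epilogue that label jumps to, reconstructed from the surrounding function rather than from this hunk:

out:
	/* Complete the BIO with ret translated to a block-layer status */
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
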
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
 
 	/* Flush dirty metadata blocks */
 	ret = dmz_flush_metadata(dmz->metadata);
+	if (ret)
+		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
 
 	/* Process queued flush requests */
 	while (1) {
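Note: the flush failure is only logged at debug level here; judging from the loop that follows in dmz_flush_work() (context not shown in this hunk), ret is presumably also used to complete any queued flush BIOs with the corresponding status. A sketch of that presumed consumer:

	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		/* Complete each queued flush with the metadata flush result */
		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}
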
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
  * Get a chunk work and start it to process a new BIO.
  * If the BIO chunk has no work yet, create one.
  */
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
 	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
 	struct dm_chunk_work *cw;
+	int ret = 0;
 
 	mutex_lock(&dmz->chunk_lock);
 
 	/* Get the BIO chunk work. If one is not active yet, create one */
 	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
 	if (!cw) {
-		int ret;
 
 		/* Create a new chunk work */
 		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
-		if (!cw)
+		if (unlikely(!cw)) {
+			ret = -ENOMEM;
 			goto out;
+		}
 
 		INIT_WORK(&cw->work, dmz_chunk_work);
 		refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
 		if (unlikely(ret)) {
 			kfree(cw);
-			cw = NULL;
 			goto out;
 		}
 	}
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	bio_list_add(&cw->bio_list, bio);
 	dmz_get_chunk_work(cw);
 
+	dmz_reclaim_bio_acc(dmz->reclaim);
 	if (queue_work(dmz->chunk_wq, &cw->work))
 		dmz_get_chunk_work(cw);
 out:
 	mutex_unlock(&dmz->chunk_lock);
+	return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+	struct gendisk *disk;
+
+	if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+		disk = dmz_dev->bdev->bd_disk;
+		if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+			dmz_dev_warn(dmz_dev, "Backing device queue dying");
+			dmz_dev->flags |= DMZ_BDEV_DYING;
+		} else if (disk->fops->check_events) {
+			if (disk->fops->check_events(disk, 0) &
+					DISK_EVENT_MEDIA_CHANGE) {
+				dmz_dev_warn(dmz_dev, "Backing device offline");
+				dmz_dev->flags |= DMZ_BDEV_DYING;
+			}
+		}
+	}
+
+	return dmz_dev->flags & DMZ_BDEV_DYING;
 }
 
 /*
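Note: dmz_bdev_is_dying() latches DMZ_BDEV_DYING once blk_queue_dying() reports a dying request queue or the disk's check_events() method reports DISK_EVENT_MEDIA_CHANGE, so later calls reduce to a cheap flag test. A hedged sketch of the kind of reclaim-side call site the function comment refers to (the helper below is hypothetical, not part of this patch):

static int dmz_reclaim_step(struct dmz_reclaim *zrc)
{
	/* Abort cleanly instead of issuing zone I/O that can only fail */
	if (dmz_bdev_is_dying(zrc->dev))
		return -EIO;

	return dmz_do_reclaim(zrc);	/* assumed existing reclaim helper */
}
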
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int nr_sectors = bio_sectors(bio);
 	sector_t chunk_sector;
+	int ret;
+
+	if (dmz_bdev_is_dying(dmz->dev))
+		return DM_MAPIO_KILL;
 
 	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
 		      bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
 
 	/* Now ready to handle this BIO */
-	dmz_reclaim_bio_acc(dmz->reclaim);
-	dmz_queue_chunk_work(dmz, bio);
+	ret = dmz_queue_chunk_work(dmz, bio);
+	if (ret) {
+		dmz_dev_debug(dmz->dev,
+			      "BIO op %d, can't process chunk %llu, err %i\n",
+			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+			      ret);
+		return DM_MAPIO_REQUEUE;
+	}
 
 	return DM_MAPIO_SUBMITTED;
 }
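Note: dmz_map() now distinguishes two failure answers: DM_MAPIO_KILL when the backing device is dying (the DM core fails the BIO with BLK_STS_IOERR) and DM_MAPIO_REQUEUE when queuing the chunk work fails transiently, e.g. with -ENOMEM (the DM core resubmits the BIO later). An illustrative skeleton of that contract, with the checks reduced to hypothetical helpers:

static int example_map(struct dm_target *ti, struct bio *bio)
{
	if (backing_device_dead(ti))		/* hypothetical check */
		return DM_MAPIO_KILL;		/* bio fails with BLK_STS_IOERR */

	if (out_of_resources(ti))		/* hypothetical check */
		return DM_MAPIO_REQUEUE;	/* bio is retried later */

	return DM_MAPIO_SUBMITTED;		/* target completes the bio itself */
}
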
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
 	struct dmz_target *dmz = ti->private;
 
+	if (dmz_bdev_is_dying(dmz->dev))
+		return -ENODEV;
+
 	*bdev = dmz->dev->bdev;
 
 	return 0;