Diffstat (limited to 'drivers/md/dm-zoned-metadata.c')
-rw-r--r--	drivers/md/dm-zoned-metadata.c	68
1 file changed, 50 insertions, 18 deletions
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
  *
@@ -34,7 +35,7 @@
  * (1) Super block (1 block)
  * (2) Chunk mapping table (nr_map_blocks)
  * (3) Bitmap blocks (nr_bitmap_blocks)
- * All metadata blocks are stored in conventional zones, starting from the
+ * All metadata blocks are stored in conventional zones, starting from
  * the first conventional zone found on disk.
  */
 struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
  * Lock/unlock metadata access. This is a "read" lock on a semaphore
  * that prevents metadata flush from running while metadata are being
  * modified. The actual metadata write mutual exclusion is achieved with
- * the map lock and zone styate management (active and reclaim state are
+ * the map lock and zone state management (active and reclaim state are
  * mutually exclusive).
  */
 void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return ERR_PTR(-EIO);
+
 	/* Get a new block and a BIO to read it */
 	mblk = dmz_alloc_mblock(zmd, mblk_no);
 	if (!mblk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		dmz_free_mblock(zmd, mblk);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 	if (!mblk) {
 		/* Cache miss: read the block from disk */
 		mblk = dmz_get_mblock_slow(zmd, mblk_no);
-		if (!mblk)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(mblk))
+			return mblk;
 	}
 
 	/* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 /*
  * Issue a metadata block write BIO.
  */
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
-			     unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+			    unsigned int set)
 {
 	sector_t block = zmd->sb[set].block + mblk->no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		set_bit(DMZ_META_ERROR, &mblk->state);
-		return;
+		return -ENOMEM;
 	}
 
 	set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 	submit_bio(bio);
+
+	return 0;
 }
 
 /*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
 	struct bio *bio;
 	int ret;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio)
 		return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 {
 	struct dmz_mblock *mblk;
 	struct blk_plug plug;
-	int ret = 0;
+	int ret = 0, nr_mblks_submitted = 0;
 
 	/* Issue writes */
 	blk_start_plug(&plug);
-	list_for_each_entry(mblk, write_list, link)
-		dmz_write_mblock(zmd, mblk, set);
+	list_for_each_entry(mblk, write_list, link) {
+		ret = dmz_write_mblock(zmd, mblk, set);
+		if (ret)
+			break;
+		nr_mblks_submitted++;
+	}
 	blk_finish_plug(&plug);
 
 	/* Wait for completion */
 	list_for_each_entry(mblk, write_list, link) {
+		if (!nr_mblks_submitted)
+			break;
 		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 			clear_bit(DMZ_META_ERROR, &mblk->state);
 			ret = -EIO;
 		}
+		nr_mblks_submitted--;
 	}
 
 	/* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 	 */
 	dmz_lock_flush(zmd);
 
+	if (dmz_bdev_is_dying(zmd->dev)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/* Get dirty blocks */
 	spin_lock(&zmd->mblk_lock);
 	list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_rnd_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
 		if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
 			return dzone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 	struct dm_zone *zone;
 
 	if (list_empty(&zmd->map_seq_list))
-		return NULL;
+		return ERR_PTR(-EBUSY);
 
 	list_for_each_entry(zone, &zmd->map_seq_list, link) {
 		if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
 			return zone;
 	}
 
-	return NULL;
+	return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1628,9 +1652,13 @@ again:
 	if (op != REQ_OP_WRITE)
 		goto out;
 
-	/* Alloate a random zone */
+	/* Allocate a random zone */
 	dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!dzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			dzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
@@ -1725,9 +1753,13 @@ again:
 	if (bzone)
 		goto out;
 
-	/* Alloate a random zone */
+	/* Allocate a random zone */
 	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!bzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			bzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
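
The recurring idiom in this patch: lookup helpers now return ERR_PTR()-encoded errors instead of NULL, callers test the result with IS_ERR(), and I/O paths bail out early when dmz_bdev_is_dying() reports a failing backing device. Below is a minimal, self-contained sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() pattern; the mini macro definitions only mimic the kernel's <linux/err.h> helpers so the example compiles in userspace, and dmz_find_block() is a hypothetical stand-in rather than a function from dm-zoned-metadata.c.

/*
 * Illustrative sketch only: these mini-definitions mimic the kernel's
 * ERR_PTR()/IS_ERR()/PTR_ERR() helpers so the error-propagation pattern
 * used by the patch can be shown in plain userspace C.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct block { uint64_t no; };

/* Hypothetical lookup: -EIO if the (simulated) device is dying, -ENOMEM on OOM. */
static struct block *dmz_find_block(uint64_t no, int device_dying)
{
	struct block *blk;

	if (device_dying)
		return ERR_PTR(-EIO);

	blk = malloc(sizeof(*blk));
	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->no = no;
	return blk;
}

int main(void)
{
	struct block *blk = dmz_find_block(42, 1);

	if (IS_ERR(blk)) {
		/* Propagate the encoded errno instead of dereferencing. */
		fprintf(stderr, "lookup failed: %ld\n", PTR_ERR(blk));
		return 1;
	}

	printf("got block %llu\n", (unsigned long long)blk->no);
	free(blk);
	return 0;
}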