diff options
author | Christoph Hellwig <hch@lst.de> | 2018-07-24 08:04:12 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-07-24 16:39:27 -0400 |
commit | 24d5493f207ce0ce38df80ce86c907417e04594a (patch) | |
tree | 5b4239636db01bf3f2a173ae4f32228afc8ce873 /block/bio.c | |
parent | 76f17d8ba1cbc3d2786955b2f15e071da93527cd (diff) |
block: simplify bio_check_pages_dirty
bio_check_pages_dirty currently violates the invariant that bv_page of
a bio_vec inside bi_vcnt shouldn't be zero, and that is going to become
really annoying with multipage biovecs. Fortunately there isn't all
that good a reason for it - once we decide to defer freeing the bio
to a workqueue, holding onto a few additional pages isn't really an
issue anymore. So just check if there is a clean page that needs
dirtying in the first pass, and do a second pass to free them if there
was none, while the cache is still hot.
Also use the chance to micro-optimize bio_dirty_fn a bit by not saving
irq state - we know we are called from a workqueue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bio.c')
-rw-r--r-- | block/bio.c | 56 |
1 file changed, 21 insertions, 35 deletions
diff --git a/block/bio.c b/block/bio.c index 8ecc95615941..504b42278099 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -1649,19 +1649,15 @@ static void bio_release_pages(struct bio *bio) | |||
1649 | struct bio_vec *bvec; | 1649 | struct bio_vec *bvec; |
1650 | int i; | 1650 | int i; |
1651 | 1651 | ||
1652 | bio_for_each_segment_all(bvec, bio, i) { | 1652 | bio_for_each_segment_all(bvec, bio, i) |
1653 | struct page *page = bvec->bv_page; | 1653 | put_page(bvec->bv_page); |
1654 | |||
1655 | if (page) | ||
1656 | put_page(page); | ||
1657 | } | ||
1658 | } | 1654 | } |
1659 | 1655 | ||
1660 | /* | 1656 | /* |
1661 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | 1657 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. |
1662 | * If they are, then fine. If, however, some pages are clean then they must | 1658 | * If they are, then fine. If, however, some pages are clean then they must |
1663 | * have been written out during the direct-IO read. So we take another ref on | 1659 | * have been written out during the direct-IO read. So we take another ref on |
1664 | * the BIO and the offending pages and re-dirty the pages in process context. | 1660 | * the BIO and re-dirty the pages in process context. |
1665 | * | 1661 | * |
1666 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | 1662 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from |
1667 | * here on. It will run one put_page() against each page and will run one | 1663 | * here on. It will run one put_page() against each page and will run one |
@@ -1679,52 +1675,42 @@ static struct bio *bio_dirty_list; | |||
1679 | */ | 1675 | */ |
1680 | static void bio_dirty_fn(struct work_struct *work) | 1676 | static void bio_dirty_fn(struct work_struct *work) |
1681 | { | 1677 | { |
1682 | unsigned long flags; | 1678 | struct bio *bio, *next; |
1683 | struct bio *bio; | ||
1684 | 1679 | ||
1685 | spin_lock_irqsave(&bio_dirty_lock, flags); | 1680 | spin_lock_irq(&bio_dirty_lock); |
1686 | bio = bio_dirty_list; | 1681 | next = bio_dirty_list; |
1687 | bio_dirty_list = NULL; | 1682 | bio_dirty_list = NULL; |
1688 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | 1683 | spin_unlock_irq(&bio_dirty_lock); |
1689 | 1684 | ||
1690 | while (bio) { | 1685 | while ((bio = next) != NULL) { |
1691 | struct bio *next = bio->bi_private; | 1686 | next = bio->bi_private; |
1692 | 1687 | ||
1693 | bio_set_pages_dirty(bio); | 1688 | bio_set_pages_dirty(bio); |
1694 | bio_release_pages(bio); | 1689 | bio_release_pages(bio); |
1695 | bio_put(bio); | 1690 | bio_put(bio); |
1696 | bio = next; | ||
1697 | } | 1691 | } |
1698 | } | 1692 | } |
1699 | 1693 | ||
1700 | void bio_check_pages_dirty(struct bio *bio) | 1694 | void bio_check_pages_dirty(struct bio *bio) |
1701 | { | 1695 | { |
1702 | struct bio_vec *bvec; | 1696 | struct bio_vec *bvec; |
1703 | int nr_clean_pages = 0; | 1697 | unsigned long flags; |
1704 | int i; | 1698 | int i; |
1705 | 1699 | ||
1706 | bio_for_each_segment_all(bvec, bio, i) { | 1700 | bio_for_each_segment_all(bvec, bio, i) { |
1707 | struct page *page = bvec->bv_page; | 1701 | if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) |
1708 | 1702 | goto defer; | |
1709 | if (PageDirty(page) || PageCompound(page)) { | ||
1710 | put_page(page); | ||
1711 | bvec->bv_page = NULL; | ||
1712 | } else { | ||
1713 | nr_clean_pages++; | ||
1714 | } | ||
1715 | } | 1703 | } |
1716 | 1704 | ||
1717 | if (nr_clean_pages) { | 1705 | bio_release_pages(bio); |
1718 | unsigned long flags; | 1706 | bio_put(bio); |
1719 | 1707 | return; | |
1720 | spin_lock_irqsave(&bio_dirty_lock, flags); | 1708 | defer: |
1721 | bio->bi_private = bio_dirty_list; | 1709 | spin_lock_irqsave(&bio_dirty_lock, flags); |
1722 | bio_dirty_list = bio; | 1710 | bio->bi_private = bio_dirty_list; |
1723 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | 1711 | bio_dirty_list = bio; |
1724 | schedule_work(&bio_dirty_work); | 1712 | spin_unlock_irqrestore(&bio_dirty_lock, flags); |
1725 | } else { | 1713 | schedule_work(&bio_dirty_work); |
1726 | bio_put(bio); | ||
1727 | } | ||
1728 | } | 1714 | } |
1729 | EXPORT_SYMBOL_GPL(bio_check_pages_dirty); | 1715 | EXPORT_SYMBOL_GPL(bio_check_pages_dirty); |
1730 | 1716 | ||