summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2015-07-20 09:29:37 -0400
committerJens Axboe <axboe@fb.com>2015-07-29 10:55:15 -0400
commit4246a0b63bd8f56a1469b12eafeb875b1041a451 (patch)
tree3281bb158d658ef7f208ad380c0ecee600a5ab5e
parent0034af036554c39eefd14d835a8ec3496ac46712 (diff)
block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO: (1) by clearing the BIO_UPTODATE flag (2) by returning a Linux errno value to the bi_end_io callback The first one has the drawback of only communicating a single possible error (-EIO), and the second one has the drawback of not being persistent when bios are queued up, and are not passed along from child to parent bio in the ever more popular chaining scenario. Having both mechanisms available has the additional drawback of utterly confusing driver authors and introducing bugs where various I/O submitters only deal with one of them, and the others have to add boilerplate code to deal with both kinds of error returns. So add a new bi_error field to store an errno value directly in struct bio and remove the existing mechanisms to clean all this up. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: NeilBrown <neilb@suse.com> Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--Documentation/block/biodoc.txt2
-rw-r--r--arch/m68k/emu/nfblock.c2
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c12
-rw-r--r--block/bio-integrity.c11
-rw-r--r--block/bio.c43
-rw-r--r--block/blk-core.c15
-rw-r--r--block/blk-lib.c30
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/bounce.c27
-rw-r--r--drivers/block/aoe/aoecmd.c10
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/brd.c13
-rw-r--r--drivers/block/drbd/drbd_actlog.c4
-rw-r--r--drivers/block/drbd/drbd_bitmap.c19
-rw-r--r--drivers/block/drbd/drbd_int.h11
-rw-r--r--drivers/block/drbd/drbd_req.c10
-rw-r--r--drivers/block/drbd/drbd_worker.c44
-rw-r--r--drivers/block/floppy.c7
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/pktcdvd.c32
-rw-r--r--drivers/block/ps3vram.c3
-rw-r--r--drivers/block/rsxx/dev.c9
-rw-r--r--drivers/block/umem.c4
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/md/bcache/btree.c10
-rw-r--r--drivers/md/bcache/closure.h2
-rw-r--r--drivers/md/bcache/io.c8
-rw-r--r--drivers/md/bcache/journal.c8
-rw-r--r--drivers/md/bcache/movinggc.c8
-rw-r--r--drivers/md/bcache/request.c27
-rw-r--r--drivers/md/bcache/super.c14
-rw-r--r--drivers/md/bcache/writeback.c10
-rw-r--r--drivers/md/dm-bio-prison.c6
-rw-r--r--drivers/md/dm-bufio.c26
-rw-r--r--drivers/md/dm-cache-target.c24
-rw-r--r--drivers/md/dm-crypt.c14
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-io.c6
-rw-r--r--drivers/md/dm-log-writes.c11
-rw-r--r--drivers/md/dm-raid1.c24
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-thin.c41
-rw-r--r--drivers/md/dm-verity.c9
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c15
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c18
-rw-r--r--drivers/md/multipath.c12
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c53
-rw-r--r--drivers/md/raid10.c55
-rw-r--r--drivers/md/raid5.c52
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c3
-rw-r--r--drivers/target/target_core_iblock.c21
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--fs/btrfs/check-integrity.c10
-rw-r--r--fs/btrfs/compression.c24
-rw-r--r--fs/btrfs/disk-io.c35
-rw-r--r--fs/btrfs/extent_io.c30
-rw-r--r--fs/btrfs/inode.c50
-rw-r--r--fs/btrfs/raid56.c62
-rw-r--r--fs/btrfs/scrub.c22
-rw-r--r--fs/btrfs/volumes.c23
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/direct-io.c13
-rw-r--r--fs/ext4/page-io.c15
-rw-r--r--fs/ext4/readpage.c6
-rw-r--r--fs/f2fs/data.c10
-rw-r--r--fs/gfs2/lops.c10
-rw-r--r--fs/gfs2/ops_fstype.c6
-rw-r--r--fs/jfs/jfs_logmgr.c8
-rw-r--r--fs/jfs/jfs_metapage.c8
-rw-r--r--fs/logfs/dev_bdev.c12
-rw-r--r--fs/mpage.c4
-rw-r--r--fs/nfs/blocklayout/blocklayout.c14
-rw-r--r--fs/nilfs2/segbuf.c5
-rw-r--r--fs/ocfs2/cluster/heartbeat.c9
-rw-r--r--fs/xfs/xfs_aops.c5
-rw-r--r--fs/xfs/xfs_buf.c7
-rw-r--r--include/linux/bio.h13
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/swap.h4
-rw-r--r--kernel/power/swap.c12
-rw-r--r--kernel/trace/blktrace.c10
-rw-r--r--mm/page_io.c12
95 files changed, 622 insertions, 682 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index fd12c0d835fd..5be8a7f4cc7f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1109,7 +1109,7 @@ it will loop and handle as many sectors (on a bio-segment granularity)
1109as specified. 1109as specified.
1110 1110
1111Now bh->b_end_io is replaced by bio->bi_end_io, but most of the time the 1111Now bh->b_end_io is replaced by bio->bi_end_io, but most of the time the
1112right thing to use is bio_endio(bio, uptodate) instead. 1112right thing to use is bio_endio(bio) instead.
1113 1113
1114If the driver is dropping the io_request_lock from its request_fn strategy, 1114If the driver is dropping the io_request_lock from its request_fn strategy,
1115then it just needs to replace that with q->queue_lock instead. 1115then it just needs to replace that with q->queue_lock instead.
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 2d75ae246167..f2a00c591bf7 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -76,7 +76,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
76 bvec_to_phys(&bvec)); 76 bvec_to_phys(&bvec));
77 sec += len; 77 sec += len;
78 } 78 }
79 bio_endio(bio, 0); 79 bio_endio(bio);
80} 80}
81 81
82static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 82static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ee90db17b097..f86250c48b53 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -132,7 +132,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
132 phys_mem += vec.bv_len; 132 phys_mem += vec.bv_len;
133 transfered += vec.bv_len; 133 transfered += vec.bv_len;
134 } 134 }
135 bio_endio(bio, 0); 135 bio_endio(bio);
136} 136}
137 137
138/** 138/**
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 48eebacdf5fe..fa84ca990caa 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -101,8 +101,9 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
101 spin_unlock(&dev->lock); 101 spin_unlock(&dev->lock);
102} 102}
103 103
104static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) 104static void simdisk_make_request(struct request_queue *q, struct bio *bio)
105{ 105{
106 struct simdisk *dev = q->queuedata;
106 struct bio_vec bvec; 107 struct bio_vec bvec;
107 struct bvec_iter iter; 108 struct bvec_iter iter;
108 sector_t sector = bio->bi_iter.bi_sector; 109 sector_t sector = bio->bi_iter.bi_sector;
@@ -116,17 +117,10 @@ static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
116 sector += len; 117 sector += len;
117 __bio_kunmap_atomic(buffer); 118 __bio_kunmap_atomic(buffer);
118 } 119 }
119 return 0;
120}
121 120
122static void simdisk_make_request(struct request_queue *q, struct bio *bio) 121 bio_endio(bio);
123{
124 struct simdisk *dev = q->queuedata;
125 int status = simdisk_xfer_bio(dev, bio);
126 bio_endio(bio, status);
127} 122}
128 123
129
130static int simdisk_open(struct block_device *bdev, fmode_t mode) 124static int simdisk_open(struct block_device *bdev, fmode_t mode)
131{ 125{
132 struct simdisk *dev = bdev->bd_disk->private_data; 126 struct simdisk *dev = bdev->bd_disk->private_data;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 719b7152aed1..4aecca79374a 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -355,13 +355,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
355 container_of(work, struct bio_integrity_payload, bip_work); 355 container_of(work, struct bio_integrity_payload, bip_work);
356 struct bio *bio = bip->bip_bio; 356 struct bio *bio = bip->bip_bio;
357 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 357 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
358 int error;
359 358
360 error = bio_integrity_process(bio, bi->verify_fn); 359 bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
361 360
362 /* Restore original bio completion handler */ 361 /* Restore original bio completion handler */
363 bio->bi_end_io = bip->bip_end_io; 362 bio->bi_end_io = bip->bip_end_io;
364 bio_endio(bio, error); 363 bio_endio(bio);
365} 364}
366 365
367/** 366/**
@@ -376,7 +375,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
376 * in process context. This function postpones completion 375 * in process context. This function postpones completion
377 * accordingly. 376 * accordingly.
378 */ 377 */
379void bio_integrity_endio(struct bio *bio, int error) 378void bio_integrity_endio(struct bio *bio)
380{ 379{
381 struct bio_integrity_payload *bip = bio_integrity(bio); 380 struct bio_integrity_payload *bip = bio_integrity(bio);
382 381
@@ -386,9 +385,9 @@ void bio_integrity_endio(struct bio *bio, int error)
386 * integrity metadata. Restore original bio end_io handler 385 * integrity metadata. Restore original bio end_io handler
387 * and run it. 386 * and run it.
388 */ 387 */
389 if (error) { 388 if (bio->bi_error) {
390 bio->bi_end_io = bip->bip_end_io; 389 bio->bi_end_io = bip->bip_end_io;
391 bio_endio(bio, error); 390 bio_endio(bio);
392 391
393 return; 392 return;
394 } 393 }
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..a23f489f398f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -269,7 +269,6 @@ static void bio_free(struct bio *bio)
269void bio_init(struct bio *bio) 269void bio_init(struct bio *bio)
270{ 270{
271 memset(bio, 0, sizeof(*bio)); 271 memset(bio, 0, sizeof(*bio));
272 bio->bi_flags = 1 << BIO_UPTODATE;
273 atomic_set(&bio->__bi_remaining, 1); 272 atomic_set(&bio->__bi_remaining, 1);
274 atomic_set(&bio->__bi_cnt, 1); 273 atomic_set(&bio->__bi_cnt, 1);
275} 274}
@@ -292,14 +291,17 @@ void bio_reset(struct bio *bio)
292 __bio_free(bio); 291 __bio_free(bio);
293 292
294 memset(bio, 0, BIO_RESET_BYTES); 293 memset(bio, 0, BIO_RESET_BYTES);
295 bio->bi_flags = flags | (1 << BIO_UPTODATE); 294 bio->bi_flags = flags;
296 atomic_set(&bio->__bi_remaining, 1); 295 atomic_set(&bio->__bi_remaining, 1);
297} 296}
298EXPORT_SYMBOL(bio_reset); 297EXPORT_SYMBOL(bio_reset);
299 298
300static void bio_chain_endio(struct bio *bio, int error) 299static void bio_chain_endio(struct bio *bio)
301{ 300{
302 bio_endio(bio->bi_private, error); 301 struct bio *parent = bio->bi_private;
302
303 parent->bi_error = bio->bi_error;
304 bio_endio(parent);
303 bio_put(bio); 305 bio_put(bio);
304} 306}
305 307
@@ -896,11 +898,11 @@ struct submit_bio_ret {
896 int error; 898 int error;
897}; 899};
898 900
899static void submit_bio_wait_endio(struct bio *bio, int error) 901static void submit_bio_wait_endio(struct bio *bio)
900{ 902{
901 struct submit_bio_ret *ret = bio->bi_private; 903 struct submit_bio_ret *ret = bio->bi_private;
902 904
903 ret->error = error; 905 ret->error = bio->bi_error;
904 complete(&ret->event); 906 complete(&ret->event);
905} 907}
906 908
@@ -1445,7 +1447,7 @@ void bio_unmap_user(struct bio *bio)
1445} 1447}
1446EXPORT_SYMBOL(bio_unmap_user); 1448EXPORT_SYMBOL(bio_unmap_user);
1447 1449
1448static void bio_map_kern_endio(struct bio *bio, int err) 1450static void bio_map_kern_endio(struct bio *bio)
1449{ 1451{
1450 bio_put(bio); 1452 bio_put(bio);
1451} 1453}
@@ -1501,13 +1503,13 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1501} 1503}
1502EXPORT_SYMBOL(bio_map_kern); 1504EXPORT_SYMBOL(bio_map_kern);
1503 1505
1504static void bio_copy_kern_endio(struct bio *bio, int err) 1506static void bio_copy_kern_endio(struct bio *bio)
1505{ 1507{
1506 bio_free_pages(bio); 1508 bio_free_pages(bio);
1507 bio_put(bio); 1509 bio_put(bio);
1508} 1510}
1509 1511
1510static void bio_copy_kern_endio_read(struct bio *bio, int err) 1512static void bio_copy_kern_endio_read(struct bio *bio)
1511{ 1513{
1512 char *p = bio->bi_private; 1514 char *p = bio->bi_private;
1513 struct bio_vec *bvec; 1515 struct bio_vec *bvec;
@@ -1518,7 +1520,7 @@ static void bio_copy_kern_endio_read(struct bio *bio, int err)
1518 p += bvec->bv_len; 1520 p += bvec->bv_len;
1519 } 1521 }
1520 1522
1521 bio_copy_kern_endio(bio, err); 1523 bio_copy_kern_endio(bio);
1522} 1524}
1523 1525
1524/** 1526/**
@@ -1778,25 +1780,15 @@ static inline bool bio_remaining_done(struct bio *bio)
1778/** 1780/**
1779 * bio_endio - end I/O on a bio 1781 * bio_endio - end I/O on a bio
1780 * @bio: bio 1782 * @bio: bio
1781 * @error: error, if any
1782 * 1783 *
1783 * Description: 1784 * Description:
1784 * bio_endio() will end I/O on the whole bio. bio_endio() is the 1785 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1785 * preferred way to end I/O on a bio, it takes care of clearing 1786 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1786 * BIO_UPTODATE on error. @error is 0 on success, and and one of the 1787 * bio unless they own it and thus know that it has an end_io function.
1787 * established -Exxxx (-EIO, for instance) error values in case
1788 * something went wrong. No one should call bi_end_io() directly on a
1789 * bio unless they own it and thus know that it has an end_io
1790 * function.
1791 **/ 1788 **/
1792void bio_endio(struct bio *bio, int error) 1789void bio_endio(struct bio *bio)
1793{ 1790{
1794 while (bio) { 1791 while (bio) {
1795 if (error)
1796 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1797 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1798 error = -EIO;
1799
1800 if (unlikely(!bio_remaining_done(bio))) 1792 if (unlikely(!bio_remaining_done(bio)))
1801 break; 1793 break;
1802 1794
@@ -1810,11 +1802,12 @@ void bio_endio(struct bio *bio, int error)
1810 */ 1802 */
1811 if (bio->bi_end_io == bio_chain_endio) { 1803 if (bio->bi_end_io == bio_chain_endio) {
1812 struct bio *parent = bio->bi_private; 1804 struct bio *parent = bio->bi_private;
1805 parent->bi_error = bio->bi_error;
1813 bio_put(bio); 1806 bio_put(bio);
1814 bio = parent; 1807 bio = parent;
1815 } else { 1808 } else {
1816 if (bio->bi_end_io) 1809 if (bio->bi_end_io)
1817 bio->bi_end_io(bio, error); 1810 bio->bi_end_io(bio);
1818 bio = NULL; 1811 bio = NULL;
1819 } 1812 }
1820 } 1813 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 627ed0c593fb..7ef15b947b91 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -143,9 +143,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
143 unsigned int nbytes, int error) 143 unsigned int nbytes, int error)
144{ 144{
145 if (error) 145 if (error)
146 clear_bit(BIO_UPTODATE, &bio->bi_flags); 146 bio->bi_error = error;
147 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
148 error = -EIO;
149 147
150 if (unlikely(rq->cmd_flags & REQ_QUIET)) 148 if (unlikely(rq->cmd_flags & REQ_QUIET))
151 set_bit(BIO_QUIET, &bio->bi_flags); 149 set_bit(BIO_QUIET, &bio->bi_flags);
@@ -154,7 +152,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
154 152
155 /* don't actually finish bio if it's part of flush sequence */ 153 /* don't actually finish bio if it's part of flush sequence */
156 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 154 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
157 bio_endio(bio, error); 155 bio_endio(bio);
158} 156}
159 157
160void blk_dump_rq_flags(struct request *rq, char *msg) 158void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1620,7 +1618,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
1620 blk_queue_bounce(q, &bio); 1618 blk_queue_bounce(q, &bio);
1621 1619
1622 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1620 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1623 bio_endio(bio, -EIO); 1621 bio->bi_error = -EIO;
1622 bio_endio(bio);
1624 return; 1623 return;
1625 } 1624 }
1626 1625
@@ -1673,7 +1672,8 @@ get_rq:
1673 */ 1672 */
1674 req = get_request(q, rw_flags, bio, GFP_NOIO); 1673 req = get_request(q, rw_flags, bio, GFP_NOIO);
1675 if (IS_ERR(req)) { 1674 if (IS_ERR(req)) {
1676 bio_endio(bio, PTR_ERR(req)); /* @q is dead */ 1675 bio->bi_error = PTR_ERR(req);
1676 bio_endio(bio);
1677 goto out_unlock; 1677 goto out_unlock;
1678 } 1678 }
1679 1679
@@ -1896,7 +1896,8 @@ generic_make_request_checks(struct bio *bio)
1896 return true; 1896 return true;
1897 1897
1898end_io: 1898end_io:
1899 bio_endio(bio, err); 1899 bio->bi_error = err;
1900 bio_endio(bio);
1900 return false; 1901 return false;
1901} 1902}
1902 1903
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 7688ee3f5d72..6dee17443f14 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -11,16 +11,16 @@
11 11
12struct bio_batch { 12struct bio_batch {
13 atomic_t done; 13 atomic_t done;
14 unsigned long flags; 14 int error;
15 struct completion *wait; 15 struct completion *wait;
16}; 16};
17 17
18static void bio_batch_end_io(struct bio *bio, int err) 18static void bio_batch_end_io(struct bio *bio)
19{ 19{
20 struct bio_batch *bb = bio->bi_private; 20 struct bio_batch *bb = bio->bi_private;
21 21
22 if (err && (err != -EOPNOTSUPP)) 22 if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
23 clear_bit(BIO_UPTODATE, &bb->flags); 23 bb->error = bio->bi_error;
24 if (atomic_dec_and_test(&bb->done)) 24 if (atomic_dec_and_test(&bb->done))
25 complete(bb->wait); 25 complete(bb->wait);
26 bio_put(bio); 26 bio_put(bio);
@@ -78,7 +78,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
78 } 78 }
79 79
80 atomic_set(&bb.done, 1); 80 atomic_set(&bb.done, 1);
81 bb.flags = 1 << BIO_UPTODATE; 81 bb.error = 0;
82 bb.wait = &wait; 82 bb.wait = &wait;
83 83
84 blk_start_plug(&plug); 84 blk_start_plug(&plug);
@@ -134,9 +134,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
134 if (!atomic_dec_and_test(&bb.done)) 134 if (!atomic_dec_and_test(&bb.done))
135 wait_for_completion_io(&wait); 135 wait_for_completion_io(&wait);
136 136
137 if (!test_bit(BIO_UPTODATE, &bb.flags)) 137 if (bb.error)
138 ret = -EIO; 138 return bb.error;
139
140 return ret; 139 return ret;
141} 140}
142EXPORT_SYMBOL(blkdev_issue_discard); 141EXPORT_SYMBOL(blkdev_issue_discard);
@@ -172,7 +171,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
172 return -EOPNOTSUPP; 171 return -EOPNOTSUPP;
173 172
174 atomic_set(&bb.done, 1); 173 atomic_set(&bb.done, 1);
175 bb.flags = 1 << BIO_UPTODATE; 174 bb.error = 0;
176 bb.wait = &wait; 175 bb.wait = &wait;
177 176
178 while (nr_sects) { 177 while (nr_sects) {
@@ -208,9 +207,8 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
208 if (!atomic_dec_and_test(&bb.done)) 207 if (!atomic_dec_and_test(&bb.done))
209 wait_for_completion_io(&wait); 208 wait_for_completion_io(&wait);
210 209
211 if (!test_bit(BIO_UPTODATE, &bb.flags)) 210 if (bb.error)
212 ret = -ENOTSUPP; 211 return bb.error;
213
214 return ret; 212 return ret;
215} 213}
216EXPORT_SYMBOL(blkdev_issue_write_same); 214EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -236,7 +234,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
236 DECLARE_COMPLETION_ONSTACK(wait); 234 DECLARE_COMPLETION_ONSTACK(wait);
237 235
238 atomic_set(&bb.done, 1); 236 atomic_set(&bb.done, 1);
239 bb.flags = 1 << BIO_UPTODATE; 237 bb.error = 0;
240 bb.wait = &wait; 238 bb.wait = &wait;
241 239
242 ret = 0; 240 ret = 0;
@@ -270,10 +268,8 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
270 if (!atomic_dec_and_test(&bb.done)) 268 if (!atomic_dec_and_test(&bb.done))
271 wait_for_completion_io(&wait); 269 wait_for_completion_io(&wait);
272 270
273 if (!test_bit(BIO_UPTODATE, &bb.flags)) 271 if (bb.error)
274 /* One of bios in the batch was completed with error.*/ 272 return bb.error;
275 ret = -EIO;
276
277 return ret; 273 return ret;
278} 274}
279 275
diff --git a/block/blk-map.c b/block/blk-map.c
index da310a105429..5fe1c30bfba7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -103,7 +103,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
103 * normal IO completion path 103 * normal IO completion path
104 */ 104 */
105 bio_get(bio); 105 bio_get(bio);
106 bio_endio(bio, 0); 106 bio_endio(bio);
107 __blk_rq_unmap_user(bio); 107 __blk_rq_unmap_user(bio);
108 return -EINVAL; 108 return -EINVAL;
109 } 109 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d842db59699..94559025c5e6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1199,7 +1199,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
1199 struct blk_mq_alloc_data alloc_data; 1199 struct blk_mq_alloc_data alloc_data;
1200 1200
1201 if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) { 1201 if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
1202 bio_endio(bio, -EIO); 1202 bio_io_error(bio);
1203 return NULL; 1203 return NULL;
1204 } 1204 }
1205 1205
@@ -1283,7 +1283,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1283 blk_queue_bounce(q, &bio); 1283 blk_queue_bounce(q, &bio);
1284 1284
1285 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1285 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1286 bio_endio(bio, -EIO); 1286 bio_io_error(bio);
1287 return; 1287 return;
1288 } 1288 }
1289 1289
@@ -1368,7 +1368,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1368 blk_queue_bounce(q, &bio); 1368 blk_queue_bounce(q, &bio);
1369 1369
1370 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1370 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1371 bio_endio(bio, -EIO); 1371 bio_io_error(bio);
1372 return; 1372 return;
1373 } 1373 }
1374 1374
diff --git a/block/bounce.c b/block/bounce.c
index b17311227c12..f4db245b9f3a 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -123,7 +123,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
123 } 123 }
124} 124}
125 125
126static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) 126static void bounce_end_io(struct bio *bio, mempool_t *pool)
127{ 127{
128 struct bio *bio_orig = bio->bi_private; 128 struct bio *bio_orig = bio->bi_private;
129 struct bio_vec *bvec, *org_vec; 129 struct bio_vec *bvec, *org_vec;
@@ -141,39 +141,40 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
141 mempool_free(bvec->bv_page, pool); 141 mempool_free(bvec->bv_page, pool);
142 } 142 }
143 143
144 bio_endio(bio_orig, err); 144 bio_orig->bi_error = bio->bi_error;
145 bio_endio(bio_orig);
145 bio_put(bio); 146 bio_put(bio);
146} 147}
147 148
148static void bounce_end_io_write(struct bio *bio, int err) 149static void bounce_end_io_write(struct bio *bio)
149{ 150{
150 bounce_end_io(bio, page_pool, err); 151 bounce_end_io(bio, page_pool);
151} 152}
152 153
153static void bounce_end_io_write_isa(struct bio *bio, int err) 154static void bounce_end_io_write_isa(struct bio *bio)
154{ 155{
155 156
156 bounce_end_io(bio, isa_page_pool, err); 157 bounce_end_io(bio, isa_page_pool);
157} 158}
158 159
159static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) 160static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
160{ 161{
161 struct bio *bio_orig = bio->bi_private; 162 struct bio *bio_orig = bio->bi_private;
162 163
163 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 164 if (!bio->bi_error)
164 copy_to_high_bio_irq(bio_orig, bio); 165 copy_to_high_bio_irq(bio_orig, bio);
165 166
166 bounce_end_io(bio, pool, err); 167 bounce_end_io(bio, pool);
167} 168}
168 169
169static void bounce_end_io_read(struct bio *bio, int err) 170static void bounce_end_io_read(struct bio *bio)
170{ 171{
171 __bounce_end_io_read(bio, page_pool, err); 172 __bounce_end_io_read(bio, page_pool);
172} 173}
173 174
174static void bounce_end_io_read_isa(struct bio *bio, int err) 175static void bounce_end_io_read_isa(struct bio *bio)
175{ 176{
176 __bounce_end_io_read(bio, isa_page_pool, err); 177 __bounce_end_io_read(bio, isa_page_pool);
177} 178}
178 179
179#ifdef CONFIG_NEED_BOUNCE_POOL 180#ifdef CONFIG_NEED_BOUNCE_POOL
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 422b7d84f686..ad80c85e0857 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1110,7 +1110,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
1110 d->ip.rq = NULL; 1110 d->ip.rq = NULL;
1111 do { 1111 do {
1112 bio = rq->bio; 1112 bio = rq->bio;
1113 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); 1113 bok = !fastfail && !bio->bi_error;
1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); 1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
1115 1115
1116 /* cf. http://lkml.org/lkml/2006/10/31/28 */ 1116 /* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1172,7 +1172,7 @@ ktiocomplete(struct frame *f)
1172 ahout->cmdstat, ahin->cmdstat, 1172 ahout->cmdstat, ahin->cmdstat,
1173 d->aoemajor, d->aoeminor); 1173 d->aoemajor, d->aoeminor);
1174noskb: if (buf) 1174noskb: if (buf)
1175 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1175 buf->bio->bi_error = -EIO;
1176 goto out; 1176 goto out;
1177 } 1177 }
1178 1178
@@ -1185,7 +1185,7 @@ noskb: if (buf)
1185 "aoe: runt data size in read from", 1185 "aoe: runt data size in read from",
1186 (long) d->aoemajor, d->aoeminor, 1186 (long) d->aoemajor, d->aoeminor,
1187 skb->len, n); 1187 skb->len, n);
1188 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1188 buf->bio->bi_error = -EIO;
1189 break; 1189 break;
1190 } 1190 }
1191 if (n > f->iter.bi_size) { 1191 if (n > f->iter.bi_size) {
@@ -1193,7 +1193,7 @@ noskb: if (buf)
1193 "aoe: too-large data size in read from", 1193 "aoe: too-large data size in read from",
1194 (long) d->aoemajor, d->aoeminor, 1194 (long) d->aoemajor, d->aoeminor,
1195 n, f->iter.bi_size); 1195 n, f->iter.bi_size);
1196 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1196 buf->bio->bi_error = -EIO;
1197 break; 1197 break;
1198 } 1198 }
1199 bvcpy(skb, f->buf->bio, f->iter, n); 1199 bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1695,7 +1695,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
1695 if (buf == NULL) 1695 if (buf == NULL)
1696 return; 1696 return;
1697 buf->iter.bi_size = 0; 1697 buf->iter.bi_size = 0;
1698 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1698 buf->bio->bi_error = -EIO;
1699 if (buf->nframesout == 0) 1699 if (buf->nframesout == 0)
1700 aoe_end_buf(d, buf); 1700 aoe_end_buf(d, buf);
1701} 1701}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index e774c50b6842..ffd1947500c6 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
170 if (rq == NULL) 170 if (rq == NULL)
171 return; 171 return;
172 while ((bio = d->ip.nxbio)) { 172 while ((bio = d->ip.nxbio)) {
173 clear_bit(BIO_UPTODATE, &bio->bi_flags); 173 bio->bi_error = -EIO;
174 d->ip.nxbio = bio->bi_next; 174 d->ip.nxbio = bio->bi_next;
175 n = (unsigned long) rq->special; 175 n = (unsigned long) rq->special;
176 rq->special = (void *) --n; 176 rq->special = (void *) --n;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index e573e470bd8a..f9ab74505e69 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -331,14 +331,12 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
331 struct bio_vec bvec; 331 struct bio_vec bvec;
332 sector_t sector; 332 sector_t sector;
333 struct bvec_iter iter; 333 struct bvec_iter iter;
334 int err = -EIO;
335 334
336 sector = bio->bi_iter.bi_sector; 335 sector = bio->bi_iter.bi_sector;
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) 336 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
338 goto out; 337 goto io_error;
339 338
340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 339 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
341 err = 0;
342 discard_from_brd(brd, sector, bio->bi_iter.bi_size); 340 discard_from_brd(brd, sector, bio->bi_iter.bi_size);
343 goto out; 341 goto out;
344 } 342 }
@@ -349,15 +347,20 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
349 347
350 bio_for_each_segment(bvec, bio, iter) { 348 bio_for_each_segment(bvec, bio, iter) {
351 unsigned int len = bvec.bv_len; 349 unsigned int len = bvec.bv_len;
350 int err;
351
352 err = brd_do_bvec(brd, bvec.bv_page, len, 352 err = brd_do_bvec(brd, bvec.bv_page, len,
353 bvec.bv_offset, rw, sector); 353 bvec.bv_offset, rw, sector);
354 if (err) 354 if (err)
355 break; 355 goto io_error;
356 sector += len >> SECTOR_SHIFT; 356 sector += len >> SECTOR_SHIFT;
357 } 357 }
358 358
359out: 359out:
360 bio_endio(bio, err); 360 bio_endio(bio);
361 return;
362io_error:
363 bio_io_error(bio);
361} 364}
362 365
363static int brd_rw_page(struct block_device *bdev, sector_t sector, 366static int brd_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 1318e3217cb0..b3868e7a1ffd 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -175,11 +175,11 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
175 atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ 175 atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
176 device->md_io.submit_jif = jiffies; 176 device->md_io.submit_jif = jiffies;
177 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) 177 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
178 bio_endio(bio, -EIO); 178 bio_io_error(bio);
179 else 179 else
180 submit_bio(rw, bio); 180 submit_bio(rw, bio);
181 wait_until_done_or_force_detached(device, bdev, &device->md_io.done); 181 wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
182 if (bio_flagged(bio, BIO_UPTODATE)) 182 if (!bio->bi_error)
183 err = device->md_io.error; 183 err = device->md_io.error;
184 184
185 out: 185 out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 434c77dcc99e..e5e0f19ceda0 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -941,36 +941,27 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref)
941} 941}
942 942
943/* bv_page may be a copy, or may be the original */ 943/* bv_page may be a copy, or may be the original */
944static void drbd_bm_endio(struct bio *bio, int error) 944static void drbd_bm_endio(struct bio *bio)
945{ 945{
946 struct drbd_bm_aio_ctx *ctx = bio->bi_private; 946 struct drbd_bm_aio_ctx *ctx = bio->bi_private;
947 struct drbd_device *device = ctx->device; 947 struct drbd_device *device = ctx->device;
948 struct drbd_bitmap *b = device->bitmap; 948 struct drbd_bitmap *b = device->bitmap;
949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); 949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
950 int uptodate = bio_flagged(bio, BIO_UPTODATE);
951
952
953 /* strange behavior of some lower level drivers...
954 * fail the request by clearing the uptodate flag,
955 * but do not return any error?!
956 * do we want to WARN() on this? */
957 if (!error && !uptodate)
958 error = -EIO;
959 950
960 if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && 951 if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
961 !bm_test_page_unchanged(b->bm_pages[idx])) 952 !bm_test_page_unchanged(b->bm_pages[idx]))
962 drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); 953 drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
963 954
964 if (error) { 955 if (bio->bi_error) {
965 /* ctx error will hold the completed-last non-zero error code, 956 /* ctx error will hold the completed-last non-zero error code,
966 * in case error codes differ. */ 957 * in case error codes differ. */
967 ctx->error = error; 958 ctx->error = bio->bi_error;
968 bm_set_page_io_err(b->bm_pages[idx]); 959 bm_set_page_io_err(b->bm_pages[idx]);
969 /* Not identical to on disk version of it. 960 /* Not identical to on disk version of it.
970 * Is BM_PAGE_IO_ERROR enough? */ 961 * Is BM_PAGE_IO_ERROR enough? */
971 if (__ratelimit(&drbd_ratelimit_state)) 962 if (__ratelimit(&drbd_ratelimit_state))
972 drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", 963 drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
973 error, idx); 964 bio->bi_error, idx);
974 } else { 965 } else {
975 bm_clear_page_io_err(b->bm_pages[idx]); 966 bm_clear_page_io_err(b->bm_pages[idx]);
976 dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); 967 dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
@@ -1031,7 +1022,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
1031 1022
1032 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { 1023 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
1033 bio->bi_rw |= rw; 1024 bio->bi_rw |= rw;
1034 bio_endio(bio, -EIO); 1025 bio_io_error(bio);
1035 } else { 1026 } else {
1036 submit_bio(rw, bio); 1027 submit_bio(rw, bio);
1037 /* this should not count as user activity and cause the 1028 /* this should not count as user activity and cause the
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index efd19c2da9c2..a08c4a9179f1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1481,9 +1481,9 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd);
1481 1481
1482/* drbd_worker.c */ 1482/* drbd_worker.c */
1483/* bi_end_io handlers */ 1483/* bi_end_io handlers */
1484extern void drbd_md_endio(struct bio *bio, int error); 1484extern void drbd_md_endio(struct bio *bio);
1485extern void drbd_peer_request_endio(struct bio *bio, int error); 1485extern void drbd_peer_request_endio(struct bio *bio);
1486extern void drbd_request_endio(struct bio *bio, int error); 1486extern void drbd_request_endio(struct bio *bio);
1487extern int drbd_worker(struct drbd_thread *thi); 1487extern int drbd_worker(struct drbd_thread *thi);
1488enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); 1488enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1489void drbd_resync_after_changed(struct drbd_device *device); 1489void drbd_resync_after_changed(struct drbd_device *device);
@@ -1604,12 +1604,13 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
1604 __release(local); 1604 __release(local);
1605 if (!bio->bi_bdev) { 1605 if (!bio->bi_bdev) {
1606 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); 1606 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
1607 bio_endio(bio, -ENODEV); 1607 bio->bi_error = -ENODEV;
1608 bio_endio(bio);
1608 return; 1609 return;
1609 } 1610 }
1610 1611
1611 if (drbd_insert_fault(device, fault_type)) 1612 if (drbd_insert_fault(device, fault_type))
1612 bio_endio(bio, -EIO); 1613 bio_io_error(bio);
1613 else 1614 else
1614 generic_make_request(bio); 1615 generic_make_request(bio);
1615} 1616}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3907202fb9d9..9cb41166366e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -201,7 +201,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
201void complete_master_bio(struct drbd_device *device, 201void complete_master_bio(struct drbd_device *device,
202 struct bio_and_error *m) 202 struct bio_and_error *m)
203{ 203{
204 bio_endio(m->bio, m->error); 204 m->bio->bi_error = m->error;
205 bio_endio(m->bio);
205 dec_ap_bio(device); 206 dec_ap_bio(device);
206} 207}
207 208
@@ -1153,12 +1154,12 @@ drbd_submit_req_private_bio(struct drbd_request *req)
1153 rw == WRITE ? DRBD_FAULT_DT_WR 1154 rw == WRITE ? DRBD_FAULT_DT_WR
1154 : rw == READ ? DRBD_FAULT_DT_RD 1155 : rw == READ ? DRBD_FAULT_DT_RD
1155 : DRBD_FAULT_DT_RA)) 1156 : DRBD_FAULT_DT_RA))
1156 bio_endio(bio, -EIO); 1157 bio_io_error(bio);
1157 else 1158 else
1158 generic_make_request(bio); 1159 generic_make_request(bio);
1159 put_ldev(device); 1160 put_ldev(device);
1160 } else 1161 } else
1161 bio_endio(bio, -EIO); 1162 bio_io_error(bio);
1162} 1163}
1163 1164
1164static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) 1165static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
@@ -1191,7 +1192,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
1191 /* only pass the error to the upper layers. 1192 /* only pass the error to the upper layers.
1192 * if user cannot handle io errors, that's not our business. */ 1193 * if user cannot handle io errors, that's not our business. */
1193 drbd_err(device, "could not kmalloc() req\n"); 1194 drbd_err(device, "could not kmalloc() req\n");
1194 bio_endio(bio, -ENOMEM); 1195 bio->bi_error = -ENOMEM;
1196 bio_endio(bio);
1195 return ERR_PTR(-ENOMEM); 1197 return ERR_PTR(-ENOMEM);
1196 } 1198 }
1197 req->start_jif = start_jif; 1199 req->start_jif = start_jif;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d0fae55d871d..5578c1477ba6 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,12 +65,12 @@ rwlock_t global_state_lock;
65/* used for synchronous meta data and bitmap IO 65/* used for synchronous meta data and bitmap IO
66 * submitted by drbd_md_sync_page_io() 66 * submitted by drbd_md_sync_page_io()
67 */ 67 */
68void drbd_md_endio(struct bio *bio, int error) 68void drbd_md_endio(struct bio *bio)
69{ 69{
70 struct drbd_device *device; 70 struct drbd_device *device;
71 71
72 device = bio->bi_private; 72 device = bio->bi_private;
73 device->md_io.error = error; 73 device->md_io.error = bio->bi_error;
74 74
75 /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able 75 /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
76 * to timeout on the lower level device, and eventually detach from it. 76 * to timeout on the lower level device, and eventually detach from it.
@@ -170,31 +170,20 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
170/* writes on behalf of the partner, or resync writes, 170/* writes on behalf of the partner, or resync writes,
171 * "submitted" by the receiver. 171 * "submitted" by the receiver.
172 */ 172 */
173void drbd_peer_request_endio(struct bio *bio, int error) 173void drbd_peer_request_endio(struct bio *bio)
174{ 174{
175 struct drbd_peer_request *peer_req = bio->bi_private; 175 struct drbd_peer_request *peer_req = bio->bi_private;
176 struct drbd_device *device = peer_req->peer_device->device; 176 struct drbd_device *device = peer_req->peer_device->device;
177 int uptodate = bio_flagged(bio, BIO_UPTODATE);
178 int is_write = bio_data_dir(bio) == WRITE; 177 int is_write = bio_data_dir(bio) == WRITE;
179 int is_discard = !!(bio->bi_rw & REQ_DISCARD); 178 int is_discard = !!(bio->bi_rw & REQ_DISCARD);
180 179
181 if (error && __ratelimit(&drbd_ratelimit_state)) 180 if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
182 drbd_warn(device, "%s: error=%d s=%llus\n", 181 drbd_warn(device, "%s: error=%d s=%llus\n",
183 is_write ? (is_discard ? "discard" : "write") 182 is_write ? (is_discard ? "discard" : "write")
184 : "read", error, 183 : "read", bio->bi_error,
185 (unsigned long long)peer_req->i.sector); 184 (unsigned long long)peer_req->i.sector);
186 if (!error && !uptodate) {
187 if (__ratelimit(&drbd_ratelimit_state))
188 drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
189 is_write ? "write" : "read",
190 (unsigned long long)peer_req->i.sector);
191 /* strange behavior of some lower level drivers...
192 * fail the request by clearing the uptodate flag,
193 * but do not return any error?! */
194 error = -EIO;
195 }
196 185
197 if (error) 186 if (bio->bi_error)
198 set_bit(__EE_WAS_ERROR, &peer_req->flags); 187 set_bit(__EE_WAS_ERROR, &peer_req->flags);
199 188
200 bio_put(bio); /* no need for the bio anymore */ 189 bio_put(bio); /* no need for the bio anymore */
@@ -208,24 +197,13 @@ void drbd_peer_request_endio(struct bio *bio, int error)
208 197
209/* read, readA or write requests on R_PRIMARY coming from drbd_make_request 198/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
210 */ 199 */
211void drbd_request_endio(struct bio *bio, int error) 200void drbd_request_endio(struct bio *bio)
212{ 201{
213 unsigned long flags; 202 unsigned long flags;
214 struct drbd_request *req = bio->bi_private; 203 struct drbd_request *req = bio->bi_private;
215 struct drbd_device *device = req->device; 204 struct drbd_device *device = req->device;
216 struct bio_and_error m; 205 struct bio_and_error m;
217 enum drbd_req_event what; 206 enum drbd_req_event what;
218 int uptodate = bio_flagged(bio, BIO_UPTODATE);
219
220 if (!error && !uptodate) {
221 drbd_warn(device, "p %s: setting error to -EIO\n",
222 bio_data_dir(bio) == WRITE ? "write" : "read");
223 /* strange behavior of some lower level drivers...
224 * fail the request by clearing the uptodate flag,
225 * but do not return any error?! */
226 error = -EIO;
227 }
228
229 207
230 /* If this request was aborted locally before, 208 /* If this request was aborted locally before,
231 * but now was completed "successfully", 209 * but now was completed "successfully",
@@ -259,14 +237,14 @@ void drbd_request_endio(struct bio *bio, int error)
259 if (__ratelimit(&drbd_ratelimit_state)) 237 if (__ratelimit(&drbd_ratelimit_state))
260 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); 238 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
261 239
262 if (!error) 240 if (!bio->bi_error)
263 panic("possible random memory corruption caused by delayed completion of aborted local request\n"); 241 panic("possible random memory corruption caused by delayed completion of aborted local request\n");
264 } 242 }
265 243
266 /* to avoid recursion in __req_mod */ 244 /* to avoid recursion in __req_mod */
267 if (unlikely(error)) { 245 if (unlikely(bio->bi_error)) {
268 if (bio->bi_rw & REQ_DISCARD) 246 if (bio->bi_rw & REQ_DISCARD)
269 what = (error == -EOPNOTSUPP) 247 what = (bio->bi_error == -EOPNOTSUPP)
270 ? DISCARD_COMPLETED_NOTSUPP 248 ? DISCARD_COMPLETED_NOTSUPP
271 : DISCARD_COMPLETED_WITH_ERROR; 249 : DISCARD_COMPLETED_WITH_ERROR;
272 else 250 else
@@ -279,7 +257,7 @@ void drbd_request_endio(struct bio *bio, int error)
279 what = COMPLETED_OK; 257 what = COMPLETED_OK;
280 258
281 bio_put(req->private_bio); 259 bio_put(req->private_bio);
282 req->private_bio = ERR_PTR(error); 260 req->private_bio = ERR_PTR(bio->bi_error);
283 261
284 /* not req_mod(), we need irqsave here! */ 262 /* not req_mod(), we need irqsave here! */
285 spin_lock_irqsave(&device->resource->req_lock, flags); 263 spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a08cda955285..331363e7de0f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3771,13 +3771,14 @@ struct rb0_cbdata {
3771 struct completion complete; 3771 struct completion complete;
3772}; 3772};
3773 3773
3774static void floppy_rb0_cb(struct bio *bio, int err) 3774static void floppy_rb0_cb(struct bio *bio)
3775{ 3775{
3776 struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; 3776 struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
3777 int drive = cbdata->drive; 3777 int drive = cbdata->drive;
3778 3778
3779 if (err) { 3779 if (bio->bi_error) {
3780 pr_info("floppy: error %d while reading block 0\n", err); 3780 pr_info("floppy: error %d while reading block 0\n",
3781 bio->bi_error);
3781 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3782 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3782 } 3783 }
3783 complete(&cbdata->complete); 3784 complete(&cbdata->complete);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..016a59afcf24 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -222,7 +222,7 @@ static void end_cmd(struct nullb_cmd *cmd)
222 blk_end_request_all(cmd->rq, 0); 222 blk_end_request_all(cmd->rq, 0);
223 break; 223 break;
224 case NULL_Q_BIO: 224 case NULL_Q_BIO:
225 bio_endio(cmd->bio, 0); 225 bio_endio(cmd->bio);
226 break; 226 break;
227 } 227 }
228 228
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4c20c228184c..a7a259e031da 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -977,7 +977,7 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
977 } 977 }
978} 978}
979 979
980static void pkt_end_io_read(struct bio *bio, int err) 980static void pkt_end_io_read(struct bio *bio)
981{ 981{
982 struct packet_data *pkt = bio->bi_private; 982 struct packet_data *pkt = bio->bi_private;
983 struct pktcdvd_device *pd = pkt->pd; 983 struct pktcdvd_device *pd = pkt->pd;
@@ -985,9 +985,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
985 985
986 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", 986 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
987 bio, (unsigned long long)pkt->sector, 987 bio, (unsigned long long)pkt->sector,
988 (unsigned long long)bio->bi_iter.bi_sector, err); 988 (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
989 989
990 if (err) 990 if (bio->bi_error)
991 atomic_inc(&pkt->io_errors); 991 atomic_inc(&pkt->io_errors);
992 if (atomic_dec_and_test(&pkt->io_wait)) { 992 if (atomic_dec_and_test(&pkt->io_wait)) {
993 atomic_inc(&pkt->run_sm); 993 atomic_inc(&pkt->run_sm);
@@ -996,13 +996,13 @@ static void pkt_end_io_read(struct bio *bio, int err)
996 pkt_bio_finished(pd); 996 pkt_bio_finished(pd);
997} 997}
998 998
999static void pkt_end_io_packet_write(struct bio *bio, int err) 999static void pkt_end_io_packet_write(struct bio *bio)
1000{ 1000{
1001 struct packet_data *pkt = bio->bi_private; 1001 struct packet_data *pkt = bio->bi_private;
1002 struct pktcdvd_device *pd = pkt->pd; 1002 struct pktcdvd_device *pd = pkt->pd;
1003 BUG_ON(!pd); 1003 BUG_ON(!pd);
1004 1004
1005 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); 1005 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
1006 1006
1007 pd->stats.pkt_ended++; 1007 pd->stats.pkt_ended++;
1008 1008
@@ -1340,22 +1340,22 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1340 pkt_queue_bio(pd, pkt->w_bio); 1340 pkt_queue_bio(pd, pkt->w_bio);
1341} 1341}
1342 1342
1343static void pkt_finish_packet(struct packet_data *pkt, int uptodate) 1343static void pkt_finish_packet(struct packet_data *pkt, int error)
1344{ 1344{
1345 struct bio *bio; 1345 struct bio *bio;
1346 1346
1347 if (!uptodate) 1347 if (error)
1348 pkt->cache_valid = 0; 1348 pkt->cache_valid = 0;
1349 1349
1350 /* Finish all bios corresponding to this packet */ 1350 /* Finish all bios corresponding to this packet */
1351 while ((bio = bio_list_pop(&pkt->orig_bios))) 1351 while ((bio = bio_list_pop(&pkt->orig_bios))) {
1352 bio_endio(bio, uptodate ? 0 : -EIO); 1352 bio->bi_error = error;
1353 bio_endio(bio);
1354 }
1353} 1355}
1354 1356
1355static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) 1357static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1356{ 1358{
1357 int uptodate;
1358
1359 pkt_dbg(2, pd, "pkt %d\n", pkt->id); 1359 pkt_dbg(2, pd, "pkt %d\n", pkt->id);
1360 1360
1361 for (;;) { 1361 for (;;) {
@@ -1384,7 +1384,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
1384 if (atomic_read(&pkt->io_wait) > 0) 1384 if (atomic_read(&pkt->io_wait) > 0)
1385 return; 1385 return;
1386 1386
1387 if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) { 1387 if (!pkt->w_bio->bi_error) {
1388 pkt_set_state(pkt, PACKET_FINISHED_STATE); 1388 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1389 } else { 1389 } else {
1390 pkt_set_state(pkt, PACKET_RECOVERY_STATE); 1390 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1401,8 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
1401 break; 1401 break;
1402 1402
1403 case PACKET_FINISHED_STATE: 1403 case PACKET_FINISHED_STATE:
1404 uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags); 1404 pkt_finish_packet(pkt, pkt->w_bio->bi_error);
1405 pkt_finish_packet(pkt, uptodate);
1406 return; 1405 return;
1407 1406
1408 default: 1407 default:
@@ -2332,13 +2331,14 @@ static void pkt_close(struct gendisk *disk, fmode_t mode)
2332} 2331}
2333 2332
2334 2333
2335static void pkt_end_io_read_cloned(struct bio *bio, int err) 2334static void pkt_end_io_read_cloned(struct bio *bio)
2336{ 2335{
2337 struct packet_stacked_data *psd = bio->bi_private; 2336 struct packet_stacked_data *psd = bio->bi_private;
2338 struct pktcdvd_device *pd = psd->pd; 2337 struct pktcdvd_device *pd = psd->pd;
2339 2338
2339 psd->bio->bi_error = bio->bi_error;
2340 bio_put(bio); 2340 bio_put(bio);
2341 bio_endio(psd->bio, err); 2341 bio_endio(psd->bio);
2342 mempool_free(psd, psd_pool); 2342 mempool_free(psd, psd_pool);
2343 pkt_bio_finished(pd); 2343 pkt_bio_finished(pd);
2344} 2344}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b1612eb16172..49b4706b162c 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -593,7 +593,8 @@ out:
593 next = bio_list_peek(&priv->list); 593 next = bio_list_peek(&priv->list);
594 spin_unlock_irq(&priv->lock); 594 spin_unlock_irq(&priv->lock);
595 595
596 bio_endio(bio, error); 596 bio->bi_error = error;
597 bio_endio(bio);
597 return next; 598 return next;
598} 599}
599 600
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index ac8c62cb4875..63b9d2ffa8ee 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -137,7 +137,10 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
137 if (!card->eeh_state && card->gendisk) 137 if (!card->eeh_state && card->gendisk)
138 disk_stats_complete(card, meta->bio, meta->start_time); 138 disk_stats_complete(card, meta->bio, meta->start_time);
139 139
140 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); 140 if (atomic_read(&meta->error))
141 bio_io_error(meta->bio);
142 else
143 bio_endio(meta->bio);
141 kmem_cache_free(bio_meta_pool, meta); 144 kmem_cache_free(bio_meta_pool, meta);
142 } 145 }
143} 146}
@@ -199,7 +202,9 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
199queue_err: 202queue_err:
200 kmem_cache_free(bio_meta_pool, bio_meta); 203 kmem_cache_free(bio_meta_pool, bio_meta);
201req_err: 204req_err:
202 bio_endio(bio, st); 205 if (st)
206 bio->bi_error = st;
207 bio_endio(bio);
203} 208}
204 209
205/*----------------- Device Setup -------------------*/ 210/*----------------- Device Setup -------------------*/
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4cf81b5bf0f7..3b3afd2ec5d6 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -456,7 +456,7 @@ static void process_page(unsigned long data)
456 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 456 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
457 if (control & DMASCR_HARD_ERROR) { 457 if (control & DMASCR_HARD_ERROR) {
458 /* error */ 458 /* error */
459 clear_bit(BIO_UPTODATE, &bio->bi_flags); 459 bio->bi_error = -EIO;
460 dev_printk(KERN_WARNING, &card->dev->dev, 460 dev_printk(KERN_WARNING, &card->dev->dev,
461 "I/O error on sector %d/%d\n", 461 "I/O error on sector %d/%d\n",
462 le32_to_cpu(desc->local_addr)>>9, 462 le32_to_cpu(desc->local_addr)>>9,
@@ -505,7 +505,7 @@ static void process_page(unsigned long data)
505 505
506 return_bio = bio->bi_next; 506 return_bio = bio->bi_next;
507 bio->bi_next = NULL; 507 bio->bi_next = NULL;
508 bio_endio(bio, 0); 508 bio_endio(bio);
509 } 509 }
510} 510}
511 511
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..662648e08596 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1078,9 +1078,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
1078/* 1078/*
1079 * bio callback. 1079 * bio callback.
1080 */ 1080 */
1081static void end_block_io_op(struct bio *bio, int error) 1081static void end_block_io_op(struct bio *bio)
1082{ 1082{
1083 __end_block_io_op(bio->bi_private, error); 1083 __end_block_io_op(bio->bi_private, bio->bi_error);
1084 bio_put(bio); 1084 bio_put(bio);
1085} 1085}
1086 1086
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..d542db7a6c73 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -82,7 +82,6 @@ struct blk_shadow {
82struct split_bio { 82struct split_bio {
83 struct bio *bio; 83 struct bio *bio;
84 atomic_t pending; 84 atomic_t pending;
85 int err;
86}; 85};
87 86
88static DEFINE_MUTEX(blkfront_mutex); 87static DEFINE_MUTEX(blkfront_mutex);
@@ -1478,16 +1477,14 @@ static int blkfront_probe(struct xenbus_device *dev,
1478 return 0; 1477 return 0;
1479} 1478}
1480 1479
1481static void split_bio_end(struct bio *bio, int error) 1480static void split_bio_end(struct bio *bio)
1482{ 1481{
1483 struct split_bio *split_bio = bio->bi_private; 1482 struct split_bio *split_bio = bio->bi_private;
1484 1483
1485 if (error)
1486 split_bio->err = error;
1487
1488 if (atomic_dec_and_test(&split_bio->pending)) { 1484 if (atomic_dec_and_test(&split_bio->pending)) {
1489 split_bio->bio->bi_phys_segments = 0; 1485 split_bio->bio->bi_phys_segments = 0;
1490 bio_endio(split_bio->bio, split_bio->err); 1486 split_bio->bio->bi_error = bio->bi_error;
1487 bio_endio(split_bio->bio);
1491 kfree(split_bio); 1488 kfree(split_bio);
1492 } 1489 }
1493 bio_put(bio); 1490 bio_put(bio);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f439ad2800da..68c3d4800464 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -850,7 +850,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
850 850
851 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 851 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
852 zram_bio_discard(zram, index, offset, bio); 852 zram_bio_discard(zram, index, offset, bio);
853 bio_endio(bio, 0); 853 bio_endio(bio);
854 return; 854 return;
855 } 855 }
856 856
@@ -883,8 +883,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
883 update_position(&index, &offset, &bvec); 883 update_position(&index, &offset, &bvec);
884 } 884 }
885 885
886 set_bit(BIO_UPTODATE, &bio->bi_flags); 886 bio_endio(bio);
887 bio_endio(bio, 0);
888 return; 887 return;
889 888
890out: 889out:
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 00cde40db572..83392f856dfd 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -278,7 +278,7 @@ err:
278 goto out; 278 goto out;
279} 279}
280 280
281static void btree_node_read_endio(struct bio *bio, int error) 281static void btree_node_read_endio(struct bio *bio)
282{ 282{
283 struct closure *cl = bio->bi_private; 283 struct closure *cl = bio->bi_private;
284 closure_put(cl); 284 closure_put(cl);
@@ -305,7 +305,7 @@ static void bch_btree_node_read(struct btree *b)
305 bch_submit_bbio(bio, b->c, &b->key, 0); 305 bch_submit_bbio(bio, b->c, &b->key, 0);
306 closure_sync(&cl); 306 closure_sync(&cl);
307 307
308 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 308 if (bio->bi_error)
309 set_btree_node_io_error(b); 309 set_btree_node_io_error(b);
310 310
311 bch_bbio_free(bio, b->c); 311 bch_bbio_free(bio, b->c);
@@ -371,15 +371,15 @@ static void btree_node_write_done(struct closure *cl)
371 __btree_node_write_done(cl); 371 __btree_node_write_done(cl);
372} 372}
373 373
374static void btree_node_write_endio(struct bio *bio, int error) 374static void btree_node_write_endio(struct bio *bio)
375{ 375{
376 struct closure *cl = bio->bi_private; 376 struct closure *cl = bio->bi_private;
377 struct btree *b = container_of(cl, struct btree, io); 377 struct btree *b = container_of(cl, struct btree, io);
378 378
379 if (error) 379 if (bio->bi_error)
380 set_btree_node_io_error(b); 380 set_btree_node_io_error(b);
381 381
382 bch_bbio_count_io_errors(b->c, bio, error, "writing btree"); 382 bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
383 closure_put(cl); 383 closure_put(cl);
384} 384}
385 385
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 79a6d63e8ed3..782cc2c8a185 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -38,7 +38,7 @@
38 * they are running owned by the thread that is running them. Otherwise, suppose 38 * they are running owned by the thread that is running them. Otherwise, suppose
39 * you submit some bios and wish to have a function run when they all complete: 39 * you submit some bios and wish to have a function run when they all complete:
40 * 40 *
41 * foo_endio(struct bio *bio, int error) 41 * foo_endio(struct bio *bio)
42 * { 42 * {
43 * closure_put(cl); 43 * closure_put(cl);
44 * } 44 * }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index bf6a9ca18403..9440df94bc83 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -55,19 +55,19 @@ static void bch_bio_submit_split_done(struct closure *cl)
55 55
56 s->bio->bi_end_io = s->bi_end_io; 56 s->bio->bi_end_io = s->bi_end_io;
57 s->bio->bi_private = s->bi_private; 57 s->bio->bi_private = s->bi_private;
58 bio_endio(s->bio, 0); 58 bio_endio(s->bio);
59 59
60 closure_debug_destroy(&s->cl); 60 closure_debug_destroy(&s->cl);
61 mempool_free(s, s->p->bio_split_hook); 61 mempool_free(s, s->p->bio_split_hook);
62} 62}
63 63
64static void bch_bio_submit_split_endio(struct bio *bio, int error) 64static void bch_bio_submit_split_endio(struct bio *bio)
65{ 65{
66 struct closure *cl = bio->bi_private; 66 struct closure *cl = bio->bi_private;
67 struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl); 67 struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
68 68
69 if (error) 69 if (bio->bi_error)
70 clear_bit(BIO_UPTODATE, &s->bio->bi_flags); 70 s->bio->bi_error = bio->bi_error;
71 71
72 bio_put(bio); 72 bio_put(bio);
73 closure_put(cl); 73 closure_put(cl);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 418607a6ba33..d6a4e16030a6 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -24,7 +24,7 @@
24 * bit. 24 * bit.
25 */ 25 */
26 26
27static void journal_read_endio(struct bio *bio, int error) 27static void journal_read_endio(struct bio *bio)
28{ 28{
29 struct closure *cl = bio->bi_private; 29 struct closure *cl = bio->bi_private;
30 closure_put(cl); 30 closure_put(cl);
@@ -401,7 +401,7 @@ retry:
401 401
402#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1) 402#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
403 403
404static void journal_discard_endio(struct bio *bio, int error) 404static void journal_discard_endio(struct bio *bio)
405{ 405{
406 struct journal_device *ja = 406 struct journal_device *ja =
407 container_of(bio, struct journal_device, discard_bio); 407 container_of(bio, struct journal_device, discard_bio);
@@ -547,11 +547,11 @@ void bch_journal_next(struct journal *j)
547 pr_debug("journal_pin full (%zu)", fifo_used(&j->pin)); 547 pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
548} 548}
549 549
550static void journal_write_endio(struct bio *bio, int error) 550static void journal_write_endio(struct bio *bio)
551{ 551{
552 struct journal_write *w = bio->bi_private; 552 struct journal_write *w = bio->bi_private;
553 553
554 cache_set_err_on(error, w->c, "journal io error"); 554 cache_set_err_on(bio->bi_error, w->c, "journal io error");
555 closure_put(&w->c->journal.io); 555 closure_put(&w->c->journal.io);
556} 556}
557 557
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index cd7490311e51..b929fc944e9c 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -60,20 +60,20 @@ static void write_moving_finish(struct closure *cl)
60 closure_return_with_destructor(cl, moving_io_destructor); 60 closure_return_with_destructor(cl, moving_io_destructor);
61} 61}
62 62
63static void read_moving_endio(struct bio *bio, int error) 63static void read_moving_endio(struct bio *bio)
64{ 64{
65 struct bbio *b = container_of(bio, struct bbio, bio); 65 struct bbio *b = container_of(bio, struct bbio, bio);
66 struct moving_io *io = container_of(bio->bi_private, 66 struct moving_io *io = container_of(bio->bi_private,
67 struct moving_io, cl); 67 struct moving_io, cl);
68 68
69 if (error) 69 if (bio->bi_error)
70 io->op.error = error; 70 io->op.error = bio->bi_error;
71 else if (!KEY_DIRTY(&b->key) && 71 else if (!KEY_DIRTY(&b->key) &&
72 ptr_stale(io->op.c, &b->key, 0)) { 72 ptr_stale(io->op.c, &b->key, 0)) {
73 io->op.error = -EINTR; 73 io->op.error = -EINTR;
74 } 74 }
75 75
76 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); 76 bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
77} 77}
78 78
79static void moving_init(struct moving_io *io) 79static void moving_init(struct moving_io *io)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f292790997d7..a09b9462ff49 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -173,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
173 bch_data_insert_keys(cl); 173 bch_data_insert_keys(cl);
174} 174}
175 175
176static void bch_data_insert_endio(struct bio *bio, int error) 176static void bch_data_insert_endio(struct bio *bio)
177{ 177{
178 struct closure *cl = bio->bi_private; 178 struct closure *cl = bio->bi_private;
179 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 179 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
180 180
181 if (error) { 181 if (bio->bi_error) {
182 /* TODO: We could try to recover from this. */ 182 /* TODO: We could try to recover from this. */
183 if (op->writeback) 183 if (op->writeback)
184 op->error = error; 184 op->error = bio->bi_error;
185 else if (!op->replace) 185 else if (!op->replace)
186 set_closure_fn(cl, bch_data_insert_error, op->wq); 186 set_closure_fn(cl, bch_data_insert_error, op->wq);
187 else 187 else
188 set_closure_fn(cl, NULL, NULL); 188 set_closure_fn(cl, NULL, NULL);
189 } 189 }
190 190
191 bch_bbio_endio(op->c, bio, error, "writing data to cache"); 191 bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
192} 192}
193 193
194static void bch_data_insert_start(struct closure *cl) 194static void bch_data_insert_start(struct closure *cl)
@@ -477,7 +477,7 @@ struct search {
477 struct data_insert_op iop; 477 struct data_insert_op iop;
478}; 478};
479 479
480static void bch_cache_read_endio(struct bio *bio, int error) 480static void bch_cache_read_endio(struct bio *bio)
481{ 481{
482 struct bbio *b = container_of(bio, struct bbio, bio); 482 struct bbio *b = container_of(bio, struct bbio, bio);
483 struct closure *cl = bio->bi_private; 483 struct closure *cl = bio->bi_private;
@@ -490,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
490 * from the backing device. 490 * from the backing device.
491 */ 491 */
492 492
493 if (error) 493 if (bio->bi_error)
494 s->iop.error = error; 494 s->iop.error = bio->bi_error;
495 else if (!KEY_DIRTY(&b->key) && 495 else if (!KEY_DIRTY(&b->key) &&
496 ptr_stale(s->iop.c, &b->key, 0)) { 496 ptr_stale(s->iop.c, &b->key, 0)) {
497 atomic_long_inc(&s->iop.c->cache_read_races); 497 atomic_long_inc(&s->iop.c->cache_read_races);
498 s->iop.error = -EINTR; 498 s->iop.error = -EINTR;
499 } 499 }
500 500
501 bch_bbio_endio(s->iop.c, bio, error, "reading from cache"); 501 bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
502} 502}
503 503
504/* 504/*
@@ -591,13 +591,13 @@ static void cache_lookup(struct closure *cl)
591 591
592/* Common code for the make_request functions */ 592/* Common code for the make_request functions */
593 593
594static void request_endio(struct bio *bio, int error) 594static void request_endio(struct bio *bio)
595{ 595{
596 struct closure *cl = bio->bi_private; 596 struct closure *cl = bio->bi_private;
597 597
598 if (error) { 598 if (bio->bi_error) {
599 struct search *s = container_of(cl, struct search, cl); 599 struct search *s = container_of(cl, struct search, cl);
600 s->iop.error = error; 600 s->iop.error = bio->bi_error;
601 /* Only cache read errors are recoverable */ 601 /* Only cache read errors are recoverable */
602 s->recoverable = false; 602 s->recoverable = false;
603 } 603 }
@@ -613,7 +613,8 @@ static void bio_complete(struct search *s)
613 &s->d->disk->part0, s->start_time); 613 &s->d->disk->part0, s->start_time);
614 614
615 trace_bcache_request_end(s->d, s->orig_bio); 615 trace_bcache_request_end(s->d, s->orig_bio);
616 bio_endio(s->orig_bio, s->iop.error); 616 s->orig_bio->bi_error = s->iop.error;
617 bio_endio(s->orig_bio);
617 s->orig_bio = NULL; 618 s->orig_bio = NULL;
618 } 619 }
619} 620}
@@ -992,7 +993,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
992 } else { 993 } else {
993 if ((bio->bi_rw & REQ_DISCARD) && 994 if ((bio->bi_rw & REQ_DISCARD) &&
994 !blk_queue_discard(bdev_get_queue(dc->bdev))) 995 !blk_queue_discard(bdev_get_queue(dc->bdev)))
995 bio_endio(bio, 0); 996 bio_endio(bio);
996 else 997 else
997 bch_generic_make_request(bio, &d->bio_split_hook); 998 bch_generic_make_request(bio, &d->bio_split_hook);
998 } 999 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc8e545ced18..be01fd3c87f1 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -221,7 +221,7 @@ err:
221 return err; 221 return err;
222} 222}
223 223
224static void write_bdev_super_endio(struct bio *bio, int error) 224static void write_bdev_super_endio(struct bio *bio)
225{ 225{
226 struct cached_dev *dc = bio->bi_private; 226 struct cached_dev *dc = bio->bi_private;
227 /* XXX: error checking */ 227 /* XXX: error checking */
@@ -290,11 +290,11 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
290 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 290 closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
291} 291}
292 292
293static void write_super_endio(struct bio *bio, int error) 293static void write_super_endio(struct bio *bio)
294{ 294{
295 struct cache *ca = bio->bi_private; 295 struct cache *ca = bio->bi_private;
296 296
297 bch_count_io_errors(ca, error, "writing superblock"); 297 bch_count_io_errors(ca, bio->bi_error, "writing superblock");
298 closure_put(&ca->set->sb_write); 298 closure_put(&ca->set->sb_write);
299} 299}
300 300
@@ -339,12 +339,12 @@ void bcache_write_super(struct cache_set *c)
339 339
340/* UUID io */ 340/* UUID io */
341 341
342static void uuid_endio(struct bio *bio, int error) 342static void uuid_endio(struct bio *bio)
343{ 343{
344 struct closure *cl = bio->bi_private; 344 struct closure *cl = bio->bi_private;
345 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 345 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
346 346
347 cache_set_err_on(error, c, "accessing uuids"); 347 cache_set_err_on(bio->bi_error, c, "accessing uuids");
348 bch_bbio_free(bio, c); 348 bch_bbio_free(bio, c);
349 closure_put(cl); 349 closure_put(cl);
350} 350}
@@ -512,11 +512,11 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
512 * disk. 512 * disk.
513 */ 513 */
514 514
515static void prio_endio(struct bio *bio, int error) 515static void prio_endio(struct bio *bio)
516{ 516{
517 struct cache *ca = bio->bi_private; 517 struct cache *ca = bio->bi_private;
518 518
519 cache_set_err_on(error, ca->set, "accessing priorities"); 519 cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
520 bch_bbio_free(bio, ca->set); 520 bch_bbio_free(bio, ca->set);
521 closure_put(&ca->prio); 521 closure_put(&ca->prio);
522} 522}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f1986bcd1bf0..b4fc874c30fd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -166,12 +166,12 @@ static void write_dirty_finish(struct closure *cl)
166 closure_return_with_destructor(cl, dirty_io_destructor); 166 closure_return_with_destructor(cl, dirty_io_destructor);
167} 167}
168 168
169static void dirty_endio(struct bio *bio, int error) 169static void dirty_endio(struct bio *bio)
170{ 170{
171 struct keybuf_key *w = bio->bi_private; 171 struct keybuf_key *w = bio->bi_private;
172 struct dirty_io *io = w->private; 172 struct dirty_io *io = w->private;
173 173
174 if (error) 174 if (bio->bi_error)
175 SET_KEY_DIRTY(&w->key, false); 175 SET_KEY_DIRTY(&w->key, false);
176 176
177 closure_put(&io->cl); 177 closure_put(&io->cl);
@@ -193,15 +193,15 @@ static void write_dirty(struct closure *cl)
193 continue_at(cl, write_dirty_finish, system_wq); 193 continue_at(cl, write_dirty_finish, system_wq);
194} 194}
195 195
196static void read_dirty_endio(struct bio *bio, int error) 196static void read_dirty_endio(struct bio *bio)
197{ 197{
198 struct keybuf_key *w = bio->bi_private; 198 struct keybuf_key *w = bio->bi_private;
199 struct dirty_io *io = w->private; 199 struct dirty_io *io = w->private;
200 200
201 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), 201 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
202 error, "reading dirty data from cache"); 202 bio->bi_error, "reading dirty data from cache");
203 203
204 dirty_endio(bio, error); 204 dirty_endio(bio);
205} 205}
206 206
207static void read_dirty_submit(struct closure *cl) 207static void read_dirty_submit(struct closure *cl)
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index cd6d1d21e057..03af174485d3 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -236,8 +236,10 @@ void dm_cell_error(struct dm_bio_prison *prison,
236 bio_list_init(&bios); 236 bio_list_init(&bios);
237 dm_cell_release(prison, cell, &bios); 237 dm_cell_release(prison, cell, &bios);
238 238
239 while ((bio = bio_list_pop(&bios))) 239 while ((bio = bio_list_pop(&bios))) {
240 bio_endio(bio, error); 240 bio->bi_error = error;
241 bio_endio(bio);
242 }
241} 243}
242EXPORT_SYMBOL_GPL(dm_cell_error); 244EXPORT_SYMBOL_GPL(dm_cell_error);
243 245
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 86dbbc737402..83cc52eaf56d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -545,7 +545,8 @@ static void dmio_complete(unsigned long error, void *context)
545{ 545{
546 struct dm_buffer *b = context; 546 struct dm_buffer *b = context;
547 547
548 b->bio.bi_end_io(&b->bio, error ? -EIO : 0); 548 b->bio.bi_error = error ? -EIO : 0;
549 b->bio.bi_end_io(&b->bio);
549} 550}
550 551
551static void use_dmio(struct dm_buffer *b, int rw, sector_t block, 552static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
@@ -575,13 +576,16 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
575 b->bio.bi_end_io = end_io; 576 b->bio.bi_end_io = end_io;
576 577
577 r = dm_io(&io_req, 1, &region, NULL); 578 r = dm_io(&io_req, 1, &region, NULL);
578 if (r) 579 if (r) {
579 end_io(&b->bio, r); 580 b->bio.bi_error = r;
581 end_io(&b->bio);
582 }
580} 583}
581 584
582static void inline_endio(struct bio *bio, int error) 585static void inline_endio(struct bio *bio)
583{ 586{
584 bio_end_io_t *end_fn = bio->bi_private; 587 bio_end_io_t *end_fn = bio->bi_private;
588 int error = bio->bi_error;
585 589
586 /* 590 /*
587 * Reset the bio to free any attached resources 591 * Reset the bio to free any attached resources
@@ -589,7 +593,8 @@ static void inline_endio(struct bio *bio, int error)
589 */ 593 */
590 bio_reset(bio); 594 bio_reset(bio);
591 595
592 end_fn(bio, error); 596 bio->bi_error = error;
597 end_fn(bio);
593} 598}
594 599
595static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, 600static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
@@ -661,13 +666,14 @@ static void submit_io(struct dm_buffer *b, int rw, sector_t block,
661 * Set the error, clear B_WRITING bit and wake anyone who was waiting on 666 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
662 * it. 667 * it.
663 */ 668 */
664static void write_endio(struct bio *bio, int error) 669static void write_endio(struct bio *bio)
665{ 670{
666 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); 671 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
667 672
668 b->write_error = error; 673 b->write_error = bio->bi_error;
669 if (unlikely(error)) { 674 if (unlikely(bio->bi_error)) {
670 struct dm_bufio_client *c = b->c; 675 struct dm_bufio_client *c = b->c;
676 int error = bio->bi_error;
671 (void)cmpxchg(&c->async_write_error, 0, error); 677 (void)cmpxchg(&c->async_write_error, 0, error);
672 } 678 }
673 679
@@ -1026,11 +1032,11 @@ found_buffer:
1026 * The endio routine for reading: set the error, clear the bit and wake up 1032 * The endio routine for reading: set the error, clear the bit and wake up
1027 * anyone waiting on the buffer. 1033 * anyone waiting on the buffer.
1028 */ 1034 */
1029static void read_endio(struct bio *bio, int error) 1035static void read_endio(struct bio *bio)
1030{ 1036{
1031 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); 1037 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1032 1038
1033 b->read_error = error; 1039 b->read_error = bio->bi_error;
1034 1040
1035 BUG_ON(!test_bit(B_READING, &b->state)); 1041 BUG_ON(!test_bit(B_READING, &b->state));
1036 1042
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..04d0dadc48b1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -919,14 +919,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
919 wake_worker(cache); 919 wake_worker(cache);
920} 920}
921 921
922static void writethrough_endio(struct bio *bio, int err) 922static void writethrough_endio(struct bio *bio)
923{ 923{
924 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 924 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
925 925
926 dm_unhook_bio(&pb->hook_info, bio); 926 dm_unhook_bio(&pb->hook_info, bio);
927 927
928 if (err) { 928 if (bio->bi_error) {
929 bio_endio(bio, err); 929 bio_endio(bio);
930 return; 930 return;
931 } 931 }
932 932
@@ -1231,7 +1231,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
1231 * The block was promoted via an overwrite, so it's dirty. 1231 * The block was promoted via an overwrite, so it's dirty.
1232 */ 1232 */
1233 set_dirty(cache, mg->new_oblock, mg->cblock); 1233 set_dirty(cache, mg->new_oblock, mg->cblock);
1234 bio_endio(mg->new_ocell->holder, 0); 1234 bio_endio(mg->new_ocell->holder);
1235 cell_defer(cache, mg->new_ocell, false); 1235 cell_defer(cache, mg->new_ocell, false);
1236 } 1236 }
1237 free_io_migration(mg); 1237 free_io_migration(mg);
@@ -1284,7 +1284,7 @@ static void issue_copy(struct dm_cache_migration *mg)
1284 } 1284 }
1285} 1285}
1286 1286
1287static void overwrite_endio(struct bio *bio, int err) 1287static void overwrite_endio(struct bio *bio)
1288{ 1288{
1289 struct dm_cache_migration *mg = bio->bi_private; 1289 struct dm_cache_migration *mg = bio->bi_private;
1290 struct cache *cache = mg->cache; 1290 struct cache *cache = mg->cache;
@@ -1294,7 +1294,7 @@ static void overwrite_endio(struct bio *bio, int err)
1294 1294
1295 dm_unhook_bio(&pb->hook_info, bio); 1295 dm_unhook_bio(&pb->hook_info, bio);
1296 1296
1297 if (err) 1297 if (bio->bi_error)
1298 mg->err = true; 1298 mg->err = true;
1299 1299
1300 mg->requeue_holder = false; 1300 mg->requeue_holder = false;
@@ -1358,7 +1358,7 @@ static void issue_discard(struct dm_cache_migration *mg)
1358 b = to_dblock(from_dblock(b) + 1); 1358 b = to_dblock(from_dblock(b) + 1);
1359 } 1359 }
1360 1360
1361 bio_endio(bio, 0); 1361 bio_endio(bio);
1362 cell_defer(mg->cache, mg->new_ocell, false); 1362 cell_defer(mg->cache, mg->new_ocell, false);
1363 free_migration(mg); 1363 free_migration(mg);
1364} 1364}
@@ -1631,7 +1631,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1631 1631
1632 calc_discard_block_range(cache, bio, &b, &e); 1632 calc_discard_block_range(cache, bio, &b, &e);
1633 if (b == e) { 1633 if (b == e) {
1634 bio_endio(bio, 0); 1634 bio_endio(bio);
1635 return; 1635 return;
1636 } 1636 }
1637 1637
@@ -2213,8 +2213,10 @@ static void requeue_deferred_bios(struct cache *cache)
2213 bio_list_merge(&bios, &cache->deferred_bios); 2213 bio_list_merge(&bios, &cache->deferred_bios);
2214 bio_list_init(&cache->deferred_bios); 2214 bio_list_init(&cache->deferred_bios);
2215 2215
2216 while ((bio = bio_list_pop(&bios))) 2216 while ((bio = bio_list_pop(&bios))) {
2217 bio_endio(bio, DM_ENDIO_REQUEUE); 2217 bio->bi_error = DM_ENDIO_REQUEUE;
2218 bio_endio(bio);
2219 }
2218} 2220}
2219 2221
2220static int more_work(struct cache *cache) 2222static int more_work(struct cache *cache)
@@ -3119,7 +3121,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
3119 * This is a duplicate writethrough io that is no 3121 * This is a duplicate writethrough io that is no
3120 * longer needed because the block has been demoted. 3122 * longer needed because the block has been demoted.
3121 */ 3123 */
3122 bio_endio(bio, 0); 3124 bio_endio(bio);
3123 // FIXME: remap everything as a miss 3125 // FIXME: remap everything as a miss
3124 cell_defer(cache, cell, false); 3126 cell_defer(cache, cell, false);
3125 r = DM_MAPIO_SUBMITTED; 3127 r = DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0f48fed44a17..744b80c608e5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1076,7 +1076,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1076 if (io->ctx.req) 1076 if (io->ctx.req)
1077 crypt_free_req(cc, io->ctx.req, base_bio); 1077 crypt_free_req(cc, io->ctx.req, base_bio);
1078 1078
1079 bio_endio(base_bio, error); 1079 base_bio->bi_error = error;
1080 bio_endio(base_bio);
1080} 1081}
1081 1082
1082/* 1083/*
@@ -1096,15 +1097,12 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1096 * The work is done per CPU global for all dm-crypt instances. 1097 * The work is done per CPU global for all dm-crypt instances.
1097 * They should not depend on each other and do not block. 1098 * They should not depend on each other and do not block.
1098 */ 1099 */
1099static void crypt_endio(struct bio *clone, int error) 1100static void crypt_endio(struct bio *clone)
1100{ 1101{
1101 struct dm_crypt_io *io = clone->bi_private; 1102 struct dm_crypt_io *io = clone->bi_private;
1102 struct crypt_config *cc = io->cc; 1103 struct crypt_config *cc = io->cc;
1103 unsigned rw = bio_data_dir(clone); 1104 unsigned rw = bio_data_dir(clone);
1104 1105
1105 if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
1106 error = -EIO;
1107
1108 /* 1106 /*
1109 * free the processed pages 1107 * free the processed pages
1110 */ 1108 */
@@ -1113,13 +1111,13 @@ static void crypt_endio(struct bio *clone, int error)
1113 1111
1114 bio_put(clone); 1112 bio_put(clone);
1115 1113
1116 if (rw == READ && !error) { 1114 if (rw == READ && !clone->bi_error) {
1117 kcryptd_queue_crypt(io); 1115 kcryptd_queue_crypt(io);
1118 return; 1116 return;
1119 } 1117 }
1120 1118
1121 if (unlikely(error)) 1119 if (unlikely(clone->bi_error))
1122 io->error = error; 1120 io->error = clone->bi_error;
1123 1121
1124 crypt_dec_pending(io); 1122 crypt_dec_pending(io);
1125} 1123}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b257e46876d3..04481247aab8 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -296,7 +296,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
296 * Drop writes? 296 * Drop writes?
297 */ 297 */
298 if (test_bit(DROP_WRITES, &fc->flags)) { 298 if (test_bit(DROP_WRITES, &fc->flags)) {
299 bio_endio(bio, 0); 299 bio_endio(bio);
300 return DM_MAPIO_SUBMITTED; 300 return DM_MAPIO_SUBMITTED;
301 } 301 }
302 302
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 74adcd2c967e..efc6659f9d6a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -134,12 +134,12 @@ static void dec_count(struct io *io, unsigned int region, int error)
134 complete_io(io); 134 complete_io(io);
135} 135}
136 136
137static void endio(struct bio *bio, int error) 137static void endio(struct bio *bio)
138{ 138{
139 struct io *io; 139 struct io *io;
140 unsigned region; 140 unsigned region;
141 141
142 if (error && bio_data_dir(bio) == READ) 142 if (bio->bi_error && bio_data_dir(bio) == READ)
143 zero_fill_bio(bio); 143 zero_fill_bio(bio);
144 144
145 /* 145 /*
@@ -149,7 +149,7 @@ static void endio(struct bio *bio, int error)
149 149
150 bio_put(bio); 150 bio_put(bio);
151 151
152 dec_count(io, region, error); 152 dec_count(io, region, bio->bi_error);
153} 153}
154 154
155/*----------------------------------------------------------------- 155/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index ad1b049ae2ab..e9d17488d5e3 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -146,16 +146,16 @@ static void put_io_block(struct log_writes_c *lc)
146 } 146 }
147} 147}
148 148
149static void log_end_io(struct bio *bio, int err) 149static void log_end_io(struct bio *bio)
150{ 150{
151 struct log_writes_c *lc = bio->bi_private; 151 struct log_writes_c *lc = bio->bi_private;
152 struct bio_vec *bvec; 152 struct bio_vec *bvec;
153 int i; 153 int i;
154 154
155 if (err) { 155 if (bio->bi_error) {
156 unsigned long flags; 156 unsigned long flags;
157 157
158 DMERR("Error writing log block, error=%d", err); 158 DMERR("Error writing log block, error=%d", bio->bi_error);
159 spin_lock_irqsave(&lc->blocks_lock, flags); 159 spin_lock_irqsave(&lc->blocks_lock, flags);
160 lc->logging_enabled = false; 160 lc->logging_enabled = false;
161 spin_unlock_irqrestore(&lc->blocks_lock, flags); 161 spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -205,7 +205,6 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
205 bio->bi_bdev = lc->logdev->bdev; 205 bio->bi_bdev = lc->logdev->bdev;
206 bio->bi_end_io = log_end_io; 206 bio->bi_end_io = log_end_io;
207 bio->bi_private = lc; 207 bio->bi_private = lc;
208 set_bit(BIO_UPTODATE, &bio->bi_flags);
209 208
210 page = alloc_page(GFP_KERNEL); 209 page = alloc_page(GFP_KERNEL);
211 if (!page) { 210 if (!page) {
@@ -270,7 +269,6 @@ static int log_one_block(struct log_writes_c *lc,
270 bio->bi_bdev = lc->logdev->bdev; 269 bio->bi_bdev = lc->logdev->bdev;
271 bio->bi_end_io = log_end_io; 270 bio->bi_end_io = log_end_io;
272 bio->bi_private = lc; 271 bio->bi_private = lc;
273 set_bit(BIO_UPTODATE, &bio->bi_flags);
274 272
275 for (i = 0; i < block->vec_cnt; i++) { 273 for (i = 0; i < block->vec_cnt; i++) {
276 /* 274 /*
@@ -292,7 +290,6 @@ static int log_one_block(struct log_writes_c *lc,
292 bio->bi_bdev = lc->logdev->bdev; 290 bio->bi_bdev = lc->logdev->bdev;
293 bio->bi_end_io = log_end_io; 291 bio->bi_end_io = log_end_io;
294 bio->bi_private = lc; 292 bio->bi_private = lc;
295 set_bit(BIO_UPTODATE, &bio->bi_flags);
296 293
297 ret = bio_add_page(bio, block->vecs[i].bv_page, 294 ret = bio_add_page(bio, block->vecs[i].bv_page,
298 block->vecs[i].bv_len, 0); 295 block->vecs[i].bv_len, 0);
@@ -606,7 +603,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
606 WARN_ON(flush_bio || fua_bio); 603 WARN_ON(flush_bio || fua_bio);
607 if (lc->device_supports_discard) 604 if (lc->device_supports_discard)
608 goto map_bio; 605 goto map_bio;
609 bio_endio(bio, 0); 606 bio_endio(bio);
610 return DM_MAPIO_SUBMITTED; 607 return DM_MAPIO_SUBMITTED;
611 } 608 }
612 609
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d83696bf403b..e1eabfb2f52d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -490,9 +490,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
490 * If device is suspended, complete the bio. 490 * If device is suspended, complete the bio.
491 */ 491 */
492 if (dm_noflush_suspending(ms->ti)) 492 if (dm_noflush_suspending(ms->ti))
493 bio_endio(bio, DM_ENDIO_REQUEUE); 493 bio->bi_error = DM_ENDIO_REQUEUE;
494 else 494 else
495 bio_endio(bio, -EIO); 495 bio->bi_error = -EIO;
496
497 bio_endio(bio);
496 return; 498 return;
497 } 499 }
498 500
@@ -515,7 +517,7 @@ static void read_callback(unsigned long error, void *context)
515 bio_set_m(bio, NULL); 517 bio_set_m(bio, NULL);
516 518
517 if (likely(!error)) { 519 if (likely(!error)) {
518 bio_endio(bio, 0); 520 bio_endio(bio);
519 return; 521 return;
520 } 522 }
521 523
@@ -531,7 +533,7 @@ static void read_callback(unsigned long error, void *context)
531 533
532 DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", 534 DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
533 m->dev->name); 535 m->dev->name);
534 bio_endio(bio, -EIO); 536 bio_io_error(bio);
535} 537}
536 538
537/* Asynchronous read. */ 539/* Asynchronous read. */
@@ -580,7 +582,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
580 if (likely(m)) 582 if (likely(m))
581 read_async_bio(m, bio); 583 read_async_bio(m, bio);
582 else 584 else
583 bio_endio(bio, -EIO); 585 bio_io_error(bio);
584 } 586 }
585} 587}
586 588
@@ -598,7 +600,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
598 600
599static void write_callback(unsigned long error, void *context) 601static void write_callback(unsigned long error, void *context)
600{ 602{
601 unsigned i, ret = 0; 603 unsigned i;
602 struct bio *bio = (struct bio *) context; 604 struct bio *bio = (struct bio *) context;
603 struct mirror_set *ms; 605 struct mirror_set *ms;
604 int should_wake = 0; 606 int should_wake = 0;
@@ -614,7 +616,7 @@ static void write_callback(unsigned long error, void *context)
614 * regions with the same code. 616 * regions with the same code.
615 */ 617 */
616 if (likely(!error)) { 618 if (likely(!error)) {
617 bio_endio(bio, ret); 619 bio_endio(bio);
618 return; 620 return;
619 } 621 }
620 622
@@ -623,7 +625,8 @@ static void write_callback(unsigned long error, void *context)
623 * degrade the array. 625 * degrade the array.
624 */ 626 */
625 if (bio->bi_rw & REQ_DISCARD) { 627 if (bio->bi_rw & REQ_DISCARD) {
626 bio_endio(bio, -EOPNOTSUPP); 628 bio->bi_error = -EOPNOTSUPP;
629 bio_endio(bio);
627 return; 630 return;
628 } 631 }
629 632
@@ -828,13 +831,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
828 * be wrong if the failed leg returned after reboot and 831 * be wrong if the failed leg returned after reboot and
829 * got replicated back to the good legs.) 832 * got replicated back to the good legs.)
830 */ 833 */
831
832 if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure))) 834 if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
833 bio_endio(bio, -EIO); 835 bio_io_error(bio);
834 else if (errors_handled(ms) && !keep_log(ms)) 836 else if (errors_handled(ms) && !keep_log(ms))
835 hold_bio(ms, bio); 837 hold_bio(ms, bio);
836 else 838 else
837 bio_endio(bio, 0); 839 bio_endio(bio);
838 } 840 }
839} 841}
840 842
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7c82d3ccce87..dd8ca0bb0980 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1490,7 +1490,7 @@ out:
1490 error_bios(snapshot_bios); 1490 error_bios(snapshot_bios);
1491 } else { 1491 } else {
1492 if (full_bio) 1492 if (full_bio)
1493 bio_endio(full_bio, 0); 1493 bio_endio(full_bio);
1494 flush_bios(snapshot_bios); 1494 flush_bios(snapshot_bios);
1495 } 1495 }
1496 1496
@@ -1580,11 +1580,11 @@ static void start_copy(struct dm_snap_pending_exception *pe)
1580 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); 1580 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1581} 1581}
1582 1582
1583static void full_bio_end_io(struct bio *bio, int error) 1583static void full_bio_end_io(struct bio *bio)
1584{ 1584{
1585 void *callback_data = bio->bi_private; 1585 void *callback_data = bio->bi_private;
1586 1586
1587 dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0); 1587 dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
1588} 1588}
1589 1589
1590static void start_full_bio(struct dm_snap_pending_exception *pe, 1590static void start_full_bio(struct dm_snap_pending_exception *pe,
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a672a1502c14..4f94c7da82f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -273,7 +273,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
273 return DM_MAPIO_REMAPPED; 273 return DM_MAPIO_REMAPPED;
274 } else { 274 } else {
275 /* The range doesn't map to the target stripe */ 275 /* The range doesn't map to the target stripe */
276 bio_endio(bio, 0); 276 bio_endio(bio);
277 return DM_MAPIO_SUBMITTED; 277 return DM_MAPIO_SUBMITTED;
278 } 278 }
279} 279}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..2ade2c46dca9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -614,8 +614,10 @@ static void error_bio_list(struct bio_list *bios, int error)
614{ 614{
615 struct bio *bio; 615 struct bio *bio;
616 616
617 while ((bio = bio_list_pop(bios))) 617 while ((bio = bio_list_pop(bios))) {
618 bio_endio(bio, error); 618 bio->bi_error = error;
619 bio_endio(bio);
620 }
619} 621}
620 622
621static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) 623static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -864,14 +866,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
864 complete_mapping_preparation(m); 866 complete_mapping_preparation(m);
865} 867}
866 868
867static void overwrite_endio(struct bio *bio, int err) 869static void overwrite_endio(struct bio *bio)
868{ 870{
869 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 871 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
870 struct dm_thin_new_mapping *m = h->overwrite_mapping; 872 struct dm_thin_new_mapping *m = h->overwrite_mapping;
871 873
872 bio->bi_end_io = m->saved_bi_end_io; 874 bio->bi_end_io = m->saved_bi_end_io;
873 875
874 m->err = err; 876 m->err = bio->bi_error;
875 complete_mapping_preparation(m); 877 complete_mapping_preparation(m);
876} 878}
877 879
@@ -996,7 +998,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
996 */ 998 */
997 if (bio) { 999 if (bio) {
998 inc_remap_and_issue_cell(tc, m->cell, m->data_block); 1000 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
999 bio_endio(bio, 0); 1001 bio_endio(bio);
1000 } else { 1002 } else {
1001 inc_all_io_entry(tc->pool, m->cell->holder); 1003 inc_all_io_entry(tc->pool, m->cell->holder);
1002 remap_and_issue(tc, m->cell->holder, m->data_block); 1004 remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1026,7 +1028,7 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
1026 1028
1027static void process_prepared_discard_success(struct dm_thin_new_mapping *m) 1029static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
1028{ 1030{
1029 bio_endio(m->bio, 0); 1031 bio_endio(m->bio);
1030 free_discard_mapping(m); 1032 free_discard_mapping(m);
1031} 1033}
1032 1034
@@ -1040,7 +1042,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
1040 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); 1042 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
1041 bio_io_error(m->bio); 1043 bio_io_error(m->bio);
1042 } else 1044 } else
1043 bio_endio(m->bio, 0); 1045 bio_endio(m->bio);
1044 1046
1045 cell_defer_no_holder(tc, m->cell); 1047 cell_defer_no_holder(tc, m->cell);
1046 mempool_free(m, tc->pool->mapping_pool); 1048 mempool_free(m, tc->pool->mapping_pool);
@@ -1111,7 +1113,8 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
1111 * Even if r is set, there could be sub discards in flight that we 1113 * Even if r is set, there could be sub discards in flight that we
1112 * need to wait for. 1114 * need to wait for.
1113 */ 1115 */
1114 bio_endio(m->bio, r); 1116 m->bio->bi_error = r;
1117 bio_endio(m->bio);
1115 cell_defer_no_holder(tc, m->cell); 1118 cell_defer_no_holder(tc, m->cell);
1116 mempool_free(m, pool->mapping_pool); 1119 mempool_free(m, pool->mapping_pool);
1117} 1120}
@@ -1487,9 +1490,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1487{ 1490{
1488 int error = should_error_unserviceable_bio(pool); 1491 int error = should_error_unserviceable_bio(pool);
1489 1492
1490 if (error) 1493 if (error) {
1491 bio_endio(bio, error); 1494 bio->bi_error = error;
1492 else 1495 bio_endio(bio);
1496 } else
1493 retry_on_resume(bio); 1497 retry_on_resume(bio);
1494} 1498}
1495 1499
@@ -1625,7 +1629,7 @@ static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_priso
1625 * will prevent completion until the sub range discards have 1629 * will prevent completion until the sub range discards have
1626 * completed. 1630 * completed.
1627 */ 1631 */
1628 bio_endio(bio, 0); 1632 bio_endio(bio);
1629} 1633}
1630 1634
1631static void process_discard_bio(struct thin_c *tc, struct bio *bio) 1635static void process_discard_bio(struct thin_c *tc, struct bio *bio)
@@ -1639,7 +1643,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1639 /* 1643 /*
1640 * The discard covers less than a block. 1644 * The discard covers less than a block.
1641 */ 1645 */
1642 bio_endio(bio, 0); 1646 bio_endio(bio);
1643 return; 1647 return;
1644 } 1648 }
1645 1649
@@ -1784,7 +1788,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1784 if (bio_data_dir(bio) == READ) { 1788 if (bio_data_dir(bio) == READ) {
1785 zero_fill_bio(bio); 1789 zero_fill_bio(bio);
1786 cell_defer_no_holder(tc, cell); 1790 cell_defer_no_holder(tc, cell);
1787 bio_endio(bio, 0); 1791 bio_endio(bio);
1788 return; 1792 return;
1789 } 1793 }
1790 1794
@@ -1849,7 +1853,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1849 1853
1850 } else { 1854 } else {
1851 zero_fill_bio(bio); 1855 zero_fill_bio(bio);
1852 bio_endio(bio, 0); 1856 bio_endio(bio);
1853 } 1857 }
1854 } else 1858 } else
1855 provision_block(tc, bio, block, cell); 1859 provision_block(tc, bio, block, cell);
@@ -1920,7 +1924,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1920 } 1924 }
1921 1925
1922 zero_fill_bio(bio); 1926 zero_fill_bio(bio);
1923 bio_endio(bio, 0); 1927 bio_endio(bio);
1924 break; 1928 break;
1925 1929
1926 default: 1930 default:
@@ -1945,7 +1949,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell
1945 1949
1946static void process_bio_success(struct thin_c *tc, struct bio *bio) 1950static void process_bio_success(struct thin_c *tc, struct bio *bio)
1947{ 1951{
1948 bio_endio(bio, 0); 1952 bio_endio(bio);
1949} 1953}
1950 1954
1951static void process_bio_fail(struct thin_c *tc, struct bio *bio) 1955static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2581,7 +2585,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2581 thin_hook_bio(tc, bio); 2585 thin_hook_bio(tc, bio);
2582 2586
2583 if (tc->requeue_mode) { 2587 if (tc->requeue_mode) {
2584 bio_endio(bio, DM_ENDIO_REQUEUE); 2588 bio->bi_error = DM_ENDIO_REQUEUE;
2589 bio_endio(bio);
2585 return DM_MAPIO_SUBMITTED; 2590 return DM_MAPIO_SUBMITTED;
2586 } 2591 }
2587 2592
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index bb9c6a00e4b0..4b34df8fdb58 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -458,8 +458,9 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
458 458
459 bio->bi_end_io = io->orig_bi_end_io; 459 bio->bi_end_io = io->orig_bi_end_io;
460 bio->bi_private = io->orig_bi_private; 460 bio->bi_private = io->orig_bi_private;
461 bio->bi_error = error;
461 462
462 bio_endio(bio, error); 463 bio_endio(bio);
463} 464}
464 465
465static void verity_work(struct work_struct *w) 466static void verity_work(struct work_struct *w)
@@ -469,12 +470,12 @@ static void verity_work(struct work_struct *w)
469 verity_finish_io(io, verity_verify_io(io)); 470 verity_finish_io(io, verity_verify_io(io));
470} 471}
471 472
472static void verity_end_io(struct bio *bio, int error) 473static void verity_end_io(struct bio *bio)
473{ 474{
474 struct dm_verity_io *io = bio->bi_private; 475 struct dm_verity_io *io = bio->bi_private;
475 476
476 if (error) { 477 if (bio->bi_error) {
477 verity_finish_io(io, error); 478 verity_finish_io(io, bio->bi_error);
478 return; 479 return;
479 } 480 }
480 481
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b9a64bbce304..766bc93006e6 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
47 break; 47 break;
48 } 48 }
49 49
50 bio_endio(bio, 0); 50 bio_endio(bio);
51 51
52 /* accepted bio, don't make new request */ 52 /* accepted bio, don't make new request */
53 return DM_MAPIO_SUBMITTED; 53 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..7f367fcace03 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -944,7 +944,8 @@ static void dec_pending(struct dm_io *io, int error)
944 } else { 944 } else {
945 /* done with normal IO or empty flush */ 945 /* done with normal IO or empty flush */
946 trace_block_bio_complete(md->queue, bio, io_error); 946 trace_block_bio_complete(md->queue, bio, io_error);
947 bio_endio(bio, io_error); 947 bio->bi_error = io_error;
948 bio_endio(bio);
948 } 949 }
949 } 950 }
950} 951}
@@ -957,17 +958,15 @@ static void disable_write_same(struct mapped_device *md)
957 limits->max_write_same_sectors = 0; 958 limits->max_write_same_sectors = 0;
958} 959}
959 960
960static void clone_endio(struct bio *bio, int error) 961static void clone_endio(struct bio *bio)
961{ 962{
963 int error = bio->bi_error;
962 int r = error; 964 int r = error;
963 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 965 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
964 struct dm_io *io = tio->io; 966 struct dm_io *io = tio->io;
965 struct mapped_device *md = tio->io->md; 967 struct mapped_device *md = tio->io->md;
966 dm_endio_fn endio = tio->ti->type->end_io; 968 dm_endio_fn endio = tio->ti->type->end_io;
967 969
968 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
969 error = -EIO;
970
971 if (endio) { 970 if (endio) {
972 r = endio(tio->ti, bio, error); 971 r = endio(tio->ti, bio, error);
973 if (r < 0 || r == DM_ENDIO_REQUEUE) 972 if (r < 0 || r == DM_ENDIO_REQUEUE)
@@ -996,7 +995,7 @@ static void clone_endio(struct bio *bio, int error)
996/* 995/*
997 * Partial completion handling for request-based dm 996 * Partial completion handling for request-based dm
998 */ 997 */
999static void end_clone_bio(struct bio *clone, int error) 998static void end_clone_bio(struct bio *clone)
1000{ 999{
1001 struct dm_rq_clone_bio_info *info = 1000 struct dm_rq_clone_bio_info *info =
1002 container_of(clone, struct dm_rq_clone_bio_info, clone); 1001 container_of(clone, struct dm_rq_clone_bio_info, clone);
@@ -1013,13 +1012,13 @@ static void end_clone_bio(struct bio *clone, int error)
1013 * the remainder. 1012 * the remainder.
1014 */ 1013 */
1015 return; 1014 return;
1016 else if (error) { 1015 else if (bio->bi_error) {
1017 /* 1016 /*
1018 * Don't notice the error to the upper layer yet. 1017 * Don't notice the error to the upper layer yet.
1019 * The error handling decision is made by the target driver, 1018 * The error handling decision is made by the target driver,
1020 * when the request is completed. 1019 * when the request is completed.
1021 */ 1020 */
1022 tio->error = error; 1021 tio->error = bio->bi_error;
1023 return; 1022 return;
1024 } 1023 }
1025 1024
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 1277eb26b58a..4a8e15058e8b 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -70,7 +70,7 @@
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71 71
72 72
73static void faulty_fail(struct bio *bio, int error) 73static void faulty_fail(struct bio *bio)
74{ 74{
75 struct bio *b = bio->bi_private; 75 struct bio *b = bio->bi_private;
76 76
@@ -181,7 +181,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
181 /* special case - don't decrement, don't generic_make_request, 181 /* special case - don't decrement, don't generic_make_request,
182 * just fail immediately 182 * just fail immediately
183 */ 183 */
184 bio_endio(bio, -EIO); 184 bio_io_error(bio);
185 return; 185 return;
186 } 186 }
187 187
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index fa7d577f3d12..aefd66142eef 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -297,7 +297,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
297 if (unlikely((split->bi_rw & REQ_DISCARD) && 297 if (unlikely((split->bi_rw & REQ_DISCARD) &&
298 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { 298 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
299 /* Just ignore it */ 299 /* Just ignore it */
300 bio_endio(split, 0); 300 bio_endio(split);
301 } else 301 } else
302 generic_make_request(split); 302 generic_make_request(split);
303 } while (split != bio); 303 } while (split != bio);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..ac4381a6625c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -263,7 +263,9 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
263 return; 263 return;
264 } 264 }
265 if (mddev->ro == 1 && unlikely(rw == WRITE)) { 265 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
266 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS); 266 if (bio_sectors(bio) != 0)
267 bio->bi_error = -EROFS;
268 bio_endio(bio);
267 return; 269 return;
268 } 270 }
269 smp_rmb(); /* Ensure implications of 'active' are visible */ 271 smp_rmb(); /* Ensure implications of 'active' are visible */
@@ -377,7 +379,7 @@ static int md_mergeable_bvec(struct request_queue *q,
377 * Generic flush handling for md 379 * Generic flush handling for md
378 */ 380 */
379 381
380static void md_end_flush(struct bio *bio, int err) 382static void md_end_flush(struct bio *bio)
381{ 383{
382 struct md_rdev *rdev = bio->bi_private; 384 struct md_rdev *rdev = bio->bi_private;
383 struct mddev *mddev = rdev->mddev; 385 struct mddev *mddev = rdev->mddev;
@@ -433,7 +435,7 @@ static void md_submit_flush_data(struct work_struct *ws)
433 435
434 if (bio->bi_iter.bi_size == 0) 436 if (bio->bi_iter.bi_size == 0)
435 /* an empty barrier - all done */ 437 /* an empty barrier - all done */
436 bio_endio(bio, 0); 438 bio_endio(bio);
437 else { 439 else {
438 bio->bi_rw &= ~REQ_FLUSH; 440 bio->bi_rw &= ~REQ_FLUSH;
439 mddev->pers->make_request(mddev, bio); 441 mddev->pers->make_request(mddev, bio);
@@ -728,15 +730,13 @@ void md_rdev_clear(struct md_rdev *rdev)
728} 730}
729EXPORT_SYMBOL_GPL(md_rdev_clear); 731EXPORT_SYMBOL_GPL(md_rdev_clear);
730 732
731static void super_written(struct bio *bio, int error) 733static void super_written(struct bio *bio)
732{ 734{
733 struct md_rdev *rdev = bio->bi_private; 735 struct md_rdev *rdev = bio->bi_private;
734 struct mddev *mddev = rdev->mddev; 736 struct mddev *mddev = rdev->mddev;
735 737
736 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 738 if (bio->bi_error) {
737 printk("md: super_written gets error=%d, uptodate=%d\n", 739 printk("md: super_written gets error=%d\n", bio->bi_error);
738 error, test_bit(BIO_UPTODATE, &bio->bi_flags));
739 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
740 md_error(mddev, rdev); 740 md_error(mddev, rdev);
741 } 741 }
742 742
@@ -791,7 +791,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
791 bio_add_page(bio, page, size, 0); 791 bio_add_page(bio, page, size, 0);
792 submit_bio_wait(rw, bio); 792 submit_bio_wait(rw, bio);
793 793
794 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 794 ret = !bio->bi_error;
795 bio_put(bio); 795 bio_put(bio);
796 return ret; 796 return ret;
797} 797}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ac3ede2bd00e..082a489af9d3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -77,18 +77,18 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
77 struct bio *bio = mp_bh->master_bio; 77 struct bio *bio = mp_bh->master_bio;
78 struct mpconf *conf = mp_bh->mddev->private; 78 struct mpconf *conf = mp_bh->mddev->private;
79 79
80 bio_endio(bio, err); 80 bio->bi_error = err;
81 bio_endio(bio);
81 mempool_free(mp_bh, conf->pool); 82 mempool_free(mp_bh, conf->pool);
82} 83}
83 84
84static void multipath_end_request(struct bio *bio, int error) 85static void multipath_end_request(struct bio *bio)
85{ 86{
86 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
87 struct multipath_bh *mp_bh = bio->bi_private; 87 struct multipath_bh *mp_bh = bio->bi_private;
88 struct mpconf *conf = mp_bh->mddev->private; 88 struct mpconf *conf = mp_bh->mddev->private;
89 struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; 89 struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
90 90
91 if (uptodate) 91 if (!bio->bi_error)
92 multipath_end_bh_io(mp_bh, 0); 92 multipath_end_bh_io(mp_bh, 0);
93 else if (!(bio->bi_rw & REQ_RAHEAD)) { 93 else if (!(bio->bi_rw & REQ_RAHEAD)) {
94 /* 94 /*
@@ -101,7 +101,7 @@ static void multipath_end_request(struct bio *bio, int error)
101 (unsigned long long)bio->bi_iter.bi_sector); 101 (unsigned long long)bio->bi_iter.bi_sector);
102 multipath_reschedule_retry(mp_bh); 102 multipath_reschedule_retry(mp_bh);
103 } else 103 } else
104 multipath_end_bh_io(mp_bh, error); 104 multipath_end_bh_io(mp_bh, bio->bi_error);
105 rdev_dec_pending(rdev, conf->mddev); 105 rdev_dec_pending(rdev, conf->mddev);
106} 106}
107 107
@@ -123,7 +123,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
123 123
124 mp_bh->path = multipath_map(conf); 124 mp_bh->path = multipath_map(conf);
125 if (mp_bh->path < 0) { 125 if (mp_bh->path < 0) {
126 bio_endio(bio, -EIO); 126 bio_io_error(bio);
127 mempool_free(mp_bh, conf->pool); 127 mempool_free(mp_bh, conf->pool);
128 return; 128 return;
129 } 129 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index efb654eb5399..e6e0ae56f66b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -543,7 +543,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
543 if (unlikely((split->bi_rw & REQ_DISCARD) && 543 if (unlikely((split->bi_rw & REQ_DISCARD) &&
544 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { 544 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
545 /* Just ignore it */ 545 /* Just ignore it */
546 bio_endio(split, 0); 546 bio_endio(split);
547 } else 547 } else
548 generic_make_request(split); 548 generic_make_request(split);
549 } while (split != bio); 549 } while (split != bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..9aa7d1fb2bc1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -255,9 +255,10 @@ static void call_bio_endio(struct r1bio *r1_bio)
255 done = 1; 255 done = 1;
256 256
257 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 257 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
258 clear_bit(BIO_UPTODATE, &bio->bi_flags); 258 bio->bi_error = -EIO;
259
259 if (done) { 260 if (done) {
260 bio_endio(bio, 0); 261 bio_endio(bio);
261 /* 262 /*
262 * Wake up any possible resync thread that waits for the device 263 * Wake up any possible resync thread that waits for the device
263 * to go idle. 264 * to go idle.
@@ -312,9 +313,9 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
312 return mirror; 313 return mirror;
313} 314}
314 315
315static void raid1_end_read_request(struct bio *bio, int error) 316static void raid1_end_read_request(struct bio *bio)
316{ 317{
317 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 318 int uptodate = !bio->bi_error;
318 struct r1bio *r1_bio = bio->bi_private; 319 struct r1bio *r1_bio = bio->bi_private;
319 int mirror; 320 int mirror;
320 struct r1conf *conf = r1_bio->mddev->private; 321 struct r1conf *conf = r1_bio->mddev->private;
@@ -397,9 +398,8 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
397 } 398 }
398} 399}
399 400
400static void raid1_end_write_request(struct bio *bio, int error) 401static void raid1_end_write_request(struct bio *bio)
401{ 402{
402 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
403 struct r1bio *r1_bio = bio->bi_private; 403 struct r1bio *r1_bio = bio->bi_private;
404 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 404 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
405 struct r1conf *conf = r1_bio->mddev->private; 405 struct r1conf *conf = r1_bio->mddev->private;
@@ -410,7 +410,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
410 /* 410 /*
411 * 'one mirror IO has finished' event handler: 411 * 'one mirror IO has finished' event handler:
412 */ 412 */
413 if (!uptodate) { 413 if (bio->bi_error) {
414 set_bit(WriteErrorSeen, 414 set_bit(WriteErrorSeen,
415 &conf->mirrors[mirror].rdev->flags); 415 &conf->mirrors[mirror].rdev->flags);
416 if (!test_and_set_bit(WantReplacement, 416 if (!test_and_set_bit(WantReplacement,
@@ -793,7 +793,7 @@ static void flush_pending_writes(struct r1conf *conf)
793 if (unlikely((bio->bi_rw & REQ_DISCARD) && 793 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
794 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 794 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
795 /* Just ignore it */ 795 /* Just ignore it */
796 bio_endio(bio, 0); 796 bio_endio(bio);
797 else 797 else
798 generic_make_request(bio); 798 generic_make_request(bio);
799 bio = next; 799 bio = next;
@@ -1068,7 +1068,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1068 if (unlikely((bio->bi_rw & REQ_DISCARD) && 1068 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1069 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1069 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1070 /* Just ignore it */ 1070 /* Just ignore it */
1071 bio_endio(bio, 0); 1071 bio_endio(bio);
1072 else 1072 else
1073 generic_make_request(bio); 1073 generic_make_request(bio);
1074 bio = next; 1074 bio = next;
@@ -1734,7 +1734,7 @@ abort:
1734 return err; 1734 return err;
1735} 1735}
1736 1736
1737static void end_sync_read(struct bio *bio, int error) 1737static void end_sync_read(struct bio *bio)
1738{ 1738{
1739 struct r1bio *r1_bio = bio->bi_private; 1739 struct r1bio *r1_bio = bio->bi_private;
1740 1740
@@ -1745,16 +1745,16 @@ static void end_sync_read(struct bio *bio, int error)
1745 * or re-read if the read failed. 1745 * or re-read if the read failed.
1746 * We don't do much here, just schedule handling by raid1d 1746 * We don't do much here, just schedule handling by raid1d
1747 */ 1747 */
1748 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1748 if (!bio->bi_error)
1749 set_bit(R1BIO_Uptodate, &r1_bio->state); 1749 set_bit(R1BIO_Uptodate, &r1_bio->state);
1750 1750
1751 if (atomic_dec_and_test(&r1_bio->remaining)) 1751 if (atomic_dec_and_test(&r1_bio->remaining))
1752 reschedule_retry(r1_bio); 1752 reschedule_retry(r1_bio);
1753} 1753}
1754 1754
1755static void end_sync_write(struct bio *bio, int error) 1755static void end_sync_write(struct bio *bio)
1756{ 1756{
1757 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1757 int uptodate = !bio->bi_error;
1758 struct r1bio *r1_bio = bio->bi_private; 1758 struct r1bio *r1_bio = bio->bi_private;
1759 struct mddev *mddev = r1_bio->mddev; 1759 struct mddev *mddev = r1_bio->mddev;
1760 struct r1conf *conf = mddev->private; 1760 struct r1conf *conf = mddev->private;
@@ -1941,7 +1941,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
1941 idx ++; 1941 idx ++;
1942 } 1942 }
1943 set_bit(R1BIO_Uptodate, &r1_bio->state); 1943 set_bit(R1BIO_Uptodate, &r1_bio->state);
1944 set_bit(BIO_UPTODATE, &bio->bi_flags); 1944 bio->bi_error = 0;
1945 return 1; 1945 return 1;
1946} 1946}
1947 1947
@@ -1965,15 +1965,14 @@ static void process_checks(struct r1bio *r1_bio)
1965 for (i = 0; i < conf->raid_disks * 2; i++) { 1965 for (i = 0; i < conf->raid_disks * 2; i++) {
1966 int j; 1966 int j;
1967 int size; 1967 int size;
1968 int uptodate; 1968 int error;
1969 struct bio *b = r1_bio->bios[i]; 1969 struct bio *b = r1_bio->bios[i];
1970 if (b->bi_end_io != end_sync_read) 1970 if (b->bi_end_io != end_sync_read)
1971 continue; 1971 continue;
1972 /* fixup the bio for reuse, but preserve BIO_UPTODATE */ 1972 /* fixup the bio for reuse, but preserve errno */
1973 uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); 1973 error = b->bi_error;
1974 bio_reset(b); 1974 bio_reset(b);
1975 if (!uptodate) 1975 b->bi_error = error;
1976 clear_bit(BIO_UPTODATE, &b->bi_flags);
1977 b->bi_vcnt = vcnt; 1976 b->bi_vcnt = vcnt;
1978 b->bi_iter.bi_size = r1_bio->sectors << 9; 1977 b->bi_iter.bi_size = r1_bio->sectors << 9;
1979 b->bi_iter.bi_sector = r1_bio->sector + 1978 b->bi_iter.bi_sector = r1_bio->sector +
@@ -1996,7 +1995,7 @@ static void process_checks(struct r1bio *r1_bio)
1996 } 1995 }
1997 for (primary = 0; primary < conf->raid_disks * 2; primary++) 1996 for (primary = 0; primary < conf->raid_disks * 2; primary++)
1998 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1997 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1999 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1998 !r1_bio->bios[primary]->bi_error) {
2000 r1_bio->bios[primary]->bi_end_io = NULL; 1999 r1_bio->bios[primary]->bi_end_io = NULL;
2001 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2000 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2002 break; 2001 break;
@@ -2006,14 +2005,14 @@ static void process_checks(struct r1bio *r1_bio)
2006 int j; 2005 int j;
2007 struct bio *pbio = r1_bio->bios[primary]; 2006 struct bio *pbio = r1_bio->bios[primary];
2008 struct bio *sbio = r1_bio->bios[i]; 2007 struct bio *sbio = r1_bio->bios[i];
2009 int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); 2008 int error = sbio->bi_error;
2010 2009
2011 if (sbio->bi_end_io != end_sync_read) 2010 if (sbio->bi_end_io != end_sync_read)
2012 continue; 2011 continue;
2013 /* Now we can 'fixup' the BIO_UPTODATE flag */ 2012 /* Now we can 'fixup' the error value */
2014 set_bit(BIO_UPTODATE, &sbio->bi_flags); 2013 sbio->bi_error = 0;
2015 2014
2016 if (uptodate) { 2015 if (!error) {
2017 for (j = vcnt; j-- ; ) { 2016 for (j = vcnt; j-- ; ) {
2018 struct page *p, *s; 2017 struct page *p, *s;
2019 p = pbio->bi_io_vec[j].bv_page; 2018 p = pbio->bi_io_vec[j].bv_page;
@@ -2028,7 +2027,7 @@ static void process_checks(struct r1bio *r1_bio)
2028 if (j >= 0) 2027 if (j >= 0)
2029 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2028 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2030 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 2029 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2031 && uptodate)) { 2030 && !error)) {
2032 /* No need to write to this device. */ 2031 /* No need to write to this device. */
2033 sbio->bi_end_io = NULL; 2032 sbio->bi_end_io = NULL;
2034 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 2033 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2269,11 +2268,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
2269 struct bio *bio = r1_bio->bios[m]; 2268 struct bio *bio = r1_bio->bios[m];
2270 if (bio->bi_end_io == NULL) 2269 if (bio->bi_end_io == NULL)
2271 continue; 2270 continue;
2272 if (test_bit(BIO_UPTODATE, &bio->bi_flags) && 2271 if (!bio->bi_error &&
2273 test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2272 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2274 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 2273 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2275 } 2274 }
2276 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 2275 if (bio->bi_error &&
2277 test_bit(R1BIO_WriteError, &r1_bio->state)) { 2276 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2278 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 2277 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2279 md_error(conf->mddev, rdev); 2278 md_error(conf->mddev, rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..929e9a26d81b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -101,7 +101,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore);
101static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 101static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
102 int *skipped); 102 int *skipped);
103static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); 103static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
104static void end_reshape_write(struct bio *bio, int error); 104static void end_reshape_write(struct bio *bio);
105static void end_reshape(struct r10conf *conf); 105static void end_reshape(struct r10conf *conf);
106 106
107static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 107static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -307,9 +307,9 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
307 } else 307 } else
308 done = 1; 308 done = 1;
309 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 309 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
310 clear_bit(BIO_UPTODATE, &bio->bi_flags); 310 bio->bi_error = -EIO;
311 if (done) { 311 if (done) {
312 bio_endio(bio, 0); 312 bio_endio(bio);
313 /* 313 /*
314 * Wake up any possible resync thread that waits for the device 314 * Wake up any possible resync thread that waits for the device
315 * to go idle. 315 * to go idle.
@@ -358,9 +358,9 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
358 return r10_bio->devs[slot].devnum; 358 return r10_bio->devs[slot].devnum;
359} 359}
360 360
361static void raid10_end_read_request(struct bio *bio, int error) 361static void raid10_end_read_request(struct bio *bio)
362{ 362{
363 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 363 int uptodate = !bio->bi_error;
364 struct r10bio *r10_bio = bio->bi_private; 364 struct r10bio *r10_bio = bio->bi_private;
365 int slot, dev; 365 int slot, dev;
366 struct md_rdev *rdev; 366 struct md_rdev *rdev;
@@ -438,9 +438,8 @@ static void one_write_done(struct r10bio *r10_bio)
438 } 438 }
439} 439}
440 440
441static void raid10_end_write_request(struct bio *bio, int error) 441static void raid10_end_write_request(struct bio *bio)
442{ 442{
443 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
444 struct r10bio *r10_bio = bio->bi_private; 443 struct r10bio *r10_bio = bio->bi_private;
445 int dev; 444 int dev;
446 int dec_rdev = 1; 445 int dec_rdev = 1;
@@ -460,7 +459,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
460 /* 459 /*
461 * this branch is our 'one mirror IO has finished' event handler: 460 * this branch is our 'one mirror IO has finished' event handler:
462 */ 461 */
463 if (!uptodate) { 462 if (bio->bi_error) {
464 if (repl) 463 if (repl)
465 /* Never record new bad blocks to replacement, 464 /* Never record new bad blocks to replacement,
466 * just fail it. 465 * just fail it.
@@ -957,7 +956,7 @@ static void flush_pending_writes(struct r10conf *conf)
957 if (unlikely((bio->bi_rw & REQ_DISCARD) && 956 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
958 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 957 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
959 /* Just ignore it */ 958 /* Just ignore it */
960 bio_endio(bio, 0); 959 bio_endio(bio);
961 else 960 else
962 generic_make_request(bio); 961 generic_make_request(bio);
963 bio = next; 962 bio = next;
@@ -1133,7 +1132,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1133 if (unlikely((bio->bi_rw & REQ_DISCARD) && 1132 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1134 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1133 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1135 /* Just ignore it */ 1134 /* Just ignore it */
1136 bio_endio(bio, 0); 1135 bio_endio(bio);
1137 else 1136 else
1138 generic_make_request(bio); 1137 generic_make_request(bio);
1139 bio = next; 1138 bio = next;
@@ -1916,7 +1915,7 @@ abort:
1916 return err; 1915 return err;
1917} 1916}
1918 1917
1919static void end_sync_read(struct bio *bio, int error) 1918static void end_sync_read(struct bio *bio)
1920{ 1919{
1921 struct r10bio *r10_bio = bio->bi_private; 1920 struct r10bio *r10_bio = bio->bi_private;
1922 struct r10conf *conf = r10_bio->mddev->private; 1921 struct r10conf *conf = r10_bio->mddev->private;
@@ -1928,7 +1927,7 @@ static void end_sync_read(struct bio *bio, int error)
1928 } else 1927 } else
1929 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1928 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1930 1929
1931 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1930 if (!bio->bi_error)
1932 set_bit(R10BIO_Uptodate, &r10_bio->state); 1931 set_bit(R10BIO_Uptodate, &r10_bio->state);
1933 else 1932 else
1934 /* The write handler will notice the lack of 1933 /* The write handler will notice the lack of
@@ -1977,9 +1976,8 @@ static void end_sync_request(struct r10bio *r10_bio)
1977 } 1976 }
1978} 1977}
1979 1978
1980static void end_sync_write(struct bio *bio, int error) 1979static void end_sync_write(struct bio *bio)
1981{ 1980{
1982 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1983 struct r10bio *r10_bio = bio->bi_private; 1981 struct r10bio *r10_bio = bio->bi_private;
1984 struct mddev *mddev = r10_bio->mddev; 1982 struct mddev *mddev = r10_bio->mddev;
1985 struct r10conf *conf = mddev->private; 1983 struct r10conf *conf = mddev->private;
@@ -1996,7 +1994,7 @@ static void end_sync_write(struct bio *bio, int error)
1996 else 1994 else
1997 rdev = conf->mirrors[d].rdev; 1995 rdev = conf->mirrors[d].rdev;
1998 1996
1999 if (!uptodate) { 1997 if (bio->bi_error) {
2000 if (repl) 1998 if (repl)
2001 md_error(mddev, rdev); 1999 md_error(mddev, rdev);
2002 else { 2000 else {
@@ -2044,7 +2042,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2044 2042
2045 /* find the first device with a block */ 2043 /* find the first device with a block */
2046 for (i=0; i<conf->copies; i++) 2044 for (i=0; i<conf->copies; i++)
2047 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) 2045 if (!r10_bio->devs[i].bio->bi_error)
2048 break; 2046 break;
2049 2047
2050 if (i == conf->copies) 2048 if (i == conf->copies)
@@ -2064,7 +2062,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2064 continue; 2062 continue;
2065 if (i == first) 2063 if (i == first)
2066 continue; 2064 continue;
2067 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { 2065 if (!r10_bio->devs[i].bio->bi_error) {
2068 /* We know that the bi_io_vec layout is the same for 2066 /* We know that the bi_io_vec layout is the same for
2069 * both 'first' and 'i', so we just compare them. 2067 * both 'first' and 'i', so we just compare them.
2070 * All vec entries are PAGE_SIZE; 2068 * All vec entries are PAGE_SIZE;
@@ -2706,8 +2704,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2706 rdev = conf->mirrors[dev].rdev; 2704 rdev = conf->mirrors[dev].rdev;
2707 if (r10_bio->devs[m].bio == NULL) 2705 if (r10_bio->devs[m].bio == NULL)
2708 continue; 2706 continue;
2709 if (test_bit(BIO_UPTODATE, 2707 if (!r10_bio->devs[m].bio->bi_error) {
2710 &r10_bio->devs[m].bio->bi_flags)) {
2711 rdev_clear_badblocks( 2708 rdev_clear_badblocks(
2712 rdev, 2709 rdev,
2713 r10_bio->devs[m].addr, 2710 r10_bio->devs[m].addr,
@@ -2722,8 +2719,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2722 rdev = conf->mirrors[dev].replacement; 2719 rdev = conf->mirrors[dev].replacement;
2723 if (r10_bio->devs[m].repl_bio == NULL) 2720 if (r10_bio->devs[m].repl_bio == NULL)
2724 continue; 2721 continue;
2725 if (test_bit(BIO_UPTODATE, 2722
2726 &r10_bio->devs[m].repl_bio->bi_flags)) { 2723 if (!r10_bio->devs[m].repl_bio->bi_error) {
2727 rdev_clear_badblocks( 2724 rdev_clear_badblocks(
2728 rdev, 2725 rdev,
2729 r10_bio->devs[m].addr, 2726 r10_bio->devs[m].addr,
@@ -2748,8 +2745,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2748 r10_bio->devs[m].addr, 2745 r10_bio->devs[m].addr,
2749 r10_bio->sectors, 0); 2746 r10_bio->sectors, 0);
2750 rdev_dec_pending(rdev, conf->mddev); 2747 rdev_dec_pending(rdev, conf->mddev);
2751 } else if (bio != NULL && 2748 } else if (bio != NULL && bio->bi_error) {
2752 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2753 if (!narrow_write_error(r10_bio, m)) { 2749 if (!narrow_write_error(r10_bio, m)) {
2754 md_error(conf->mddev, rdev); 2750 md_error(conf->mddev, rdev);
2755 set_bit(R10BIO_Degraded, 2751 set_bit(R10BIO_Degraded,
@@ -3263,7 +3259,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3263 3259
3264 bio = r10_bio->devs[i].bio; 3260 bio = r10_bio->devs[i].bio;
3265 bio_reset(bio); 3261 bio_reset(bio);
3266 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3262 bio->bi_error = -EIO;
3267 if (conf->mirrors[d].rdev == NULL || 3263 if (conf->mirrors[d].rdev == NULL ||
3268 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 3264 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
3269 continue; 3265 continue;
@@ -3300,7 +3296,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3300 /* Need to set up for writing to the replacement */ 3296 /* Need to set up for writing to the replacement */
3301 bio = r10_bio->devs[i].repl_bio; 3297 bio = r10_bio->devs[i].repl_bio;
3302 bio_reset(bio); 3298 bio_reset(bio);
3303 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3299 bio->bi_error = -EIO;
3304 3300
3305 sector = r10_bio->devs[i].addr; 3301 sector = r10_bio->devs[i].addr;
3306 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3302 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -3377,7 +3373,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3377 3373
3378 if (bio->bi_end_io == end_sync_read) { 3374 if (bio->bi_end_io == end_sync_read) {
3379 md_sync_acct(bio->bi_bdev, nr_sectors); 3375 md_sync_acct(bio->bi_bdev, nr_sectors);
3380 set_bit(BIO_UPTODATE, &bio->bi_flags); 3376 bio->bi_error = 0;
3381 generic_make_request(bio); 3377 generic_make_request(bio);
3382 } 3378 }
3383 } 3379 }
@@ -4380,7 +4376,7 @@ read_more:
4380 read_bio->bi_end_io = end_sync_read; 4376 read_bio->bi_end_io = end_sync_read;
4381 read_bio->bi_rw = READ; 4377 read_bio->bi_rw = READ;
4382 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4378 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4383 __set_bit(BIO_UPTODATE, &read_bio->bi_flags); 4379 read_bio->bi_error = 0;
4384 read_bio->bi_vcnt = 0; 4380 read_bio->bi_vcnt = 0;
4385 read_bio->bi_iter.bi_size = 0; 4381 read_bio->bi_iter.bi_size = 0;
4386 r10_bio->master_bio = read_bio; 4382 r10_bio->master_bio = read_bio;
@@ -4601,9 +4597,8 @@ static int handle_reshape_read_error(struct mddev *mddev,
4601 return 0; 4597 return 0;
4602} 4598}
4603 4599
4604static void end_reshape_write(struct bio *bio, int error) 4600static void end_reshape_write(struct bio *bio)
4605{ 4601{
4606 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4607 struct r10bio *r10_bio = bio->bi_private; 4602 struct r10bio *r10_bio = bio->bi_private;
4608 struct mddev *mddev = r10_bio->mddev; 4603 struct mddev *mddev = r10_bio->mddev;
4609 struct r10conf *conf = mddev->private; 4604 struct r10conf *conf = mddev->private;
@@ -4620,7 +4615,7 @@ static void end_reshape_write(struct bio *bio, int error)
4620 rdev = conf->mirrors[d].rdev; 4615 rdev = conf->mirrors[d].rdev;
4621 } 4616 }
4622 4617
4623 if (!uptodate) { 4618 if (bio->bi_error) {
4624 /* FIXME should record badblock */ 4619 /* FIXME should record badblock */
4625 md_error(mddev, rdev); 4620 md_error(mddev, rdev);
4626 } 4621 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..84d6eec1033e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -233,7 +233,7 @@ static void return_io(struct bio *return_bi)
233 bi->bi_iter.bi_size = 0; 233 bi->bi_iter.bi_size = 0;
234 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 234 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
235 bi, 0); 235 bi, 0);
236 bio_endio(bi, 0); 236 bio_endio(bi);
237 bi = return_bi; 237 bi = return_bi;
238 } 238 }
239} 239}
@@ -887,9 +887,9 @@ static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
887} 887}
888 888
889static void 889static void
890raid5_end_read_request(struct bio *bi, int error); 890raid5_end_read_request(struct bio *bi);
891static void 891static void
892raid5_end_write_request(struct bio *bi, int error); 892raid5_end_write_request(struct bio *bi);
893 893
894static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 894static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
895{ 895{
@@ -2277,12 +2277,11 @@ static void shrink_stripes(struct r5conf *conf)
2277 conf->slab_cache = NULL; 2277 conf->slab_cache = NULL;
2278} 2278}
2279 2279
2280static void raid5_end_read_request(struct bio * bi, int error) 2280static void raid5_end_read_request(struct bio * bi)
2281{ 2281{
2282 struct stripe_head *sh = bi->bi_private; 2282 struct stripe_head *sh = bi->bi_private;
2283 struct r5conf *conf = sh->raid_conf; 2283 struct r5conf *conf = sh->raid_conf;
2284 int disks = sh->disks, i; 2284 int disks = sh->disks, i;
2285 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
2286 char b[BDEVNAME_SIZE]; 2285 char b[BDEVNAME_SIZE];
2287 struct md_rdev *rdev = NULL; 2286 struct md_rdev *rdev = NULL;
2288 sector_t s; 2287 sector_t s;
@@ -2291,9 +2290,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
2291 if (bi == &sh->dev[i].req) 2290 if (bi == &sh->dev[i].req)
2292 break; 2291 break;
2293 2292
2294 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 2293 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
2295 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2294 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2296 uptodate); 2295 bi->bi_error);
2297 if (i == disks) { 2296 if (i == disks) {
2298 BUG(); 2297 BUG();
2299 return; 2298 return;
@@ -2312,7 +2311,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
2312 s = sh->sector + rdev->new_data_offset; 2311 s = sh->sector + rdev->new_data_offset;
2313 else 2312 else
2314 s = sh->sector + rdev->data_offset; 2313 s = sh->sector + rdev->data_offset;
2315 if (uptodate) { 2314 if (!bi->bi_error) {
2316 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2315 set_bit(R5_UPTODATE, &sh->dev[i].flags);
2317 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2316 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2318 /* Note that this cannot happen on a 2317 /* Note that this cannot happen on a
@@ -2400,13 +2399,12 @@ static void raid5_end_read_request(struct bio * bi, int error)
2400 release_stripe(sh); 2399 release_stripe(sh);
2401} 2400}
2402 2401
2403static void raid5_end_write_request(struct bio *bi, int error) 2402static void raid5_end_write_request(struct bio *bi)
2404{ 2403{
2405 struct stripe_head *sh = bi->bi_private; 2404 struct stripe_head *sh = bi->bi_private;
2406 struct r5conf *conf = sh->raid_conf; 2405 struct r5conf *conf = sh->raid_conf;
2407 int disks = sh->disks, i; 2406 int disks = sh->disks, i;
2408 struct md_rdev *uninitialized_var(rdev); 2407 struct md_rdev *uninitialized_var(rdev);
2409 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
2410 sector_t first_bad; 2408 sector_t first_bad;
2411 int bad_sectors; 2409 int bad_sectors;
2412 int replacement = 0; 2410 int replacement = 0;
@@ -2429,23 +2427,23 @@ static void raid5_end_write_request(struct bio *bi, int error)
2429 break; 2427 break;
2430 } 2428 }
2431 } 2429 }
2432 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 2430 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
2433 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2431 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2434 uptodate); 2432 bi->bi_error);
2435 if (i == disks) { 2433 if (i == disks) {
2436 BUG(); 2434 BUG();
2437 return; 2435 return;
2438 } 2436 }
2439 2437
2440 if (replacement) { 2438 if (replacement) {
2441 if (!uptodate) 2439 if (bi->bi_error)
2442 md_error(conf->mddev, rdev); 2440 md_error(conf->mddev, rdev);
2443 else if (is_badblock(rdev, sh->sector, 2441 else if (is_badblock(rdev, sh->sector,
2444 STRIPE_SECTORS, 2442 STRIPE_SECTORS,
2445 &first_bad, &bad_sectors)) 2443 &first_bad, &bad_sectors))
2446 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2444 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2447 } else { 2445 } else {
2448 if (!uptodate) { 2446 if (bi->bi_error) {
2449 set_bit(STRIPE_DEGRADED, &sh->state); 2447 set_bit(STRIPE_DEGRADED, &sh->state);
2450 set_bit(WriteErrorSeen, &rdev->flags); 2448 set_bit(WriteErrorSeen, &rdev->flags);
2451 set_bit(R5_WriteError, &sh->dev[i].flags); 2449 set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2466,7 +2464,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
2466 } 2464 }
2467 rdev_dec_pending(rdev, conf->mddev); 2465 rdev_dec_pending(rdev, conf->mddev);
2468 2466
2469 if (sh->batch_head && !uptodate && !replacement) 2467 if (sh->batch_head && bi->bi_error && !replacement)
2470 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2468 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2471 2469
2472 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2470 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -3107,7 +3105,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3107 while (bi && bi->bi_iter.bi_sector < 3105 while (bi && bi->bi_iter.bi_sector <
3108 sh->dev[i].sector + STRIPE_SECTORS) { 3106 sh->dev[i].sector + STRIPE_SECTORS) {
3109 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3107 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3110 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3108
3109 bi->bi_error = -EIO;
3111 if (!raid5_dec_bi_active_stripes(bi)) { 3110 if (!raid5_dec_bi_active_stripes(bi)) {
3112 md_write_end(conf->mddev); 3111 md_write_end(conf->mddev);
3113 bi->bi_next = *return_bi; 3112 bi->bi_next = *return_bi;
@@ -3131,7 +3130,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3131 while (bi && bi->bi_iter.bi_sector < 3130 while (bi && bi->bi_iter.bi_sector <
3132 sh->dev[i].sector + STRIPE_SECTORS) { 3131 sh->dev[i].sector + STRIPE_SECTORS) {
3133 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3132 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3134 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3133
3134 bi->bi_error = -EIO;
3135 if (!raid5_dec_bi_active_stripes(bi)) { 3135 if (!raid5_dec_bi_active_stripes(bi)) {
3136 md_write_end(conf->mddev); 3136 md_write_end(conf->mddev);
3137 bi->bi_next = *return_bi; 3137 bi->bi_next = *return_bi;
@@ -3156,7 +3156,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3156 sh->dev[i].sector + STRIPE_SECTORS) { 3156 sh->dev[i].sector + STRIPE_SECTORS) {
3157 struct bio *nextbi = 3157 struct bio *nextbi =
3158 r5_next_bio(bi, sh->dev[i].sector); 3158 r5_next_bio(bi, sh->dev[i].sector);
3159 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3159
3160 bi->bi_error = -EIO;
3160 if (!raid5_dec_bi_active_stripes(bi)) { 3161 if (!raid5_dec_bi_active_stripes(bi)) {
3161 bi->bi_next = *return_bi; 3162 bi->bi_next = *return_bi;
3162 *return_bi = bi; 3163 *return_bi = bi;
@@ -4749,12 +4750,11 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
4749 * first). 4750 * first).
4750 * If the read failed.. 4751 * If the read failed..
4751 */ 4752 */
4752static void raid5_align_endio(struct bio *bi, int error) 4753static void raid5_align_endio(struct bio *bi)
4753{ 4754{
4754 struct bio* raid_bi = bi->bi_private; 4755 struct bio* raid_bi = bi->bi_private;
4755 struct mddev *mddev; 4756 struct mddev *mddev;
4756 struct r5conf *conf; 4757 struct r5conf *conf;
4757 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
4758 struct md_rdev *rdev; 4758 struct md_rdev *rdev;
4759 4759
4760 bio_put(bi); 4760 bio_put(bi);
@@ -4766,10 +4766,10 @@ static void raid5_align_endio(struct bio *bi, int error)
4766 4766
4767 rdev_dec_pending(rdev, conf->mddev); 4767 rdev_dec_pending(rdev, conf->mddev);
4768 4768
4769 if (!error && uptodate) { 4769 if (!bi->bi_error) {
4770 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), 4770 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
4771 raid_bi, 0); 4771 raid_bi, 0);
4772 bio_endio(raid_bi, 0); 4772 bio_endio(raid_bi);
4773 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4773 if (atomic_dec_and_test(&conf->active_aligned_reads))
4774 wake_up(&conf->wait_for_quiescent); 4774 wake_up(&conf->wait_for_quiescent);
4775 return; 4775 return;
@@ -5133,7 +5133,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
5133 remaining = raid5_dec_bi_active_stripes(bi); 5133 remaining = raid5_dec_bi_active_stripes(bi);
5134 if (remaining == 0) { 5134 if (remaining == 0) {
5135 md_write_end(mddev); 5135 md_write_end(mddev);
5136 bio_endio(bi, 0); 5136 bio_endio(bi);
5137 } 5137 }
5138} 5138}
5139 5139
@@ -5297,7 +5297,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
5297 release_stripe_plug(mddev, sh); 5297 release_stripe_plug(mddev, sh);
5298 } else { 5298 } else {
5299 /* cannot get stripe for read-ahead, just give-up */ 5299 /* cannot get stripe for read-ahead, just give-up */
5300 clear_bit(BIO_UPTODATE, &bi->bi_flags); 5300 bi->bi_error = -EIO;
5301 break; 5301 break;
5302 } 5302 }
5303 } 5303 }
@@ -5311,7 +5311,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
5311 5311
5312 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 5312 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
5313 bi, 0); 5313 bi, 0);
5314 bio_endio(bi, 0); 5314 bio_endio(bi);
5315 } 5315 }
5316} 5316}
5317 5317
@@ -5707,7 +5707,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5707 if (remaining == 0) { 5707 if (remaining == 0) {
5708 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 5708 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
5709 raid_bio, 0); 5709 raid_bio, 0);
5710 bio_endio(raid_bio, 0); 5710 bio_endio(raid_bio);
5711 } 5711 }
5712 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5712 if (atomic_dec_and_test(&conf->active_aligned_reads))
5713 wake_up(&conf->wait_for_quiescent); 5713 wake_up(&conf->wait_for_quiescent);
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 4f97b248c236..0df77cb07df6 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -180,7 +180,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
180 * another kernel subsystem, and we just pass it through. 180 * another kernel subsystem, and we just pass it through.
181 */ 181 */
182 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 182 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
183 err = -EIO; 183 bio->bi_error = -EIO;
184 goto out; 184 goto out;
185 } 185 }
186 186
@@ -199,6 +199,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
199 "io error in %s sector %lld, len %d,\n", 199 "io error in %s sector %lld, len %d,\n",
200 (rw == READ) ? "READ" : "WRITE", 200 (rw == READ) ? "READ" : "WRITE",
201 (unsigned long long) iter.bi_sector, len); 201 (unsigned long long) iter.bi_sector, len);
202 bio->bi_error = err;
202 break; 203 break;
203 } 204 }
204 } 205 }
@@ -206,7 +207,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
206 nd_iostat_end(bio, start); 207 nd_iostat_end(bio, start);
207 208
208 out: 209 out:
209 bio_endio(bio, err); 210 bio_endio(bio);
210} 211}
211 212
212static int nd_blk_rw_bytes(struct nd_namespace_common *ndns, 213static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 411c7b2bb37a..341202ed32b4 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1189,7 +1189,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
1189 * another kernel subsystem, and we just pass it through. 1189 * another kernel subsystem, and we just pass it through.
1190 */ 1190 */
1191 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1191 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1192 err = -EIO; 1192 bio->bi_error = -EIO;
1193 goto out; 1193 goto out;
1194 } 1194 }
1195 1195
@@ -1211,6 +1211,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
1211 "io error in %s sector %lld, len %d,\n", 1211 "io error in %s sector %lld, len %d,\n",
1212 (rw == READ) ? "READ" : "WRITE", 1212 (rw == READ) ? "READ" : "WRITE",
1213 (unsigned long long) iter.bi_sector, len); 1213 (unsigned long long) iter.bi_sector, len);
1214 bio->bi_error = err;
1214 break; 1215 break;
1215 } 1216 }
1216 } 1217 }
@@ -1218,7 +1219,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
1218 nd_iostat_end(bio, start); 1219 nd_iostat_end(bio, start);
1219 1220
1220out: 1221out:
1221 bio_endio(bio, err); 1222 bio_endio(bio);
1222} 1223}
1223 1224
1224static int btt_rw_page(struct block_device *bdev, sector_t sector, 1225static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ade9eb917a4d..4c079d5cb539 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -77,7 +77,7 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
77 if (bio_data_dir(bio)) 77 if (bio_data_dir(bio))
78 wmb_pmem(); 78 wmb_pmem();
79 79
80 bio_endio(bio, 0); 80 bio_endio(bio);
81} 81}
82 82
83static int pmem_rw_page(struct block_device *bdev, sector_t sector, 83static int pmem_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index da212813f2d5..8bcb822b0bac 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -871,7 +871,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
871 } 871 }
872 bytes_done += bvec.bv_len; 872 bytes_done += bvec.bv_len;
873 } 873 }
874 bio_endio(bio, 0); 874 bio_endio(bio);
875 return; 875 return;
876fail: 876fail:
877 bio_io_error(bio); 877 bio_io_error(bio);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 7d4e9397ac31..93856b9b6214 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -220,8 +220,7 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
220 index++; 220 index++;
221 } 221 }
222 } 222 }
223 set_bit(BIO_UPTODATE, &bio->bi_flags); 223 bio_endio(bio);
224 bio_endio(bio, 0);
225 return; 224 return;
226fail: 225fail:
227 bio_io_error(bio); 226 bio_io_error(bio);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6d88d24e6cce..5a9982f5d5d6 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -306,20 +306,13 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
306 kfree(ibr); 306 kfree(ibr);
307} 307}
308 308
309static void iblock_bio_done(struct bio *bio, int err) 309static void iblock_bio_done(struct bio *bio)
310{ 310{
311 struct se_cmd *cmd = bio->bi_private; 311 struct se_cmd *cmd = bio->bi_private;
312 struct iblock_req *ibr = cmd->priv; 312 struct iblock_req *ibr = cmd->priv;
313 313
314 /* 314 if (bio->bi_error) {
315 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 315 pr_err("bio error: %p, err: %d\n", bio, bio->bi_error);
316 */
317 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
318 err = -EIO;
319
320 if (err != 0) {
321 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
322 " err: %d\n", bio, err);
323 /* 316 /*
324 * Bump the ib_bio_err_cnt and release bio. 317 * Bump the ib_bio_err_cnt and release bio.
325 */ 318 */
@@ -370,15 +363,15 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
370 blk_finish_plug(&plug); 363 blk_finish_plug(&plug);
371} 364}
372 365
373static void iblock_end_io_flush(struct bio *bio, int err) 366static void iblock_end_io_flush(struct bio *bio)
374{ 367{
375 struct se_cmd *cmd = bio->bi_private; 368 struct se_cmd *cmd = bio->bi_private;
376 369
377 if (err) 370 if (bio->bi_error)
378 pr_err("IBLOCK: cache flush failed: %d\n", err); 371 pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
379 372
380 if (cmd) { 373 if (cmd) {
381 if (err) 374 if (bio->bi_error)
382 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 375 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
383 else 376 else
384 target_complete_cmd(cmd, SAM_STAT_GOOD); 377 target_complete_cmd(cmd, SAM_STAT_GOOD);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 08e9084ee615..de18790eb21c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -852,7 +852,7 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
852 return bl; 852 return bl;
853} 853}
854 854
855static void pscsi_bi_endio(struct bio *bio, int error) 855static void pscsi_bi_endio(struct bio *bio)
856{ 856{
857 bio_put(bio); 857 bio_put(bio);
858} 858}
@@ -973,7 +973,7 @@ fail:
973 while (*hbio) { 973 while (*hbio) {
974 bio = *hbio; 974 bio = *hbio;
975 *hbio = (*hbio)->bi_next; 975 *hbio = (*hbio)->bi_next;
976 bio_endio(bio, 0); /* XXX: should be error */ 976 bio_endio(bio);
977 } 977 }
978 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 978 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
979} 979}
@@ -1061,7 +1061,7 @@ fail_free_bio:
1061 while (hbio) { 1061 while (hbio) {
1062 struct bio *bio = hbio; 1062 struct bio *bio = hbio;
1063 hbio = hbio->bi_next; 1063 hbio = hbio->bi_next;
1064 bio_endio(bio, 0); /* XXX: should be error */ 1064 bio_endio(bio);
1065 } 1065 }
1066 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1066 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1067fail: 1067fail:
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index ce7dec88f4b8..541fbfaed276 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -343,7 +343,7 @@ static int btrfsic_process_written_superblock(
343 struct btrfsic_state *state, 343 struct btrfsic_state *state,
344 struct btrfsic_block *const block, 344 struct btrfsic_block *const block,
345 struct btrfs_super_block *const super_hdr); 345 struct btrfs_super_block *const super_hdr);
346static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status); 346static void btrfsic_bio_end_io(struct bio *bp);
347static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate); 347static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
348static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, 348static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
349 const struct btrfsic_block *block, 349 const struct btrfsic_block *block,
@@ -2207,7 +2207,7 @@ continue_loop:
2207 goto again; 2207 goto again;
2208} 2208}
2209 2209
2210static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) 2210static void btrfsic_bio_end_io(struct bio *bp)
2211{ 2211{
2212 struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private; 2212 struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
2213 int iodone_w_error; 2213 int iodone_w_error;
@@ -2215,7 +2215,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
2215 /* mutex is not held! This is not save if IO is not yet completed 2215 /* mutex is not held! This is not save if IO is not yet completed
2216 * on umount */ 2216 * on umount */
2217 iodone_w_error = 0; 2217 iodone_w_error = 0;
2218 if (bio_error_status) 2218 if (bp->bi_error)
2219 iodone_w_error = 1; 2219 iodone_w_error = 1;
2220 2220
2221 BUG_ON(NULL == block); 2221 BUG_ON(NULL == block);
@@ -2230,7 +2230,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
2230 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2230 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
2231 printk(KERN_INFO 2231 printk(KERN_INFO
2232 "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", 2232 "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
2233 bio_error_status, 2233 bp->bi_error,
2234 btrfsic_get_block_type(dev_state->state, block), 2234 btrfsic_get_block_type(dev_state->state, block),
2235 block->logical_bytenr, dev_state->name, 2235 block->logical_bytenr, dev_state->name,
2236 block->dev_bytenr, block->mirror_num); 2236 block->dev_bytenr, block->mirror_num);
@@ -2252,7 +2252,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
2252 block = next_block; 2252 block = next_block;
2253 } while (NULL != block); 2253 } while (NULL != block);
2254 2254
2255 bp->bi_end_io(bp, bio_error_status); 2255 bp->bi_end_io(bp);
2256} 2256}
2257 2257
2258static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate) 2258static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ce62324c78e7..302266ec2cdb 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -152,7 +152,7 @@ fail:
152 * The compressed pages are freed here, and it must be run 152 * The compressed pages are freed here, and it must be run
153 * in process context 153 * in process context
154 */ 154 */
155static void end_compressed_bio_read(struct bio *bio, int err) 155static void end_compressed_bio_read(struct bio *bio)
156{ 156{
157 struct compressed_bio *cb = bio->bi_private; 157 struct compressed_bio *cb = bio->bi_private;
158 struct inode *inode; 158 struct inode *inode;
@@ -160,7 +160,7 @@ static void end_compressed_bio_read(struct bio *bio, int err)
160 unsigned long index; 160 unsigned long index;
161 int ret; 161 int ret;
162 162
163 if (err) 163 if (bio->bi_error)
164 cb->errors = 1; 164 cb->errors = 1;
165 165
166 /* if there are more bios still pending for this compressed 166 /* if there are more bios still pending for this compressed
@@ -210,7 +210,7 @@ csum_failed:
210 bio_for_each_segment_all(bvec, cb->orig_bio, i) 210 bio_for_each_segment_all(bvec, cb->orig_bio, i)
211 SetPageChecked(bvec->bv_page); 211 SetPageChecked(bvec->bv_page);
212 212
213 bio_endio(cb->orig_bio, 0); 213 bio_endio(cb->orig_bio);
214 } 214 }
215 215
216 /* finally free the cb struct */ 216 /* finally free the cb struct */
@@ -266,7 +266,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
266 * This also calls the writeback end hooks for the file pages so that 266 * This also calls the writeback end hooks for the file pages so that
267 * metadata and checksums can be updated in the file. 267 * metadata and checksums can be updated in the file.
268 */ 268 */
269static void end_compressed_bio_write(struct bio *bio, int err) 269static void end_compressed_bio_write(struct bio *bio)
270{ 270{
271 struct extent_io_tree *tree; 271 struct extent_io_tree *tree;
272 struct compressed_bio *cb = bio->bi_private; 272 struct compressed_bio *cb = bio->bi_private;
@@ -274,7 +274,7 @@ static void end_compressed_bio_write(struct bio *bio, int err)
274 struct page *page; 274 struct page *page;
275 unsigned long index; 275 unsigned long index;
276 276
277 if (err) 277 if (bio->bi_error)
278 cb->errors = 1; 278 cb->errors = 1;
279 279
280 /* if there are more bios still pending for this compressed 280 /* if there are more bios still pending for this compressed
@@ -293,7 +293,7 @@ static void end_compressed_bio_write(struct bio *bio, int err)
293 cb->start, 293 cb->start,
294 cb->start + cb->len - 1, 294 cb->start + cb->len - 1,
295 NULL, 295 NULL,
296 err ? 0 : 1); 296 bio->bi_error ? 0 : 1);
297 cb->compressed_pages[0]->mapping = NULL; 297 cb->compressed_pages[0]->mapping = NULL;
298 298
299 end_compressed_writeback(inode, cb); 299 end_compressed_writeback(inode, cb);
@@ -697,8 +697,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
697 697
698 ret = btrfs_map_bio(root, READ, comp_bio, 698 ret = btrfs_map_bio(root, READ, comp_bio,
699 mirror_num, 0); 699 mirror_num, 0);
700 if (ret) 700 if (ret) {
701 bio_endio(comp_bio, ret); 701 bio->bi_error = ret;
702 bio_endio(comp_bio);
703 }
702 704
703 bio_put(comp_bio); 705 bio_put(comp_bio);
704 706
@@ -724,8 +726,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
724 } 726 }
725 727
726 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); 728 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
727 if (ret) 729 if (ret) {
728 bio_endio(comp_bio, ret); 730 bio->bi_error = ret;
731 bio_endio(comp_bio);
732 }
729 733
730 bio_put(comp_bio); 734 bio_put(comp_bio);
731 return 0; 735 return 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..a8c0de888a9d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -703,7 +703,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
703 return -EIO; /* we fixed nothing */ 703 return -EIO; /* we fixed nothing */
704} 704}
705 705
706static void end_workqueue_bio(struct bio *bio, int err) 706static void end_workqueue_bio(struct bio *bio)
707{ 707{
708 struct btrfs_end_io_wq *end_io_wq = bio->bi_private; 708 struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
709 struct btrfs_fs_info *fs_info; 709 struct btrfs_fs_info *fs_info;
@@ -711,7 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
711 btrfs_work_func_t func; 711 btrfs_work_func_t func;
712 712
713 fs_info = end_io_wq->info; 713 fs_info = end_io_wq->info;
714 end_io_wq->error = err; 714 end_io_wq->error = bio->bi_error;
715 715
716 if (bio->bi_rw & REQ_WRITE) { 716 if (bio->bi_rw & REQ_WRITE) {
717 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { 717 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -808,7 +808,8 @@ static void run_one_async_done(struct btrfs_work *work)
808 808
809 /* If an error occured we just want to clean up the bio and move on */ 809 /* If an error occured we just want to clean up the bio and move on */
810 if (async->error) { 810 if (async->error) {
811 bio_endio(async->bio, async->error); 811 async->bio->bi_error = async->error;
812 bio_endio(async->bio);
812 return; 813 return;
813 } 814 }
814 815
@@ -908,8 +909,10 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
908 * submission context. Just jump into btrfs_map_bio 909 * submission context. Just jump into btrfs_map_bio
909 */ 910 */
910 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); 911 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
911 if (ret) 912 if (ret) {
912 bio_endio(bio, ret); 913 bio->bi_error = ret;
914 bio_endio(bio);
915 }
913 return ret; 916 return ret;
914} 917}
915 918
@@ -960,10 +963,13 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
960 __btree_submit_bio_done); 963 __btree_submit_bio_done);
961 } 964 }
962 965
963 if (ret) { 966 if (ret)
967 goto out_w_error;
968 return 0;
969
964out_w_error: 970out_w_error:
965 bio_endio(bio, ret); 971 bio->bi_error = ret;
966 } 972 bio_endio(bio);
967 return ret; 973 return ret;
968} 974}
969 975
@@ -1735,16 +1741,15 @@ static void end_workqueue_fn(struct btrfs_work *work)
1735{ 1741{
1736 struct bio *bio; 1742 struct bio *bio;
1737 struct btrfs_end_io_wq *end_io_wq; 1743 struct btrfs_end_io_wq *end_io_wq;
1738 int error;
1739 1744
1740 end_io_wq = container_of(work, struct btrfs_end_io_wq, work); 1745 end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1741 bio = end_io_wq->bio; 1746 bio = end_io_wq->bio;
1742 1747
1743 error = end_io_wq->error; 1748 bio->bi_error = end_io_wq->error;
1744 bio->bi_private = end_io_wq->private; 1749 bio->bi_private = end_io_wq->private;
1745 bio->bi_end_io = end_io_wq->end_io; 1750 bio->bi_end_io = end_io_wq->end_io;
1746 kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); 1751 kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1747 bio_endio(bio, error); 1752 bio_endio(bio);
1748} 1753}
1749 1754
1750static int cleaner_kthread(void *arg) 1755static int cleaner_kthread(void *arg)
@@ -3323,10 +3328,8 @@ static int write_dev_supers(struct btrfs_device *device,
3323 * endio for the write_dev_flush, this will wake anyone waiting 3328 * endio for the write_dev_flush, this will wake anyone waiting
3324 * for the barrier when it is done 3329 * for the barrier when it is done
3325 */ 3330 */
3326static void btrfs_end_empty_barrier(struct bio *bio, int err) 3331static void btrfs_end_empty_barrier(struct bio *bio)
3327{ 3332{
3328 if (err)
3329 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3330 if (bio->bi_private) 3333 if (bio->bi_private)
3331 complete(bio->bi_private); 3334 complete(bio->bi_private);
3332 bio_put(bio); 3335 bio_put(bio);
@@ -3354,8 +3357,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
3354 3357
3355 wait_for_completion(&device->flush_wait); 3358 wait_for_completion(&device->flush_wait);
3356 3359
3357 if (!bio_flagged(bio, BIO_UPTODATE)) { 3360 if (bio->bi_error) {
3358 ret = -EIO; 3361 ret = bio->bi_error;
3359 btrfs_dev_stat_inc_and_print(device, 3362 btrfs_dev_stat_inc_and_print(device,
3360 BTRFS_DEV_STAT_FLUSH_ERRS); 3363 BTRFS_DEV_STAT_FLUSH_ERRS);
3361 } 3364 }
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 02d05817cbdf..c22f175ed024 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2486,7 +2486,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2486 * Scheduling is not allowed, so the extent state tree is expected 2486 * Scheduling is not allowed, so the extent state tree is expected
2487 * to have one and only one object corresponding to this IO. 2487 * to have one and only one object corresponding to this IO.
2488 */ 2488 */
2489static void end_bio_extent_writepage(struct bio *bio, int err) 2489static void end_bio_extent_writepage(struct bio *bio)
2490{ 2490{
2491 struct bio_vec *bvec; 2491 struct bio_vec *bvec;
2492 u64 start; 2492 u64 start;
@@ -2516,7 +2516,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
2516 start = page_offset(page); 2516 start = page_offset(page);
2517 end = start + bvec->bv_offset + bvec->bv_len - 1; 2517 end = start + bvec->bv_offset + bvec->bv_len - 1;
2518 2518
2519 if (end_extent_writepage(page, err, start, end)) 2519 if (end_extent_writepage(page, bio->bi_error, start, end))
2520 continue; 2520 continue;
2521 2521
2522 end_page_writeback(page); 2522 end_page_writeback(page);
@@ -2548,10 +2548,10 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2548 * Scheduling is not allowed, so the extent state tree is expected 2548 * Scheduling is not allowed, so the extent state tree is expected
2549 * to have one and only one object corresponding to this IO. 2549 * to have one and only one object corresponding to this IO.
2550 */ 2550 */
2551static void end_bio_extent_readpage(struct bio *bio, int err) 2551static void end_bio_extent_readpage(struct bio *bio)
2552{ 2552{
2553 struct bio_vec *bvec; 2553 struct bio_vec *bvec;
2554 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 2554 int uptodate = !bio->bi_error;
2555 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 2555 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2556 struct extent_io_tree *tree; 2556 struct extent_io_tree *tree;
2557 u64 offset = 0; 2557 u64 offset = 0;
@@ -2564,16 +2564,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2564 int ret; 2564 int ret;
2565 int i; 2565 int i;
2566 2566
2567 if (err)
2568 uptodate = 0;
2569
2570 bio_for_each_segment_all(bvec, bio, i) { 2567 bio_for_each_segment_all(bvec, bio, i) {
2571 struct page *page = bvec->bv_page; 2568 struct page *page = bvec->bv_page;
2572 struct inode *inode = page->mapping->host; 2569 struct inode *inode = page->mapping->host;
2573 2570
2574 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2571 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2575 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err, 2572 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
2576 io_bio->mirror_num); 2573 bio->bi_error, io_bio->mirror_num);
2577 tree = &BTRFS_I(inode)->io_tree; 2574 tree = &BTRFS_I(inode)->io_tree;
2578 2575
2579 /* We always issue full-page reads, but if some block 2576 /* We always issue full-page reads, but if some block
@@ -2614,8 +2611,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2614 2611
2615 if (tree->ops && tree->ops->readpage_io_failed_hook) { 2612 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2616 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2613 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2617 if (!ret && !err && 2614 if (!ret && !bio->bi_error)
2618 test_bit(BIO_UPTODATE, &bio->bi_flags))
2619 uptodate = 1; 2615 uptodate = 1;
2620 } else { 2616 } else {
2621 /* 2617 /*
@@ -2631,10 +2627,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2631 ret = bio_readpage_error(bio, offset, page, start, end, 2627 ret = bio_readpage_error(bio, offset, page, start, end,
2632 mirror); 2628 mirror);
2633 if (ret == 0) { 2629 if (ret == 0) {
2634 uptodate = 2630 uptodate = !bio->bi_error;
2635 test_bit(BIO_UPTODATE, &bio->bi_flags);
2636 if (err)
2637 uptodate = 0;
2638 offset += len; 2631 offset += len;
2639 continue; 2632 continue;
2640 } 2633 }
@@ -2684,7 +2677,7 @@ readpage_ok:
2684 endio_readpage_release_extent(tree, extent_start, extent_len, 2677 endio_readpage_release_extent(tree, extent_start, extent_len,
2685 uptodate); 2678 uptodate);
2686 if (io_bio->end_io) 2679 if (io_bio->end_io)
2687 io_bio->end_io(io_bio, err); 2680 io_bio->end_io(io_bio, bio->bi_error);
2688 bio_put(bio); 2681 bio_put(bio);
2689} 2682}
2690 2683
@@ -3696,7 +3689,7 @@ static void set_btree_ioerr(struct page *page)
3696 } 3689 }
3697} 3690}
3698 3691
3699static void end_bio_extent_buffer_writepage(struct bio *bio, int err) 3692static void end_bio_extent_buffer_writepage(struct bio *bio)
3700{ 3693{
3701 struct bio_vec *bvec; 3694 struct bio_vec *bvec;
3702 struct extent_buffer *eb; 3695 struct extent_buffer *eb;
@@ -3709,7 +3702,8 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3709 BUG_ON(!eb); 3702 BUG_ON(!eb);
3710 done = atomic_dec_and_test(&eb->io_pages); 3703 done = atomic_dec_and_test(&eb->io_pages);
3711 3704
3712 if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { 3705 if (bio->bi_error ||
3706 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3713 ClearPageUptodate(page); 3707 ClearPageUptodate(page);
3714 set_btree_ioerr(page); 3708 set_btree_ioerr(page);
3715 } 3709 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b33c0cf02668..6b8becfe2057 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1845,8 +1845,10 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1845 int ret; 1845 int ret;
1846 1846
1847 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); 1847 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1848 if (ret) 1848 if (ret) {
1849 bio_endio(bio, ret); 1849 bio->bi_error = ret;
1850 bio_endio(bio);
1851 }
1850 return ret; 1852 return ret;
1851} 1853}
1852 1854
@@ -1906,8 +1908,10 @@ mapit:
1906 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 1908 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1907 1909
1908out: 1910out:
1909 if (ret < 0) 1911 if (ret < 0) {
1910 bio_endio(bio, ret); 1912 bio->bi_error = ret;
1913 bio_endio(bio);
1914 }
1911 return ret; 1915 return ret;
1912} 1916}
1913 1917
@@ -7689,13 +7693,13 @@ struct btrfs_retry_complete {
7689 int uptodate; 7693 int uptodate;
7690}; 7694};
7691 7695
7692static void btrfs_retry_endio_nocsum(struct bio *bio, int err) 7696static void btrfs_retry_endio_nocsum(struct bio *bio)
7693{ 7697{
7694 struct btrfs_retry_complete *done = bio->bi_private; 7698 struct btrfs_retry_complete *done = bio->bi_private;
7695 struct bio_vec *bvec; 7699 struct bio_vec *bvec;
7696 int i; 7700 int i;
7697 7701
7698 if (err) 7702 if (bio->bi_error)
7699 goto end; 7703 goto end;
7700 7704
7701 done->uptodate = 1; 7705 done->uptodate = 1;
@@ -7744,7 +7748,7 @@ try_again:
7744 return 0; 7748 return 0;
7745} 7749}
7746 7750
7747static void btrfs_retry_endio(struct bio *bio, int err) 7751static void btrfs_retry_endio(struct bio *bio)
7748{ 7752{
7749 struct btrfs_retry_complete *done = bio->bi_private; 7753 struct btrfs_retry_complete *done = bio->bi_private;
7750 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7754 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
@@ -7753,7 +7757,7 @@ static void btrfs_retry_endio(struct bio *bio, int err)
7753 int ret; 7757 int ret;
7754 int i; 7758 int i;
7755 7759
7756 if (err) 7760 if (bio->bi_error)
7757 goto end; 7761 goto end;
7758 7762
7759 uptodate = 1; 7763 uptodate = 1;
@@ -7836,12 +7840,13 @@ static int btrfs_subio_endio_read(struct inode *inode,
7836 } 7840 }
7837} 7841}
7838 7842
7839static void btrfs_endio_direct_read(struct bio *bio, int err) 7843static void btrfs_endio_direct_read(struct bio *bio)
7840{ 7844{
7841 struct btrfs_dio_private *dip = bio->bi_private; 7845 struct btrfs_dio_private *dip = bio->bi_private;
7842 struct inode *inode = dip->inode; 7846 struct inode *inode = dip->inode;
7843 struct bio *dio_bio; 7847 struct bio *dio_bio;
7844 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7848 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7849 int err = bio->bi_error;
7845 7850
7846 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 7851 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7847 err = btrfs_subio_endio_read(inode, io_bio, err); 7852 err = btrfs_subio_endio_read(inode, io_bio, err);
@@ -7852,17 +7857,14 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
7852 7857
7853 kfree(dip); 7858 kfree(dip);
7854 7859
7855 /* If we had a csum failure make sure to clear the uptodate flag */ 7860 dio_end_io(dio_bio, bio->bi_error);
7856 if (err)
7857 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7858 dio_end_io(dio_bio, err);
7859 7861
7860 if (io_bio->end_io) 7862 if (io_bio->end_io)
7861 io_bio->end_io(io_bio, err); 7863 io_bio->end_io(io_bio, err);
7862 bio_put(bio); 7864 bio_put(bio);
7863} 7865}
7864 7866
7865static void btrfs_endio_direct_write(struct bio *bio, int err) 7867static void btrfs_endio_direct_write(struct bio *bio)
7866{ 7868{
7867 struct btrfs_dio_private *dip = bio->bi_private; 7869 struct btrfs_dio_private *dip = bio->bi_private;
7868 struct inode *inode = dip->inode; 7870 struct inode *inode = dip->inode;
@@ -7876,7 +7878,8 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
7876again: 7878again:
7877 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 7879 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7878 &ordered_offset, 7880 &ordered_offset,
7879 ordered_bytes, !err); 7881 ordered_bytes,
7882 !bio->bi_error);
7880 if (!ret) 7883 if (!ret)
7881 goto out_test; 7884 goto out_test;
7882 7885
@@ -7899,10 +7902,7 @@ out_test:
7899 7902
7900 kfree(dip); 7903 kfree(dip);
7901 7904
7902 /* If we had an error make sure to clear the uptodate flag */ 7905 dio_end_io(dio_bio, bio->bi_error);
7903 if (err)
7904 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7905 dio_end_io(dio_bio, err);
7906 bio_put(bio); 7906 bio_put(bio);
7907} 7907}
7908 7908
@@ -7917,9 +7917,10 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7917 return 0; 7917 return 0;
7918} 7918}
7919 7919
7920static void btrfs_end_dio_bio(struct bio *bio, int err) 7920static void btrfs_end_dio_bio(struct bio *bio)
7921{ 7921{
7922 struct btrfs_dio_private *dip = bio->bi_private; 7922 struct btrfs_dio_private *dip = bio->bi_private;
7923 int err = bio->bi_error;
7923 7924
7924 if (err) 7925 if (err)
7925 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 7926 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@@ -7948,8 +7949,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
7948 if (dip->errors) { 7949 if (dip->errors) {
7949 bio_io_error(dip->orig_bio); 7950 bio_io_error(dip->orig_bio);
7950 } else { 7951 } else {
7951 set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); 7952 dip->dio_bio->bi_error = 0;
7952 bio_endio(dip->orig_bio, 0); 7953 bio_endio(dip->orig_bio);
7953 } 7954 }
7954out: 7955out:
7955 bio_put(bio); 7956 bio_put(bio);
@@ -8220,7 +8221,8 @@ free_ordered:
8220 * callbacks - they require an allocated dip and a clone of dio_bio. 8221 * callbacks - they require an allocated dip and a clone of dio_bio.
8221 */ 8222 */
8222 if (io_bio && dip) { 8223 if (io_bio && dip) {
8223 bio_endio(io_bio, ret); 8224 io_bio->bi_error = -EIO;
8225 bio_endio(io_bio);
8224 /* 8226 /*
8225 * The end io callbacks free our dip, do the final put on io_bio 8227 * The end io callbacks free our dip, do the final put on io_bio
8226 * and all the cleanup and final put for dio_bio (through 8228 * and all the cleanup and final put for dio_bio (through
@@ -8247,7 +8249,7 @@ free_ordered:
8247 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 8249 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8248 file_offset + dio_bio->bi_iter.bi_size - 1); 8250 file_offset + dio_bio->bi_iter.bi_size - 1);
8249 } 8251 }
8250 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); 8252 dio_bio->bi_error = -EIO;
8251 /* 8253 /*
8252 * Releases and cleans up our dio_bio, no need to bio_put() 8254 * Releases and cleans up our dio_bio, no need to bio_put()
8253 * nor bio_endio()/bio_io_error() against dio_bio. 8255 * nor bio_endio()/bio_io_error() against dio_bio.
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index fa72068bd256..0a02e24900aa 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -851,7 +851,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
851 * this frees the rbio and runs through all the bios in the 851 * this frees the rbio and runs through all the bios in the
852 * bio_list and calls end_io on them 852 * bio_list and calls end_io on them
853 */ 853 */
854static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) 854static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
855{ 855{
856 struct bio *cur = bio_list_get(&rbio->bio_list); 856 struct bio *cur = bio_list_get(&rbio->bio_list);
857 struct bio *next; 857 struct bio *next;
@@ -864,9 +864,8 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
864 while (cur) { 864 while (cur) {
865 next = cur->bi_next; 865 next = cur->bi_next;
866 cur->bi_next = NULL; 866 cur->bi_next = NULL;
867 if (uptodate) 867 cur->bi_error = err;
868 set_bit(BIO_UPTODATE, &cur->bi_flags); 868 bio_endio(cur);
869 bio_endio(cur, err);
870 cur = next; 869 cur = next;
871 } 870 }
872} 871}
@@ -875,9 +874,10 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
875 * end io function used by finish_rmw. When we finally 874 * end io function used by finish_rmw. When we finally
876 * get here, we've written a full stripe 875 * get here, we've written a full stripe
877 */ 876 */
878static void raid_write_end_io(struct bio *bio, int err) 877static void raid_write_end_io(struct bio *bio)
879{ 878{
880 struct btrfs_raid_bio *rbio = bio->bi_private; 879 struct btrfs_raid_bio *rbio = bio->bi_private;
880 int err = bio->bi_error;
881 881
882 if (err) 882 if (err)
883 fail_bio_stripe(rbio, bio); 883 fail_bio_stripe(rbio, bio);
@@ -893,7 +893,7 @@ static void raid_write_end_io(struct bio *bio, int err)
893 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 893 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
894 err = -EIO; 894 err = -EIO;
895 895
896 rbio_orig_end_io(rbio, err, 0); 896 rbio_orig_end_io(rbio, err);
897 return; 897 return;
898} 898}
899 899
@@ -1071,7 +1071,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1071 * devices or if they are not contiguous 1071 * devices or if they are not contiguous
1072 */ 1072 */
1073 if (last_end == disk_start && stripe->dev->bdev && 1073 if (last_end == disk_start && stripe->dev->bdev &&
1074 test_bit(BIO_UPTODATE, &last->bi_flags) && 1074 !last->bi_error &&
1075 last->bi_bdev == stripe->dev->bdev) { 1075 last->bi_bdev == stripe->dev->bdev) {
1076 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 1076 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
1077 if (ret == PAGE_CACHE_SIZE) 1077 if (ret == PAGE_CACHE_SIZE)
@@ -1087,7 +1087,6 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1087 bio->bi_iter.bi_size = 0; 1087 bio->bi_iter.bi_size = 0;
1088 bio->bi_bdev = stripe->dev->bdev; 1088 bio->bi_bdev = stripe->dev->bdev;
1089 bio->bi_iter.bi_sector = disk_start >> 9; 1089 bio->bi_iter.bi_sector = disk_start >> 9;
1090 set_bit(BIO_UPTODATE, &bio->bi_flags);
1091 1090
1092 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1091 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
1093 bio_list_add(bio_list, bio); 1092 bio_list_add(bio_list, bio);
@@ -1312,13 +1311,12 @@ write_data:
1312 1311
1313 bio->bi_private = rbio; 1312 bio->bi_private = rbio;
1314 bio->bi_end_io = raid_write_end_io; 1313 bio->bi_end_io = raid_write_end_io;
1315 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1316 submit_bio(WRITE, bio); 1314 submit_bio(WRITE, bio);
1317 } 1315 }
1318 return; 1316 return;
1319 1317
1320cleanup: 1318cleanup:
1321 rbio_orig_end_io(rbio, -EIO, 0); 1319 rbio_orig_end_io(rbio, -EIO);
1322} 1320}
1323 1321
1324/* 1322/*
@@ -1441,11 +1439,11 @@ static void set_bio_pages_uptodate(struct bio *bio)
1441 * This will usually kick off finish_rmw once all the bios are read in, but it 1439 * This will usually kick off finish_rmw once all the bios are read in, but it
1442 * may trigger parity reconstruction if we had any errors along the way 1440 * may trigger parity reconstruction if we had any errors along the way
1443 */ 1441 */
1444static void raid_rmw_end_io(struct bio *bio, int err) 1442static void raid_rmw_end_io(struct bio *bio)
1445{ 1443{
1446 struct btrfs_raid_bio *rbio = bio->bi_private; 1444 struct btrfs_raid_bio *rbio = bio->bi_private;
1447 1445
1448 if (err) 1446 if (bio->bi_error)
1449 fail_bio_stripe(rbio, bio); 1447 fail_bio_stripe(rbio, bio);
1450 else 1448 else
1451 set_bio_pages_uptodate(bio); 1449 set_bio_pages_uptodate(bio);
@@ -1455,7 +1453,6 @@ static void raid_rmw_end_io(struct bio *bio, int err)
1455 if (!atomic_dec_and_test(&rbio->stripes_pending)) 1453 if (!atomic_dec_and_test(&rbio->stripes_pending))
1456 return; 1454 return;
1457 1455
1458 err = 0;
1459 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 1456 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1460 goto cleanup; 1457 goto cleanup;
1461 1458
@@ -1469,7 +1466,7 @@ static void raid_rmw_end_io(struct bio *bio, int err)
1469 1466
1470cleanup: 1467cleanup:
1471 1468
1472 rbio_orig_end_io(rbio, -EIO, 0); 1469 rbio_orig_end_io(rbio, -EIO);
1473} 1470}
1474 1471
1475static void async_rmw_stripe(struct btrfs_raid_bio *rbio) 1472static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
@@ -1572,14 +1569,13 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1572 btrfs_bio_wq_end_io(rbio->fs_info, bio, 1569 btrfs_bio_wq_end_io(rbio->fs_info, bio,
1573 BTRFS_WQ_ENDIO_RAID56); 1570 BTRFS_WQ_ENDIO_RAID56);
1574 1571
1575 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1576 submit_bio(READ, bio); 1572 submit_bio(READ, bio);
1577 } 1573 }
1578 /* the actual write will happen once the reads are done */ 1574 /* the actual write will happen once the reads are done */
1579 return 0; 1575 return 0;
1580 1576
1581cleanup: 1577cleanup:
1582 rbio_orig_end_io(rbio, -EIO, 0); 1578 rbio_orig_end_io(rbio, -EIO);
1583 return -EIO; 1579 return -EIO;
1584 1580
1585finish: 1581finish:
@@ -1964,7 +1960,7 @@ cleanup_io:
1964 else 1960 else
1965 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 1961 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1966 1962
1967 rbio_orig_end_io(rbio, err, err == 0); 1963 rbio_orig_end_io(rbio, err);
1968 } else if (err == 0) { 1964 } else if (err == 0) {
1969 rbio->faila = -1; 1965 rbio->faila = -1;
1970 rbio->failb = -1; 1966 rbio->failb = -1;
@@ -1976,7 +1972,7 @@ cleanup_io:
1976 else 1972 else
1977 BUG(); 1973 BUG();
1978 } else { 1974 } else {
1979 rbio_orig_end_io(rbio, err, 0); 1975 rbio_orig_end_io(rbio, err);
1980 } 1976 }
1981} 1977}
1982 1978
@@ -1984,7 +1980,7 @@ cleanup_io:
1984 * This is called only for stripes we've read from disk to 1980 * This is called only for stripes we've read from disk to
1985 * reconstruct the parity. 1981 * reconstruct the parity.
1986 */ 1982 */
1987static void raid_recover_end_io(struct bio *bio, int err) 1983static void raid_recover_end_io(struct bio *bio)
1988{ 1984{
1989 struct btrfs_raid_bio *rbio = bio->bi_private; 1985 struct btrfs_raid_bio *rbio = bio->bi_private;
1990 1986
@@ -1992,7 +1988,7 @@ static void raid_recover_end_io(struct bio *bio, int err)
1992 * we only read stripe pages off the disk, set them 1988 * we only read stripe pages off the disk, set them
1993 * up to date if there were no errors 1989 * up to date if there were no errors
1994 */ 1990 */
1995 if (err) 1991 if (bio->bi_error)
1996 fail_bio_stripe(rbio, bio); 1992 fail_bio_stripe(rbio, bio);
1997 else 1993 else
1998 set_bio_pages_uptodate(bio); 1994 set_bio_pages_uptodate(bio);
@@ -2002,7 +1998,7 @@ static void raid_recover_end_io(struct bio *bio, int err)
2002 return; 1998 return;
2003 1999
2004 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 2000 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2005 rbio_orig_end_io(rbio, -EIO, 0); 2001 rbio_orig_end_io(rbio, -EIO);
2006 else 2002 else
2007 __raid_recover_end_io(rbio); 2003 __raid_recover_end_io(rbio);
2008} 2004}
@@ -2094,7 +2090,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2094 btrfs_bio_wq_end_io(rbio->fs_info, bio, 2090 btrfs_bio_wq_end_io(rbio->fs_info, bio,
2095 BTRFS_WQ_ENDIO_RAID56); 2091 BTRFS_WQ_ENDIO_RAID56);
2096 2092
2097 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2098 submit_bio(READ, bio); 2093 submit_bio(READ, bio);
2099 } 2094 }
2100out: 2095out:
@@ -2102,7 +2097,7 @@ out:
2102 2097
2103cleanup: 2098cleanup:
2104 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) 2099 if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
2105 rbio_orig_end_io(rbio, -EIO, 0); 2100 rbio_orig_end_io(rbio, -EIO);
2106 return -EIO; 2101 return -EIO;
2107} 2102}
2108 2103
@@ -2277,11 +2272,12 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2277 * end io function used by finish_rmw. When we finally 2272 * end io function used by finish_rmw. When we finally
2278 * get here, we've written a full stripe 2273 * get here, we've written a full stripe
2279 */ 2274 */
2280static void raid_write_parity_end_io(struct bio *bio, int err) 2275static void raid_write_parity_end_io(struct bio *bio)
2281{ 2276{
2282 struct btrfs_raid_bio *rbio = bio->bi_private; 2277 struct btrfs_raid_bio *rbio = bio->bi_private;
2278 int err = bio->bi_error;
2283 2279
2284 if (err) 2280 if (bio->bi_error)
2285 fail_bio_stripe(rbio, bio); 2281 fail_bio_stripe(rbio, bio);
2286 2282
2287 bio_put(bio); 2283 bio_put(bio);
@@ -2294,7 +2290,7 @@ static void raid_write_parity_end_io(struct bio *bio, int err)
2294 if (atomic_read(&rbio->error)) 2290 if (atomic_read(&rbio->error))
2295 err = -EIO; 2291 err = -EIO;
2296 2292
2297 rbio_orig_end_io(rbio, err, 0); 2293 rbio_orig_end_io(rbio, err);
2298} 2294}
2299 2295
2300static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 2296static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
@@ -2437,7 +2433,7 @@ submit_write:
2437 nr_data = bio_list_size(&bio_list); 2433 nr_data = bio_list_size(&bio_list);
2438 if (!nr_data) { 2434 if (!nr_data) {
2439 /* Every parity is right */ 2435 /* Every parity is right */
2440 rbio_orig_end_io(rbio, 0, 0); 2436 rbio_orig_end_io(rbio, 0);
2441 return; 2437 return;
2442 } 2438 }
2443 2439
@@ -2450,13 +2446,12 @@ submit_write:
2450 2446
2451 bio->bi_private = rbio; 2447 bio->bi_private = rbio;
2452 bio->bi_end_io = raid_write_parity_end_io; 2448 bio->bi_end_io = raid_write_parity_end_io;
2453 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2454 submit_bio(WRITE, bio); 2449 submit_bio(WRITE, bio);
2455 } 2450 }
2456 return; 2451 return;
2457 2452
2458cleanup: 2453cleanup:
2459 rbio_orig_end_io(rbio, -EIO, 0); 2454 rbio_orig_end_io(rbio, -EIO);
2460} 2455}
2461 2456
2462static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) 2457static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2524,7 +2519,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2524 return; 2519 return;
2525 2520
2526cleanup: 2521cleanup:
2527 rbio_orig_end_io(rbio, -EIO, 0); 2522 rbio_orig_end_io(rbio, -EIO);
2528} 2523}
2529 2524
2530/* 2525/*
@@ -2535,11 +2530,11 @@ cleanup:
2535 * This will usually kick off finish_rmw once all the bios are read in, but it 2530 * This will usually kick off finish_rmw once all the bios are read in, but it
2536 * may trigger parity reconstruction if we had any errors along the way 2531 * may trigger parity reconstruction if we had any errors along the way
2537 */ 2532 */
2538static void raid56_parity_scrub_end_io(struct bio *bio, int err) 2533static void raid56_parity_scrub_end_io(struct bio *bio)
2539{ 2534{
2540 struct btrfs_raid_bio *rbio = bio->bi_private; 2535 struct btrfs_raid_bio *rbio = bio->bi_private;
2541 2536
2542 if (err) 2537 if (bio->bi_error)
2543 fail_bio_stripe(rbio, bio); 2538 fail_bio_stripe(rbio, bio);
2544 else 2539 else
2545 set_bio_pages_uptodate(bio); 2540 set_bio_pages_uptodate(bio);
@@ -2632,14 +2627,13 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2632 btrfs_bio_wq_end_io(rbio->fs_info, bio, 2627 btrfs_bio_wq_end_io(rbio->fs_info, bio,
2633 BTRFS_WQ_ENDIO_RAID56); 2628 BTRFS_WQ_ENDIO_RAID56);
2634 2629
2635 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2636 submit_bio(READ, bio); 2630 submit_bio(READ, bio);
2637 } 2631 }
2638 /* the actual write will happen once the reads are done */ 2632 /* the actual write will happen once the reads are done */
2639 return; 2633 return;
2640 2634
2641cleanup: 2635cleanup:
2642 rbio_orig_end_io(rbio, -EIO, 0); 2636 rbio_orig_end_io(rbio, -EIO);
2643 return; 2637 return;
2644 2638
2645finish: 2639finish:
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 94db0fa5225a..ebb8260186fe 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -278,7 +278,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
278 u64 physical, struct btrfs_device *dev, u64 flags, 278 u64 physical, struct btrfs_device *dev, u64 flags,
279 u64 gen, int mirror_num, u8 *csum, int force, 279 u64 gen, int mirror_num, u8 *csum, int force,
280 u64 physical_for_dev_replace); 280 u64 physical_for_dev_replace);
281static void scrub_bio_end_io(struct bio *bio, int err); 281static void scrub_bio_end_io(struct bio *bio);
282static void scrub_bio_end_io_worker(struct btrfs_work *work); 282static void scrub_bio_end_io_worker(struct btrfs_work *work);
283static void scrub_block_complete(struct scrub_block *sblock); 283static void scrub_block_complete(struct scrub_block *sblock);
284static void scrub_remap_extent(struct btrfs_fs_info *fs_info, 284static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
@@ -295,7 +295,7 @@ static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
295static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, 295static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
296 struct scrub_page *spage); 296 struct scrub_page *spage);
297static void scrub_wr_submit(struct scrub_ctx *sctx); 297static void scrub_wr_submit(struct scrub_ctx *sctx);
298static void scrub_wr_bio_end_io(struct bio *bio, int err); 298static void scrub_wr_bio_end_io(struct bio *bio);
299static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); 299static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
300static int write_page_nocow(struct scrub_ctx *sctx, 300static int write_page_nocow(struct scrub_ctx *sctx,
301 u64 physical_for_dev_replace, struct page *page); 301 u64 physical_for_dev_replace, struct page *page);
@@ -1429,11 +1429,11 @@ struct scrub_bio_ret {
1429 int error; 1429 int error;
1430}; 1430};
1431 1431
1432static void scrub_bio_wait_endio(struct bio *bio, int error) 1432static void scrub_bio_wait_endio(struct bio *bio)
1433{ 1433{
1434 struct scrub_bio_ret *ret = bio->bi_private; 1434 struct scrub_bio_ret *ret = bio->bi_private;
1435 1435
1436 ret->error = error; 1436 ret->error = bio->bi_error;
1437 complete(&ret->event); 1437 complete(&ret->event);
1438} 1438}
1439 1439
@@ -1790,12 +1790,12 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
1790 btrfsic_submit_bio(WRITE, sbio->bio); 1790 btrfsic_submit_bio(WRITE, sbio->bio);
1791} 1791}
1792 1792
1793static void scrub_wr_bio_end_io(struct bio *bio, int err) 1793static void scrub_wr_bio_end_io(struct bio *bio)
1794{ 1794{
1795 struct scrub_bio *sbio = bio->bi_private; 1795 struct scrub_bio *sbio = bio->bi_private;
1796 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info; 1796 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1797 1797
1798 sbio->err = err; 1798 sbio->err = bio->bi_error;
1799 sbio->bio = bio; 1799 sbio->bio = bio;
1800 1800
1801 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, 1801 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2098,7 +2098,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
2098 */ 2098 */
2099 printk_ratelimited(KERN_WARNING 2099 printk_ratelimited(KERN_WARNING
2100 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n"); 2100 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2101 bio_endio(sbio->bio, -EIO); 2101 bio_io_error(sbio->bio);
2102 } else { 2102 } else {
2103 btrfsic_submit_bio(READ, sbio->bio); 2103 btrfsic_submit_bio(READ, sbio->bio);
2104 } 2104 }
@@ -2260,12 +2260,12 @@ leave_nomem:
2260 return 0; 2260 return 0;
2261} 2261}
2262 2262
2263static void scrub_bio_end_io(struct bio *bio, int err) 2263static void scrub_bio_end_io(struct bio *bio)
2264{ 2264{
2265 struct scrub_bio *sbio = bio->bi_private; 2265 struct scrub_bio *sbio = bio->bi_private;
2266 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info; 2266 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2267 2267
2268 sbio->err = err; 2268 sbio->err = bio->bi_error;
2269 sbio->bio = bio; 2269 sbio->bio = bio;
2270 2270
2271 btrfs_queue_work(fs_info->scrub_workers, &sbio->work); 2271 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2672,11 +2672,11 @@ static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2672 scrub_pending_bio_dec(sctx); 2672 scrub_pending_bio_dec(sctx);
2673} 2673}
2674 2674
2675static void scrub_parity_bio_endio(struct bio *bio, int error) 2675static void scrub_parity_bio_endio(struct bio *bio)
2676{ 2676{
2677 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; 2677 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2678 2678
2679 if (error) 2679 if (bio->bi_error)
2680 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, 2680 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2681 sparity->nsectors); 2681 sparity->nsectors);
2682 2682
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index fbe7c104531c..8f2ca18c71f4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5741,23 +5741,23 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5741 return 0; 5741 return 0;
5742} 5742}
5743 5743
5744static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) 5744static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5745{ 5745{
5746 bio->bi_private = bbio->private; 5746 bio->bi_private = bbio->private;
5747 bio->bi_end_io = bbio->end_io; 5747 bio->bi_end_io = bbio->end_io;
5748 bio_endio(bio, err); 5748 bio_endio(bio);
5749 5749
5750 btrfs_put_bbio(bbio); 5750 btrfs_put_bbio(bbio);
5751} 5751}
5752 5752
5753static void btrfs_end_bio(struct bio *bio, int err) 5753static void btrfs_end_bio(struct bio *bio)
5754{ 5754{
5755 struct btrfs_bio *bbio = bio->bi_private; 5755 struct btrfs_bio *bbio = bio->bi_private;
5756 int is_orig_bio = 0; 5756 int is_orig_bio = 0;
5757 5757
5758 if (err) { 5758 if (bio->bi_error) {
5759 atomic_inc(&bbio->error); 5759 atomic_inc(&bbio->error);
5760 if (err == -EIO || err == -EREMOTEIO) { 5760 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5761 unsigned int stripe_index = 5761 unsigned int stripe_index =
5762 btrfs_io_bio(bio)->stripe_index; 5762 btrfs_io_bio(bio)->stripe_index;
5763 struct btrfs_device *dev; 5763 struct btrfs_device *dev;
@@ -5795,17 +5795,16 @@ static void btrfs_end_bio(struct bio *bio, int err)
5795 * beyond the tolerance of the btrfs bio 5795 * beyond the tolerance of the btrfs bio
5796 */ 5796 */
5797 if (atomic_read(&bbio->error) > bbio->max_errors) { 5797 if (atomic_read(&bbio->error) > bbio->max_errors) {
5798 err = -EIO; 5798 bio->bi_error = -EIO;
5799 } else { 5799 } else {
5800 /* 5800 /*
5801 * this bio is actually up to date, we didn't 5801 * this bio is actually up to date, we didn't
5802 * go over the max number of errors 5802 * go over the max number of errors
5803 */ 5803 */
5804 set_bit(BIO_UPTODATE, &bio->bi_flags); 5804 bio->bi_error = 0;
5805 err = 0;
5806 } 5805 }
5807 5806
5808 btrfs_end_bbio(bbio, bio, err); 5807 btrfs_end_bbio(bbio, bio);
5809 } else if (!is_orig_bio) { 5808 } else if (!is_orig_bio) {
5810 bio_put(bio); 5809 bio_put(bio);
5811 } 5810 }
@@ -5826,7 +5825,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5826 struct btrfs_pending_bios *pending_bios; 5825 struct btrfs_pending_bios *pending_bios;
5827 5826
5828 if (device->missing || !device->bdev) { 5827 if (device->missing || !device->bdev) {
5829 bio_endio(bio, -EIO); 5828 bio_io_error(bio);
5830 return; 5829 return;
5831 } 5830 }
5832 5831
@@ -5973,8 +5972,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5973 5972
5974 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5973 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5975 bio->bi_iter.bi_sector = logical >> 9; 5974 bio->bi_iter.bi_sector = logical >> 9;
5976 5975 bio->bi_error = -EIO;
5977 btrfs_end_bbio(bbio, bio, -EIO); 5976 btrfs_end_bbio(bbio, bio);
5978 } 5977 }
5979} 5978}
5980 5979
diff --git a/fs/buffer.c b/fs/buffer.c
index 1cf7a53a0277..7a49bb84ecb5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2957,14 +2957,14 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2957} 2957}
2958EXPORT_SYMBOL(generic_block_bmap); 2958EXPORT_SYMBOL(generic_block_bmap);
2959 2959
2960static void end_bio_bh_io_sync(struct bio *bio, int err) 2960static void end_bio_bh_io_sync(struct bio *bio)
2961{ 2961{
2962 struct buffer_head *bh = bio->bi_private; 2962 struct buffer_head *bh = bio->bi_private;
2963 2963
2964 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) 2964 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2965 set_bit(BH_Quiet, &bh->b_state); 2965 set_bit(BH_Quiet, &bh->b_state);
2966 2966
2967 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 2967 bh->b_end_io(bh, !bio->bi_error);
2968 bio_put(bio); 2968 bio_put(bio);
2969} 2969}
2970 2970
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 745d2342651a..e1639c8c14d5 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -285,7 +285,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
285/* 285/*
286 * Asynchronous IO callback. 286 * Asynchronous IO callback.
287 */ 287 */
288static void dio_bio_end_aio(struct bio *bio, int error) 288static void dio_bio_end_aio(struct bio *bio)
289{ 289{
290 struct dio *dio = bio->bi_private; 290 struct dio *dio = bio->bi_private;
291 unsigned long remaining; 291 unsigned long remaining;
@@ -318,7 +318,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
318 * During I/O bi_private points at the dio. After I/O, bi_private is used to 318 * During I/O bi_private points at the dio. After I/O, bi_private is used to
319 * implement a singly-linked list of completed BIOs, at dio->bio_list. 319 * implement a singly-linked list of completed BIOs, at dio->bio_list.
320 */ 320 */
321static void dio_bio_end_io(struct bio *bio, int error) 321static void dio_bio_end_io(struct bio *bio)
322{ 322{
323 struct dio *dio = bio->bi_private; 323 struct dio *dio = bio->bi_private;
324 unsigned long flags; 324 unsigned long flags;
@@ -345,9 +345,9 @@ void dio_end_io(struct bio *bio, int error)
345 struct dio *dio = bio->bi_private; 345 struct dio *dio = bio->bi_private;
346 346
347 if (dio->is_async) 347 if (dio->is_async)
348 dio_bio_end_aio(bio, error); 348 dio_bio_end_aio(bio);
349 else 349 else
350 dio_bio_end_io(bio, error); 350 dio_bio_end_io(bio);
351} 351}
352EXPORT_SYMBOL_GPL(dio_end_io); 352EXPORT_SYMBOL_GPL(dio_end_io);
353 353
@@ -457,11 +457,10 @@ static struct bio *dio_await_one(struct dio *dio)
457 */ 457 */
458static int dio_bio_complete(struct dio *dio, struct bio *bio) 458static int dio_bio_complete(struct dio *dio, struct bio *bio)
459{ 459{
460 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
461 struct bio_vec *bvec; 460 struct bio_vec *bvec;
462 unsigned i; 461 unsigned i;
463 462
464 if (!uptodate) 463 if (bio->bi_error)
465 dio->io_error = -EIO; 464 dio->io_error = -EIO;
466 465
467 if (dio->is_async && dio->rw == READ) { 466 if (dio->is_async && dio->rw == READ) {
@@ -476,7 +475,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
476 } 475 }
477 bio_put(bio); 476 bio_put(bio);
478 } 477 }
479 return uptodate ? 0 : -EIO; 478 return bio->bi_error;
480} 479}
481 480
482/* 481/*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5602450f03f6..aa95566f14be 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -61,7 +61,6 @@ static void buffer_io_error(struct buffer_head *bh)
61static void ext4_finish_bio(struct bio *bio) 61static void ext4_finish_bio(struct bio *bio)
62{ 62{
63 int i; 63 int i;
64 int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
65 struct bio_vec *bvec; 64 struct bio_vec *bvec;
66 65
67 bio_for_each_segment_all(bvec, bio, i) { 66 bio_for_each_segment_all(bvec, bio, i) {
@@ -88,7 +87,7 @@ static void ext4_finish_bio(struct bio *bio)
88 } 87 }
89#endif 88#endif
90 89
91 if (error) { 90 if (bio->bi_error) {
92 SetPageError(page); 91 SetPageError(page);
93 set_bit(AS_EIO, &page->mapping->flags); 92 set_bit(AS_EIO, &page->mapping->flags);
94 } 93 }
@@ -107,7 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
107 continue; 106 continue;
108 } 107 }
109 clear_buffer_async_write(bh); 108 clear_buffer_async_write(bh);
110 if (error) 109 if (bio->bi_error)
111 buffer_io_error(bh); 110 buffer_io_error(bh);
112 } while ((bh = bh->b_this_page) != head); 111 } while ((bh = bh->b_this_page) != head);
113 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); 112 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -310,27 +309,25 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
310} 309}
311 310
312/* BIO completion function for page writeback */ 311/* BIO completion function for page writeback */
313static void ext4_end_bio(struct bio *bio, int error) 312static void ext4_end_bio(struct bio *bio)
314{ 313{
315 ext4_io_end_t *io_end = bio->bi_private; 314 ext4_io_end_t *io_end = bio->bi_private;
316 sector_t bi_sector = bio->bi_iter.bi_sector; 315 sector_t bi_sector = bio->bi_iter.bi_sector;
317 316
318 BUG_ON(!io_end); 317 BUG_ON(!io_end);
319 bio->bi_end_io = NULL; 318 bio->bi_end_io = NULL;
320 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
321 error = 0;
322 319
323 if (error) { 320 if (bio->bi_error) {
324 struct inode *inode = io_end->inode; 321 struct inode *inode = io_end->inode;
325 322
326 ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu " 323 ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
327 "(offset %llu size %ld starting block %llu)", 324 "(offset %llu size %ld starting block %llu)",
328 error, inode->i_ino, 325 bio->bi_error, inode->i_ino,
329 (unsigned long long) io_end->offset, 326 (unsigned long long) io_end->offset,
330 (long) io_end->size, 327 (long) io_end->size,
331 (unsigned long long) 328 (unsigned long long)
332 bi_sector >> (inode->i_blkbits - 9)); 329 bi_sector >> (inode->i_blkbits - 9));
333 mapping_set_error(inode->i_mapping, error); 330 mapping_set_error(inode->i_mapping, bio->bi_error);
334 } 331 }
335 332
336 if (io_end->flag & EXT4_IO_END_UNWRITTEN) { 333 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index ec3ef93a52db..5de5b871c178 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -98,7 +98,7 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
98 * status of that page is hard. See end_buffer_async_read() for the details. 98 * status of that page is hard. See end_buffer_async_read() for the details.
99 * There is no point in duplicating all that complexity. 99 * There is no point in duplicating all that complexity.
100 */ 100 */
101static void mpage_end_io(struct bio *bio, int err) 101static void mpage_end_io(struct bio *bio)
102{ 102{
103 struct bio_vec *bv; 103 struct bio_vec *bv;
104 int i; 104 int i;
@@ -106,7 +106,7 @@ static void mpage_end_io(struct bio *bio, int err)
106 if (ext4_bio_encrypted(bio)) { 106 if (ext4_bio_encrypted(bio)) {
107 struct ext4_crypto_ctx *ctx = bio->bi_private; 107 struct ext4_crypto_ctx *ctx = bio->bi_private;
108 108
109 if (err) { 109 if (bio->bi_error) {
110 ext4_release_crypto_ctx(ctx); 110 ext4_release_crypto_ctx(ctx);
111 } else { 111 } else {
112 INIT_WORK(&ctx->r.work, completion_pages); 112 INIT_WORK(&ctx->r.work, completion_pages);
@@ -118,7 +118,7 @@ static void mpage_end_io(struct bio *bio, int err)
118 bio_for_each_segment_all(bv, bio, i) { 118 bio_for_each_segment_all(bv, bio, i) {
119 struct page *page = bv->bv_page; 119 struct page *page = bv->bv_page;
120 120
121 if (!err) { 121 if (!bio->bi_error) {
122 SetPageUptodate(page); 122 SetPageUptodate(page);
123 } else { 123 } else {
124 ClearPageUptodate(page); 124 ClearPageUptodate(page);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..8f0baa7ffb50 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -29,13 +29,13 @@
29static struct kmem_cache *extent_tree_slab; 29static struct kmem_cache *extent_tree_slab;
30static struct kmem_cache *extent_node_slab; 30static struct kmem_cache *extent_node_slab;
31 31
32static void f2fs_read_end_io(struct bio *bio, int err) 32static void f2fs_read_end_io(struct bio *bio)
33{ 33{
34 struct bio_vec *bvec; 34 struct bio_vec *bvec;
35 int i; 35 int i;
36 36
37 if (f2fs_bio_encrypted(bio)) { 37 if (f2fs_bio_encrypted(bio)) {
38 if (err) { 38 if (bio->bi_error) {
39 f2fs_release_crypto_ctx(bio->bi_private); 39 f2fs_release_crypto_ctx(bio->bi_private);
40 } else { 40 } else {
41 f2fs_end_io_crypto_work(bio->bi_private, bio); 41 f2fs_end_io_crypto_work(bio->bi_private, bio);
@@ -46,7 +46,7 @@ static void f2fs_read_end_io(struct bio *bio, int err)
46 bio_for_each_segment_all(bvec, bio, i) { 46 bio_for_each_segment_all(bvec, bio, i) {
47 struct page *page = bvec->bv_page; 47 struct page *page = bvec->bv_page;
48 48
49 if (!err) { 49 if (!bio->bi_error) {
50 SetPageUptodate(page); 50 SetPageUptodate(page);
51 } else { 51 } else {
52 ClearPageUptodate(page); 52 ClearPageUptodate(page);
@@ -57,7 +57,7 @@ static void f2fs_read_end_io(struct bio *bio, int err)
57 bio_put(bio); 57 bio_put(bio);
58} 58}
59 59
60static void f2fs_write_end_io(struct bio *bio, int err) 60static void f2fs_write_end_io(struct bio *bio)
61{ 61{
62 struct f2fs_sb_info *sbi = bio->bi_private; 62 struct f2fs_sb_info *sbi = bio->bi_private;
63 struct bio_vec *bvec; 63 struct bio_vec *bvec;
@@ -68,7 +68,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
68 68
69 f2fs_restore_and_release_control_page(&page); 69 f2fs_restore_and_release_control_page(&page);
70 70
71 if (unlikely(err)) { 71 if (unlikely(bio->bi_error)) {
72 set_page_dirty(page); 72 set_page_dirty(page);
73 set_bit(AS_EIO, &page->mapping->flags); 73 set_bit(AS_EIO, &page->mapping->flags);
74 f2fs_stop_checkpoint(sbi); 74 f2fs_stop_checkpoint(sbi);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 2c1ae861dc94..c0a1b967deba 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -202,22 +202,22 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
202 * 202 *
203 */ 203 */
204 204
205static void gfs2_end_log_write(struct bio *bio, int error) 205static void gfs2_end_log_write(struct bio *bio)
206{ 206{
207 struct gfs2_sbd *sdp = bio->bi_private; 207 struct gfs2_sbd *sdp = bio->bi_private;
208 struct bio_vec *bvec; 208 struct bio_vec *bvec;
209 struct page *page; 209 struct page *page;
210 int i; 210 int i;
211 211
212 if (error) { 212 if (bio->bi_error) {
213 sdp->sd_log_error = error; 213 sdp->sd_log_error = bio->bi_error;
214 fs_err(sdp, "Error %d writing to log\n", error); 214 fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
215 } 215 }
216 216
217 bio_for_each_segment_all(bvec, bio, i) { 217 bio_for_each_segment_all(bvec, bio, i) {
218 page = bvec->bv_page; 218 page = bvec->bv_page;
219 if (page_has_buffers(page)) 219 if (page_has_buffers(page))
220 gfs2_end_log_write_bh(sdp, bvec, error); 220 gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
221 else 221 else
222 mempool_free(page, gfs2_page_pool); 222 mempool_free(page, gfs2_page_pool);
223 } 223 }
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1e3a93f2f71d..02586e7eb964 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -171,14 +171,14 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
171 return -EINVAL; 171 return -EINVAL;
172} 172}
173 173
174static void end_bio_io_page(struct bio *bio, int error) 174static void end_bio_io_page(struct bio *bio)
175{ 175{
176 struct page *page = bio->bi_private; 176 struct page *page = bio->bi_private;
177 177
178 if (!error) 178 if (!bio->bi_error)
179 SetPageUptodate(page); 179 SetPageUptodate(page);
180 else 180 else
181 pr_warn("error %d reading superblock\n", error); 181 pr_warn("error %d reading superblock\n", bio->bi_error);
182 unlock_page(page); 182 unlock_page(page);
183} 183}
184 184
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index bc462dcd7a40..d301acfdb80d 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2011,7 +2011,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
2011 /*check if journaling to disk has been disabled*/ 2011 /*check if journaling to disk has been disabled*/
2012 if (log->no_integrity) { 2012 if (log->no_integrity) {
2013 bio->bi_iter.bi_size = 0; 2013 bio->bi_iter.bi_size = 0;
2014 lbmIODone(bio, 0); 2014 lbmIODone(bio);
2015 } else { 2015 } else {
2016 submit_bio(READ_SYNC, bio); 2016 submit_bio(READ_SYNC, bio);
2017 } 2017 }
@@ -2158,7 +2158,7 @@ static void lbmStartIO(struct lbuf * bp)
2158 /* check if journaling to disk has been disabled */ 2158 /* check if journaling to disk has been disabled */
2159 if (log->no_integrity) { 2159 if (log->no_integrity) {
2160 bio->bi_iter.bi_size = 0; 2160 bio->bi_iter.bi_size = 0;
2161 lbmIODone(bio, 0); 2161 lbmIODone(bio);
2162 } else { 2162 } else {
2163 submit_bio(WRITE_SYNC, bio); 2163 submit_bio(WRITE_SYNC, bio);
2164 INCREMENT(lmStat.submitted); 2164 INCREMENT(lmStat.submitted);
@@ -2196,7 +2196,7 @@ static int lbmIOWait(struct lbuf * bp, int flag)
2196 * 2196 *
2197 * executed at INTIODONE level 2197 * executed at INTIODONE level
2198 */ 2198 */
2199static void lbmIODone(struct bio *bio, int error) 2199static void lbmIODone(struct bio *bio)
2200{ 2200{
2201 struct lbuf *bp = bio->bi_private; 2201 struct lbuf *bp = bio->bi_private;
2202 struct lbuf *nextbp, *tail; 2202 struct lbuf *nextbp, *tail;
@@ -2212,7 +2212,7 @@ static void lbmIODone(struct bio *bio, int error)
2212 2212
2213 bp->l_flag |= lbmDONE; 2213 bp->l_flag |= lbmDONE;
2214 2214
2215 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2215 if (bio->bi_error) {
2216 bp->l_flag |= lbmERROR; 2216 bp->l_flag |= lbmERROR;
2217 2217
2218 jfs_err("lbmIODone: I/O error in JFS log"); 2218 jfs_err("lbmIODone: I/O error in JFS log");
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 16a0922beb59..a3eb316b1ac3 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -276,11 +276,11 @@ static void last_read_complete(struct page *page)
276 unlock_page(page); 276 unlock_page(page);
277} 277}
278 278
279static void metapage_read_end_io(struct bio *bio, int err) 279static void metapage_read_end_io(struct bio *bio)
280{ 280{
281 struct page *page = bio->bi_private; 281 struct page *page = bio->bi_private;
282 282
283 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 283 if (bio->bi_error) {
284 printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 284 printk(KERN_ERR "metapage_read_end_io: I/O error\n");
285 SetPageError(page); 285 SetPageError(page);
286 } 286 }
@@ -331,13 +331,13 @@ static void last_write_complete(struct page *page)
331 end_page_writeback(page); 331 end_page_writeback(page);
332} 332}
333 333
334static void metapage_write_end_io(struct bio *bio, int err) 334static void metapage_write_end_io(struct bio *bio)
335{ 335{
336 struct page *page = bio->bi_private; 336 struct page *page = bio->bi_private;
337 337
338 BUG_ON(!PagePrivate(page)); 338 BUG_ON(!PagePrivate(page));
339 339
340 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { 340 if (bio->bi_error) {
341 printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 341 printk(KERN_ERR "metapage_write_end_io: I/O error\n");
342 SetPageError(page); 342 SetPageError(page);
343 } 343 }
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 76279e11982d..cea0cc9878b7 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -53,16 +53,14 @@ static int bdev_readpage(void *_sb, struct page *page)
53 53
54static DECLARE_WAIT_QUEUE_HEAD(wq); 54static DECLARE_WAIT_QUEUE_HEAD(wq);
55 55
56static void writeseg_end_io(struct bio *bio, int err) 56static void writeseg_end_io(struct bio *bio)
57{ 57{
58 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
59 struct bio_vec *bvec; 58 struct bio_vec *bvec;
60 int i; 59 int i;
61 struct super_block *sb = bio->bi_private; 60 struct super_block *sb = bio->bi_private;
62 struct logfs_super *super = logfs_super(sb); 61 struct logfs_super *super = logfs_super(sb);
63 62
64 BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ 63 BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
65 BUG_ON(err);
66 64
67 bio_for_each_segment_all(bvec, bio, i) { 65 bio_for_each_segment_all(bvec, bio, i) {
68 end_page_writeback(bvec->bv_page); 66 end_page_writeback(bvec->bv_page);
@@ -153,14 +151,12 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
153} 151}
154 152
155 153
156static void erase_end_io(struct bio *bio, int err) 154static void erase_end_io(struct bio *bio)
157{ 155{
158 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
159 struct super_block *sb = bio->bi_private; 156 struct super_block *sb = bio->bi_private;
160 struct logfs_super *super = logfs_super(sb); 157 struct logfs_super *super = logfs_super(sb);
161 158
162 BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ 159 BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
163 BUG_ON(err);
164 BUG_ON(bio->bi_vcnt == 0); 160 BUG_ON(bio->bi_vcnt == 0);
165 bio_put(bio); 161 bio_put(bio);
166 if (atomic_dec_and_test(&super->s_pending_writes)) 162 if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/mpage.c b/fs/mpage.c
index ca0244b69de8..abac9361b3f1 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -42,14 +42,14 @@
42 * status of that page is hard. See end_buffer_async_read() for the details. 42 * status of that page is hard. See end_buffer_async_read() for the details.
43 * There is no point in duplicating all that complexity. 43 * There is no point in duplicating all that complexity.
44 */ 44 */
45static void mpage_end_io(struct bio *bio, int err) 45static void mpage_end_io(struct bio *bio)
46{ 46{
47 struct bio_vec *bv; 47 struct bio_vec *bv;
48 int i; 48 int i;
49 49
50 bio_for_each_segment_all(bv, bio, i) { 50 bio_for_each_segment_all(bv, bio, i) {
51 struct page *page = bv->bv_page; 51 struct page *page = bv->bv_page;
52 page_endio(page, bio_data_dir(bio), err); 52 page_endio(page, bio_data_dir(bio), bio->bi_error);
53 } 53 }
54 54
55 bio_put(bio); 55 bio_put(bio);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index d2554fe140a3..9cd4eb3a1e22 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -116,7 +116,7 @@ bl_submit_bio(int rw, struct bio *bio)
116 116
117static struct bio * 117static struct bio *
118bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector, 118bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
119 void (*end_io)(struct bio *, int err), struct parallel_io *par) 119 bio_end_io_t end_io, struct parallel_io *par)
120{ 120{
121 struct bio *bio; 121 struct bio *bio;
122 122
@@ -139,8 +139,7 @@ bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
139static struct bio * 139static struct bio *
140do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, 140do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
141 struct page *page, struct pnfs_block_dev_map *map, 141 struct page *page, struct pnfs_block_dev_map *map,
142 struct pnfs_block_extent *be, 142 struct pnfs_block_extent *be, bio_end_io_t end_io,
143 void (*end_io)(struct bio *, int err),
144 struct parallel_io *par, unsigned int offset, int *len) 143 struct parallel_io *par, unsigned int offset, int *len)
145{ 144{
146 struct pnfs_block_dev *dev = 145 struct pnfs_block_dev *dev =
@@ -183,11 +182,11 @@ retry:
183 return bio; 182 return bio;
184} 183}
185 184
186static void bl_end_io_read(struct bio *bio, int err) 185static void bl_end_io_read(struct bio *bio)
187{ 186{
188 struct parallel_io *par = bio->bi_private; 187 struct parallel_io *par = bio->bi_private;
189 188
190 if (err) { 189 if (bio->bi_error) {
191 struct nfs_pgio_header *header = par->data; 190 struct nfs_pgio_header *header = par->data;
192 191
193 if (!header->pnfs_error) 192 if (!header->pnfs_error)
@@ -316,13 +315,12 @@ out:
316 return PNFS_ATTEMPTED; 315 return PNFS_ATTEMPTED;
317} 316}
318 317
319static void bl_end_io_write(struct bio *bio, int err) 318static void bl_end_io_write(struct bio *bio)
320{ 319{
321 struct parallel_io *par = bio->bi_private; 320 struct parallel_io *par = bio->bi_private;
322 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
323 struct nfs_pgio_header *header = par->data; 321 struct nfs_pgio_header *header = par->data;
324 322
325 if (!uptodate) { 323 if (bio->bi_error) {
326 if (!header->pnfs_error) 324 if (!header->pnfs_error)
327 header->pnfs_error = -EIO; 325 header->pnfs_error = -EIO;
328 pnfs_set_lo_fail(header->lseg); 326 pnfs_set_lo_fail(header->lseg);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 42468e5ab3e7..550b10efb14e 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -338,12 +338,11 @@ void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
338/* 338/*
339 * BIO operations 339 * BIO operations
340 */ 340 */
341static void nilfs_end_bio_write(struct bio *bio, int err) 341static void nilfs_end_bio_write(struct bio *bio)
342{ 342{
343 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
344 struct nilfs_segment_buffer *segbuf = bio->bi_private; 343 struct nilfs_segment_buffer *segbuf = bio->bi_private;
345 344
346 if (!uptodate) 345 if (bio->bi_error)
347 atomic_inc(&segbuf->sb_err); 346 atomic_inc(&segbuf->sb_err);
348 347
349 bio_put(bio); 348 bio_put(bio);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 16eff45727ee..140de3c93d2e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -372,14 +372,13 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
372 wait_for_completion(&wc->wc_io_complete); 372 wait_for_completion(&wc->wc_io_complete);
373} 373}
374 374
375static void o2hb_bio_end_io(struct bio *bio, 375static void o2hb_bio_end_io(struct bio *bio)
376 int error)
377{ 376{
378 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; 377 struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
379 378
380 if (error) { 379 if (bio->bi_error) {
381 mlog(ML_ERROR, "IO Error %d\n", error); 380 mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
382 wc->wc_error = error; 381 wc->wc_error = bio->bi_error;
383 } 382 }
384 383
385 o2hb_bio_wait_dec(wc, 1); 384 o2hb_bio_wait_dec(wc, 1);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3859f5e27a4d..3714844a81d8 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -351,12 +351,11 @@ xfs_imap_valid(
351 */ 351 */
352STATIC void 352STATIC void
353xfs_end_bio( 353xfs_end_bio(
354 struct bio *bio, 354 struct bio *bio)
355 int error)
356{ 355{
357 xfs_ioend_t *ioend = bio->bi_private; 356 xfs_ioend_t *ioend = bio->bi_private;
358 357
359 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; 358 ioend->io_error = bio->bi_error;
360 359
361 /* Toss bio and pass work off to an xfsdatad thread */ 360 /* Toss bio and pass work off to an xfsdatad thread */
362 bio->bi_private = NULL; 361 bio->bi_private = NULL;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a4b7d92e946c..01bd6781974e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1096,8 +1096,7 @@ xfs_bwrite(
1096 1096
1097STATIC void 1097STATIC void
1098xfs_buf_bio_end_io( 1098xfs_buf_bio_end_io(
1099 struct bio *bio, 1099 struct bio *bio)
1100 int error)
1101{ 1100{
1102 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; 1101 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1103 1102
@@ -1105,10 +1104,10 @@ xfs_buf_bio_end_io(
1105 * don't overwrite existing errors - otherwise we can lose errors on 1104 * don't overwrite existing errors - otherwise we can lose errors on
1106 * buffers that require multiple bios to complete. 1105 * buffers that require multiple bios to complete.
1107 */ 1106 */
1108 if (error) { 1107 if (bio->bi_error) {
1109 spin_lock(&bp->b_lock); 1108 spin_lock(&bp->b_lock);
1110 if (!bp->b_io_error) 1109 if (!bp->b_io_error)
1111 bp->b_io_error = error; 1110 bp->b_io_error = bio->bi_error;
1112 spin_unlock(&bp->b_lock); 1111 spin_unlock(&bp->b_lock);
1113 } 1112 }
1114 1113
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5e963a6d7c14..6b918177002d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -195,8 +195,6 @@ static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1)); 195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
196} 196}
197 197
198#define bio_io_error(bio) bio_endio((bio), -EIO)
199
200/* 198/*
201 * drivers should _never_ use the all version - the bio may have been split 199 * drivers should _never_ use the all version - the bio may have been split
202 * before it got to the driver and the driver won't own all of it 200 * before it got to the driver and the driver won't own all of it
@@ -426,7 +424,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
426 424
427} 425}
428 426
429extern void bio_endio(struct bio *, int); 427extern void bio_endio(struct bio *);
428
429static inline void bio_io_error(struct bio *bio)
430{
431 bio->bi_error = -EIO;
432 bio_endio(bio);
433}
434
430struct request_queue; 435struct request_queue;
431extern int bio_phys_segments(struct request_queue *, struct bio *); 436extern int bio_phys_segments(struct request_queue *, struct bio *);
432 437
@@ -717,7 +722,7 @@ extern void bio_integrity_free(struct bio *);
717extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); 722extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
718extern bool bio_integrity_enabled(struct bio *bio); 723extern bool bio_integrity_enabled(struct bio *bio);
719extern int bio_integrity_prep(struct bio *); 724extern int bio_integrity_prep(struct bio *);
720extern void bio_integrity_endio(struct bio *, int); 725extern void bio_integrity_endio(struct bio *);
721extern void bio_integrity_advance(struct bio *, unsigned int); 726extern void bio_integrity_advance(struct bio *, unsigned int);
722extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); 727extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
723extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); 728extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b3405520..6164fb8a817b 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
14struct block_device; 14struct block_device;
15struct io_context; 15struct io_context;
16struct cgroup_subsys_state; 16struct cgroup_subsys_state;
17typedef void (bio_end_io_t) (struct bio *, int); 17typedef void (bio_end_io_t) (struct bio *);
18typedef void (bio_destructor_t) (struct bio *); 18typedef void (bio_destructor_t) (struct bio *);
19 19
20/* 20/*
@@ -53,6 +53,7 @@ struct bio {
53 53
54 struct bvec_iter bi_iter; 54 struct bvec_iter bi_iter;
55 55
56 int bi_error;
56 /* Number of segments in this BIO after 57 /* Number of segments in this BIO after
57 * physical address coalescing is performed. 58 * physical address coalescing is performed.
58 */ 59 */
@@ -111,7 +112,6 @@ struct bio {
111/* 112/*
112 * bio flags 113 * bio flags
113 */ 114 */
114#define BIO_UPTODATE 0 /* ok after I/O completion */
115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ 115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
116#define BIO_CLONED 2 /* doesn't own data */ 116#define BIO_CLONED 2 /* doesn't own data */
117#define BIO_BOUNCED 3 /* bio is a bounce bio */ 117#define BIO_BOUNCED 3 /* bio is a bounce bio */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38874729dc5f..31496d201fdc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -373,9 +373,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
373/* linux/mm/page_io.c */ 373/* linux/mm/page_io.c */
374extern int swap_readpage(struct page *); 374extern int swap_readpage(struct page *);
375extern int swap_writepage(struct page *page, struct writeback_control *wbc); 375extern int swap_writepage(struct page *page, struct writeback_control *wbc);
376extern void end_swap_bio_write(struct bio *bio, int err); 376extern void end_swap_bio_write(struct bio *bio);
377extern int __swap_writepage(struct page *page, struct writeback_control *wbc, 377extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
378 void (*end_write_func)(struct bio *, int)); 378 bio_end_io_t end_write_func);
379extern int swap_set_page_dirty(struct page *page); 379extern int swap_set_page_dirty(struct page *page);
380 380
381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 2f30ca91e4fa..b2066fb5b10f 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -227,27 +227,23 @@ static void hib_init_batch(struct hib_bio_batch *hb)
227 hb->error = 0; 227 hb->error = 0;
228} 228}
229 229
230static void hib_end_io(struct bio *bio, int error) 230static void hib_end_io(struct bio *bio)
231{ 231{
232 struct hib_bio_batch *hb = bio->bi_private; 232 struct hib_bio_batch *hb = bio->bi_private;
233 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
234 struct page *page = bio->bi_io_vec[0].bv_page; 233 struct page *page = bio->bi_io_vec[0].bv_page;
235 234
236 if (!uptodate || error) { 235 if (bio->bi_error) {
237 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", 236 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
238 imajor(bio->bi_bdev->bd_inode), 237 imajor(bio->bi_bdev->bd_inode),
239 iminor(bio->bi_bdev->bd_inode), 238 iminor(bio->bi_bdev->bd_inode),
240 (unsigned long long)bio->bi_iter.bi_sector); 239 (unsigned long long)bio->bi_iter.bi_sector);
241
242 if (!error)
243 error = -EIO;
244 } 240 }
245 241
246 if (bio_data_dir(bio) == WRITE) 242 if (bio_data_dir(bio) == WRITE)
247 put_page(page); 243 put_page(page);
248 244
249 if (error && !hb->error) 245 if (bio->bi_error && !hb->error)
250 hb->error = error; 246 hb->error = bio->bi_error;
251 if (atomic_dec_and_test(&hb->count)) 247 if (atomic_dec_and_test(&hb->count))
252 wake_up(&hb->wait); 248 wake_up(&hb->wait);
253 249
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b3e6b39b6cf9..90e72a0c3047 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -778,9 +778,6 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
778 if (likely(!bt)) 778 if (likely(!bt))
779 return; 779 return;
780 780
781 if (!error && !bio_flagged(bio, BIO_UPTODATE))
782 error = EIO;
783
784 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 781 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
785 bio->bi_rw, what, error, 0, NULL); 782 bio->bi_rw, what, error, 0, NULL);
786} 783}
@@ -887,8 +884,7 @@ static void blk_add_trace_split(void *ignore,
887 884
888 __blk_add_trace(bt, bio->bi_iter.bi_sector, 885 __blk_add_trace(bt, bio->bi_iter.bi_sector,
889 bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, 886 bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
890 !bio_flagged(bio, BIO_UPTODATE), 887 bio->bi_error, sizeof(rpdu), &rpdu);
891 sizeof(rpdu), &rpdu);
892 } 888 }
893} 889}
894 890
@@ -920,8 +916,8 @@ static void blk_add_trace_bio_remap(void *ignore,
920 r.sector_from = cpu_to_be64(from); 916 r.sector_from = cpu_to_be64(from);
921 917
922 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 918 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
923 bio->bi_rw, BLK_TA_REMAP, 919 bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
924 !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); 920 sizeof(r), &r);
925} 921}
926 922
927/** 923/**
diff --git a/mm/page_io.c b/mm/page_io.c
index 520baa4b04d7..338ce68942a0 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -43,12 +43,11 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
43 return bio; 43 return bio;
44} 44}
45 45
46void end_swap_bio_write(struct bio *bio, int err) 46void end_swap_bio_write(struct bio *bio)
47{ 47{
48 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
49 struct page *page = bio->bi_io_vec[0].bv_page; 48 struct page *page = bio->bi_io_vec[0].bv_page;
50 49
51 if (!uptodate) { 50 if (bio->bi_error) {
52 SetPageError(page); 51 SetPageError(page);
53 /* 52 /*
54 * We failed to write the page out to swap-space. 53 * We failed to write the page out to swap-space.
@@ -69,12 +68,11 @@ void end_swap_bio_write(struct bio *bio, int err)
69 bio_put(bio); 68 bio_put(bio);
70} 69}
71 70
72static void end_swap_bio_read(struct bio *bio, int err) 71static void end_swap_bio_read(struct bio *bio)
73{ 72{
74 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
75 struct page *page = bio->bi_io_vec[0].bv_page; 73 struct page *page = bio->bi_io_vec[0].bv_page;
76 74
77 if (!uptodate) { 75 if (bio->bi_error) {
78 SetPageError(page); 76 SetPageError(page);
79 ClearPageUptodate(page); 77 ClearPageUptodate(page);
80 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", 78 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
@@ -254,7 +252,7 @@ static sector_t swap_page_sector(struct page *page)
254} 252}
255 253
256int __swap_writepage(struct page *page, struct writeback_control *wbc, 254int __swap_writepage(struct page *page, struct writeback_control *wbc,
257 void (*end_write_func)(struct bio *, int)) 255 bio_end_io_t end_write_func)
258{ 256{
259 struct bio *bio; 257 struct bio *bio;
260 int ret, rw = WRITE; 258 int ret, rw = WRITE;