author     Christoph Hellwig <hch@lst.de>    2015-07-20 09:29:37 -0400
committer  Jens Axboe <axboe@fb.com>         2015-07-29 10:55:15 -0400
commit     4246a0b63bd8f56a1469b12eafeb875b1041a451 (patch)
tree       3281bb158d658ef7f208ad380c0ecee600a5ab5e /block
parent     0034af036554c39eefd14d835a8ec3496ac46712 (diff)
block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO:

 (1) by clearing the BIO_UPTODATE flag
 (2) by returning a Linux errno value to the bi_end_io callback

The first one has the drawback of only communicating a single possible
error (-EIO), and the second one has the drawback of not being persistent
when bios are queued up and of not being passed along from child to parent
bio in the ever more popular chaining scenario.  Having both mechanisms
available has the additional drawback of utterly confusing driver authors
and introducing bugs where various I/O submitters only deal with one of
them, and the others have to add boilerplate code to deal with both kinds
of error returns.

So add a new bi_error field to store an errno value directly in struct
bio and remove the existing mechanisms to clean all this up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
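The conversion pattern for drivers outside this tree follows directly from the hunks below: bi_end_io callbacks drop their int argument and read bio->bi_error instead, and submitters store an errno in bi_error before calling bio_endio() (or use bio_io_error() for a plain -EIO, as the blk-mq hunks do). The bi_error field itself is added to struct bio in include/linux/blk_types.h, outside this block/-limited diffstat. A minimal sketch of a driver-side conversion follows; the my_end_io()/my_fail_bio() names are hypothetical and only illustrate the pattern:

/* Hypothetical driver code, not part of this patch; it only mirrors
 * the conversion applied to block/ below.
 */

/* Before this patch: the error arrived as a callback argument. */
static void my_end_io_old(struct bio *bio, int error)
{
        if (error)
                pr_err("my_driver: I/O failed: %d\n", error);
        bio_put(bio);
}

/* After this patch: the error lives in the bio itself. */
static void my_end_io(struct bio *bio)
{
        if (bio->bi_error)
                pr_err("my_driver: I/O failed: %d\n", bio->bi_error);
        bio_put(bio);
}

/* Completing a bio with an error now means setting bi_error first. */
static void my_fail_bio(struct bio *bio, int error)
{
        bio->bi_error = error;  /* e.g. -EIO or -EOPNOTSUPP */
        bio_endio(bio);         /* bio_io_error(bio) is the -EIO shorthand */
}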
Diffstat (limited to 'block')
-rw-r--r--   block/bio-integrity.c   11
-rw-r--r--   block/bio.c             43
-rw-r--r--   block/blk-core.c        15
-rw-r--r--   block/blk-lib.c         30
-rw-r--r--   block/blk-map.c          2
-rw-r--r--   block/blk-mq.c           6
-rw-r--r--   block/bounce.c          27
7 files changed, 62 insertions, 72 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 719b7152aed1..4aecca79374a 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -355,13 +355,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
                 container_of(work, struct bio_integrity_payload, bip_work);
         struct bio *bio = bip->bip_bio;
         struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-        int error;
 
-        error = bio_integrity_process(bio, bi->verify_fn);
+        bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
 
         /* Restore original bio completion handler */
         bio->bi_end_io = bip->bip_end_io;
-        bio_endio(bio, error);
+        bio_endio(bio);
 }
 
 /**
@@ -376,7 +375,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  * in process context. This function postpones completion
  * accordingly.
  */
-void bio_integrity_endio(struct bio *bio, int error)
+void bio_integrity_endio(struct bio *bio)
 {
         struct bio_integrity_payload *bip = bio_integrity(bio);
 
@@ -386,9 +385,9 @@ void bio_integrity_endio(struct bio *bio, int error)
          * integrity metadata. Restore original bio end_io handler
          * and run it.
          */
-        if (error) {
+        if (bio->bi_error) {
                 bio->bi_end_io = bip->bip_end_io;
-                bio_endio(bio, error);
+                bio_endio(bio);
 
                 return;
         }
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..a23f489f398f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -269,7 +269,6 @@ static void bio_free(struct bio *bio)
 void bio_init(struct bio *bio)
 {
         memset(bio, 0, sizeof(*bio));
-        bio->bi_flags = 1 << BIO_UPTODATE;
         atomic_set(&bio->__bi_remaining, 1);
         atomic_set(&bio->__bi_cnt, 1);
 }
@@ -292,14 +291,17 @@ void bio_reset(struct bio *bio)
         __bio_free(bio);
 
         memset(bio, 0, BIO_RESET_BYTES);
-        bio->bi_flags = flags | (1 << BIO_UPTODATE);
+        bio->bi_flags = flags;
         atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
-static void bio_chain_endio(struct bio *bio, int error)
+static void bio_chain_endio(struct bio *bio)
 {
-        bio_endio(bio->bi_private, error);
+        struct bio *parent = bio->bi_private;
+
+        parent->bi_error = bio->bi_error;
+        bio_endio(parent);
         bio_put(bio);
 }
 
@@ -896,11 +898,11 @@ struct submit_bio_ret {
         int error;
 };
 
-static void submit_bio_wait_endio(struct bio *bio, int error)
+static void submit_bio_wait_endio(struct bio *bio)
 {
         struct submit_bio_ret *ret = bio->bi_private;
 
-        ret->error = error;
+        ret->error = bio->bi_error;
         complete(&ret->event);
 }
 
@@ -1445,7 +1447,7 @@ void bio_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_unmap_user);
 
-static void bio_map_kern_endio(struct bio *bio, int err)
+static void bio_map_kern_endio(struct bio *bio)
 {
         bio_put(bio);
 }
@@ -1501,13 +1503,13 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 }
 EXPORT_SYMBOL(bio_map_kern);
 
-static void bio_copy_kern_endio(struct bio *bio, int err)
+static void bio_copy_kern_endio(struct bio *bio)
 {
         bio_free_pages(bio);
         bio_put(bio);
 }
 
-static void bio_copy_kern_endio_read(struct bio *bio, int err)
+static void bio_copy_kern_endio_read(struct bio *bio)
 {
         char *p = bio->bi_private;
         struct bio_vec *bvec;
@@ -1518,7 +1520,7 @@ static void bio_copy_kern_endio_read(struct bio *bio, int err)
                 p += bvec->bv_len;
         }
 
-        bio_copy_kern_endio(bio, err);
+        bio_copy_kern_endio(bio);
 }
 
 /**
@@ -1778,25 +1780,15 @@ static inline bool bio_remaining_done(struct bio *bio)
 /**
  * bio_endio - end I/O on a bio
  * @bio:        bio
- * @error:      error, if any
  *
  * Description:
- *   bio_endio() will end I/O on the whole bio. bio_endio() is the
- *   preferred way to end I/O on a bio, it takes care of clearing
- *   BIO_UPTODATE on error. @error is 0 on success, and and one of the
- *   established -Exxxx (-EIO, for instance) error values in case
- *   something went wrong. No one should call bi_end_io() directly on a
- *   bio unless they own it and thus know that it has an end_io
- *   function.
+ *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
+ *   way to end I/O on a bio. No one should call bi_end_io() directly on a
+ *   bio unless they own it and thus know that it has an end_io function.
 **/
-void bio_endio(struct bio *bio, int error)
+void bio_endio(struct bio *bio)
 {
         while (bio) {
-                if (error)
-                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
-                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-                        error = -EIO;
-
                 if (unlikely(!bio_remaining_done(bio)))
                         break;
 
@@ -1810,11 +1802,12 @@ void bio_endio(struct bio *bio, int error)
                  */
                 if (bio->bi_end_io == bio_chain_endio) {
                         struct bio *parent = bio->bi_private;
+                        parent->bi_error = bio->bi_error;
                         bio_put(bio);
                         bio = parent;
                 } else {
                         if (bio->bi_end_io)
-                                bio->bi_end_io(bio, error);
+                                bio->bi_end_io(bio);
                         bio = NULL;
                 }
         }
diff --git a/block/blk-core.c b/block/blk-core.c
index 627ed0c593fb..7ef15b947b91 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -143,9 +143,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                           unsigned int nbytes, int error)
 {
         if (error)
-                clear_bit(BIO_UPTODATE, &bio->bi_flags);
-        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-                error = -EIO;
+                bio->bi_error = error;
 
         if (unlikely(rq->cmd_flags & REQ_QUIET))
                 set_bit(BIO_QUIET, &bio->bi_flags);
@@ -154,7 +152,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
         /* don't actually finish bio if it's part of flush sequence */
         if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-                bio_endio(bio, error);
+                bio_endio(bio);
 }
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1620,7 +1618,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
         blk_queue_bounce(q, &bio);
 
         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-                bio_endio(bio, -EIO);
+                bio->bi_error = -EIO;
+                bio_endio(bio);
                 return;
         }
 
@@ -1673,7 +1672,8 @@ get_rq:
          */
         req = get_request(q, rw_flags, bio, GFP_NOIO);
         if (IS_ERR(req)) {
-                bio_endio(bio, PTR_ERR(req));        /* @q is dead */
+                bio->bi_error = PTR_ERR(req);
+                bio_endio(bio);
                 goto out_unlock;
         }
 
@@ -1896,7 +1896,8 @@ generic_make_request_checks(struct bio *bio)
                 return true;
 
 end_io:
-        bio_endio(bio, err);
+        bio->bi_error = err;
+        bio_endio(bio);
         return false;
 }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 7688ee3f5d72..6dee17443f14 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -11,16 +11,16 @@
 
 struct bio_batch {
         atomic_t                done;
-        unsigned long           flags;
+        int                     error;
         struct completion       *wait;
 };
 
-static void bio_batch_end_io(struct bio *bio, int err)
+static void bio_batch_end_io(struct bio *bio)
 {
         struct bio_batch *bb = bio->bi_private;
 
-        if (err && (err != -EOPNOTSUPP))
-                clear_bit(BIO_UPTODATE, &bb->flags);
+        if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
+                bb->error = bio->bi_error;
         if (atomic_dec_and_test(&bb->done))
                 complete(bb->wait);
         bio_put(bio);
@@ -78,7 +78,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         }
 
         atomic_set(&bb.done, 1);
-        bb.flags = 1 << BIO_UPTODATE;
+        bb.error = 0;
         bb.wait = &wait;
 
         blk_start_plug(&plug);
@@ -134,9 +134,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         if (!atomic_dec_and_test(&bb.done))
                 wait_for_completion_io(&wait);
 
-        if (!test_bit(BIO_UPTODATE, &bb.flags))
-                ret = -EIO;
-
+        if (bb.error)
+                return bb.error;
         return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -172,7 +171,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                 return -EOPNOTSUPP;
 
         atomic_set(&bb.done, 1);
-        bb.flags = 1 << BIO_UPTODATE;
+        bb.error = 0;
         bb.wait = &wait;
 
         while (nr_sects) {
@@ -208,9 +207,8 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
         if (!atomic_dec_and_test(&bb.done))
                 wait_for_completion_io(&wait);
 
-        if (!test_bit(BIO_UPTODATE, &bb.flags))
-                ret = -ENOTSUPP;
-
+        if (bb.error)
+                return bb.error;
         return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
216EXPORT_SYMBOL(blkdev_issue_write_same); 214EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -236,7 +234,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
         DECLARE_COMPLETION_ONSTACK(wait);
 
         atomic_set(&bb.done, 1);
-        bb.flags = 1 << BIO_UPTODATE;
+        bb.error = 0;
         bb.wait = &wait;
 
         ret = 0;
@@ -270,10 +268,8 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
         if (!atomic_dec_and_test(&bb.done))
                 wait_for_completion_io(&wait);
 
-        if (!test_bit(BIO_UPTODATE, &bb.flags))
-                /* One of bios in the batch was completed with error.*/
-                ret = -EIO;
-
+        if (bb.error)
+                return bb.error;
         return ret;
 }
 
diff --git a/block/blk-map.c b/block/blk-map.c
index da310a105429..5fe1c30bfba7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -103,7 +103,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                  * normal IO completion path
                  */
                 bio_get(bio);
-                bio_endio(bio, 0);
+                bio_endio(bio);
                 __blk_rq_unmap_user(bio);
                 return -EINVAL;
         }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d842db59699..94559025c5e6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1199,7 +1199,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         struct blk_mq_alloc_data alloc_data;
 
         if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-                bio_endio(bio, -EIO);
+                bio_io_error(bio);
                 return NULL;
         }
 
@@ -1283,7 +1283,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         blk_queue_bounce(q, &bio);
 
         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-                bio_endio(bio, -EIO);
+                bio_io_error(bio);
                 return;
         }
 
@@ -1368,7 +1368,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
         blk_queue_bounce(q, &bio);
 
         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-                bio_endio(bio, -EIO);
+                bio_io_error(bio);
                 return;
         }
 
diff --git a/block/bounce.c b/block/bounce.c
index b17311227c12..f4db245b9f3a 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -123,7 +123,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
         }
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+static void bounce_end_io(struct bio *bio, mempool_t *pool)
 {
         struct bio *bio_orig = bio->bi_private;
         struct bio_vec *bvec, *org_vec;
@@ -141,39 +141,40 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
                 mempool_free(bvec->bv_page, pool);
         }
 
-        bio_endio(bio_orig, err);
+        bio_orig->bi_error = bio->bi_error;
+        bio_endio(bio_orig);
         bio_put(bio);
 }
 
-static void bounce_end_io_write(struct bio *bio, int err)
+static void bounce_end_io_write(struct bio *bio)
 {
-        bounce_end_io(bio, page_pool, err);
+        bounce_end_io(bio, page_pool);
 }
 
-static void bounce_end_io_write_isa(struct bio *bio, int err)
+static void bounce_end_io_write_isa(struct bio *bio)
 {
 
-        bounce_end_io(bio, isa_page_pool, err);
+        bounce_end_io(bio, isa_page_pool);
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
         struct bio *bio_orig = bio->bi_private;
 
-        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+        if (!bio->bi_error)
                 copy_to_high_bio_irq(bio_orig, bio);
 
-        bounce_end_io(bio, pool, err);
+        bounce_end_io(bio, pool);
 }
 
-static void bounce_end_io_read(struct bio *bio, int err)
+static void bounce_end_io_read(struct bio *bio)
 {
-        __bounce_end_io_read(bio, page_pool, err);
+        __bounce_end_io_read(bio, page_pool);
 }
 
-static void bounce_end_io_read_isa(struct bio *bio, int err)
+static void bounce_end_io_read_isa(struct bio *bio)
 {
-        __bounce_end_io_read(bio, isa_page_pool, err);
+        __bounce_end_io_read(bio, isa_page_pool);
 }
 
 #ifdef CONFIG_NEED_BOUNCE_POOL