diff options
author | Christoph Hellwig <hch@lst.de> | 2017-06-03 03:38:06 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2017-06-09 11:27:32 -0400 |
commit | 4e4cbee93d56137ebff722be022cae5f70ef84fb (patch) | |
tree | 4fa7345155599fc6bdd653fca8c5224ddf90a5be | |
parent | fc17b6534eb8395f0b3133eb31d87deec32c642b (diff) |
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
106 files changed, 625 insertions, 603 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 5384713d48bc..17b9740e138b 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c | |||
@@ -221,7 +221,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, | |||
221 | * @bio: bio to generate/verify integrity metadata for | 221 | * @bio: bio to generate/verify integrity metadata for |
222 | * @proc_fn: Pointer to the relevant processing function | 222 | * @proc_fn: Pointer to the relevant processing function |
223 | */ | 223 | */ |
224 | static int bio_integrity_process(struct bio *bio, | 224 | static blk_status_t bio_integrity_process(struct bio *bio, |
225 | integrity_processing_fn *proc_fn) | 225 | integrity_processing_fn *proc_fn) |
226 | { | 226 | { |
227 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 227 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
@@ -229,7 +229,7 @@ static int bio_integrity_process(struct bio *bio, | |||
229 | struct bvec_iter bviter; | 229 | struct bvec_iter bviter; |
230 | struct bio_vec bv; | 230 | struct bio_vec bv; |
231 | struct bio_integrity_payload *bip = bio_integrity(bio); | 231 | struct bio_integrity_payload *bip = bio_integrity(bio); |
232 | unsigned int ret = 0; | 232 | blk_status_t ret = BLK_STS_OK; |
233 | void *prot_buf = page_address(bip->bip_vec->bv_page) + | 233 | void *prot_buf = page_address(bip->bip_vec->bv_page) + |
234 | bip->bip_vec->bv_offset; | 234 | bip->bip_vec->bv_offset; |
235 | 235 | ||
@@ -366,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work) | |||
366 | struct bio *bio = bip->bip_bio; | 366 | struct bio *bio = bip->bip_bio; |
367 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 367 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
368 | 368 | ||
369 | bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn); | 369 | bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn); |
370 | 370 | ||
371 | /* Restore original bio completion handler */ | 371 | /* Restore original bio completion handler */ |
372 | bio->bi_end_io = bip->bip_end_io; | 372 | bio->bi_end_io = bip->bip_end_io; |
@@ -395,7 +395,7 @@ void bio_integrity_endio(struct bio *bio) | |||
395 | * integrity metadata. Restore original bio end_io handler | 395 | * integrity metadata. Restore original bio end_io handler |
396 | * and run it. | 396 | * and run it. |
397 | */ | 397 | */ |
398 | if (bio->bi_error) { | 398 | if (bio->bi_status) { |
399 | bio->bi_end_io = bip->bip_end_io; | 399 | bio->bi_end_io = bip->bip_end_io; |
400 | bio_endio(bio); | 400 | bio_endio(bio); |
401 | 401 | ||
diff --git a/block/bio.c b/block/bio.c index 888e7801c638..7a5c8ed27f42 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio) | |||
309 | { | 309 | { |
310 | struct bio *parent = bio->bi_private; | 310 | struct bio *parent = bio->bi_private; |
311 | 311 | ||
312 | if (!parent->bi_error) | 312 | if (!parent->bi_status) |
313 | parent->bi_error = bio->bi_error; | 313 | parent->bi_status = bio->bi_status; |
314 | bio_put(bio); | 314 | bio_put(bio); |
315 | return parent; | 315 | return parent; |
316 | } | 316 | } |
@@ -918,7 +918,7 @@ static void submit_bio_wait_endio(struct bio *bio) | |||
918 | { | 918 | { |
919 | struct submit_bio_ret *ret = bio->bi_private; | 919 | struct submit_bio_ret *ret = bio->bi_private; |
920 | 920 | ||
921 | ret->error = bio->bi_error; | 921 | ret->error = blk_status_to_errno(bio->bi_status); |
922 | complete(&ret->event); | 922 | complete(&ret->event); |
923 | } | 923 | } |
924 | 924 | ||
@@ -1818,7 +1818,7 @@ again: | |||
1818 | 1818 | ||
1819 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { | 1819 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
1820 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), | 1820 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), |
1821 | bio, bio->bi_error); | 1821 | bio, bio->bi_status); |
1822 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); | 1822 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1823 | } | 1823 | } |
1824 | 1824 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index e942a9f814c7..3d84820ace9e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -144,6 +144,9 @@ static const struct { | |||
144 | [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, | 144 | [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, |
145 | [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, | 145 | [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, |
146 | 146 | ||
147 | /* device mapper special case, should not leak out: */ | ||
148 | [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" }, | ||
149 | |||
147 | /* everything else not covered above: */ | 150 | /* everything else not covered above: */ |
148 | [BLK_STS_IOERR] = { -EIO, "I/O" }, | 151 | [BLK_STS_IOERR] = { -EIO, "I/O" }, |
149 | }; | 152 | }; |
@@ -188,7 +191,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio, | |||
188 | unsigned int nbytes, blk_status_t error) | 191 | unsigned int nbytes, blk_status_t error) |
189 | { | 192 | { |
190 | if (error) | 193 | if (error) |
191 | bio->bi_error = blk_status_to_errno(error); | 194 | bio->bi_status = error; |
192 | 195 | ||
193 | if (unlikely(rq->rq_flags & RQF_QUIET)) | 196 | if (unlikely(rq->rq_flags & RQF_QUIET)) |
194 | bio_set_flag(bio, BIO_QUIET); | 197 | bio_set_flag(bio, BIO_QUIET); |
@@ -1717,7 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) | |||
1717 | blk_queue_split(q, &bio, q->bio_split); | 1720 | blk_queue_split(q, &bio, q->bio_split); |
1718 | 1721 | ||
1719 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { | 1722 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { |
1720 | bio->bi_error = -EIO; | 1723 | bio->bi_status = BLK_STS_IOERR; |
1721 | bio_endio(bio); | 1724 | bio_endio(bio); |
1722 | return BLK_QC_T_NONE; | 1725 | return BLK_QC_T_NONE; |
1723 | } | 1726 | } |
@@ -1775,7 +1778,10 @@ get_rq: | |||
1775 | req = get_request(q, bio->bi_opf, bio, GFP_NOIO); | 1778 | req = get_request(q, bio->bi_opf, bio, GFP_NOIO); |
1776 | if (IS_ERR(req)) { | 1779 | if (IS_ERR(req)) { |
1777 | __wbt_done(q->rq_wb, wb_acct); | 1780 | __wbt_done(q->rq_wb, wb_acct); |
1778 | bio->bi_error = PTR_ERR(req); | 1781 | if (PTR_ERR(req) == -ENOMEM) |
1782 | bio->bi_status = BLK_STS_RESOURCE; | ||
1783 | else | ||
1784 | bio->bi_status = BLK_STS_IOERR; | ||
1779 | bio_endio(bio); | 1785 | bio_endio(bio); |
1780 | goto out_unlock; | 1786 | goto out_unlock; |
1781 | } | 1787 | } |
@@ -1930,7 +1936,7 @@ generic_make_request_checks(struct bio *bio) | |||
1930 | { | 1936 | { |
1931 | struct request_queue *q; | 1937 | struct request_queue *q; |
1932 | int nr_sectors = bio_sectors(bio); | 1938 | int nr_sectors = bio_sectors(bio); |
1933 | int err = -EIO; | 1939 | blk_status_t status = BLK_STS_IOERR; |
1934 | char b[BDEVNAME_SIZE]; | 1940 | char b[BDEVNAME_SIZE]; |
1935 | struct hd_struct *part; | 1941 | struct hd_struct *part; |
1936 | 1942 | ||
@@ -1973,7 +1979,7 @@ generic_make_request_checks(struct bio *bio) | |||
1973 | !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { | 1979 | !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { |
1974 | bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); | 1980 | bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); |
1975 | if (!nr_sectors) { | 1981 | if (!nr_sectors) { |
1976 | err = 0; | 1982 | status = BLK_STS_OK; |
1977 | goto end_io; | 1983 | goto end_io; |
1978 | } | 1984 | } |
1979 | } | 1985 | } |
@@ -2025,9 +2031,9 @@ generic_make_request_checks(struct bio *bio) | |||
2025 | return true; | 2031 | return true; |
2026 | 2032 | ||
2027 | not_supported: | 2033 | not_supported: |
2028 | err = -EOPNOTSUPP; | 2034 | status = BLK_STS_NOTSUPP; |
2029 | end_io: | 2035 | end_io: |
2030 | bio->bi_error = err; | 2036 | bio->bi_status = status; |
2031 | bio_endio(bio); | 2037 | bio_endio(bio); |
2032 | return false; | 2038 | return false; |
2033 | } | 2039 | } |
diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 0f891a9aff4d..feb30570eaf5 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c | |||
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = { | |||
384 | .sysfs_ops = &integrity_ops, | 384 | .sysfs_ops = &integrity_ops, |
385 | }; | 385 | }; |
386 | 386 | ||
387 | static int blk_integrity_nop_fn(struct blk_integrity_iter *iter) | 387 | static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter) |
388 | { | 388 | { |
389 | return 0; | 389 | return BLK_STS_OK; |
390 | } | 390 | } |
391 | 391 | ||
392 | static const struct blk_integrity_profile nop_profile = { | 392 | static const struct blk_integrity_profile nop_profile = { |
diff --git a/block/bounce.c b/block/bounce.c index 1cb5dd3a5da1..e4703181d97f 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
@@ -143,7 +143,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool) | |||
143 | mempool_free(bvec->bv_page, pool); | 143 | mempool_free(bvec->bv_page, pool); |
144 | } | 144 | } |
145 | 145 | ||
146 | bio_orig->bi_error = bio->bi_error; | 146 | bio_orig->bi_status = bio->bi_status; |
147 | bio_endio(bio_orig); | 147 | bio_endio(bio_orig); |
148 | bio_put(bio); | 148 | bio_put(bio); |
149 | } | 149 | } |
@@ -163,7 +163,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool) | |||
163 | { | 163 | { |
164 | struct bio *bio_orig = bio->bi_private; | 164 | struct bio *bio_orig = bio->bi_private; |
165 | 165 | ||
166 | if (!bio->bi_error) | 166 | if (!bio->bi_status) |
167 | copy_to_high_bio_irq(bio_orig, bio); | 167 | copy_to_high_bio_irq(bio_orig, bio); |
168 | 168 | ||
169 | bounce_end_io(bio, pool); | 169 | bounce_end_io(bio, pool); |
diff --git a/block/t10-pi.c b/block/t10-pi.c index 680c6d636298..350b3cbcf9e5 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c | |||
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len) | |||
46 | * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref | 46 | * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref |
47 | * tag. | 47 | * tag. |
48 | */ | 48 | */ |
49 | static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, | 49 | static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, |
50 | unsigned int type) | 50 | csum_fn *fn, unsigned int type) |
51 | { | 51 | { |
52 | unsigned int i; | 52 | unsigned int i; |
53 | 53 | ||
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, | |||
67 | iter->seed++; | 67 | iter->seed++; |
68 | } | 68 | } |
69 | 69 | ||
70 | return 0; | 70 | return BLK_STS_OK; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, | 73 | static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, |
74 | unsigned int type) | 74 | csum_fn *fn, unsigned int type) |
75 | { | 75 | { |
76 | unsigned int i; | 76 | unsigned int i; |
77 | 77 | ||
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, | |||
108 | "(rcvd %04x, want %04x)\n", iter->disk_name, | 108 | "(rcvd %04x, want %04x)\n", iter->disk_name, |
109 | (unsigned long long)iter->seed, | 109 | (unsigned long long)iter->seed, |
110 | be16_to_cpu(pi->guard_tag), be16_to_cpu(csum)); | 110 | be16_to_cpu(pi->guard_tag), be16_to_cpu(csum)); |
111 | return -EILSEQ; | 111 | return BLK_STS_PROTECTION; |
112 | } | 112 | } |
113 | 113 | ||
114 | next: | 114 | next: |
@@ -117,45 +117,45 @@ next: | |||
117 | iter->seed++; | 117 | iter->seed++; |
118 | } | 118 | } |
119 | 119 | ||
120 | return 0; | 120 | return BLK_STS_OK; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) | 123 | static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) |
124 | { | 124 | { |
125 | return t10_pi_generate(iter, t10_pi_crc_fn, 1); | 125 | return t10_pi_generate(iter, t10_pi_crc_fn, 1); |
126 | } | 126 | } |
127 | 127 | ||
128 | static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) | 128 | static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) |
129 | { | 129 | { |
130 | return t10_pi_generate(iter, t10_pi_ip_fn, 1); | 130 | return t10_pi_generate(iter, t10_pi_ip_fn, 1); |
131 | } | 131 | } |
132 | 132 | ||
133 | static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) | 133 | static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) |
134 | { | 134 | { |
135 | return t10_pi_verify(iter, t10_pi_crc_fn, 1); | 135 | return t10_pi_verify(iter, t10_pi_crc_fn, 1); |
136 | } | 136 | } |
137 | 137 | ||
138 | static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) | 138 | static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) |
139 | { | 139 | { |
140 | return t10_pi_verify(iter, t10_pi_ip_fn, 1); | 140 | return t10_pi_verify(iter, t10_pi_ip_fn, 1); |
141 | } | 141 | } |
142 | 142 | ||
143 | static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) | 143 | static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) |
144 | { | 144 | { |
145 | return t10_pi_generate(iter, t10_pi_crc_fn, 3); | 145 | return t10_pi_generate(iter, t10_pi_crc_fn, 3); |
146 | } | 146 | } |
147 | 147 | ||
148 | static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) | 148 | static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) |
149 | { | 149 | { |
150 | return t10_pi_generate(iter, t10_pi_ip_fn, 3); | 150 | return t10_pi_generate(iter, t10_pi_ip_fn, 3); |
151 | } | 151 | } |
152 | 152 | ||
153 | static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) | 153 | static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) |
154 | { | 154 | { |
155 | return t10_pi_verify(iter, t10_pi_crc_fn, 3); | 155 | return t10_pi_verify(iter, t10_pi_crc_fn, 3); |
156 | } | 156 | } |
157 | 157 | ||
158 | static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) | 158 | static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) |
159 | { | 159 | { |
160 | return t10_pi_verify(iter, t10_pi_ip_fn, 3); | 160 | return t10_pi_verify(iter, t10_pi_ip_fn, 3); |
161 | } | 161 | } |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 5bf0c9d21fc1..dc43254e05a4 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -1070,7 +1070,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) | |||
1070 | d->ip.rq = NULL; | 1070 | d->ip.rq = NULL; |
1071 | do { | 1071 | do { |
1072 | bio = rq->bio; | 1072 | bio = rq->bio; |
1073 | bok = !fastfail && !bio->bi_error; | 1073 | bok = !fastfail && !bio->bi_status; |
1074 | } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size)); | 1074 | } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size)); |
1075 | 1075 | ||
1076 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ | 1076 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ |
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f) | |||
1131 | ahout->cmdstat, ahin->cmdstat, | 1131 | ahout->cmdstat, ahin->cmdstat, |
1132 | d->aoemajor, d->aoeminor); | 1132 | d->aoemajor, d->aoeminor); |
1133 | noskb: if (buf) | 1133 | noskb: if (buf) |
1134 | buf->bio->bi_error = -EIO; | 1134 | buf->bio->bi_status = BLK_STS_IOERR; |
1135 | goto out; | 1135 | goto out; |
1136 | } | 1136 | } |
1137 | 1137 | ||
@@ -1144,7 +1144,7 @@ noskb: if (buf) | |||
1144 | "aoe: runt data size in read from", | 1144 | "aoe: runt data size in read from", |
1145 | (long) d->aoemajor, d->aoeminor, | 1145 | (long) d->aoemajor, d->aoeminor, |
1146 | skb->len, n); | 1146 | skb->len, n); |
1147 | buf->bio->bi_error = -EIO; | 1147 | buf->bio->bi_status = BLK_STS_IOERR; |
1148 | break; | 1148 | break; |
1149 | } | 1149 | } |
1150 | if (n > f->iter.bi_size) { | 1150 | if (n > f->iter.bi_size) { |
@@ -1152,7 +1152,7 @@ noskb: if (buf) | |||
1152 | "aoe: too-large data size in read from", | 1152 | "aoe: too-large data size in read from", |
1153 | (long) d->aoemajor, d->aoeminor, | 1153 | (long) d->aoemajor, d->aoeminor, |
1154 | n, f->iter.bi_size); | 1154 | n, f->iter.bi_size); |
1155 | buf->bio->bi_error = -EIO; | 1155 | buf->bio->bi_status = BLK_STS_IOERR; |
1156 | break; | 1156 | break; |
1157 | } | 1157 | } |
1158 | bvcpy(skb, f->buf->bio, f->iter, n); | 1158 | bvcpy(skb, f->buf->bio, f->iter, n); |
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf) | |||
1654 | if (buf == NULL) | 1654 | if (buf == NULL) |
1655 | return; | 1655 | return; |
1656 | buf->iter.bi_size = 0; | 1656 | buf->iter.bi_size = 0; |
1657 | buf->bio->bi_error = -EIO; | 1657 | buf->bio->bi_status = BLK_STS_IOERR; |
1658 | if (buf->nframesout == 0) | 1658 | if (buf->nframesout == 0) |
1659 | aoe_end_buf(d, buf); | 1659 | aoe_end_buf(d, buf); |
1660 | } | 1660 | } |
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index ffd1947500c6..b28fefb90391 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d) | |||
170 | if (rq == NULL) | 170 | if (rq == NULL) |
171 | return; | 171 | return; |
172 | while ((bio = d->ip.nxbio)) { | 172 | while ((bio = d->ip.nxbio)) { |
173 | bio->bi_error = -EIO; | 173 | bio->bi_status = BLK_STS_IOERR; |
174 | d->ip.nxbio = bio->bi_next; | 174 | d->ip.nxbio = bio->bi_next; |
175 | n = (unsigned long) rq->special; | 175 | n = (unsigned long) rq->special; |
176 | rq->special = (void *) --n; | 176 | rq->special = (void *) --n; |
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 8d7bcfa49c12..e02c45cd3c5a 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, | |||
178 | else | 178 | else |
179 | submit_bio(bio); | 179 | submit_bio(bio); |
180 | wait_until_done_or_force_detached(device, bdev, &device->md_io.done); | 180 | wait_until_done_or_force_detached(device, bdev, &device->md_io.done); |
181 | if (!bio->bi_error) | 181 | if (!bio->bi_status) |
182 | err = device->md_io.error; | 182 | err = device->md_io.error; |
183 | 183 | ||
184 | out: | 184 | out: |
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index a804a4107fbc..809fd245c3dc 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio) | |||
959 | !bm_test_page_unchanged(b->bm_pages[idx])) | 959 | !bm_test_page_unchanged(b->bm_pages[idx])) |
960 | drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); | 960 | drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); |
961 | 961 | ||
962 | if (bio->bi_error) { | 962 | if (bio->bi_status) { |
963 | /* ctx error will hold the completed-last non-zero error code, | 963 | /* ctx error will hold the completed-last non-zero error code, |
964 | * in case error codes differ. */ | 964 | * in case error codes differ. */ |
965 | ctx->error = bio->bi_error; | 965 | ctx->error = blk_status_to_errno(bio->bi_status); |
966 | bm_set_page_io_err(b->bm_pages[idx]); | 966 | bm_set_page_io_err(b->bm_pages[idx]); |
967 | /* Not identical to on disk version of it. | 967 | /* Not identical to on disk version of it. |
968 | * Is BM_PAGE_IO_ERROR enough? */ | 968 | * Is BM_PAGE_IO_ERROR enough? */ |
969 | if (__ratelimit(&drbd_ratelimit_state)) | 969 | if (__ratelimit(&drbd_ratelimit_state)) |
970 | drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", | 970 | drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", |
971 | bio->bi_error, idx); | 971 | bio->bi_status, idx); |
972 | } else { | 972 | } else { |
973 | bm_clear_page_io_err(b->bm_pages[idx]); | 973 | bm_clear_page_io_err(b->bm_pages[idx]); |
974 | dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); | 974 | dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index d5da45bb03a6..76761b4ca13e 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -1627,7 +1627,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device, | |||
1627 | __release(local); | 1627 | __release(local); |
1628 | if (!bio->bi_bdev) { | 1628 | if (!bio->bi_bdev) { |
1629 | drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); | 1629 | drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); |
1630 | bio->bi_error = -ENODEV; | 1630 | bio->bi_status = BLK_STS_IOERR; |
1631 | bio_endio(bio); | 1631 | bio_endio(bio); |
1632 | return; | 1632 | return; |
1633 | } | 1633 | } |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 1b0a2be24f39..c7e95e6380fb 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio) | |||
1229 | struct drbd_device *device = octx->device; | 1229 | struct drbd_device *device = octx->device; |
1230 | struct issue_flush_context *ctx = octx->ctx; | 1230 | struct issue_flush_context *ctx = octx->ctx; |
1231 | 1231 | ||
1232 | if (bio->bi_error) { | 1232 | if (bio->bi_status) { |
1233 | ctx->error = bio->bi_error; | 1233 | ctx->error = blk_status_to_errno(bio->bi_status); |
1234 | drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error); | 1234 | drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status); |
1235 | } | 1235 | } |
1236 | kfree(octx); | 1236 | kfree(octx); |
1237 | bio_put(bio); | 1237 | bio_put(bio); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 656624314f0d..fca6b9914948 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection) | |||
203 | void complete_master_bio(struct drbd_device *device, | 203 | void complete_master_bio(struct drbd_device *device, |
204 | struct bio_and_error *m) | 204 | struct bio_and_error *m) |
205 | { | 205 | { |
206 | m->bio->bi_error = m->error; | 206 | m->bio->bi_status = errno_to_blk_status(m->error); |
207 | bio_endio(m->bio); | 207 | bio_endio(m->bio); |
208 | dec_ap_bio(device); | 208 | dec_ap_bio(device); |
209 | } | 209 | } |
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req) | |||
1157 | 1157 | ||
1158 | if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9, | 1158 | if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9, |
1159 | GFP_NOIO, 0)) | 1159 | GFP_NOIO, 0)) |
1160 | req->private_bio->bi_error = -EIO; | 1160 | req->private_bio->bi_status = BLK_STS_IOERR; |
1161 | bio_endio(req->private_bio); | 1161 | bio_endio(req->private_bio); |
1162 | } | 1162 | } |
1163 | 1163 | ||
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long | |||
1225 | /* only pass the error to the upper layers. | 1225 | /* only pass the error to the upper layers. |
1226 | * if user cannot handle io errors, that's not our business. */ | 1226 | * if user cannot handle io errors, that's not our business. */ |
1227 | drbd_err(device, "could not kmalloc() req\n"); | 1227 | drbd_err(device, "could not kmalloc() req\n"); |
1228 | bio->bi_error = -ENOMEM; | 1228 | bio->bi_status = BLK_STS_RESOURCE; |
1229 | bio_endio(bio); | 1229 | bio_endio(bio); |
1230 | return ERR_PTR(-ENOMEM); | 1230 | return ERR_PTR(-ENOMEM); |
1231 | } | 1231 | } |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1afcb4e02d8d..1d8726a8df34 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio) | |||
63 | struct drbd_device *device; | 63 | struct drbd_device *device; |
64 | 64 | ||
65 | device = bio->bi_private; | 65 | device = bio->bi_private; |
66 | device->md_io.error = bio->bi_error; | 66 | device->md_io.error = blk_status_to_errno(bio->bi_status); |
67 | 67 | ||
68 | /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able | 68 | /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able |
69 | * to timeout on the lower level device, and eventually detach from it. | 69 | * to timeout on the lower level device, and eventually detach from it. |
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio) | |||
177 | bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES || | 177 | bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES || |
178 | bio_op(bio) == REQ_OP_DISCARD; | 178 | bio_op(bio) == REQ_OP_DISCARD; |
179 | 179 | ||
180 | if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) | 180 | if (bio->bi_status && __ratelimit(&drbd_ratelimit_state)) |
181 | drbd_warn(device, "%s: error=%d s=%llus\n", | 181 | drbd_warn(device, "%s: error=%d s=%llus\n", |
182 | is_write ? (is_discard ? "discard" : "write") | 182 | is_write ? (is_discard ? "discard" : "write") |
183 | : "read", bio->bi_error, | 183 | : "read", bio->bi_status, |
184 | (unsigned long long)peer_req->i.sector); | 184 | (unsigned long long)peer_req->i.sector); |
185 | 185 | ||
186 | if (bio->bi_error) | 186 | if (bio->bi_status) |
187 | set_bit(__EE_WAS_ERROR, &peer_req->flags); | 187 | set_bit(__EE_WAS_ERROR, &peer_req->flags); |
188 | 188 | ||
189 | bio_put(bio); /* no need for the bio anymore */ | 189 | bio_put(bio); /* no need for the bio anymore */ |
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio) | |||
243 | if (__ratelimit(&drbd_ratelimit_state)) | 243 | if (__ratelimit(&drbd_ratelimit_state)) |
244 | drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); | 244 | drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); |
245 | 245 | ||
246 | if (!bio->bi_error) | 246 | if (!bio->bi_status) |
247 | drbd_panic_after_delayed_completion_of_aborted_request(device); | 247 | drbd_panic_after_delayed_completion_of_aborted_request(device); |
248 | } | 248 | } |
249 | 249 | ||
250 | /* to avoid recursion in __req_mod */ | 250 | /* to avoid recursion in __req_mod */ |
251 | if (unlikely(bio->bi_error)) { | 251 | if (unlikely(bio->bi_status)) { |
252 | switch (bio_op(bio)) { | 252 | switch (bio_op(bio)) { |
253 | case REQ_OP_WRITE_ZEROES: | 253 | case REQ_OP_WRITE_ZEROES: |
254 | case REQ_OP_DISCARD: | 254 | case REQ_OP_DISCARD: |
255 | if (bio->bi_error == -EOPNOTSUPP) | 255 | if (bio->bi_status == BLK_STS_NOTSUPP) |
256 | what = DISCARD_COMPLETED_NOTSUPP; | 256 | what = DISCARD_COMPLETED_NOTSUPP; |
257 | else | 257 | else |
258 | what = DISCARD_COMPLETED_WITH_ERROR; | 258 | what = DISCARD_COMPLETED_WITH_ERROR; |
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio) | |||
272 | } | 272 | } |
273 | 273 | ||
274 | bio_put(req->private_bio); | 274 | bio_put(req->private_bio); |
275 | req->private_bio = ERR_PTR(bio->bi_error); | 275 | req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); |
276 | 276 | ||
277 | /* not req_mod(), we need irqsave here! */ | 277 | /* not req_mod(), we need irqsave here! */ |
278 | spin_lock_irqsave(&device->resource->req_lock, flags); | 278 | spin_lock_irqsave(&device->resource->req_lock, flags); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cc75a5176057..9e3cb32e365d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio) | |||
3780 | struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; | 3780 | struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; |
3781 | int drive = cbdata->drive; | 3781 | int drive = cbdata->drive; |
3782 | 3782 | ||
3783 | if (bio->bi_error) { | 3783 | if (bio->bi_status) { |
3784 | pr_info("floppy: error %d while reading block 0\n", | 3784 | pr_info("floppy: error %d while reading block 0\n", |
3785 | bio->bi_error); | 3785 | bio->bi_status); |
3786 | set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); | 3786 | set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); |
3787 | } | 3787 | } |
3788 | complete(&cbdata->complete); | 3788 | complete(&cbdata->complete); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 42e3c880a8a5..e8a381161db6 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio) | |||
952 | 952 | ||
953 | pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", | 953 | pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", |
954 | bio, (unsigned long long)pkt->sector, | 954 | bio, (unsigned long long)pkt->sector, |
955 | (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); | 955 | (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status); |
956 | 956 | ||
957 | if (bio->bi_error) | 957 | if (bio->bi_status) |
958 | atomic_inc(&pkt->io_errors); | 958 | atomic_inc(&pkt->io_errors); |
959 | if (atomic_dec_and_test(&pkt->io_wait)) { | 959 | if (atomic_dec_and_test(&pkt->io_wait)) { |
960 | atomic_inc(&pkt->run_sm); | 960 | atomic_inc(&pkt->run_sm); |
@@ -969,7 +969,7 @@ static void pkt_end_io_packet_write(struct bio *bio) | |||
969 | struct pktcdvd_device *pd = pkt->pd; | 969 | struct pktcdvd_device *pd = pkt->pd; |
970 | BUG_ON(!pd); | 970 | BUG_ON(!pd); |
971 | 971 | ||
972 | pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error); | 972 | pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status); |
973 | 973 | ||
974 | pd->stats.pkt_ended++; | 974 | pd->stats.pkt_ended++; |
975 | 975 | ||
@@ -1305,16 +1305,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1305 | pkt_queue_bio(pd, pkt->w_bio); | 1305 | pkt_queue_bio(pd, pkt->w_bio); |
1306 | } | 1306 | } |
1307 | 1307 | ||
1308 | static void pkt_finish_packet(struct packet_data *pkt, int error) | 1308 | static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status) |
1309 | { | 1309 | { |
1310 | struct bio *bio; | 1310 | struct bio *bio; |
1311 | 1311 | ||
1312 | if (error) | 1312 | if (status) |
1313 | pkt->cache_valid = 0; | 1313 | pkt->cache_valid = 0; |
1314 | 1314 | ||
1315 | /* Finish all bios corresponding to this packet */ | 1315 | /* Finish all bios corresponding to this packet */ |
1316 | while ((bio = bio_list_pop(&pkt->orig_bios))) { | 1316 | while ((bio = bio_list_pop(&pkt->orig_bios))) { |
1317 | bio->bi_error = error; | 1317 | bio->bi_status = status; |
1318 | bio_endio(bio); | 1318 | bio_endio(bio); |
1319 | } | 1319 | } |
1320 | } | 1320 | } |
@@ -1349,7 +1349,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data | |||
1349 | if (atomic_read(&pkt->io_wait) > 0) | 1349 | if (atomic_read(&pkt->io_wait) > 0) |
1350 | return; | 1350 | return; |
1351 | 1351 | ||
1352 | if (!pkt->w_bio->bi_error) { | 1352 | if (!pkt->w_bio->bi_status) { |
1353 | pkt_set_state(pkt, PACKET_FINISHED_STATE); | 1353 | pkt_set_state(pkt, PACKET_FINISHED_STATE); |
1354 | } else { | 1354 | } else { |
1355 | pkt_set_state(pkt, PACKET_RECOVERY_STATE); | 1355 | pkt_set_state(pkt, PACKET_RECOVERY_STATE); |
@@ -1366,7 +1366,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data | |||
1366 | break; | 1366 | break; |
1367 | 1367 | ||
1368 | case PACKET_FINISHED_STATE: | 1368 | case PACKET_FINISHED_STATE: |
1369 | pkt_finish_packet(pkt, pkt->w_bio->bi_error); | 1369 | pkt_finish_packet(pkt, pkt->w_bio->bi_status); |
1370 | return; | 1370 | return; |
1371 | 1371 | ||
1372 | default: | 1372 | default: |
@@ -2301,7 +2301,7 @@ static void pkt_end_io_read_cloned(struct bio *bio) | |||
2301 | struct packet_stacked_data *psd = bio->bi_private; | 2301 | struct packet_stacked_data *psd = bio->bi_private; |
2302 | struct pktcdvd_device *pd = psd->pd; | 2302 | struct pktcdvd_device *pd = psd->pd; |
2303 | 2303 | ||
2304 | psd->bio->bi_error = bio->bi_error; | 2304 | psd->bio->bi_status = bio->bi_status; |
2305 | bio_put(bio); | 2305 | bio_put(bio); |
2306 | bio_endio(psd->bio); | 2306 | bio_endio(psd->bio); |
2307 | mempool_free(psd, psd_pool); | 2307 | mempool_free(psd, psd_pool); |
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 456b4fe21559..6fa2b8197013 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c | |||
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev) | |||
428 | kfree(priv->cache.tags); | 428 | kfree(priv->cache.tags); |
429 | } | 429 | } |
430 | 430 | ||
431 | static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, | 431 | static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, |
432 | size_t len, size_t *retlen, u_char *buf) | 432 | size_t len, size_t *retlen, u_char *buf) |
433 | { | 433 | { |
434 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); | 434 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); |
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, | |||
438 | (unsigned int)from, len); | 438 | (unsigned int)from, len); |
439 | 439 | ||
440 | if (from >= priv->size) | 440 | if (from >= priv->size) |
441 | return -EIO; | 441 | return BLK_STS_IOERR; |
442 | 442 | ||
443 | if (len > priv->size - from) | 443 | if (len > priv->size - from) |
444 | len = priv->size - from; | 444 | len = priv->size - from; |
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, | |||
472 | return 0; | 472 | return 0; |
473 | } | 473 | } |
474 | 474 | ||
475 | static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to, | 475 | static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to, |
476 | size_t len, size_t *retlen, const u_char *buf) | 476 | size_t len, size_t *retlen, const u_char *buf) |
477 | { | 477 | { |
478 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); | 478 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); |
479 | unsigned int cached, count; | 479 | unsigned int cached, count; |
480 | 480 | ||
481 | if (to >= priv->size) | 481 | if (to >= priv->size) |
482 | return -EIO; | 482 | return BLK_STS_IOERR; |
483 | 483 | ||
484 | if (len > priv->size - to) | 484 | if (len > priv->size - to) |
485 | len = priv->size - to; | 485 | len = priv->size - to; |
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, | |||
554 | int write = bio_data_dir(bio) == WRITE; | 554 | int write = bio_data_dir(bio) == WRITE; |
555 | const char *op = write ? "write" : "read"; | 555 | const char *op = write ? "write" : "read"; |
556 | loff_t offset = bio->bi_iter.bi_sector << 9; | 556 | loff_t offset = bio->bi_iter.bi_sector << 9; |
557 | int error = 0; | 557 | blk_status_t error = 0; |
558 | struct bio_vec bvec; | 558 | struct bio_vec bvec; |
559 | struct bvec_iter iter; | 559 | struct bvec_iter iter; |
560 | struct bio *next; | 560 | struct bio *next; |
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, | |||
578 | 578 | ||
579 | if (retlen != len) { | 579 | if (retlen != len) { |
580 | dev_err(&dev->core, "Short %s\n", op); | 580 | dev_err(&dev->core, "Short %s\n", op); |
581 | error = -EIO; | 581 | error = BLK_STS_IOERR; |
582 | goto out; | 582 | goto out; |
583 | } | 583 | } |
584 | 584 | ||
@@ -593,7 +593,7 @@ out: | |||
593 | next = bio_list_peek(&priv->list); | 593 | next = bio_list_peek(&priv->list); |
594 | spin_unlock_irq(&priv->lock); | 594 | spin_unlock_irq(&priv->lock); |
595 | 595 | ||
596 | bio->bi_error = error; | 596 | bio->bi_status = error; |
597 | bio_endio(bio); | 597 | bio_endio(bio); |
598 | return next; | 598 | return next; |
599 | } | 599 | } |
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 9c566364ac9c..0b0a0a902355 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c | |||
@@ -149,7 +149,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) | |||
149 | { | 149 | { |
150 | struct rsxx_cardinfo *card = q->queuedata; | 150 | struct rsxx_cardinfo *card = q->queuedata; |
151 | struct rsxx_bio_meta *bio_meta; | 151 | struct rsxx_bio_meta *bio_meta; |
152 | int st = -EINVAL; | 152 | blk_status_t st = BLK_STS_IOERR; |
153 | 153 | ||
154 | blk_queue_split(q, &bio, q->bio_split); | 154 | blk_queue_split(q, &bio, q->bio_split); |
155 | 155 | ||
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) | |||
161 | if (bio_end_sector(bio) > get_capacity(card->gendisk)) | 161 | if (bio_end_sector(bio) > get_capacity(card->gendisk)) |
162 | goto req_err; | 162 | goto req_err; |
163 | 163 | ||
164 | if (unlikely(card->halt)) { | 164 | if (unlikely(card->halt)) |
165 | st = -EFAULT; | ||
166 | goto req_err; | 165 | goto req_err; |
167 | } | ||
168 | 166 | ||
169 | if (unlikely(card->dma_fault)) { | 167 | if (unlikely(card->dma_fault)) |
170 | st = (-EFAULT); | ||
171 | goto req_err; | 168 | goto req_err; |
172 | } | ||
173 | 169 | ||
174 | if (bio->bi_iter.bi_size == 0) { | 170 | if (bio->bi_iter.bi_size == 0) { |
175 | dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); | 171 | dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); |
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) | |||
178 | 174 | ||
179 | bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL); | 175 | bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL); |
180 | if (!bio_meta) { | 176 | if (!bio_meta) { |
181 | st = -ENOMEM; | 177 | st = BLK_STS_RESOURCE; |
182 | goto req_err; | 178 | goto req_err; |
183 | } | 179 | } |
184 | 180 | ||
@@ -205,7 +201,7 @@ queue_err: | |||
205 | kmem_cache_free(bio_meta_pool, bio_meta); | 201 | kmem_cache_free(bio_meta_pool, bio_meta); |
206 | req_err: | 202 | req_err: |
207 | if (st) | 203 | if (st) |
208 | bio->bi_error = st; | 204 | bio->bi_status = st; |
209 | bio_endio(bio); | 205 | bio_endio(bio); |
210 | return BLK_QC_T_NONE; | 206 | return BLK_QC_T_NONE; |
211 | } | 207 | } |
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 5a20385f87d0..6a1b2177951c 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work) | |||
611 | mutex_unlock(&ctrl->work_lock); | 611 | mutex_unlock(&ctrl->work_lock); |
612 | } | 612 | } |
613 | 613 | ||
614 | static int rsxx_queue_discard(struct rsxx_cardinfo *card, | 614 | static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card, |
615 | struct list_head *q, | 615 | struct list_head *q, |
616 | unsigned int laddr, | 616 | unsigned int laddr, |
617 | rsxx_dma_cb cb, | 617 | rsxx_dma_cb cb, |
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card, | |||
621 | 621 | ||
622 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); | 622 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); |
623 | if (!dma) | 623 | if (!dma) |
624 | return -ENOMEM; | 624 | return BLK_STS_RESOURCE; |
625 | 625 | ||
626 | dma->cmd = HW_CMD_BLK_DISCARD; | 626 | dma->cmd = HW_CMD_BLK_DISCARD; |
627 | dma->laddr = laddr; | 627 | dma->laddr = laddr; |
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card, | |||
640 | return 0; | 640 | return 0; |
641 | } | 641 | } |
642 | 642 | ||
643 | static int rsxx_queue_dma(struct rsxx_cardinfo *card, | 643 | static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card, |
644 | struct list_head *q, | 644 | struct list_head *q, |
645 | int dir, | 645 | int dir, |
646 | unsigned int dma_off, | 646 | unsigned int dma_off, |
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card, | |||
655 | 655 | ||
656 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); | 656 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); |
657 | if (!dma) | 657 | if (!dma) |
658 | return -ENOMEM; | 658 | return BLK_STS_RESOURCE; |
659 | 659 | ||
660 | dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; | 660 | dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; |
661 | dma->laddr = laddr; | 661 | dma->laddr = laddr; |
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card, | |||
677 | return 0; | 677 | return 0; |
678 | } | 678 | } |
679 | 679 | ||
680 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | 680 | blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card, |
681 | struct bio *bio, | 681 | struct bio *bio, |
682 | atomic_t *n_dmas, | 682 | atomic_t *n_dmas, |
683 | rsxx_dma_cb cb, | 683 | rsxx_dma_cb cb, |
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
694 | unsigned int dma_len; | 694 | unsigned int dma_len; |
695 | int dma_cnt[RSXX_MAX_TARGETS]; | 695 | int dma_cnt[RSXX_MAX_TARGETS]; |
696 | int tgt; | 696 | int tgt; |
697 | int st; | 697 | blk_status_t st; |
698 | int i; | 698 | int i; |
699 | 699 | ||
700 | addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ | 700 | addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ |
@@ -769,7 +769,6 @@ bvec_err: | |||
769 | for (i = 0; i < card->n_targets; i++) | 769 | for (i = 0; i < card->n_targets; i++) |
770 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], | 770 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], |
771 | FREE_DMA); | 771 | FREE_DMA); |
772 | |||
773 | return st; | 772 | return st; |
774 | } | 773 | } |
775 | 774 | ||
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 6bbc64d0f690..277f27e673a2 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); | |||
391 | void rsxx_dma_cleanup(void); | 391 | void rsxx_dma_cleanup(void); |
392 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | 392 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); |
393 | int rsxx_dma_configure(struct rsxx_cardinfo *card); | 393 | int rsxx_dma_configure(struct rsxx_cardinfo *card); |
394 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | 394 | blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card, |
395 | struct bio *bio, | 395 | struct bio *bio, |
396 | atomic_t *n_dmas, | 396 | atomic_t *n_dmas, |
397 | rsxx_dma_cb cb, | 397 | rsxx_dma_cb cb, |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index c141cc3be22b..4b3c947697b1 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -454,7 +454,7 @@ static void process_page(unsigned long data) | |||
454 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); | 454 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); |
455 | if (control & DMASCR_HARD_ERROR) { | 455 | if (control & DMASCR_HARD_ERROR) { |
456 | /* error */ | 456 | /* error */ |
457 | bio->bi_error = -EIO; | 457 | bio->bi_status = BLK_STS_IOERR; |
458 | dev_printk(KERN_WARNING, &card->dev->dev, | 458 | dev_printk(KERN_WARNING, &card->dev->dev, |
459 | "I/O error on sector %d/%d\n", | 459 | "I/O error on sector %d/%d\n", |
460 | le32_to_cpu(desc->local_addr)>>9, | 460 | le32_to_cpu(desc->local_addr)>>9, |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 726c32e35db9..746bd8c8c09a 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -1069,20 +1069,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring) | |||
1069 | atomic_set(&blkif->drain, 0); | 1069 | atomic_set(&blkif->drain, 0); |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | /* | 1072 | static void __end_block_io_op(struct pending_req *pending_req, |
1073 | * Completion callback on the bio's. Called as bh->b_end_io() | 1073 | blk_status_t error) |
1074 | */ | ||
1075 | |||
1076 | static void __end_block_io_op(struct pending_req *pending_req, int error) | ||
1077 | { | 1074 | { |
1078 | /* An error fails the entire request. */ | 1075 | /* An error fails the entire request. */ |
1079 | if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && | 1076 | if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE && |
1080 | (error == -EOPNOTSUPP)) { | 1077 | error == BLK_STS_NOTSUPP) { |
1081 | pr_debug("flush diskcache op failed, not supported\n"); | 1078 | pr_debug("flush diskcache op failed, not supported\n"); |
1082 | xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0); | 1079 | xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0); |
1083 | pending_req->status = BLKIF_RSP_EOPNOTSUPP; | 1080 | pending_req->status = BLKIF_RSP_EOPNOTSUPP; |
1084 | } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && | 1081 | } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER && |
1085 | (error == -EOPNOTSUPP)) { | 1082 | error == BLK_STS_NOTSUPP) { |
1086 | pr_debug("write barrier op failed, not supported\n"); | 1083 | pr_debug("write barrier op failed, not supported\n"); |
1087 | xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0); | 1084 | xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0); |
1088 | pending_req->status = BLKIF_RSP_EOPNOTSUPP; | 1085 | pending_req->status = BLKIF_RSP_EOPNOTSUPP; |
@@ -1106,7 +1103,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) | |||
1106 | */ | 1103 | */ |
1107 | static void end_block_io_op(struct bio *bio) | 1104 | static void end_block_io_op(struct bio *bio) |
1108 | { | 1105 | { |
1109 | __end_block_io_op(bio->bi_private, bio->bi_error); | 1106 | __end_block_io_op(bio->bi_private, bio->bi_status); |
1110 | bio_put(bio); | 1107 | bio_put(bio); |
1111 | } | 1108 | } |
1112 | 1109 | ||
@@ -1423,7 +1420,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1423 | for (i = 0; i < nbio; i++) | 1420 | for (i = 0; i < nbio; i++) |
1424 | bio_put(biolist[i]); | 1421 | bio_put(biolist[i]); |
1425 | atomic_set(&pending_req->pendcnt, 1); | 1422 | atomic_set(&pending_req->pendcnt, 1); |
1426 | __end_block_io_op(pending_req, -EINVAL); | 1423 | __end_block_io_op(pending_req, BLK_STS_RESOURCE); |
1427 | msleep(1); /* back off a bit */ | 1424 | msleep(1); /* back off a bit */ |
1428 | return -EIO; | 1425 | return -EIO; |
1429 | } | 1426 | } |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 2f468cf86dcf..e3be666c2776 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -2006,7 +2006,7 @@ static void split_bio_end(struct bio *bio) | |||
2006 | 2006 | ||
2007 | if (atomic_dec_and_test(&split_bio->pending)) { | 2007 | if (atomic_dec_and_test(&split_bio->pending)) { |
2008 | split_bio->bio->bi_phys_segments = 0; | 2008 | split_bio->bio->bi_phys_segments = 0; |
2009 | split_bio->bio->bi_error = bio->bi_error; | 2009 | split_bio->bio->bi_status = bio->bi_status; |
2010 | bio_endio(split_bio->bio); | 2010 | bio_endio(split_bio->bio); |
2011 | kfree(split_bio); | 2011 | kfree(split_bio); |
2012 | } | 2012 | } |
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 5e44768ccffa..4e0de995cd90 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c | |||
@@ -296,8 +296,8 @@ void pblk_flush_writer(struct pblk *pblk) | |||
296 | pr_err("pblk: tear down bio failed\n"); | 296 | pr_err("pblk: tear down bio failed\n"); |
297 | } | 297 | } |
298 | 298 | ||
299 | if (bio->bi_error) | 299 | if (bio->bi_status) |
300 | pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error); | 300 | pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status); |
301 | 301 | ||
302 | bio_put(bio); | 302 | bio_put(bio); |
303 | } | 303 | } |
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 4a12f14d78c6..762c0b73cb67 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c | |||
@@ -114,7 +114,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd) | |||
114 | pblk_log_read_err(pblk, rqd); | 114 | pblk_log_read_err(pblk, rqd); |
115 | #ifdef CONFIG_NVM_DEBUG | 115 | #ifdef CONFIG_NVM_DEBUG |
116 | else | 116 | else |
117 | WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n"); | 117 | WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n"); |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | if (rqd->nr_ppas > 1) | 120 | if (rqd->nr_ppas > 1) |
@@ -123,7 +123,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd) | |||
123 | bio_put(bio); | 123 | bio_put(bio); |
124 | if (r_ctx->orig_bio) { | 124 | if (r_ctx->orig_bio) { |
125 | #ifdef CONFIG_NVM_DEBUG | 125 | #ifdef CONFIG_NVM_DEBUG |
126 | WARN_ONCE(r_ctx->orig_bio->bi_error, | 126 | WARN_ONCE(r_ctx->orig_bio->bi_status, |
127 | "pblk: corrupted read bio\n"); | 127 | "pblk: corrupted read bio\n"); |
128 | #endif | 128 | #endif |
129 | bio_endio(r_ctx->orig_bio); | 129 | bio_endio(r_ctx->orig_bio); |
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index aef6fd7c4a0c..79b90d8dbcb3 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c | |||
@@ -186,7 +186,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd) | |||
186 | } | 186 | } |
187 | #ifdef CONFIG_NVM_DEBUG | 187 | #ifdef CONFIG_NVM_DEBUG |
188 | else | 188 | else |
189 | WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n"); | 189 | WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n"); |
190 | #endif | 190 | #endif |
191 | 191 | ||
192 | pblk_complete_write(pblk, rqd, c_ctx); | 192 | pblk_complete_write(pblk, rqd, c_ctx); |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index cf0e28a0ff61..8d3b53bb3307 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
@@ -279,8 +279,8 @@ static void rrpc_end_sync_bio(struct bio *bio) | |||
279 | { | 279 | { |
280 | struct completion *waiting = bio->bi_private; | 280 | struct completion *waiting = bio->bi_private; |
281 | 281 | ||
282 | if (bio->bi_error) | 282 | if (bio->bi_status) |
283 | pr_err("nvm: gc request failed (%u).\n", bio->bi_error); | 283 | pr_err("nvm: gc request failed (%u).\n", bio->bi_status); |
284 | 284 | ||
285 | complete(waiting); | 285 | complete(waiting); |
286 | } | 286 | } |
@@ -359,7 +359,7 @@ try: | |||
359 | goto finished; | 359 | goto finished; |
360 | } | 360 | } |
361 | wait_for_completion_io(&wait); | 361 | wait_for_completion_io(&wait); |
362 | if (bio->bi_error) { | 362 | if (bio->bi_status) { |
363 | rrpc_inflight_laddr_release(rrpc, rqd); | 363 | rrpc_inflight_laddr_release(rrpc, rqd); |
364 | goto finished; | 364 | goto finished; |
365 | } | 365 | } |
@@ -385,7 +385,7 @@ try: | |||
385 | wait_for_completion_io(&wait); | 385 | wait_for_completion_io(&wait); |
386 | 386 | ||
387 | rrpc_inflight_laddr_release(rrpc, rqd); | 387 | rrpc_inflight_laddr_release(rrpc, rqd); |
388 | if (bio->bi_error) | 388 | if (bio->bi_status) |
389 | goto finished; | 389 | goto finished; |
390 | 390 | ||
391 | bio_reset(bio); | 391 | bio_reset(bio); |
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index c3ea03c9a1a8..dee542fff68e 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c) | |||
849 | 849 | ||
850 | /* Forward declarations */ | 850 | /* Forward declarations */ |
851 | 851 | ||
852 | void bch_count_io_errors(struct cache *, int, const char *); | 852 | void bch_count_io_errors(struct cache *, blk_status_t, const char *); |
853 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, | 853 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, |
854 | int, const char *); | 854 | blk_status_t, const char *); |
855 | void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); | 855 | void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t, |
856 | const char *); | ||
856 | void bch_bbio_free(struct bio *, struct cache_set *); | 857 | void bch_bbio_free(struct bio *, struct cache_set *); |
857 | struct bio *bch_bbio_alloc(struct cache_set *); | 858 | struct bio *bch_bbio_alloc(struct cache_set *); |
858 | 859 | ||
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 450d0e848ae4..866dcf78ff8e 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b) | |||
307 | bch_submit_bbio(bio, b->c, &b->key, 0); | 307 | bch_submit_bbio(bio, b->c, &b->key, 0); |
308 | closure_sync(&cl); | 308 | closure_sync(&cl); |
309 | 309 | ||
310 | if (bio->bi_error) | 310 | if (bio->bi_status) |
311 | set_btree_node_io_error(b); | 311 | set_btree_node_io_error(b); |
312 | 312 | ||
313 | bch_bbio_free(bio, b->c); | 313 | bch_bbio_free(bio, b->c); |
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio) | |||
374 | struct closure *cl = bio->bi_private; | 374 | struct closure *cl = bio->bi_private; |
375 | struct btree *b = container_of(cl, struct btree, io); | 375 | struct btree *b = container_of(cl, struct btree, io); |
376 | 376 | ||
377 | if (bio->bi_error) | 377 | if (bio->bi_status) |
378 | set_btree_node_io_error(b); | 378 | set_btree_node_io_error(b); |
379 | 379 | ||
380 | bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); | 380 | bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); |
381 | closure_put(cl); | 381 | closure_put(cl); |
382 | } | 382 | } |
383 | 383 | ||
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index db45a88c0ce9..6a9b85095e7b 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c | |||
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, | |||
50 | 50 | ||
51 | /* IO errors */ | 51 | /* IO errors */ |
52 | 52 | ||
53 | void bch_count_io_errors(struct cache *ca, int error, const char *m) | 53 | void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m) |
54 | { | 54 | { |
55 | /* | 55 | /* |
56 | * The halflife of an error is: | 56 | * The halflife of an error is: |
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m) | |||
103 | } | 103 | } |
104 | 104 | ||
105 | void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, | 105 | void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, |
106 | int error, const char *m) | 106 | blk_status_t error, const char *m) |
107 | { | 107 | { |
108 | struct bbio *b = container_of(bio, struct bbio, bio); | 108 | struct bbio *b = container_of(bio, struct bbio, bio); |
109 | struct cache *ca = PTR_CACHE(c, &b->key, 0); | 109 | struct cache *ca = PTR_CACHE(c, &b->key, 0); |
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | void bch_bbio_endio(struct cache_set *c, struct bio *bio, | 134 | void bch_bbio_endio(struct cache_set *c, struct bio *bio, |
135 | int error, const char *m) | 135 | blk_status_t error, const char *m) |
136 | { | 136 | { |
137 | struct closure *cl = bio->bi_private; | 137 | struct closure *cl = bio->bi_private; |
138 | 138 | ||
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 1198e53d5670..0352d05e495c 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio) | |||
549 | { | 549 | { |
550 | struct journal_write *w = bio->bi_private; | 550 | struct journal_write *w = bio->bi_private; |
551 | 551 | ||
552 | cache_set_err_on(bio->bi_error, w->c, "journal io error"); | 552 | cache_set_err_on(bio->bi_status, w->c, "journal io error"); |
553 | closure_put(&w->c->journal.io); | 553 | closure_put(&w->c->journal.io); |
554 | } | 554 | } |
555 | 555 | ||
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 13b8a907006d..f633b30c962e 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio) | |||
63 | struct moving_io *io = container_of(bio->bi_private, | 63 | struct moving_io *io = container_of(bio->bi_private, |
64 | struct moving_io, cl); | 64 | struct moving_io, cl); |
65 | 65 | ||
66 | if (bio->bi_error) | 66 | if (bio->bi_status) |
67 | io->op.error = bio->bi_error; | 67 | io->op.status = bio->bi_status; |
68 | else if (!KEY_DIRTY(&b->key) && | 68 | else if (!KEY_DIRTY(&b->key) && |
69 | ptr_stale(io->op.c, &b->key, 0)) { | 69 | ptr_stale(io->op.c, &b->key, 0)) { |
70 | io->op.error = -EINTR; | 70 | io->op.status = BLK_STS_IOERR; |
71 | } | 71 | } |
72 | 72 | ||
73 | bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move"); | 73 | bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move"); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void moving_init(struct moving_io *io) | 76 | static void moving_init(struct moving_io *io) |
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl) | |||
92 | struct moving_io *io = container_of(cl, struct moving_io, cl); | 92 | struct moving_io *io = container_of(cl, struct moving_io, cl); |
93 | struct data_insert_op *op = &io->op; | 93 | struct data_insert_op *op = &io->op; |
94 | 94 | ||
95 | if (!op->error) { | 95 | if (!op->status) { |
96 | moving_init(io); | 96 | moving_init(io); |
97 | 97 | ||
98 | io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); | 98 | io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 709c9cc34369..019b3df9f1c6 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl) | |||
81 | if (ret == -ESRCH) { | 81 | if (ret == -ESRCH) { |
82 | op->replace_collision = true; | 82 | op->replace_collision = true; |
83 | } else if (ret) { | 83 | } else if (ret) { |
84 | op->error = -ENOMEM; | 84 | op->status = BLK_STS_RESOURCE; |
85 | op->insert_data_done = true; | 85 | op->insert_data_done = true; |
86 | } | 86 | } |
87 | 87 | ||
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio) | |||
178 | struct closure *cl = bio->bi_private; | 178 | struct closure *cl = bio->bi_private; |
179 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); | 179 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); |
180 | 180 | ||
181 | if (bio->bi_error) { | 181 | if (bio->bi_status) { |
182 | /* TODO: We could try to recover from this. */ | 182 | /* TODO: We could try to recover from this. */ |
183 | if (op->writeback) | 183 | if (op->writeback) |
184 | op->error = bio->bi_error; | 184 | op->status = bio->bi_status; |
185 | else if (!op->replace) | 185 | else if (!op->replace) |
186 | set_closure_fn(cl, bch_data_insert_error, op->wq); | 186 | set_closure_fn(cl, bch_data_insert_error, op->wq); |
187 | else | 187 | else |
188 | set_closure_fn(cl, NULL, NULL); | 188 | set_closure_fn(cl, NULL, NULL); |
189 | } | 189 | } |
190 | 190 | ||
191 | bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache"); | 191 | bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void bch_data_insert_start(struct closure *cl) | 194 | static void bch_data_insert_start(struct closure *cl) |
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio) | |||
488 | * from the backing device. | 488 | * from the backing device. |
489 | */ | 489 | */ |
490 | 490 | ||
491 | if (bio->bi_error) | 491 | if (bio->bi_status) |
492 | s->iop.error = bio->bi_error; | 492 | s->iop.status = bio->bi_status; |
493 | else if (!KEY_DIRTY(&b->key) && | 493 | else if (!KEY_DIRTY(&b->key) && |
494 | ptr_stale(s->iop.c, &b->key, 0)) { | 494 | ptr_stale(s->iop.c, &b->key, 0)) { |
495 | atomic_long_inc(&s->iop.c->cache_read_races); | 495 | atomic_long_inc(&s->iop.c->cache_read_races); |
496 | s->iop.error = -EINTR; | 496 | s->iop.status = BLK_STS_IOERR; |
497 | } | 497 | } |
498 | 498 | ||
499 | bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache"); | 499 | bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); |
500 | } | 500 | } |
501 | 501 | ||
502 | /* | 502 | /* |
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio) | |||
593 | { | 593 | { |
594 | struct closure *cl = bio->bi_private; | 594 | struct closure *cl = bio->bi_private; |
595 | 595 | ||
596 | if (bio->bi_error) { | 596 | if (bio->bi_status) { |
597 | struct search *s = container_of(cl, struct search, cl); | 597 | struct search *s = container_of(cl, struct search, cl); |
598 | s->iop.error = bio->bi_error; | 598 | s->iop.status = bio->bi_status; |
599 | /* Only cache read errors are recoverable */ | 599 | /* Only cache read errors are recoverable */ |
600 | s->recoverable = false; | 600 | s->recoverable = false; |
601 | } | 601 | } |
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s) | |||
611 | &s->d->disk->part0, s->start_time); | 611 | &s->d->disk->part0, s->start_time); |
612 | 612 | ||
613 | trace_bcache_request_end(s->d, s->orig_bio); | 613 | trace_bcache_request_end(s->d, s->orig_bio); |
614 | s->orig_bio->bi_error = s->iop.error; | 614 | s->orig_bio->bi_status = s->iop.status; |
615 | bio_endio(s->orig_bio); | 615 | bio_endio(s->orig_bio); |
616 | s->orig_bio = NULL; | 616 | s->orig_bio = NULL; |
617 | } | 617 | } |
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio, | |||
664 | s->iop.inode = d->id; | 664 | s->iop.inode = d->id; |
665 | s->iop.write_point = hash_long((unsigned long) current, 16); | 665 | s->iop.write_point = hash_long((unsigned long) current, 16); |
666 | s->iop.write_prio = 0; | 666 | s->iop.write_prio = 0; |
667 | s->iop.error = 0; | 667 | s->iop.status = 0; |
668 | s->iop.flags = 0; | 668 | s->iop.flags = 0; |
669 | s->iop.flush_journal = op_is_flush(bio->bi_opf); | 669 | s->iop.flush_journal = op_is_flush(bio->bi_opf); |
670 | s->iop.wq = bcache_wq; | 670 | s->iop.wq = bcache_wq; |
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl) | |||
707 | /* Retry from the backing device: */ | 707 | /* Retry from the backing device: */ |
708 | trace_bcache_read_retry(s->orig_bio); | 708 | trace_bcache_read_retry(s->orig_bio); |
709 | 709 | ||
710 | s->iop.error = 0; | 710 | s->iop.status = 0; |
711 | do_bio_hook(s, s->orig_bio); | 711 | do_bio_hook(s, s->orig_bio); |
712 | 712 | ||
713 | /* XXX: invalidate cache */ | 713 | /* XXX: invalidate cache */ |
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl) | |||
767 | !s->cache_miss, s->iop.bypass); | 767 | !s->cache_miss, s->iop.bypass); |
768 | trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); | 768 | trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); |
769 | 769 | ||
770 | if (s->iop.error) | 770 | if (s->iop.status) |
771 | continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); | 771 | continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); |
772 | else if (s->iop.bio || verify(dc, &s->bio.bio)) | 772 | else if (s->iop.bio || verify(dc, &s->bio.bio)) |
773 | continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq); | 773 | continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq); |
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 1ff36875c2b3..7689176951ce 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h | |||
@@ -10,7 +10,7 @@ struct data_insert_op { | |||
10 | unsigned inode; | 10 | unsigned inode; |
11 | uint16_t write_point; | 11 | uint16_t write_point; |
12 | uint16_t write_prio; | 12 | uint16_t write_prio; |
13 | short error; | 13 | blk_status_t status; |
14 | 14 | ||
15 | union { | 15 | union { |
16 | uint16_t flags; | 16 | uint16_t flags; |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e57353e39168..fbc4f5412dec 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio) | |||
271 | { | 271 | { |
272 | struct cache *ca = bio->bi_private; | 272 | struct cache *ca = bio->bi_private; |
273 | 273 | ||
274 | bch_count_io_errors(ca, bio->bi_error, "writing superblock"); | 274 | bch_count_io_errors(ca, bio->bi_status, "writing superblock"); |
275 | closure_put(&ca->set->sb_write); | 275 | closure_put(&ca->set->sb_write); |
276 | } | 276 | } |
277 | 277 | ||
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio) | |||
321 | struct closure *cl = bio->bi_private; | 321 | struct closure *cl = bio->bi_private; |
322 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); | 322 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); |
323 | 323 | ||
324 | cache_set_err_on(bio->bi_error, c, "accessing uuids"); | 324 | cache_set_err_on(bio->bi_status, c, "accessing uuids"); |
325 | bch_bbio_free(bio, c); | 325 | bch_bbio_free(bio, c); |
326 | closure_put(cl); | 326 | closure_put(cl); |
327 | } | 327 | } |
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio) | |||
494 | { | 494 | { |
495 | struct cache *ca = bio->bi_private; | 495 | struct cache *ca = bio->bi_private; |
496 | 496 | ||
497 | cache_set_err_on(bio->bi_error, ca->set, "accessing priorities"); | 497 | cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); |
498 | bch_bbio_free(bio, ca->set); | 498 | bch_bbio_free(bio, ca->set); |
499 | closure_put(&ca->prio); | 499 | closure_put(&ca->prio); |
500 | } | 500 | } |
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 6ac2e48b9235..42c66e76f05e 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio) | |||
167 | struct keybuf_key *w = bio->bi_private; | 167 | struct keybuf_key *w = bio->bi_private; |
168 | struct dirty_io *io = w->private; | 168 | struct dirty_io *io = w->private; |
169 | 169 | ||
170 | if (bio->bi_error) | 170 | if (bio->bi_status) |
171 | SET_KEY_DIRTY(&w->key, false); | 171 | SET_KEY_DIRTY(&w->key, false); |
172 | 172 | ||
173 | closure_put(&io->cl); | 173 | closure_put(&io->cl); |
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio) | |||
195 | struct dirty_io *io = w->private; | 195 | struct dirty_io *io = w->private; |
196 | 196 | ||
197 | bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), | 197 | bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), |
198 | bio->bi_error, "reading dirty data from cache"); | 198 | bio->bi_status, "reading dirty data from cache"); |
199 | 199 | ||
200 | dirty_endio(bio); | 200 | dirty_endio(bio); |
201 | } | 201 | } |
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c index ae7da2c30a57..82d27384d31f 100644 --- a/drivers/md/dm-bio-prison-v1.c +++ b/drivers/md/dm-bio-prison-v1.c | |||
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison, | |||
229 | EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); | 229 | EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); |
230 | 230 | ||
231 | void dm_cell_error(struct dm_bio_prison *prison, | 231 | void dm_cell_error(struct dm_bio_prison *prison, |
232 | struct dm_bio_prison_cell *cell, int error) | 232 | struct dm_bio_prison_cell *cell, blk_status_t error) |
233 | { | 233 | { |
234 | struct bio_list bios; | 234 | struct bio_list bios; |
235 | struct bio *bio; | 235 | struct bio *bio; |
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison, | |||
238 | dm_cell_release(prison, cell, &bios); | 238 | dm_cell_release(prison, cell, &bios); |
239 | 239 | ||
240 | while ((bio = bio_list_pop(&bios))) { | 240 | while ((bio = bio_list_pop(&bios))) { |
241 | bio->bi_error = error; | 241 | bio->bi_status = error; |
242 | bio_endio(bio); | 242 | bio_endio(bio); |
243 | } | 243 | } |
244 | } | 244 | } |
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h index cddd4ac07e2c..cec52ac5e1ae 100644 --- a/drivers/md/dm-bio-prison-v1.h +++ b/drivers/md/dm-bio-prison-v1.h | |||
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison, | |||
91 | struct dm_bio_prison_cell *cell, | 91 | struct dm_bio_prison_cell *cell, |
92 | struct bio_list *inmates); | 92 | struct bio_list *inmates); |
93 | void dm_cell_error(struct dm_bio_prison *prison, | 93 | void dm_cell_error(struct dm_bio_prison *prison, |
94 | struct dm_bio_prison_cell *cell, int error); | 94 | struct dm_bio_prison_cell *cell, blk_status_t error); |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Visits the cell and then releases. Guarantees no new inmates are | 97 | * Visits the cell and then releases. Guarantees no new inmates are |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cd8139593ccd..0902d2fd1743 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -145,8 +145,8 @@ struct dm_buffer { | |||
145 | enum data_mode data_mode; | 145 | enum data_mode data_mode; |
146 | unsigned char list_mode; /* LIST_* */ | 146 | unsigned char list_mode; /* LIST_* */ |
147 | unsigned hold_count; | 147 | unsigned hold_count; |
148 | int read_error; | 148 | blk_status_t read_error; |
149 | int write_error; | 149 | blk_status_t write_error; |
150 | unsigned long state; | 150 | unsigned long state; |
151 | unsigned long last_accessed; | 151 | unsigned long last_accessed; |
152 | struct dm_bufio_client *c; | 152 | struct dm_bufio_client *c; |
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context) | |||
555 | { | 555 | { |
556 | struct dm_buffer *b = context; | 556 | struct dm_buffer *b = context; |
557 | 557 | ||
558 | b->bio.bi_error = error ? -EIO : 0; | 558 | b->bio.bi_status = error ? BLK_STS_IOERR : 0; |
559 | b->bio.bi_end_io(&b->bio); | 559 | b->bio.bi_end_io(&b->bio); |
560 | } | 560 | } |
561 | 561 | ||
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, | |||
588 | 588 | ||
589 | r = dm_io(&io_req, 1, ®ion, NULL); | 589 | r = dm_io(&io_req, 1, ®ion, NULL); |
590 | if (r) { | 590 | if (r) { |
591 | b->bio.bi_error = r; | 591 | b->bio.bi_status = errno_to_blk_status(r); |
592 | end_io(&b->bio); | 592 | end_io(&b->bio); |
593 | } | 593 | } |
594 | } | 594 | } |
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, | |||
596 | static void inline_endio(struct bio *bio) | 596 | static void inline_endio(struct bio *bio) |
597 | { | 597 | { |
598 | bio_end_io_t *end_fn = bio->bi_private; | 598 | bio_end_io_t *end_fn = bio->bi_private; |
599 | int error = bio->bi_error; | 599 | blk_status_t status = bio->bi_status; |
600 | 600 | ||
601 | /* | 601 | /* |
602 | * Reset the bio to free any attached resources | 602 | * Reset the bio to free any attached resources |
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio) | |||
604 | */ | 604 | */ |
605 | bio_reset(bio); | 605 | bio_reset(bio); |
606 | 606 | ||
607 | bio->bi_error = error; | 607 | bio->bi_status = status; |
608 | end_fn(bio); | 608 | end_fn(bio); |
609 | } | 609 | } |
610 | 610 | ||
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio) | |||
685 | { | 685 | { |
686 | struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); | 686 | struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); |
687 | 687 | ||
688 | b->write_error = bio->bi_error; | 688 | b->write_error = bio->bi_status; |
689 | if (unlikely(bio->bi_error)) { | 689 | if (unlikely(bio->bi_status)) { |
690 | struct dm_bufio_client *c = b->c; | 690 | struct dm_bufio_client *c = b->c; |
691 | int error = bio->bi_error; | 691 | |
692 | (void)cmpxchg(&c->async_write_error, 0, error); | 692 | (void)cmpxchg(&c->async_write_error, 0, |
693 | blk_status_to_errno(bio->bi_status)); | ||
693 | } | 694 | } |
694 | 695 | ||
695 | BUG_ON(!test_bit(B_WRITING, &b->state)); | 696 | BUG_ON(!test_bit(B_WRITING, &b->state)); |
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio) | |||
1063 | { | 1064 | { |
1064 | struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); | 1065 | struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); |
1065 | 1066 | ||
1066 | b->read_error = bio->bi_error; | 1067 | b->read_error = bio->bi_status; |
1067 | 1068 | ||
1068 | BUG_ON(!test_bit(B_READING, &b->state)); | 1069 | BUG_ON(!test_bit(B_READING, &b->state)); |
1069 | 1070 | ||
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block, | |||
1107 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); | 1108 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); |
1108 | 1109 | ||
1109 | if (b->read_error) { | 1110 | if (b->read_error) { |
1110 | int error = b->read_error; | 1111 | int error = blk_status_to_errno(b->read_error); |
1111 | 1112 | ||
1112 | dm_bufio_release(b); | 1113 | dm_bufio_release(b); |
1113 | 1114 | ||
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); | |||
1257 | */ | 1258 | */ |
1258 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) | 1259 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) |
1259 | { | 1260 | { |
1260 | int a, f; | 1261 | blk_status_t a; |
1262 | int f; | ||
1261 | unsigned long buffers_processed = 0; | 1263 | unsigned long buffers_processed = 0; |
1262 | struct dm_buffer *b, *tmp; | 1264 | struct dm_buffer *b, *tmp; |
1263 | 1265 | ||
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index c48612e6d525..c5ea03fc7ee1 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len) | |||
119 | */ | 119 | */ |
120 | struct continuation { | 120 | struct continuation { |
121 | struct work_struct ws; | 121 | struct work_struct ws; |
122 | int input; | 122 | blk_status_t input; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | static inline void init_continuation(struct continuation *k, | 125 | static inline void init_continuation(struct continuation *k, |
@@ -145,7 +145,7 @@ struct batcher { | |||
145 | /* | 145 | /* |
146 | * The operation that everyone is waiting for. | 146 | * The operation that everyone is waiting for. |
147 | */ | 147 | */ |
148 | int (*commit_op)(void *context); | 148 | blk_status_t (*commit_op)(void *context); |
149 | void *commit_context; | 149 | void *commit_context; |
150 | 150 | ||
151 | /* | 151 | /* |
@@ -171,8 +171,7 @@ struct batcher { | |||
171 | static void __commit(struct work_struct *_ws) | 171 | static void __commit(struct work_struct *_ws) |
172 | { | 172 | { |
173 | struct batcher *b = container_of(_ws, struct batcher, commit_work); | 173 | struct batcher *b = container_of(_ws, struct batcher, commit_work); |
174 | 174 | blk_status_t r; | |
175 | int r; | ||
176 | unsigned long flags; | 175 | unsigned long flags; |
177 | struct list_head work_items; | 176 | struct list_head work_items; |
178 | struct work_struct *ws, *tmp; | 177 | struct work_struct *ws, *tmp; |
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws) | |||
205 | 204 | ||
206 | while ((bio = bio_list_pop(&bios))) { | 205 | while ((bio = bio_list_pop(&bios))) { |
207 | if (r) { | 206 | if (r) { |
208 | bio->bi_error = r; | 207 | bio->bi_status = r; |
209 | bio_endio(bio); | 208 | bio_endio(bio); |
210 | } else | 209 | } else |
211 | b->issue_op(bio, b->issue_context); | 210 | b->issue_op(bio, b->issue_context); |
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws) | |||
213 | } | 212 | } |
214 | 213 | ||
215 | static void batcher_init(struct batcher *b, | 214 | static void batcher_init(struct batcher *b, |
216 | int (*commit_op)(void *), | 215 | blk_status_t (*commit_op)(void *), |
217 | void *commit_context, | 216 | void *commit_context, |
218 | void (*issue_op)(struct bio *bio, void *), | 217 | void (*issue_op)(struct bio *bio, void *), |
219 | void *issue_context, | 218 | void *issue_context, |
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio) | |||
955 | 954 | ||
956 | dm_unhook_bio(&pb->hook_info, bio); | 955 | dm_unhook_bio(&pb->hook_info, bio); |
957 | 956 | ||
958 | if (bio->bi_error) { | 957 | if (bio->bi_status) { |
959 | bio_endio(bio); | 958 | bio_endio(bio); |
960 | return; | 959 | return; |
961 | } | 960 | } |
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) | |||
1220 | struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k); | 1219 | struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k); |
1221 | 1220 | ||
1222 | if (read_err || write_err) | 1221 | if (read_err || write_err) |
1223 | mg->k.input = -EIO; | 1222 | mg->k.input = BLK_STS_IOERR; |
1224 | 1223 | ||
1225 | queue_continuation(mg->cache->wq, &mg->k); | 1224 | queue_continuation(mg->cache->wq, &mg->k); |
1226 | } | 1225 | } |
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio) | |||
1266 | 1265 | ||
1267 | dm_unhook_bio(&pb->hook_info, bio); | 1266 | dm_unhook_bio(&pb->hook_info, bio); |
1268 | 1267 | ||
1269 | if (bio->bi_error) | 1268 | if (bio->bi_status) |
1270 | mg->k.input = bio->bi_error; | 1269 | mg->k.input = bio->bi_status; |
1271 | 1270 | ||
1272 | queue_continuation(mg->cache->wq, &mg->k); | 1271 | queue_continuation(mg->cache->wq, &mg->k); |
1273 | } | 1272 | } |
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success) | |||
1323 | if (mg->overwrite_bio) { | 1322 | if (mg->overwrite_bio) { |
1324 | if (success) | 1323 | if (success) |
1325 | force_set_dirty(cache, cblock); | 1324 | force_set_dirty(cache, cblock); |
1325 | else if (mg->k.input) | ||
1326 | mg->overwrite_bio->bi_status = mg->k.input; | ||
1326 | else | 1327 | else |
1327 | mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO); | 1328 | mg->overwrite_bio->bi_status = BLK_STS_IOERR; |
1328 | bio_endio(mg->overwrite_bio); | 1329 | bio_endio(mg->overwrite_bio); |
1329 | } else { | 1330 | } else { |
1330 | if (success) | 1331 | if (success) |
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws) | |||
1504 | r = copy(mg, is_policy_promote); | 1505 | r = copy(mg, is_policy_promote); |
1505 | if (r) { | 1506 | if (r) { |
1506 | DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); | 1507 | DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); |
1507 | mg->k.input = -EIO; | 1508 | mg->k.input = BLK_STS_IOERR; |
1508 | mg_complete(mg, false); | 1509 | mg_complete(mg, false); |
1509 | } | 1510 | } |
1510 | } | 1511 | } |
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown) | |||
1907 | /* | 1908 | /* |
1908 | * Used by the batcher. | 1909 | * Used by the batcher. |
1909 | */ | 1910 | */ |
1910 | static int commit_op(void *context) | 1911 | static blk_status_t commit_op(void *context) |
1911 | { | 1912 | { |
1912 | struct cache *cache = context; | 1913 | struct cache *cache = context; |
1913 | 1914 | ||
1914 | if (dm_cache_changed_this_transaction(cache->cmd)) | 1915 | if (dm_cache_changed_this_transaction(cache->cmd)) |
1915 | return commit(cache, false); | 1916 | return errno_to_blk_status(commit(cache, false)); |
1916 | 1917 | ||
1917 | return 0; | 1918 | return 0; |
1918 | } | 1919 | } |
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache) | |||
2018 | bio_list_init(&cache->deferred_bios); | 2019 | bio_list_init(&cache->deferred_bios); |
2019 | 2020 | ||
2020 | while ((bio = bio_list_pop(&bios))) { | 2021 | while ((bio = bio_list_pop(&bios))) { |
2021 | bio->bi_error = DM_ENDIO_REQUEUE; | 2022 | bio->bi_status = BLK_STS_DM_REQUEUE; |
2022 | bio_endio(bio); | 2023 | bio_endio(bio); |
2023 | } | 2024 | } |
2024 | } | 2025 | } |
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2820 | return r; | 2821 | return r; |
2821 | } | 2822 | } |
2822 | 2823 | ||
2823 | static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error) | 2824 | static int cache_end_io(struct dm_target *ti, struct bio *bio, |
2825 | blk_status_t *error) | ||
2824 | { | 2826 | { |
2825 | struct cache *cache = ti->private; | 2827 | struct cache *cache = ti->private; |
2826 | unsigned long flags; | 2828 | unsigned long flags; |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f4b51809db21..586cef085c6a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -71,7 +71,7 @@ struct dm_crypt_io { | |||
71 | struct convert_context ctx; | 71 | struct convert_context ctx; |
72 | 72 | ||
73 | atomic_t io_pending; | 73 | atomic_t io_pending; |
74 | int error; | 74 | blk_status_t error; |
75 | sector_t sector; | 75 | sector_t sector; |
76 | 76 | ||
77 | struct rb_node rb_node; | 77 | struct rb_node rb_node; |
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_ | |||
1292 | /* | 1292 | /* |
1293 | * Encrypt / decrypt data from one bio to another one (can be the same one) | 1293 | * Encrypt / decrypt data from one bio to another one (can be the same one) |
1294 | */ | 1294 | */ |
1295 | static int crypt_convert(struct crypt_config *cc, | 1295 | static blk_status_t crypt_convert(struct crypt_config *cc, |
1296 | struct convert_context *ctx) | 1296 | struct convert_context *ctx) |
1297 | { | 1297 | { |
1298 | unsigned int tag_offset = 0; | 1298 | unsigned int tag_offset = 0; |
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc, | |||
1343 | */ | 1343 | */ |
1344 | case -EBADMSG: | 1344 | case -EBADMSG: |
1345 | atomic_dec(&ctx->cc_pending); | 1345 | atomic_dec(&ctx->cc_pending); |
1346 | return -EILSEQ; | 1346 | return BLK_STS_PROTECTION; |
1347 | /* | 1347 | /* |
1348 | * There was an error while processing the request. | 1348 | * There was an error while processing the request. |
1349 | */ | 1349 | */ |
1350 | default: | 1350 | default: |
1351 | atomic_dec(&ctx->cc_pending); | 1351 | atomic_dec(&ctx->cc_pending); |
1352 | return -EIO; | 1352 | return BLK_STS_IOERR; |
1353 | } | 1353 | } |
1354 | } | 1354 | } |
1355 | 1355 | ||
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) | |||
1463 | { | 1463 | { |
1464 | struct crypt_config *cc = io->cc; | 1464 | struct crypt_config *cc = io->cc; |
1465 | struct bio *base_bio = io->base_bio; | 1465 | struct bio *base_bio = io->base_bio; |
1466 | int error = io->error; | 1466 | blk_status_t error = io->error; |
1467 | 1467 | ||
1468 | if (!atomic_dec_and_test(&io->io_pending)) | 1468 | if (!atomic_dec_and_test(&io->io_pending)) |
1469 | return; | 1469 | return; |
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) | |||
1476 | else | 1476 | else |
1477 | kfree(io->integrity_metadata); | 1477 | kfree(io->integrity_metadata); |
1478 | 1478 | ||
1479 | base_bio->bi_error = error; | 1479 | base_bio->bi_status = error; |
1480 | bio_endio(base_bio); | 1480 | bio_endio(base_bio); |
1481 | } | 1481 | } |
1482 | 1482 | ||
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone) | |||
1502 | struct dm_crypt_io *io = clone->bi_private; | 1502 | struct dm_crypt_io *io = clone->bi_private; |
1503 | struct crypt_config *cc = io->cc; | 1503 | struct crypt_config *cc = io->cc; |
1504 | unsigned rw = bio_data_dir(clone); | 1504 | unsigned rw = bio_data_dir(clone); |
1505 | int error; | 1505 | blk_status_t error; |
1506 | 1506 | ||
1507 | /* | 1507 | /* |
1508 | * free the processed pages | 1508 | * free the processed pages |
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone) | |||
1510 | if (rw == WRITE) | 1510 | if (rw == WRITE) |
1511 | crypt_free_buffer_pages(cc, clone); | 1511 | crypt_free_buffer_pages(cc, clone); |
1512 | 1512 | ||
1513 | error = clone->bi_error; | 1513 | error = clone->bi_status; |
1514 | bio_put(clone); | 1514 | bio_put(clone); |
1515 | 1515 | ||
1516 | if (rw == READ && !error) { | 1516 | if (rw == READ && !error) { |
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work) | |||
1570 | 1570 | ||
1571 | crypt_inc_pending(io); | 1571 | crypt_inc_pending(io); |
1572 | if (kcryptd_io_read(io, GFP_NOIO)) | 1572 | if (kcryptd_io_read(io, GFP_NOIO)) |
1573 | io->error = -ENOMEM; | 1573 | io->error = BLK_STS_RESOURCE; |
1574 | crypt_dec_pending(io); | 1574 | crypt_dec_pending(io); |
1575 | } | 1575 | } |
1576 | 1576 | ||
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) | |||
1656 | sector_t sector; | 1656 | sector_t sector; |
1657 | struct rb_node **rbp, *parent; | 1657 | struct rb_node **rbp, *parent; |
1658 | 1658 | ||
1659 | if (unlikely(io->error < 0)) { | 1659 | if (unlikely(io->error)) { |
1660 | crypt_free_buffer_pages(cc, clone); | 1660 | crypt_free_buffer_pages(cc, clone); |
1661 | bio_put(clone); | 1661 | bio_put(clone); |
1662 | crypt_dec_pending(io); | 1662 | crypt_dec_pending(io); |
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1697 | struct bio *clone; | 1697 | struct bio *clone; |
1698 | int crypt_finished; | 1698 | int crypt_finished; |
1699 | sector_t sector = io->sector; | 1699 | sector_t sector = io->sector; |
1700 | int r; | 1700 | blk_status_t r; |
1701 | 1701 | ||
1702 | /* | 1702 | /* |
1703 | * Prevent io from disappearing until this function completes. | 1703 | * Prevent io from disappearing until this function completes. |
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1707 | 1707 | ||
1708 | clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); | 1708 | clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); |
1709 | if (unlikely(!clone)) { | 1709 | if (unlikely(!clone)) { |
1710 | io->error = -EIO; | 1710 | io->error = BLK_STS_IOERR; |
1711 | goto dec; | 1711 | goto dec; |
1712 | } | 1712 | } |
1713 | 1713 | ||
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1718 | 1718 | ||
1719 | crypt_inc_pending(io); | 1719 | crypt_inc_pending(io); |
1720 | r = crypt_convert(cc, &io->ctx); | 1720 | r = crypt_convert(cc, &io->ctx); |
1721 | if (r < 0) | 1721 | if (r) |
1722 | io->error = r; | 1722 | io->error = r; |
1723 | crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); | 1723 | crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); |
1724 | 1724 | ||
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io) | |||
1740 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) | 1740 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
1741 | { | 1741 | { |
1742 | struct crypt_config *cc = io->cc; | 1742 | struct crypt_config *cc = io->cc; |
1743 | int r = 0; | 1743 | blk_status_t r; |
1744 | 1744 | ||
1745 | crypt_inc_pending(io); | 1745 | crypt_inc_pending(io); |
1746 | 1746 | ||
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) | |||
1748 | io->sector); | 1748 | io->sector); |
1749 | 1749 | ||
1750 | r = crypt_convert(cc, &io->ctx); | 1750 | r = crypt_convert(cc, &io->ctx); |
1751 | if (r < 0) | 1751 | if (r) |
1752 | io->error = r; | 1752 | io->error = r; |
1753 | 1753 | ||
1754 | if (atomic_dec_and_test(&io->ctx.cc_pending)) | 1754 | if (atomic_dec_and_test(&io->ctx.cc_pending)) |
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
1781 | if (error == -EBADMSG) { | 1781 | if (error == -EBADMSG) { |
1782 | DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", | 1782 | DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", |
1783 | (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); | 1783 | (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); |
1784 | io->error = -EILSEQ; | 1784 | io->error = BLK_STS_PROTECTION; |
1785 | } else if (error < 0) | 1785 | } else if (error < 0) |
1786 | io->error = -EIO; | 1786 | io->error = BLK_STS_IOERR; |
1787 | 1787 | ||
1788 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); | 1788 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); |
1789 | 1789 | ||
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c9539917a59b..3d04d5ce19d9 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -358,7 +358,8 @@ map_bio: | |||
358 | return DM_MAPIO_REMAPPED; | 358 | return DM_MAPIO_REMAPPED; |
359 | } | 359 | } |
360 | 360 | ||
361 | static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error) | 361 | static int flakey_end_io(struct dm_target *ti, struct bio *bio, |
362 | blk_status_t *error) | ||
362 | { | 363 | { |
363 | struct flakey_c *fc = ti->private; | 364 | struct flakey_c *fc = ti->private; |
364 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 365 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); |
@@ -377,7 +378,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error) | |||
377 | * Error read during the down_interval if drop_writes | 378 | * Error read during the down_interval if drop_writes |
378 | * and error_writes were not configured. | 379 | * and error_writes were not configured. |
379 | */ | 380 | */ |
380 | *error = -EIO; | 381 | *error = BLK_STS_IOERR; |
381 | } | 382 | } |
382 | } | 383 | } |
383 | 384 | ||
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index ee78fb471229..ccc6ef4d00b9 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -246,7 +246,7 @@ struct dm_integrity_io { | |||
246 | unsigned metadata_offset; | 246 | unsigned metadata_offset; |
247 | 247 | ||
248 | atomic_t in_flight; | 248 | atomic_t in_flight; |
249 | int bi_error; | 249 | blk_status_t bi_status; |
250 | 250 | ||
251 | struct completion *completion; | 251 | struct completion *completion; |
252 | 252 | ||
@@ -1114,8 +1114,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io * | |||
1114 | static void do_endio(struct dm_integrity_c *ic, struct bio *bio) | 1114 | static void do_endio(struct dm_integrity_c *ic, struct bio *bio) |
1115 | { | 1115 | { |
1116 | int r = dm_integrity_failed(ic); | 1116 | int r = dm_integrity_failed(ic); |
1117 | if (unlikely(r) && !bio->bi_error) | 1117 | if (unlikely(r) && !bio->bi_status) |
1118 | bio->bi_error = r; | 1118 | bio->bi_status = errno_to_blk_status(r); |
1119 | bio_endio(bio); | 1119 | bio_endio(bio); |
1120 | } | 1120 | } |
1121 | 1121 | ||
@@ -1123,7 +1123,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di | |||
1123 | { | 1123 | { |
1124 | struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); | 1124 | struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
1125 | 1125 | ||
1126 | if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic))) | 1126 | if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) |
1127 | submit_flush_bio(ic, dio); | 1127 | submit_flush_bio(ic, dio); |
1128 | else | 1128 | else |
1129 | do_endio(ic, bio); | 1129 | do_endio(ic, bio); |
@@ -1142,9 +1142,9 @@ static void dec_in_flight(struct dm_integrity_io *dio) | |||
1142 | 1142 | ||
1143 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); | 1143 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
1144 | 1144 | ||
1145 | if (unlikely(dio->bi_error) && !bio->bi_error) | 1145 | if (unlikely(dio->bi_status) && !bio->bi_status) |
1146 | bio->bi_error = dio->bi_error; | 1146 | bio->bi_status = dio->bi_status; |
1147 | if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { | 1147 | if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { |
1148 | dio->range.logical_sector += dio->range.n_sectors; | 1148 | dio->range.logical_sector += dio->range.n_sectors; |
1149 | bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); | 1149 | bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); |
1150 | INIT_WORK(&dio->work, integrity_bio_wait); | 1150 | INIT_WORK(&dio->work, integrity_bio_wait); |
@@ -1318,7 +1318,7 @@ skip_io: | |||
1318 | dec_in_flight(dio); | 1318 | dec_in_flight(dio); |
1319 | return; | 1319 | return; |
1320 | error: | 1320 | error: |
1321 | dio->bi_error = r; | 1321 | dio->bi_status = errno_to_blk_status(r); |
1322 | dec_in_flight(dio); | 1322 | dec_in_flight(dio); |
1323 | } | 1323 | } |
1324 | 1324 | ||
@@ -1331,7 +1331,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio) | |||
1331 | sector_t area, offset; | 1331 | sector_t area, offset; |
1332 | 1332 | ||
1333 | dio->ic = ic; | 1333 | dio->ic = ic; |
1334 | dio->bi_error = 0; | 1334 | dio->bi_status = 0; |
1335 | 1335 | ||
1336 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { | 1336 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { |
1337 | submit_flush_bio(ic, dio); | 1337 | submit_flush_bio(ic, dio); |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3702e502466d..c8f8f3004085 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -124,7 +124,7 @@ static void complete_io(struct io *io) | |||
124 | fn(error_bits, context); | 124 | fn(error_bits, context); |
125 | } | 125 | } |
126 | 126 | ||
127 | static void dec_count(struct io *io, unsigned int region, int error) | 127 | static void dec_count(struct io *io, unsigned int region, blk_status_t error) |
128 | { | 128 | { |
129 | if (error) | 129 | if (error) |
130 | set_bit(region, &io->error_bits); | 130 | set_bit(region, &io->error_bits); |
@@ -137,9 +137,9 @@ static void endio(struct bio *bio) | |||
137 | { | 137 | { |
138 | struct io *io; | 138 | struct io *io; |
139 | unsigned region; | 139 | unsigned region; |
140 | int error; | 140 | blk_status_t error; |
141 | 141 | ||
142 | if (bio->bi_error && bio_data_dir(bio) == READ) | 142 | if (bio->bi_status && bio_data_dir(bio) == READ) |
143 | zero_fill_bio(bio); | 143 | zero_fill_bio(bio); |
144 | 144 | ||
145 | /* | 145 | /* |
@@ -147,7 +147,7 @@ static void endio(struct bio *bio) | |||
147 | */ | 147 | */ |
148 | retrieve_io_and_region_from_bio(bio, &io, ®ion); | 148 | retrieve_io_and_region_from_bio(bio, &io, ®ion); |
149 | 149 | ||
150 | error = bio->bi_error; | 150 | error = bio->bi_status; |
151 | bio_put(bio); | 151 | bio_put(bio); |
152 | 152 | ||
153 | dec_count(io, region, error); | 153 | dec_count(io, region, error); |
@@ -319,7 +319,7 @@ static void do_region(int op, int op_flags, unsigned region, | |||
319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || | 319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || |
320 | op == REQ_OP_WRITE_SAME) && | 320 | op == REQ_OP_WRITE_SAME) && |
321 | special_cmd_max_sectors == 0) { | 321 | special_cmd_max_sectors == 0) { |
322 | dec_count(io, region, -EOPNOTSUPP); | 322 | dec_count(io, region, BLK_STS_NOTSUPP); |
323 | return; | 323 | return; |
324 | } | 324 | } |
325 | 325 | ||
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index cc57c7fa1268..a1da0eb58a93 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c | |||
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio) | |||
150 | { | 150 | { |
151 | struct log_writes_c *lc = bio->bi_private; | 151 | struct log_writes_c *lc = bio->bi_private; |
152 | 152 | ||
153 | if (bio->bi_error) { | 153 | if (bio->bi_status) { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | 155 | ||
156 | DMERR("Error writing log block, error=%d", bio->bi_error); | 156 | DMERR("Error writing log block, error=%d", bio->bi_status); |
157 | spin_lock_irqsave(&lc->blocks_lock, flags); | 157 | spin_lock_irqsave(&lc->blocks_lock, flags); |
158 | lc->logging_enabled = false; | 158 | lc->logging_enabled = false; |
159 | spin_unlock_irqrestore(&lc->blocks_lock, flags); | 159 | spin_unlock_irqrestore(&lc->blocks_lock, flags); |
@@ -664,7 +664,8 @@ map_bio: | |||
664 | return DM_MAPIO_REMAPPED; | 664 | return DM_MAPIO_REMAPPED; |
665 | } | 665 | } |
666 | 666 | ||
667 | static int normal_end_io(struct dm_target *ti, struct bio *bio, int *error) | 667 | static int normal_end_io(struct dm_target *ti, struct bio *bio, |
668 | blk_status_t *error) | ||
668 | { | 669 | { |
669 | struct log_writes_c *lc = ti->private; | 670 | struct log_writes_c *lc = ti->private; |
670 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 671 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 39262e344ae1..a7d2e0840cc5 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -565,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m | |||
565 | mpio->pgpath = pgpath; | 565 | mpio->pgpath = pgpath; |
566 | mpio->nr_bytes = nr_bytes; | 566 | mpio->nr_bytes = nr_bytes; |
567 | 567 | ||
568 | bio->bi_error = 0; | 568 | bio->bi_status = 0; |
569 | bio->bi_bdev = pgpath->path.dev->bdev; | 569 | bio->bi_bdev = pgpath->path.dev->bdev; |
570 | bio->bi_opf |= REQ_FAILFAST_TRANSPORT; | 570 | bio->bi_opf |= REQ_FAILFAST_TRANSPORT; |
571 | 571 | ||
@@ -623,10 +623,10 @@ static void process_queued_bios(struct work_struct *work) | |||
623 | r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio)); | 623 | r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio)); |
624 | switch (r) { | 624 | switch (r) { |
625 | case DM_MAPIO_KILL: | 625 | case DM_MAPIO_KILL: |
626 | r = -EIO; | 626 | bio->bi_status = BLK_STS_IOERR; |
627 | /*FALLTHRU*/ | 627 | bio_endio(bio); |
628 | case DM_MAPIO_REQUEUE: | 628 | case DM_MAPIO_REQUEUE: |
629 | bio->bi_error = r; | 629 | bio->bi_status = BLK_STS_DM_REQUEUE; |
630 | bio_endio(bio); | 630 | bio_endio(bio); |
631 | break; | 631 | break; |
632 | case DM_MAPIO_REMAPPED: | 632 | case DM_MAPIO_REMAPPED: |
@@ -1510,7 +1510,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, | |||
1510 | return r; | 1510 | return r; |
1511 | } | 1511 | } |
1512 | 1512 | ||
1513 | static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error) | 1513 | static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, |
1514 | blk_status_t *error) | ||
1514 | { | 1515 | { |
1515 | struct multipath *m = ti->private; | 1516 | struct multipath *m = ti->private; |
1516 | struct dm_mpath_io *mpio = get_mpio_from_bio(clone); | 1517 | struct dm_mpath_io *mpio = get_mpio_from_bio(clone); |
@@ -1518,7 +1519,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er | |||
1518 | unsigned long flags; | 1519 | unsigned long flags; |
1519 | int r = DM_ENDIO_DONE; | 1520 | int r = DM_ENDIO_DONE; |
1520 | 1521 | ||
1521 | if (!*error || noretry_error(errno_to_blk_status(*error))) | 1522 | if (!*error || noretry_error(*error)) |
1522 | goto done; | 1523 | goto done; |
1523 | 1524 | ||
1524 | if (pgpath) | 1525 | if (pgpath) |
@@ -1527,7 +1528,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er | |||
1527 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1528 | if (atomic_read(&m->nr_valid_paths) == 0 && |
1528 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { | 1529 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { |
1529 | dm_report_EIO(m); | 1530 | dm_report_EIO(m); |
1530 | *error = -EIO; | 1531 | *error = BLK_STS_IOERR; |
1531 | goto done; | 1532 | goto done; |
1532 | } | 1533 | } |
1533 | 1534 | ||
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 77bcf50ce75f..0822e4a6f67d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio) | |||
490 | * If device is suspended, complete the bio. | 490 | * If device is suspended, complete the bio. |
491 | */ | 491 | */ |
492 | if (dm_noflush_suspending(ms->ti)) | 492 | if (dm_noflush_suspending(ms->ti)) |
493 | bio->bi_error = DM_ENDIO_REQUEUE; | 493 | bio->bi_status = BLK_STS_DM_REQUEUE; |
494 | else | 494 | else |
495 | bio->bi_error = -EIO; | 495 | bio->bi_status = BLK_STS_IOERR; |
496 | 496 | ||
497 | bio_endio(bio); | 497 | bio_endio(bio); |
498 | return; | 498 | return; |
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context) | |||
626 | * degrade the array. | 626 | * degrade the array. |
627 | */ | 627 | */ |
628 | if (bio_op(bio) == REQ_OP_DISCARD) { | 628 | if (bio_op(bio) == REQ_OP_DISCARD) { |
629 | bio->bi_error = -EOPNOTSUPP; | 629 | bio->bi_status = BLK_STS_NOTSUPP; |
630 | bio_endio(bio); | 630 | bio_endio(bio); |
631 | return; | 631 | return; |
632 | } | 632 | } |
@@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) | |||
1236 | return DM_MAPIO_REMAPPED; | 1236 | return DM_MAPIO_REMAPPED; |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error) | 1239 | static int mirror_end_io(struct dm_target *ti, struct bio *bio, |
1240 | blk_status_t *error) | ||
1240 | { | 1241 | { |
1241 | int rw = bio_data_dir(bio); | 1242 | int rw = bio_data_dir(bio); |
1242 | struct mirror_set *ms = (struct mirror_set *) ti->private; | 1243 | struct mirror_set *ms = (struct mirror_set *) ti->private; |
@@ -1255,7 +1256,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error) | |||
1255 | return DM_ENDIO_DONE; | 1256 | return DM_ENDIO_DONE; |
1256 | } | 1257 | } |
1257 | 1258 | ||
1258 | if (*error == -EOPNOTSUPP) | 1259 | if (*error == BLK_STS_NOTSUPP) |
1259 | return DM_ENDIO_DONE; | 1260 | return DM_ENDIO_DONE; |
1260 | 1261 | ||
1261 | if (bio->bi_opf & REQ_RAHEAD) | 1262 | if (bio->bi_opf & REQ_RAHEAD) |
@@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error) | |||
1277 | bd = &bio_record->details; | 1278 | bd = &bio_record->details; |
1278 | 1279 | ||
1279 | dm_bio_restore(bd, bio); | 1280 | dm_bio_restore(bd, bio); |
1280 | bio->bi_error = 0; | 1281 | bio->bi_status = 0; |
1281 | 1282 | ||
1282 | queue_bio(ms, bio, rw); | 1283 | queue_bio(ms, bio, rw); |
1283 | return DM_ENDIO_INCOMPLETE; | 1284 | return DM_ENDIO_INCOMPLETE; |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 63402f8a38de..fafd5326e572 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone) | |||
119 | struct dm_rq_target_io *tio = info->tio; | 119 | struct dm_rq_target_io *tio = info->tio; |
120 | struct bio *bio = info->orig; | 120 | struct bio *bio = info->orig; |
121 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; | 121 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; |
122 | blk_status_t error = errno_to_blk_status(clone->bi_error); | 122 | blk_status_t error = clone->bi_status; |
123 | 123 | ||
124 | bio_put(clone); | 124 | bio_put(clone); |
125 | 125 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 79a845798e2f..1ba41048b438 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio) | |||
1590 | { | 1590 | { |
1591 | void *callback_data = bio->bi_private; | 1591 | void *callback_data = bio->bi_private; |
1592 | 1592 | ||
1593 | dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0); | 1593 | dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | static void start_full_bio(struct dm_snap_pending_exception *pe, | 1596 | static void start_full_bio(struct dm_snap_pending_exception *pe, |
@@ -1851,7 +1851,8 @@ out_unlock: | |||
1851 | return r; | 1851 | return r; |
1852 | } | 1852 | } |
1853 | 1853 | ||
1854 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int *error) | 1854 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, |
1855 | blk_status_t *error) | ||
1855 | { | 1856 | { |
1856 | struct dm_snapshot *s = ti->private; | 1857 | struct dm_snapshot *s = ti->private; |
1857 | 1858 | ||
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 49888bc2c909..11621a0af887 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -375,7 +375,8 @@ static void stripe_status(struct dm_target *ti, status_type_t type, | |||
375 | } | 375 | } |
376 | } | 376 | } |
377 | 377 | ||
378 | static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error) | 378 | static int stripe_end_io(struct dm_target *ti, struct bio *bio, |
379 | blk_status_t *error) | ||
379 | { | 380 | { |
380 | unsigned i; | 381 | unsigned i; |
381 | char major_minor[16]; | 382 | char major_minor[16]; |
@@ -387,7 +388,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error) | |||
387 | if (bio->bi_opf & REQ_RAHEAD) | 388 | if (bio->bi_opf & REQ_RAHEAD) |
388 | return DM_ENDIO_DONE; | 389 | return DM_ENDIO_DONE; |
389 | 390 | ||
390 | if (*error == -EOPNOTSUPP) | 391 | if (*error == BLK_STS_NOTSUPP) |
391 | return DM_ENDIO_DONE; | 392 | return DM_ENDIO_DONE; |
392 | 393 | ||
393 | memset(major_minor, 0, sizeof(major_minor)); | 394 | memset(major_minor, 0, sizeof(major_minor)); |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 22b1a64c44b7..3490b300cbff 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r) | |||
383 | * Even if r is set, there could be sub discards in flight that we | 383 | * Even if r is set, there could be sub discards in flight that we |
384 | * need to wait for. | 384 | * need to wait for. |
385 | */ | 385 | */ |
386 | if (r && !op->parent_bio->bi_error) | 386 | if (r && !op->parent_bio->bi_status) |
387 | op->parent_bio->bi_error = r; | 387 | op->parent_bio->bi_status = errno_to_blk_status(r); |
388 | bio_endio(op->parent_bio); | 388 | bio_endio(op->parent_bio); |
389 | } | 389 | } |
390 | 390 | ||
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool, | |||
450 | } | 450 | } |
451 | 451 | ||
452 | static void cell_error_with_code(struct pool *pool, | 452 | static void cell_error_with_code(struct pool *pool, |
453 | struct dm_bio_prison_cell *cell, int error_code) | 453 | struct dm_bio_prison_cell *cell, blk_status_t error_code) |
454 | { | 454 | { |
455 | dm_cell_error(pool->prison, cell, error_code); | 455 | dm_cell_error(pool->prison, cell, error_code); |
456 | dm_bio_prison_free_cell(pool->prison, cell); | 456 | dm_bio_prison_free_cell(pool->prison, cell); |
457 | } | 457 | } |
458 | 458 | ||
459 | static int get_pool_io_error_code(struct pool *pool) | 459 | static blk_status_t get_pool_io_error_code(struct pool *pool) |
460 | { | 460 | { |
461 | return pool->out_of_data_space ? -ENOSPC : -EIO; | 461 | return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; |
462 | } | 462 | } |
463 | 463 | ||
464 | static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) | 464 | static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) |
465 | { | 465 | { |
466 | int error = get_pool_io_error_code(pool); | 466 | cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); |
467 | |||
468 | cell_error_with_code(pool, cell, error); | ||
469 | } | 467 | } |
470 | 468 | ||
471 | static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) | 469 | static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) |
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) | |||
475 | 473 | ||
476 | static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) | 474 | static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) |
477 | { | 475 | { |
478 | cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); | 476 | cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); |
479 | } | 477 | } |
480 | 478 | ||
481 | /*----------------------------------------------------------------*/ | 479 | /*----------------------------------------------------------------*/ |
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master) | |||
555 | bio_list_init(master); | 553 | bio_list_init(master); |
556 | } | 554 | } |
557 | 555 | ||
558 | static void error_bio_list(struct bio_list *bios, int error) | 556 | static void error_bio_list(struct bio_list *bios, blk_status_t error) |
559 | { | 557 | { |
560 | struct bio *bio; | 558 | struct bio *bio; |
561 | 559 | ||
562 | while ((bio = bio_list_pop(bios))) { | 560 | while ((bio = bio_list_pop(bios))) { |
563 | bio->bi_error = error; | 561 | bio->bi_status = error; |
564 | bio_endio(bio); | 562 | bio_endio(bio); |
565 | } | 563 | } |
566 | } | 564 | } |
567 | 565 | ||
568 | static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) | 566 | static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, |
567 | blk_status_t error) | ||
569 | { | 568 | { |
570 | struct bio_list bios; | 569 | struct bio_list bios; |
571 | unsigned long flags; | 570 | unsigned long flags; |
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc) | |||
608 | __merge_bio_list(&bios, &tc->retry_on_resume_list); | 607 | __merge_bio_list(&bios, &tc->retry_on_resume_list); |
609 | spin_unlock_irqrestore(&tc->lock, flags); | 608 | spin_unlock_irqrestore(&tc->lock, flags); |
610 | 609 | ||
611 | error_bio_list(&bios, DM_ENDIO_REQUEUE); | 610 | error_bio_list(&bios, BLK_STS_DM_REQUEUE); |
612 | requeue_deferred_cells(tc); | 611 | requeue_deferred_cells(tc); |
613 | } | 612 | } |
614 | 613 | ||
615 | static void error_retry_list_with_code(struct pool *pool, int error) | 614 | static void error_retry_list_with_code(struct pool *pool, blk_status_t error) |
616 | { | 615 | { |
617 | struct thin_c *tc; | 616 | struct thin_c *tc; |
618 | 617 | ||
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error) | |||
624 | 623 | ||
625 | static void error_retry_list(struct pool *pool) | 624 | static void error_retry_list(struct pool *pool) |
626 | { | 625 | { |
627 | int error = get_pool_io_error_code(pool); | 626 | error_retry_list_with_code(pool, get_pool_io_error_code(pool)); |
628 | |||
629 | error_retry_list_with_code(pool, error); | ||
630 | } | 627 | } |
631 | 628 | ||
632 | /* | 629 | /* |
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping { | |||
774 | */ | 771 | */ |
775 | atomic_t prepare_actions; | 772 | atomic_t prepare_actions; |
776 | 773 | ||
777 | int err; | 774 | blk_status_t status; |
778 | struct thin_c *tc; | 775 | struct thin_c *tc; |
779 | dm_block_t virt_begin, virt_end; | 776 | dm_block_t virt_begin, virt_end; |
780 | dm_block_t data_block; | 777 | dm_block_t data_block; |
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) | |||
814 | { | 811 | { |
815 | struct dm_thin_new_mapping *m = context; | 812 | struct dm_thin_new_mapping *m = context; |
816 | 813 | ||
817 | m->err = read_err || write_err ? -EIO : 0; | 814 | m->status = read_err || write_err ? BLK_STS_IOERR : 0; |
818 | complete_mapping_preparation(m); | 815 | complete_mapping_preparation(m); |
819 | } | 816 | } |
820 | 817 | ||
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio) | |||
825 | 822 | ||
826 | bio->bi_end_io = m->saved_bi_end_io; | 823 | bio->bi_end_io = m->saved_bi_end_io; |
827 | 824 | ||
828 | m->err = bio->bi_error; | 825 | m->status = bio->bi_status; |
829 | complete_mapping_preparation(m); | 826 | complete_mapping_preparation(m); |
830 | } | 827 | } |
831 | 828 | ||
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
925 | struct bio *bio = m->bio; | 922 | struct bio *bio = m->bio; |
926 | int r; | 923 | int r; |
927 | 924 | ||
928 | if (m->err) { | 925 | if (m->status) { |
929 | cell_error(pool, m->cell); | 926 | cell_error(pool, m->cell); |
930 | goto out; | 927 | goto out; |
931 | } | 928 | } |
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio) | |||
1495 | spin_unlock_irqrestore(&tc->lock, flags); | 1492 | spin_unlock_irqrestore(&tc->lock, flags); |
1496 | } | 1493 | } |
1497 | 1494 | ||
1498 | static int should_error_unserviceable_bio(struct pool *pool) | 1495 | static blk_status_t should_error_unserviceable_bio(struct pool *pool) |
1499 | { | 1496 | { |
1500 | enum pool_mode m = get_pool_mode(pool); | 1497 | enum pool_mode m = get_pool_mode(pool); |
1501 | 1498 | ||
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool) | |||
1503 | case PM_WRITE: | 1500 | case PM_WRITE: |
1504 | /* Shouldn't get here */ | 1501 | /* Shouldn't get here */ |
1505 | DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); | 1502 | DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); |
1506 | return -EIO; | 1503 | return BLK_STS_IOERR; |
1507 | 1504 | ||
1508 | case PM_OUT_OF_DATA_SPACE: | 1505 | case PM_OUT_OF_DATA_SPACE: |
1509 | return pool->pf.error_if_no_space ? -ENOSPC : 0; | 1506 | return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; |
1510 | 1507 | ||
1511 | case PM_READ_ONLY: | 1508 | case PM_READ_ONLY: |
1512 | case PM_FAIL: | 1509 | case PM_FAIL: |
1513 | return -EIO; | 1510 | return BLK_STS_IOERR; |
1514 | default: | 1511 | default: |
1515 | /* Shouldn't get here */ | 1512 | /* Shouldn't get here */ |
1516 | DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); | 1513 | DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); |
1517 | return -EIO; | 1514 | return BLK_STS_IOERR; |
1518 | } | 1515 | } |
1519 | } | 1516 | } |
1520 | 1517 | ||
1521 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) | 1518 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) |
1522 | { | 1519 | { |
1523 | int error = should_error_unserviceable_bio(pool); | 1520 | blk_status_t error = should_error_unserviceable_bio(pool); |
1524 | 1521 | ||
1525 | if (error) { | 1522 | if (error) { |
1526 | bio->bi_error = error; | 1523 | bio->bi_status = error; |
1527 | bio_endio(bio); | 1524 | bio_endio(bio); |
1528 | } else | 1525 | } else |
1529 | retry_on_resume(bio); | 1526 | retry_on_resume(bio); |
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c | |||
1533 | { | 1530 | { |
1534 | struct bio *bio; | 1531 | struct bio *bio; |
1535 | struct bio_list bios; | 1532 | struct bio_list bios; |
1536 | int error; | 1533 | blk_status_t error; |
1537 | 1534 | ||
1538 | error = should_error_unserviceable_bio(pool); | 1535 | error = should_error_unserviceable_bio(pool); |
1539 | if (error) { | 1536 | if (error) { |
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc) | |||
2071 | unsigned count = 0; | 2068 | unsigned count = 0; |
2072 | 2069 | ||
2073 | if (tc->requeue_mode) { | 2070 | if (tc->requeue_mode) { |
2074 | error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE); | 2071 | error_thin_bio_list(tc, &tc->deferred_bio_list, |
2072 | BLK_STS_DM_REQUEUE); | ||
2075 | return; | 2073 | return; |
2076 | } | 2074 | } |
2077 | 2075 | ||
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws) | |||
2322 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { | 2320 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { |
2323 | pool->pf.error_if_no_space = true; | 2321 | pool->pf.error_if_no_space = true; |
2324 | notify_of_pool_mode_change_to_oods(pool); | 2322 | notify_of_pool_mode_change_to_oods(pool); |
2325 | error_retry_list_with_code(pool, -ENOSPC); | 2323 | error_retry_list_with_code(pool, BLK_STS_NOSPC); |
2326 | } | 2324 | } |
2327 | } | 2325 | } |
2328 | 2326 | ||
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2624 | thin_hook_bio(tc, bio); | 2622 | thin_hook_bio(tc, bio); |
2625 | 2623 | ||
2626 | if (tc->requeue_mode) { | 2624 | if (tc->requeue_mode) { |
2627 | bio->bi_error = DM_ENDIO_REQUEUE; | 2625 | bio->bi_status = BLK_STS_DM_REQUEUE; |
2628 | bio_endio(bio); | 2626 | bio_endio(bio); |
2629 | return DM_MAPIO_SUBMITTED; | 2627 | return DM_MAPIO_SUBMITTED; |
2630 | } | 2628 | } |
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio) | |||
4177 | return thin_bio_map(ti, bio); | 4175 | return thin_bio_map(ti, bio); |
4178 | } | 4176 | } |
4179 | 4177 | ||
4180 | static int thin_endio(struct dm_target *ti, struct bio *bio, int *err) | 4178 | static int thin_endio(struct dm_target *ti, struct bio *bio, |
4179 | blk_status_t *err) | ||
4181 | { | 4180 | { |
4182 | unsigned long flags; | 4181 | unsigned long flags; |
4183 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 4182 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 9ed55468b98b..2dca66eb67e1 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io) | |||
538 | /* | 538 | /* |
539 | * End one "io" structure with a given error. | 539 | * End one "io" structure with a given error. |
540 | */ | 540 | */ |
541 | static void verity_finish_io(struct dm_verity_io *io, int error) | 541 | static void verity_finish_io(struct dm_verity_io *io, blk_status_t status) |
542 | { | 542 | { |
543 | struct dm_verity *v = io->v; | 543 | struct dm_verity *v = io->v; |
544 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); | 544 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); |
545 | 545 | ||
546 | bio->bi_end_io = io->orig_bi_end_io; | 546 | bio->bi_end_io = io->orig_bi_end_io; |
547 | bio->bi_error = error; | 547 | bio->bi_status = status; |
548 | 548 | ||
549 | verity_fec_finish_io(io); | 549 | verity_fec_finish_io(io); |
550 | 550 | ||
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w) | |||
555 | { | 555 | { |
556 | struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); | 556 | struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); |
557 | 557 | ||
558 | verity_finish_io(io, verity_verify_io(io)); | 558 | verity_finish_io(io, errno_to_blk_status(verity_verify_io(io))); |
559 | } | 559 | } |
560 | 560 | ||
561 | static void verity_end_io(struct bio *bio) | 561 | static void verity_end_io(struct bio *bio) |
562 | { | 562 | { |
563 | struct dm_verity_io *io = bio->bi_private; | 563 | struct dm_verity_io *io = bio->bi_private; |
564 | 564 | ||
565 | if (bio->bi_error && !verity_fec_is_enabled(io->v)) { | 565 | if (bio->bi_status && !verity_fec_is_enabled(io->v)) { |
566 | verity_finish_io(io, bio->bi_error); | 566 | verity_finish_io(io, bio->bi_status); |
567 | return; | 567 | return; |
568 | } | 568 | } |
569 | 569 | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7a7047211c64..f38f9dd5cbdd 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue; | |||
63 | */ | 63 | */ |
64 | struct dm_io { | 64 | struct dm_io { |
65 | struct mapped_device *md; | 65 | struct mapped_device *md; |
66 | int error; | 66 | blk_status_t status; |
67 | atomic_t io_count; | 67 | atomic_t io_count; |
68 | struct bio *bio; | 68 | struct bio *bio; |
69 | unsigned long start_time; | 69 | unsigned long start_time; |
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md) | |||
768 | * Decrements the number of outstanding ios that a bio has been | 768 | * Decrements the number of outstanding ios that a bio has been |
769 | * cloned into, completing the original io if necc. | 769 | * cloned into, completing the original io if necc. |
770 | */ | 770 | */ |
771 | static void dec_pending(struct dm_io *io, int error) | 771 | static void dec_pending(struct dm_io *io, blk_status_t error) |
772 | { | 772 | { |
773 | unsigned long flags; | 773 | unsigned long flags; |
774 | int io_error; | 774 | blk_status_t io_error; |
775 | struct bio *bio; | 775 | struct bio *bio; |
776 | struct mapped_device *md = io->md; | 776 | struct mapped_device *md = io->md; |
777 | 777 | ||
778 | /* Push-back supersedes any I/O errors */ | 778 | /* Push-back supersedes any I/O errors */ |
779 | if (unlikely(error)) { | 779 | if (unlikely(error)) { |
780 | spin_lock_irqsave(&io->endio_lock, flags); | 780 | spin_lock_irqsave(&io->endio_lock, flags); |
781 | if (!(io->error > 0 && __noflush_suspending(md))) | 781 | if (!(io->status == BLK_STS_DM_REQUEUE && |
782 | io->error = error; | 782 | __noflush_suspending(md))) |
783 | io->status = error; | ||
783 | spin_unlock_irqrestore(&io->endio_lock, flags); | 784 | spin_unlock_irqrestore(&io->endio_lock, flags); |
784 | } | 785 | } |
785 | 786 | ||
786 | if (atomic_dec_and_test(&io->io_count)) { | 787 | if (atomic_dec_and_test(&io->io_count)) { |
787 | if (io->error == DM_ENDIO_REQUEUE) { | 788 | if (io->status == BLK_STS_DM_REQUEUE) { |
788 | /* | 789 | /* |
789 | * Target requested pushing back the I/O. | 790 | * Target requested pushing back the I/O. |
790 | */ | 791 | */ |
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error) | |||
793 | bio_list_add_head(&md->deferred, io->bio); | 794 | bio_list_add_head(&md->deferred, io->bio); |
794 | else | 795 | else |
795 | /* noflush suspend was interrupted. */ | 796 | /* noflush suspend was interrupted. */ |
796 | io->error = -EIO; | 797 | io->status = BLK_STS_IOERR; |
797 | spin_unlock_irqrestore(&md->deferred_lock, flags); | 798 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
798 | } | 799 | } |
799 | 800 | ||
800 | io_error = io->error; | 801 | io_error = io->status; |
801 | bio = io->bio; | 802 | bio = io->bio; |
802 | end_io_acct(io); | 803 | end_io_acct(io); |
803 | free_io(md, io); | 804 | free_io(md, io); |
804 | 805 | ||
805 | if (io_error == DM_ENDIO_REQUEUE) | 806 | if (io_error == BLK_STS_DM_REQUEUE) |
806 | return; | 807 | return; |
807 | 808 | ||
808 | if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { | 809 | if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { |
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
814 | queue_io(md, bio); | 815 | queue_io(md, bio); |
815 | } else { | 816 | } else { |
816 | /* done with normal IO or empty flush */ | 817 | /* done with normal IO or empty flush */ |
817 | bio->bi_error = io_error; | 818 | bio->bi_status = io_error; |
818 | bio_endio(bio); | 819 | bio_endio(bio); |
819 | } | 820 | } |
820 | } | 821 | } |
@@ -838,14 +839,13 @@ void disable_write_zeroes(struct mapped_device *md) | |||
838 | 839 | ||
839 | static void clone_endio(struct bio *bio) | 840 | static void clone_endio(struct bio *bio) |
840 | { | 841 | { |
841 | int error = bio->bi_error; | 842 | blk_status_t error = bio->bi_status; |
842 | int r = error; | ||
843 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); | 843 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); |
844 | struct dm_io *io = tio->io; | 844 | struct dm_io *io = tio->io; |
845 | struct mapped_device *md = tio->io->md; | 845 | struct mapped_device *md = tio->io->md; |
846 | dm_endio_fn endio = tio->ti->type->end_io; | 846 | dm_endio_fn endio = tio->ti->type->end_io; |
847 | 847 | ||
848 | if (unlikely(error == -EREMOTEIO)) { | 848 | if (unlikely(error == BLK_STS_TARGET)) { |
849 | if (bio_op(bio) == REQ_OP_WRITE_SAME && | 849 | if (bio_op(bio) == REQ_OP_WRITE_SAME && |
850 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) | 850 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) |
851 | disable_write_same(md); | 851 | disable_write_same(md); |
@@ -855,10 +855,10 @@ static void clone_endio(struct bio *bio) | |||
855 | } | 855 | } |
856 | 856 | ||
857 | if (endio) { | 857 | if (endio) { |
858 | r = endio(tio->ti, bio, &error); | 858 | int r = endio(tio->ti, bio, &error); |
859 | switch (r) { | 859 | switch (r) { |
860 | case DM_ENDIO_REQUEUE: | 860 | case DM_ENDIO_REQUEUE: |
861 | error = DM_ENDIO_REQUEUE; | 861 | error = BLK_STS_DM_REQUEUE; |
862 | /*FALLTHRU*/ | 862 | /*FALLTHRU*/ |
863 | case DM_ENDIO_DONE: | 863 | case DM_ENDIO_DONE: |
864 | break; | 864 | break; |
@@ -1094,11 +1094,11 @@ static void __map_bio(struct dm_target_io *tio) | |||
1094 | generic_make_request(clone); | 1094 | generic_make_request(clone); |
1095 | break; | 1095 | break; |
1096 | case DM_MAPIO_KILL: | 1096 | case DM_MAPIO_KILL: |
1097 | r = -EIO; | 1097 | dec_pending(tio->io, BLK_STS_IOERR); |
1098 | /*FALLTHRU*/ | 1098 | free_tio(tio); |
1099 | break; | ||
1099 | case DM_MAPIO_REQUEUE: | 1100 | case DM_MAPIO_REQUEUE: |
1100 | /* error the io and bail out, or requeue it if needed */ | 1101 | dec_pending(tio->io, BLK_STS_DM_REQUEUE); |
1101 | dec_pending(tio->io, r); | ||
1102 | free_tio(tio); | 1102 | free_tio(tio); |
1103 | break; | 1103 | break; |
1104 | default: | 1104 | default: |
@@ -1366,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
1366 | ci.map = map; | 1366 | ci.map = map; |
1367 | ci.md = md; | 1367 | ci.md = md; |
1368 | ci.io = alloc_io(md); | 1368 | ci.io = alloc_io(md); |
1369 | ci.io->error = 0; | 1369 | ci.io->status = 0; |
1370 | atomic_set(&ci.io->io_count, 1); | 1370 | atomic_set(&ci.io->io_count, 1); |
1371 | ci.io->bio = bio; | 1371 | ci.io->bio = bio; |
1372 | ci.io->md = md; | 1372 | ci.io->md = md; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 10367ffe92e3..6452e83fd650 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
273 | } | 273 | } |
274 | if (mddev->ro == 1 && unlikely(rw == WRITE)) { | 274 | if (mddev->ro == 1 && unlikely(rw == WRITE)) { |
275 | if (bio_sectors(bio) != 0) | 275 | if (bio_sectors(bio) != 0) |
276 | bio->bi_error = -EROFS; | 276 | bio->bi_status = BLK_STS_IOERR; |
277 | bio_endio(bio); | 277 | bio_endio(bio); |
278 | return BLK_QC_T_NONE; | 278 | return BLK_QC_T_NONE; |
279 | } | 279 | } |
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio) | |||
719 | struct md_rdev *rdev = bio->bi_private; | 719 | struct md_rdev *rdev = bio->bi_private; |
720 | struct mddev *mddev = rdev->mddev; | 720 | struct mddev *mddev = rdev->mddev; |
721 | 721 | ||
722 | if (bio->bi_error) { | 722 | if (bio->bi_status) { |
723 | pr_err("md: super_written gets error=%d\n", bio->bi_error); | 723 | pr_err("md: super_written gets error=%d\n", bio->bi_status); |
724 | md_error(mddev, rdev); | 724 | md_error(mddev, rdev); |
725 | if (!test_bit(Faulty, &rdev->flags) | 725 | if (!test_bit(Faulty, &rdev->flags) |
726 | && (bio->bi_opf & MD_FAILFAST)) { | 726 | && (bio->bi_opf & MD_FAILFAST)) { |
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | |||
801 | 801 | ||
802 | submit_bio_wait(bio); | 802 | submit_bio_wait(bio); |
803 | 803 | ||
804 | ret = !bio->bi_error; | 804 | ret = !bio->bi_status; |
805 | bio_put(bio); | 805 | bio_put(bio); |
806 | return ret; | 806 | return ret; |
807 | } | 807 | } |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index e95d521d93e9..68d036e64041 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) | |||
73 | * operation and are ready to return a success/failure code to the buffer | 73 | * operation and are ready to return a success/failure code to the buffer |
74 | * cache layer. | 74 | * cache layer. |
75 | */ | 75 | */ |
76 | static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) | 76 | static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status) |
77 | { | 77 | { |
78 | struct bio *bio = mp_bh->master_bio; | 78 | struct bio *bio = mp_bh->master_bio; |
79 | struct mpconf *conf = mp_bh->mddev->private; | 79 | struct mpconf *conf = mp_bh->mddev->private; |
80 | 80 | ||
81 | bio->bi_error = err; | 81 | bio->bi_status = status; |
82 | bio_endio(bio); | 82 | bio_endio(bio); |
83 | mempool_free(mp_bh, conf->pool); | 83 | mempool_free(mp_bh, conf->pool); |
84 | } | 84 | } |
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio) | |||
89 | struct mpconf *conf = mp_bh->mddev->private; | 89 | struct mpconf *conf = mp_bh->mddev->private; |
90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; | 90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; |
91 | 91 | ||
92 | if (!bio->bi_error) | 92 | if (!bio->bi_status) |
93 | multipath_end_bh_io(mp_bh, 0); | 93 | multipath_end_bh_io(mp_bh, 0); |
94 | else if (!(bio->bi_opf & REQ_RAHEAD)) { | 94 | else if (!(bio->bi_opf & REQ_RAHEAD)) { |
95 | /* | 95 | /* |
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio) | |||
102 | (unsigned long long)bio->bi_iter.bi_sector); | 102 | (unsigned long long)bio->bi_iter.bi_sector); |
103 | multipath_reschedule_retry(mp_bh); | 103 | multipath_reschedule_retry(mp_bh); |
104 | } else | 104 | } else |
105 | multipath_end_bh_io(mp_bh, bio->bi_error); | 105 | multipath_end_bh_io(mp_bh, bio->bi_status); |
106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
107 | } | 107 | } |
108 | 108 | ||
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread) | |||
347 | pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", | 347 | pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", |
348 | bdevname(bio->bi_bdev,b), | 348 | bdevname(bio->bi_bdev,b), |
349 | (unsigned long long)bio->bi_iter.bi_sector); | 349 | (unsigned long long)bio->bi_iter.bi_sector); |
350 | multipath_end_bh_io(mp_bh, -EIO); | 350 | multipath_end_bh_io(mp_bh, BLK_STS_IOERR); |
351 | } else { | 351 | } else { |
352 | pr_err("multipath: %s: redirecting sector %llu to another IO path\n", | 352 | pr_err("multipath: %s: redirecting sector %llu to another IO path\n", |
353 | bdevname(bio->bi_bdev,b), | 353 | bdevname(bio->bi_bdev,b), |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af5056d56878..94b87c4d0f7b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
277 | struct r1conf *conf = r1_bio->mddev->private; | 277 | struct r1conf *conf = r1_bio->mddev->private; |
278 | 278 | ||
279 | if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) | 279 | if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) |
280 | bio->bi_error = -EIO; | 280 | bio->bi_status = BLK_STS_IOERR; |
281 | 281 | ||
282 | bio_endio(bio); | 282 | bio_endio(bio); |
283 | /* | 283 | /* |
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) | |||
335 | 335 | ||
336 | static void raid1_end_read_request(struct bio *bio) | 336 | static void raid1_end_read_request(struct bio *bio) |
337 | { | 337 | { |
338 | int uptodate = !bio->bi_error; | 338 | int uptodate = !bio->bi_status; |
339 | struct r1bio *r1_bio = bio->bi_private; | 339 | struct r1bio *r1_bio = bio->bi_private; |
340 | struct r1conf *conf = r1_bio->mddev->private; | 340 | struct r1conf *conf = r1_bio->mddev->private; |
341 | struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; | 341 | struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; |
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio) | |||
426 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; | 426 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; |
427 | bool discard_error; | 427 | bool discard_error; |
428 | 428 | ||
429 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | 429 | discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; |
430 | 430 | ||
431 | /* | 431 | /* |
432 | * 'one mirror IO has finished' event handler: | 432 | * 'one mirror IO has finished' event handler: |
433 | */ | 433 | */ |
434 | if (bio->bi_error && !discard_error) { | 434 | if (bio->bi_status && !discard_error) { |
435 | set_bit(WriteErrorSeen, &rdev->flags); | 435 | set_bit(WriteErrorSeen, &rdev->flags); |
436 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) | 436 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
437 | set_bit(MD_RECOVERY_NEEDED, & | 437 | set_bit(MD_RECOVERY_NEEDED, & |
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) | |||
802 | bio->bi_next = NULL; | 802 | bio->bi_next = NULL; |
803 | bio->bi_bdev = rdev->bdev; | 803 | bio->bi_bdev = rdev->bdev; |
804 | if (test_bit(Faulty, &rdev->flags)) { | 804 | if (test_bit(Faulty, &rdev->flags)) { |
805 | bio->bi_error = -EIO; | 805 | bio->bi_status = BLK_STS_IOERR; |
806 | bio_endio(bio); | 806 | bio_endio(bio); |
807 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 807 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
808 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 808 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio) | |||
1856 | * or re-read if the read failed. | 1856 | * or re-read if the read failed. |
1857 | * We don't do much here, just schedule handling by raid1d | 1857 | * We don't do much here, just schedule handling by raid1d |
1858 | */ | 1858 | */ |
1859 | if (!bio->bi_error) | 1859 | if (!bio->bi_status) |
1860 | set_bit(R1BIO_Uptodate, &r1_bio->state); | 1860 | set_bit(R1BIO_Uptodate, &r1_bio->state); |
1861 | 1861 | ||
1862 | if (atomic_dec_and_test(&r1_bio->remaining)) | 1862 | if (atomic_dec_and_test(&r1_bio->remaining)) |
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio) | |||
1865 | 1865 | ||
1866 | static void end_sync_write(struct bio *bio) | 1866 | static void end_sync_write(struct bio *bio) |
1867 | { | 1867 | { |
1868 | int uptodate = !bio->bi_error; | 1868 | int uptodate = !bio->bi_status; |
1869 | struct r1bio *r1_bio = get_resync_r1bio(bio); | 1869 | struct r1bio *r1_bio = get_resync_r1bio(bio); |
1870 | struct mddev *mddev = r1_bio->mddev; | 1870 | struct mddev *mddev = r1_bio->mddev; |
1871 | struct r1conf *conf = mddev->private; | 1871 | struct r1conf *conf = mddev->private; |
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) | |||
2058 | idx ++; | 2058 | idx ++; |
2059 | } | 2059 | } |
2060 | set_bit(R1BIO_Uptodate, &r1_bio->state); | 2060 | set_bit(R1BIO_Uptodate, &r1_bio->state); |
2061 | bio->bi_error = 0; | 2061 | bio->bi_status = 0; |
2062 | return 1; | 2062 | return 1; |
2063 | } | 2063 | } |
2064 | 2064 | ||
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio) | |||
2082 | for (i = 0; i < conf->raid_disks * 2; i++) { | 2082 | for (i = 0; i < conf->raid_disks * 2; i++) { |
2083 | int j; | 2083 | int j; |
2084 | int size; | 2084 | int size; |
2085 | int error; | 2085 | blk_status_t status; |
2086 | struct bio_vec *bi; | 2086 | struct bio_vec *bi; |
2087 | struct bio *b = r1_bio->bios[i]; | 2087 | struct bio *b = r1_bio->bios[i]; |
2088 | struct resync_pages *rp = get_resync_pages(b); | 2088 | struct resync_pages *rp = get_resync_pages(b); |
2089 | if (b->bi_end_io != end_sync_read) | 2089 | if (b->bi_end_io != end_sync_read) |
2090 | continue; | 2090 | continue; |
2091 | /* fixup the bio for reuse, but preserve errno */ | 2091 | /* fixup the bio for reuse, but preserve errno */ |
2092 | error = b->bi_error; | 2092 | status = b->bi_status; |
2093 | bio_reset(b); | 2093 | bio_reset(b); |
2094 | b->bi_error = error; | 2094 | b->bi_status = status; |
2095 | b->bi_vcnt = vcnt; | 2095 | b->bi_vcnt = vcnt; |
2096 | b->bi_iter.bi_size = r1_bio->sectors << 9; | 2096 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
2097 | b->bi_iter.bi_sector = r1_bio->sector + | 2097 | b->bi_iter.bi_sector = r1_bio->sector + |
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2113 | } | 2113 | } |
2114 | for (primary = 0; primary < conf->raid_disks * 2; primary++) | 2114 | for (primary = 0; primary < conf->raid_disks * 2; primary++) |
2115 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && | 2115 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && |
2116 | !r1_bio->bios[primary]->bi_error) { | 2116 | !r1_bio->bios[primary]->bi_status) { |
2117 | r1_bio->bios[primary]->bi_end_io = NULL; | 2117 | r1_bio->bios[primary]->bi_end_io = NULL; |
2118 | rdev_dec_pending(conf->mirrors[primary].rdev, mddev); | 2118 | rdev_dec_pending(conf->mirrors[primary].rdev, mddev); |
2119 | break; | 2119 | break; |
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2123 | int j; | 2123 | int j; |
2124 | struct bio *pbio = r1_bio->bios[primary]; | 2124 | struct bio *pbio = r1_bio->bios[primary]; |
2125 | struct bio *sbio = r1_bio->bios[i]; | 2125 | struct bio *sbio = r1_bio->bios[i]; |
2126 | int error = sbio->bi_error; | 2126 | blk_status_t status = sbio->bi_status; |
2127 | struct page **ppages = get_resync_pages(pbio)->pages; | 2127 | struct page **ppages = get_resync_pages(pbio)->pages; |
2128 | struct page **spages = get_resync_pages(sbio)->pages; | 2128 | struct page **spages = get_resync_pages(sbio)->pages; |
2129 | struct bio_vec *bi; | 2129 | struct bio_vec *bi; |
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio) | |||
2132 | if (sbio->bi_end_io != end_sync_read) | 2132 | if (sbio->bi_end_io != end_sync_read) |
2133 | continue; | 2133 | continue; |
2134 | /* Now we can 'fixup' the error value */ | 2134 | /* Now we can 'fixup' the error value */ |
2135 | sbio->bi_error = 0; | 2135 | sbio->bi_status = 0; |
2136 | 2136 | ||
2137 | bio_for_each_segment_all(bi, sbio, j) | 2137 | bio_for_each_segment_all(bi, sbio, j) |
2138 | page_len[j] = bi->bv_len; | 2138 | page_len[j] = bi->bv_len; |
2139 | 2139 | ||
2140 | if (!error) { | 2140 | if (!status) { |
2141 | for (j = vcnt; j-- ; ) { | 2141 | for (j = vcnt; j-- ; ) { |
2142 | if (memcmp(page_address(ppages[j]), | 2142 | if (memcmp(page_address(ppages[j]), |
2143 | page_address(spages[j]), | 2143 | page_address(spages[j]), |
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2149 | if (j >= 0) | 2149 | if (j >= 0) |
2150 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); | 2150 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); |
2151 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) | 2151 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) |
2152 | && !error)) { | 2152 | && !status)) { |
2153 | /* No need to write to this device. */ | 2153 | /* No need to write to this device. */ |
2154 | sbio->bi_end_io = NULL; | 2154 | sbio->bi_end_io = NULL; |
2155 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); | 2155 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); |
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio | |||
2400 | struct bio *bio = r1_bio->bios[m]; | 2400 | struct bio *bio = r1_bio->bios[m]; |
2401 | if (bio->bi_end_io == NULL) | 2401 | if (bio->bi_end_io == NULL) |
2402 | continue; | 2402 | continue; |
2403 | if (!bio->bi_error && | 2403 | if (!bio->bi_status && |
2404 | test_bit(R1BIO_MadeGood, &r1_bio->state)) { | 2404 | test_bit(R1BIO_MadeGood, &r1_bio->state)) { |
2405 | rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); | 2405 | rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); |
2406 | } | 2406 | } |
2407 | if (bio->bi_error && | 2407 | if (bio->bi_status && |
2408 | test_bit(R1BIO_WriteError, &r1_bio->state)) { | 2408 | test_bit(R1BIO_WriteError, &r1_bio->state)) { |
2409 | if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) | 2409 | if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) |
2410 | md_error(conf->mddev, rdev); | 2410 | md_error(conf->mddev, rdev); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4343d7ff9916..89ad1cd29037 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio) | |||
336 | struct r10conf *conf = r10_bio->mddev->private; | 336 | struct r10conf *conf = r10_bio->mddev->private; |
337 | 337 | ||
338 | if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) | 338 | if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) |
339 | bio->bi_error = -EIO; | 339 | bio->bi_status = BLK_STS_IOERR; |
340 | 340 | ||
341 | bio_endio(bio); | 341 | bio_endio(bio); |
342 | /* | 342 | /* |
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, | |||
389 | 389 | ||
390 | static void raid10_end_read_request(struct bio *bio) | 390 | static void raid10_end_read_request(struct bio *bio) |
391 | { | 391 | { |
392 | int uptodate = !bio->bi_error; | 392 | int uptodate = !bio->bi_status; |
393 | struct r10bio *r10_bio = bio->bi_private; | 393 | struct r10bio *r10_bio = bio->bi_private; |
394 | int slot, dev; | 394 | int slot, dev; |
395 | struct md_rdev *rdev; | 395 | struct md_rdev *rdev; |
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
477 | struct bio *to_put = NULL; | 477 | struct bio *to_put = NULL; |
478 | bool discard_error; | 478 | bool discard_error; |
479 | 479 | ||
480 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | 480 | discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; |
481 | 481 | ||
482 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); | 482 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); |
483 | 483 | ||
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
491 | /* | 491 | /* |
492 | * this branch is our 'one mirror IO has finished' event handler: | 492 | * this branch is our 'one mirror IO has finished' event handler: |
493 | */ | 493 | */ |
494 | if (bio->bi_error && !discard_error) { | 494 | if (bio->bi_status && !discard_error) { |
495 | if (repl) | 495 | if (repl) |
496 | /* Never record new bad blocks to replacement, | 496 | /* Never record new bad blocks to replacement, |
497 | * just fail it. | 497 | * just fail it. |
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf) | |||
913 | bio->bi_next = NULL; | 913 | bio->bi_next = NULL; |
914 | bio->bi_bdev = rdev->bdev; | 914 | bio->bi_bdev = rdev->bdev; |
915 | if (test_bit(Faulty, &rdev->flags)) { | 915 | if (test_bit(Faulty, &rdev->flags)) { |
916 | bio->bi_error = -EIO; | 916 | bio->bi_status = BLK_STS_IOERR; |
917 | bio_endio(bio); | 917 | bio_endio(bio); |
918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1098 | bio->bi_next = NULL; | 1098 | bio->bi_next = NULL; |
1099 | bio->bi_bdev = rdev->bdev; | 1099 | bio->bi_bdev = rdev->bdev; |
1100 | if (test_bit(Faulty, &rdev->flags)) { | 1100 | if (test_bit(Faulty, &rdev->flags)) { |
1101 | bio->bi_error = -EIO; | 1101 | bio->bi_status = BLK_STS_IOERR; |
1102 | bio_endio(bio); | 1102 | bio_endio(bio); |
1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) | |||
1888 | { | 1888 | { |
1889 | struct r10conf *conf = r10_bio->mddev->private; | 1889 | struct r10conf *conf = r10_bio->mddev->private; |
1890 | 1890 | ||
1891 | if (!bio->bi_error) | 1891 | if (!bio->bi_status) |
1892 | set_bit(R10BIO_Uptodate, &r10_bio->state); | 1892 | set_bit(R10BIO_Uptodate, &r10_bio->state); |
1893 | else | 1893 | else |
1894 | /* The write handler will notice the lack of | 1894 | /* The write handler will notice the lack of |
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio) | |||
1972 | else | 1972 | else |
1973 | rdev = conf->mirrors[d].rdev; | 1973 | rdev = conf->mirrors[d].rdev; |
1974 | 1974 | ||
1975 | if (bio->bi_error) { | 1975 | if (bio->bi_status) { |
1976 | if (repl) | 1976 | if (repl) |
1977 | md_error(mddev, rdev); | 1977 | md_error(mddev, rdev); |
1978 | else { | 1978 | else { |
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2021 | 2021 | ||
2022 | /* find the first device with a block */ | 2022 | /* find the first device with a block */ |
2023 | for (i=0; i<conf->copies; i++) | 2023 | for (i=0; i<conf->copies; i++) |
2024 | if (!r10_bio->devs[i].bio->bi_error) | 2024 | if (!r10_bio->devs[i].bio->bi_status) |
2025 | break; | 2025 | break; |
2026 | 2026 | ||
2027 | if (i == conf->copies) | 2027 | if (i == conf->copies) |
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2050 | tpages = get_resync_pages(tbio)->pages; | 2050 | tpages = get_resync_pages(tbio)->pages; |
2051 | d = r10_bio->devs[i].devnum; | 2051 | d = r10_bio->devs[i].devnum; |
2052 | rdev = conf->mirrors[d].rdev; | 2052 | rdev = conf->mirrors[d].rdev; |
2053 | if (!r10_bio->devs[i].bio->bi_error) { | 2053 | if (!r10_bio->devs[i].bio->bi_status) { |
2054 | /* We know that the bi_io_vec layout is the same for | 2054 | /* We know that the bi_io_vec layout is the same for |
2055 | * both 'first' and 'i', so we just compare them. | 2055 | * both 'first' and 'i', so we just compare them. |
2056 | * All vec entries are PAGE_SIZE; | 2056 | * All vec entries are PAGE_SIZE; |
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2633 | rdev = conf->mirrors[dev].rdev; | 2633 | rdev = conf->mirrors[dev].rdev; |
2634 | if (r10_bio->devs[m].bio == NULL) | 2634 | if (r10_bio->devs[m].bio == NULL) |
2635 | continue; | 2635 | continue; |
2636 | if (!r10_bio->devs[m].bio->bi_error) { | 2636 | if (!r10_bio->devs[m].bio->bi_status) { |
2637 | rdev_clear_badblocks( | 2637 | rdev_clear_badblocks( |
2638 | rdev, | 2638 | rdev, |
2639 | r10_bio->devs[m].addr, | 2639 | r10_bio->devs[m].addr, |
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2649 | if (r10_bio->devs[m].repl_bio == NULL) | 2649 | if (r10_bio->devs[m].repl_bio == NULL) |
2650 | continue; | 2650 | continue; |
2651 | 2651 | ||
2652 | if (!r10_bio->devs[m].repl_bio->bi_error) { | 2652 | if (!r10_bio->devs[m].repl_bio->bi_status) { |
2653 | rdev_clear_badblocks( | 2653 | rdev_clear_badblocks( |
2654 | rdev, | 2654 | rdev, |
2655 | r10_bio->devs[m].addr, | 2655 | r10_bio->devs[m].addr, |
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2675 | r10_bio->devs[m].addr, | 2675 | r10_bio->devs[m].addr, |
2676 | r10_bio->sectors, 0); | 2676 | r10_bio->sectors, 0); |
2677 | rdev_dec_pending(rdev, conf->mddev); | 2677 | rdev_dec_pending(rdev, conf->mddev); |
2678 | } else if (bio != NULL && bio->bi_error) { | 2678 | } else if (bio != NULL && bio->bi_status) { |
2679 | fail = true; | 2679 | fail = true; |
2680 | if (!narrow_write_error(r10_bio, m)) { | 2680 | if (!narrow_write_error(r10_bio, m)) { |
2681 | md_error(conf->mddev, rdev); | 2681 | md_error(conf->mddev, rdev); |
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3267 | r10_bio->devs[i].repl_bio->bi_end_io = NULL; | 3267 | r10_bio->devs[i].repl_bio->bi_end_io = NULL; |
3268 | 3268 | ||
3269 | bio = r10_bio->devs[i].bio; | 3269 | bio = r10_bio->devs[i].bio; |
3270 | bio->bi_error = -EIO; | 3270 | bio->bi_status = BLK_STS_IOERR; |
3271 | rcu_read_lock(); | 3271 | rcu_read_lock(); |
3272 | rdev = rcu_dereference(conf->mirrors[d].rdev); | 3272 | rdev = rcu_dereference(conf->mirrors[d].rdev); |
3273 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { | 3273 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { |
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3309 | 3309 | ||
3310 | /* Need to set up for writing to the replacement */ | 3310 | /* Need to set up for writing to the replacement */ |
3311 | bio = r10_bio->devs[i].repl_bio; | 3311 | bio = r10_bio->devs[i].repl_bio; |
3312 | bio->bi_error = -EIO; | 3312 | bio->bi_status = BLK_STS_IOERR; |
3313 | 3313 | ||
3314 | sector = r10_bio->devs[i].addr; | 3314 | sector = r10_bio->devs[i].addr; |
3315 | bio->bi_next = biolist; | 3315 | bio->bi_next = biolist; |
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3375 | 3375 | ||
3376 | if (bio->bi_end_io == end_sync_read) { | 3376 | if (bio->bi_end_io == end_sync_read) { |
3377 | md_sync_acct(bio->bi_bdev, nr_sectors); | 3377 | md_sync_acct(bio->bi_bdev, nr_sectors); |
3378 | bio->bi_error = 0; | 3378 | bio->bi_status = 0; |
3379 | generic_make_request(bio); | 3379 | generic_make_request(bio); |
3380 | } | 3380 | } |
3381 | } | 3381 | } |
@@ -4394,7 +4394,7 @@ read_more: | |||
4394 | read_bio->bi_end_io = end_reshape_read; | 4394 | read_bio->bi_end_io = end_reshape_read; |
4395 | bio_set_op_attrs(read_bio, REQ_OP_READ, 0); | 4395 | bio_set_op_attrs(read_bio, REQ_OP_READ, 0); |
4396 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); | 4396 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); |
4397 | read_bio->bi_error = 0; | 4397 | read_bio->bi_status = 0; |
4398 | read_bio->bi_vcnt = 0; | 4398 | read_bio->bi_vcnt = 0; |
4399 | read_bio->bi_iter.bi_size = 0; | 4399 | read_bio->bi_iter.bi_size = 0; |
4400 | r10_bio->master_bio = read_bio; | 4400 | r10_bio->master_bio = read_bio; |
@@ -4638,7 +4638,7 @@ static void end_reshape_write(struct bio *bio) | |||
4638 | rdev = conf->mirrors[d].rdev; | 4638 | rdev = conf->mirrors[d].rdev; |
4639 | } | 4639 | } |
4640 | 4640 | ||
4641 | if (bio->bi_error) { | 4641 | if (bio->bi_status) { |
4642 | /* FIXME should record badblock */ | 4642 | /* FIXME should record badblock */ |
4643 | md_error(mddev, rdev); | 4643 | md_error(mddev, rdev); |
4644 | } | 4644 | } |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4c00bc248287..3ed6a0d89db8 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio) | |||
572 | struct r5l_log *log = io->log; | 572 | struct r5l_log *log = io->log; |
573 | unsigned long flags; | 573 | unsigned long flags; |
574 | 574 | ||
575 | if (bio->bi_error) | 575 | if (bio->bi_status) |
576 | md_error(log->rdev->mddev, log->rdev); | 576 | md_error(log->rdev->mddev, log->rdev); |
577 | 577 | ||
578 | bio_put(bio); | 578 | bio_put(bio); |
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio) | |||
1247 | unsigned long flags; | 1247 | unsigned long flags; |
1248 | struct r5l_io_unit *io; | 1248 | struct r5l_io_unit *io; |
1249 | 1249 | ||
1250 | if (bio->bi_error) | 1250 | if (bio->bi_status) |
1251 | md_error(log->rdev->mddev, log->rdev); | 1251 | md_error(log->rdev->mddev, log->rdev); |
1252 | 1252 | ||
1253 | spin_lock_irqsave(&log->io_list_lock, flags); | 1253 | spin_lock_irqsave(&log->io_list_lock, flags); |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 5d25bebf3328..09e04be34e5f 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio) | |||
397 | 397 | ||
398 | pr_debug("%s: seq: %llu\n", __func__, io->seq); | 398 | pr_debug("%s: seq: %llu\n", __func__, io->seq); |
399 | 399 | ||
400 | if (bio->bi_error) | 400 | if (bio->bi_status) |
401 | md_error(ppl_conf->mddev, log->rdev); | 401 | md_error(ppl_conf->mddev, log->rdev); |
402 | 402 | ||
403 | list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { | 403 | list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9c4f7659f8b1..e1bdc320f664 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2476 | 2476 | ||
2477 | pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", | 2477 | pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", |
2478 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2478 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2479 | bi->bi_error); | 2479 | bi->bi_status); |
2480 | if (i == disks) { | 2480 | if (i == disks) { |
2481 | bio_reset(bi); | 2481 | bio_reset(bi); |
2482 | BUG(); | 2482 | BUG(); |
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2496 | s = sh->sector + rdev->new_data_offset; | 2496 | s = sh->sector + rdev->new_data_offset; |
2497 | else | 2497 | else |
2498 | s = sh->sector + rdev->data_offset; | 2498 | s = sh->sector + rdev->data_offset; |
2499 | if (!bi->bi_error) { | 2499 | if (!bi->bi_status) { |
2500 | set_bit(R5_UPTODATE, &sh->dev[i].flags); | 2500 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
2501 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | 2501 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
2502 | /* Note that this cannot happen on a | 2502 | /* Note that this cannot happen on a |
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2613 | } | 2613 | } |
2614 | pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", | 2614 | pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", |
2615 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2615 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2616 | bi->bi_error); | 2616 | bi->bi_status); |
2617 | if (i == disks) { | 2617 | if (i == disks) { |
2618 | bio_reset(bi); | 2618 | bio_reset(bi); |
2619 | BUG(); | 2619 | BUG(); |
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi) | |||
2621 | } | 2621 | } |
2622 | 2622 | ||
2623 | if (replacement) { | 2623 | if (replacement) { |
2624 | if (bi->bi_error) | 2624 | if (bi->bi_status) |
2625 | md_error(conf->mddev, rdev); | 2625 | md_error(conf->mddev, rdev); |
2626 | else if (is_badblock(rdev, sh->sector, | 2626 | else if (is_badblock(rdev, sh->sector, |
2627 | STRIPE_SECTORS, | 2627 | STRIPE_SECTORS, |
2628 | &first_bad, &bad_sectors)) | 2628 | &first_bad, &bad_sectors)) |
2629 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); | 2629 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); |
2630 | } else { | 2630 | } else { |
2631 | if (bi->bi_error) { | 2631 | if (bi->bi_status) { |
2632 | set_bit(STRIPE_DEGRADED, &sh->state); | 2632 | set_bit(STRIPE_DEGRADED, &sh->state); |
2633 | set_bit(WriteErrorSeen, &rdev->flags); | 2633 | set_bit(WriteErrorSeen, &rdev->flags); |
2634 | set_bit(R5_WriteError, &sh->dev[i].flags); | 2634 | set_bit(R5_WriteError, &sh->dev[i].flags); |
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2649 | } | 2649 | } |
2650 | rdev_dec_pending(rdev, conf->mddev); | 2650 | rdev_dec_pending(rdev, conf->mddev); |
2651 | 2651 | ||
2652 | if (sh->batch_head && bi->bi_error && !replacement) | 2652 | if (sh->batch_head && bi->bi_status && !replacement) |
2653 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); | 2653 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); |
2654 | 2654 | ||
2655 | bio_reset(bi); | 2655 | bio_reset(bi); |
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3381 | sh->dev[i].sector + STRIPE_SECTORS) { | 3381 | sh->dev[i].sector + STRIPE_SECTORS) { |
3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | 3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
3383 | 3383 | ||
3384 | bi->bi_error = -EIO; | 3384 | bi->bi_status = BLK_STS_IOERR; |
3385 | md_write_end(conf->mddev); | 3385 | md_write_end(conf->mddev); |
3386 | bio_endio(bi); | 3386 | bio_endio(bi); |
3387 | bi = nextbi; | 3387 | bi = nextbi; |
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3403 | sh->dev[i].sector + STRIPE_SECTORS) { | 3403 | sh->dev[i].sector + STRIPE_SECTORS) { |
3404 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); | 3404 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
3405 | 3405 | ||
3406 | bi->bi_error = -EIO; | 3406 | bi->bi_status = BLK_STS_IOERR; |
3407 | md_write_end(conf->mddev); | 3407 | md_write_end(conf->mddev); |
3408 | bio_endio(bi); | 3408 | bio_endio(bi); |
3409 | bi = bi2; | 3409 | bi = bi2; |
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3429 | struct bio *nextbi = | 3429 | struct bio *nextbi = |
3430 | r5_next_bio(bi, sh->dev[i].sector); | 3430 | r5_next_bio(bi, sh->dev[i].sector); |
3431 | 3431 | ||
3432 | bi->bi_error = -EIO; | 3432 | bi->bi_status = BLK_STS_IOERR; |
3433 | bio_endio(bi); | 3433 | bio_endio(bi); |
3434 | bi = nextbi; | 3434 | bi = nextbi; |
3435 | } | 3435 | } |
@@ -5144,7 +5144,7 @@ static void raid5_align_endio(struct bio *bi) | |||
5144 | struct mddev *mddev; | 5144 | struct mddev *mddev; |
5145 | struct r5conf *conf; | 5145 | struct r5conf *conf; |
5146 | struct md_rdev *rdev; | 5146 | struct md_rdev *rdev; |
5147 | int error = bi->bi_error; | 5147 | blk_status_t error = bi->bi_status; |
5148 | 5148 | ||
5149 | bio_put(bi); | 5149 | bio_put(bi); |
5150 | 5150 | ||
@@ -5721,7 +5721,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
5721 | release_stripe_plug(mddev, sh); | 5721 | release_stripe_plug(mddev, sh); |
5722 | } else { | 5722 | } else { |
5723 | /* cannot get stripe for read-ahead, just give-up */ | 5723 | /* cannot get stripe for read-ahead, just give-up */ |
5724 | bi->bi_error = -EIO; | 5724 | bi->bi_status = BLK_STS_IOERR; |
5725 | break; | 5725 | break; |
5726 | } | 5726 | } |
5727 | } | 5727 | } |
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 822198a75e96..79eb9fb358d5 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c | |||
@@ -186,7 +186,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) | |||
186 | * another kernel subsystem, and we just pass it through. | 186 | * another kernel subsystem, and we just pass it through. |
187 | */ | 187 | */ |
188 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { | 188 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { |
189 | bio->bi_error = -EIO; | 189 | bio->bi_status = BLK_STS_IOERR; |
190 | goto out; | 190 | goto out; |
191 | } | 191 | } |
192 | 192 | ||
@@ -205,7 +205,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) | |||
205 | "io error in %s sector %lld, len %d,\n", | 205 | "io error in %s sector %lld, len %d,\n", |
206 | (rw == READ) ? "READ" : "WRITE", | 206 | (rw == READ) ? "READ" : "WRITE", |
207 | (unsigned long long) iter.bi_sector, len); | 207 | (unsigned long long) iter.bi_sector, len); |
208 | bio->bi_error = err; | 208 | bio->bi_status = errno_to_blk_status(err); |
209 | break; | 209 | break; |
210 | } | 210 | } |
211 | } | 211 | } |
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 983718b8fd9b..31b2d14e210d 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
@@ -1210,7 +1210,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) | |||
1210 | * another kernel subsystem, and we just pass it through. | 1210 | * another kernel subsystem, and we just pass it through. |
1211 | */ | 1211 | */ |
1212 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { | 1212 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { |
1213 | bio->bi_error = -EIO; | 1213 | bio->bi_status = BLK_STS_IOERR; |
1214 | goto out; | 1214 | goto out; |
1215 | } | 1215 | } |
1216 | 1216 | ||
@@ -1232,7 +1232,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) | |||
1232 | (op_is_write(bio_op(bio))) ? "WRITE" : | 1232 | (op_is_write(bio_op(bio))) ? "WRITE" : |
1233 | "READ", | 1233 | "READ", |
1234 | (unsigned long long) iter.bi_sector, len); | 1234 | (unsigned long long) iter.bi_sector, len); |
1235 | bio->bi_error = err; | 1235 | bio->bi_status = errno_to_blk_status(err); |
1236 | break; | 1236 | break; |
1237 | } | 1237 | } |
1238 | } | 1238 | } |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index c544d466ea51..7bd383aeea14 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -49,19 +49,19 @@ static struct nd_region *to_region(struct pmem_device *pmem) | |||
49 | return to_nd_region(to_dev(pmem)->parent); | 49 | return to_nd_region(to_dev(pmem)->parent); |
50 | } | 50 | } |
51 | 51 | ||
52 | static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, | 52 | static blk_status_t pmem_clear_poison(struct pmem_device *pmem, |
53 | unsigned int len) | 53 | phys_addr_t offset, unsigned int len) |
54 | { | 54 | { |
55 | struct device *dev = to_dev(pmem); | 55 | struct device *dev = to_dev(pmem); |
56 | sector_t sector; | 56 | sector_t sector; |
57 | long cleared; | 57 | long cleared; |
58 | int rc = 0; | 58 | blk_status_t rc = BLK_STS_OK; |
59 | 59 | ||
60 | sector = (offset - pmem->data_offset) / 512; | 60 | sector = (offset - pmem->data_offset) / 512; |
61 | 61 | ||
62 | cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); | 62 | cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); |
63 | if (cleared < len) | 63 | if (cleared < len) |
64 | rc = -EIO; | 64 | rc = BLK_STS_IOERR; |
65 | if (cleared > 0 && cleared / 512) { | 65 | if (cleared > 0 && cleared / 512) { |
66 | cleared /= 512; | 66 | cleared /= 512; |
67 | dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, | 67 | dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, |
@@ -84,7 +84,7 @@ static void write_pmem(void *pmem_addr, struct page *page, | |||
84 | kunmap_atomic(mem); | 84 | kunmap_atomic(mem); |
85 | } | 85 | } |
86 | 86 | ||
87 | static int read_pmem(struct page *page, unsigned int off, | 87 | static blk_status_t read_pmem(struct page *page, unsigned int off, |
88 | void *pmem_addr, unsigned int len) | 88 | void *pmem_addr, unsigned int len) |
89 | { | 89 | { |
90 | int rc; | 90 | int rc; |
@@ -93,15 +93,15 @@ static int read_pmem(struct page *page, unsigned int off, | |||
93 | rc = memcpy_mcsafe(mem + off, pmem_addr, len); | 93 | rc = memcpy_mcsafe(mem + off, pmem_addr, len); |
94 | kunmap_atomic(mem); | 94 | kunmap_atomic(mem); |
95 | if (rc) | 95 | if (rc) |
96 | return -EIO; | 96 | return BLK_STS_IOERR; |
97 | return 0; | 97 | return BLK_STS_OK; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | 100 | static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, |
101 | unsigned int len, unsigned int off, bool is_write, | 101 | unsigned int len, unsigned int off, bool is_write, |
102 | sector_t sector) | 102 | sector_t sector) |
103 | { | 103 | { |
104 | int rc = 0; | 104 | blk_status_t rc = BLK_STS_OK; |
105 | bool bad_pmem = false; | 105 | bool bad_pmem = false; |
106 | phys_addr_t pmem_off = sector * 512 + pmem->data_offset; | 106 | phys_addr_t pmem_off = sector * 512 + pmem->data_offset; |
107 | void *pmem_addr = pmem->virt_addr + pmem_off; | 107 | void *pmem_addr = pmem->virt_addr + pmem_off; |
@@ -111,7 +111,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
111 | 111 | ||
112 | if (!is_write) { | 112 | if (!is_write) { |
113 | if (unlikely(bad_pmem)) | 113 | if (unlikely(bad_pmem)) |
114 | rc = -EIO; | 114 | rc = BLK_STS_IOERR; |
115 | else { | 115 | else { |
116 | rc = read_pmem(page, off, pmem_addr, len); | 116 | rc = read_pmem(page, off, pmem_addr, len); |
117 | flush_dcache_page(page); | 117 | flush_dcache_page(page); |
@@ -149,7 +149,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
149 | 149 | ||
150 | static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) | 150 | static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) |
151 | { | 151 | { |
152 | int rc = 0; | 152 | blk_status_t rc = 0; |
153 | bool do_acct; | 153 | bool do_acct; |
154 | unsigned long start; | 154 | unsigned long start; |
155 | struct bio_vec bvec; | 155 | struct bio_vec bvec; |
@@ -166,7 +166,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) | |||
166 | bvec.bv_offset, op_is_write(bio_op(bio)), | 166 | bvec.bv_offset, op_is_write(bio_op(bio)), |
167 | iter.bi_sector); | 167 | iter.bi_sector); |
168 | if (rc) { | 168 | if (rc) { |
169 | bio->bi_error = rc; | 169 | bio->bi_status = rc; |
170 | break; | 170 | break; |
171 | } | 171 | } |
172 | } | 172 | } |
@@ -184,7 +184,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, | |||
184 | struct page *page, bool is_write) | 184 | struct page *page, bool is_write) |
185 | { | 185 | { |
186 | struct pmem_device *pmem = bdev->bd_queue->queuedata; | 186 | struct pmem_device *pmem = bdev->bd_queue->queuedata; |
187 | int rc; | 187 | blk_status_t rc; |
188 | 188 | ||
189 | rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector); | 189 | rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector); |
190 | 190 | ||
@@ -197,7 +197,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, | |||
197 | if (rc == 0) | 197 | if (rc == 0) |
198 | page_endio(page, is_write, 0); | 198 | page_endio(page, is_write, 0); |
199 | 199 | ||
200 | return rc; | 200 | return blk_status_to_errno(rc); |
201 | } | 201 | } |
202 | 202 | ||
203 | /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ | 203 | /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ |
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index c77940d80fc8..40128793e613 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c | |||
@@ -21,7 +21,7 @@ static void nvmet_bio_done(struct bio *bio) | |||
21 | struct nvmet_req *req = bio->bi_private; | 21 | struct nvmet_req *req = bio->bi_private; |
22 | 22 | ||
23 | nvmet_req_complete(req, | 23 | nvmet_req_complete(req, |
24 | bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); | 24 | bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); |
25 | 25 | ||
26 | if (bio != &req->inline_bio) | 26 | if (bio != &req->inline_bio) |
27 | bio_put(bio); | 27 | bio_put(bio); |
@@ -145,7 +145,7 @@ static void nvmet_execute_discard(struct nvmet_req *req) | |||
145 | bio->bi_private = req; | 145 | bio->bi_private = req; |
146 | bio->bi_end_io = nvmet_bio_done; | 146 | bio->bi_end_io = nvmet_bio_done; |
147 | if (status) { | 147 | if (status) { |
148 | bio->bi_error = -EIO; | 148 | bio->bi_status = BLK_STS_IOERR; |
149 | bio_endio(bio); | 149 | bio_endio(bio); |
150 | } else { | 150 | } else { |
151 | submit_bio(bio); | 151 | submit_bio(bio); |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index bb069ebe4aa6..75373624604b 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -296,8 +296,8 @@ static void iblock_bio_done(struct bio *bio) | |||
296 | struct se_cmd *cmd = bio->bi_private; | 296 | struct se_cmd *cmd = bio->bi_private; |
297 | struct iblock_req *ibr = cmd->priv; | 297 | struct iblock_req *ibr = cmd->priv; |
298 | 298 | ||
299 | if (bio->bi_error) { | 299 | if (bio->bi_status) { |
300 | pr_err("bio error: %p, err: %d\n", bio, bio->bi_error); | 300 | pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); |
301 | /* | 301 | /* |
302 | * Bump the ib_bio_err_cnt and release bio. | 302 | * Bump the ib_bio_err_cnt and release bio. |
303 | */ | 303 | */ |
@@ -354,11 +354,11 @@ static void iblock_end_io_flush(struct bio *bio) | |||
354 | { | 354 | { |
355 | struct se_cmd *cmd = bio->bi_private; | 355 | struct se_cmd *cmd = bio->bi_private; |
356 | 356 | ||
357 | if (bio->bi_error) | 357 | if (bio->bi_status) |
358 | pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error); | 358 | pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status); |
359 | 359 | ||
360 | if (cmd) { | 360 | if (cmd) { |
361 | if (bio->bi_error) | 361 | if (bio->bi_status) |
362 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); | 362 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); |
363 | else | 363 | else |
364 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 364 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index c1dc393ad6b9..bcd8e16a34e1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -262,8 +262,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, | |||
262 | if (vecs != inline_vecs) | 262 | if (vecs != inline_vecs) |
263 | kfree(vecs); | 263 | kfree(vecs); |
264 | 264 | ||
265 | if (unlikely(bio.bi_error)) | 265 | if (unlikely(bio.bi_status)) |
266 | return bio.bi_error; | 266 | return blk_status_to_errno(bio.bi_status); |
267 | return ret; | 267 | return ret; |
268 | } | 268 | } |
269 | 269 | ||
@@ -288,16 +288,18 @@ static void blkdev_bio_end_io(struct bio *bio) | |||
288 | bool should_dirty = dio->should_dirty; | 288 | bool should_dirty = dio->should_dirty; |
289 | 289 | ||
290 | if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { | 290 | if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { |
291 | if (bio->bi_error && !dio->bio.bi_error) | 291 | if (bio->bi_status && !dio->bio.bi_status) |
292 | dio->bio.bi_error = bio->bi_error; | 292 | dio->bio.bi_status = bio->bi_status; |
293 | } else { | 293 | } else { |
294 | if (!dio->is_sync) { | 294 | if (!dio->is_sync) { |
295 | struct kiocb *iocb = dio->iocb; | 295 | struct kiocb *iocb = dio->iocb; |
296 | ssize_t ret = dio->bio.bi_error; | 296 | ssize_t ret; |
297 | 297 | ||
298 | if (likely(!ret)) { | 298 | if (likely(!dio->bio.bi_status)) { |
299 | ret = dio->size; | 299 | ret = dio->size; |
300 | iocb->ki_pos += ret; | 300 | iocb->ki_pos += ret; |
301 | } else { | ||
302 | ret = blk_status_to_errno(dio->bio.bi_status); | ||
301 | } | 303 | } |
302 | 304 | ||
303 | dio->iocb->ki_complete(iocb, ret, 0); | 305 | dio->iocb->ki_complete(iocb, ret, 0); |
@@ -363,7 +365,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
363 | 365 | ||
364 | ret = bio_iov_iter_get_pages(bio, iter); | 366 | ret = bio_iov_iter_get_pages(bio, iter); |
365 | if (unlikely(ret)) { | 367 | if (unlikely(ret)) { |
366 | bio->bi_error = -EIO; | 368 | bio->bi_status = BLK_STS_IOERR; |
367 | bio_endio(bio); | 369 | bio_endio(bio); |
368 | break; | 370 | break; |
369 | } | 371 | } |
@@ -413,7 +415,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
413 | __set_current_state(TASK_RUNNING); | 415 | __set_current_state(TASK_RUNNING); |
414 | 416 | ||
415 | if (!ret) | 417 | if (!ret) |
416 | ret = dio->bio.bi_error; | 418 | ret = blk_status_to_errno(dio->bio.bi_status); |
417 | if (likely(!ret)) | 419 | if (likely(!ret)) |
418 | ret = dio->size; | 420 | ret = dio->size; |
419 | 421 | ||
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index b8622e4d1744..d87ac27a5f2b 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -310,7 +310,8 @@ struct btrfs_dio_private { | |||
310 | * The original bio may be split to several sub-bios, this is | 310 | * The original bio may be split to several sub-bios, this is |
311 | * done during endio of sub-bios | 311 | * done during endio of sub-bios |
312 | */ | 312 | */ |
313 | int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); | 313 | blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *, |
314 | blk_status_t); | ||
314 | }; | 315 | }; |
315 | 316 | ||
316 | /* | 317 | /* |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index ab14c2e635ca..4ded1c3f92b8 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
@@ -2129,7 +2129,7 @@ static void btrfsic_bio_end_io(struct bio *bp) | |||
2129 | /* mutex is not held! This is not save if IO is not yet completed | 2129 | /* mutex is not held! This is not save if IO is not yet completed |
2130 | * on umount */ | 2130 | * on umount */ |
2131 | iodone_w_error = 0; | 2131 | iodone_w_error = 0; |
2132 | if (bp->bi_error) | 2132 | if (bp->bi_status) |
2133 | iodone_w_error = 1; | 2133 | iodone_w_error = 1; |
2134 | 2134 | ||
2135 | BUG_ON(NULL == block); | 2135 | BUG_ON(NULL == block); |
@@ -2143,7 +2143,7 @@ static void btrfsic_bio_end_io(struct bio *bp) | |||
2143 | if ((dev_state->state->print_mask & | 2143 | if ((dev_state->state->print_mask & |
2144 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) | 2144 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) |
2145 | pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", | 2145 | pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", |
2146 | bp->bi_error, | 2146 | bp->bi_status, |
2147 | btrfsic_get_block_type(dev_state->state, block), | 2147 | btrfsic_get_block_type(dev_state->state, block), |
2148 | block->logical_bytenr, dev_state->name, | 2148 | block->logical_bytenr, dev_state->name, |
2149 | block->dev_bytenr, block->mirror_num); | 2149 | block->dev_bytenr, block->mirror_num); |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 10e6b282d09d..9ac55b266e78 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -155,7 +155,7 @@ static void end_compressed_bio_read(struct bio *bio) | |||
155 | unsigned long index; | 155 | unsigned long index; |
156 | int ret; | 156 | int ret; |
157 | 157 | ||
158 | if (bio->bi_error) | 158 | if (bio->bi_status) |
159 | cb->errors = 1; | 159 | cb->errors = 1; |
160 | 160 | ||
161 | /* if there are more bios still pending for this compressed | 161 | /* if there are more bios still pending for this compressed |
@@ -268,7 +268,7 @@ static void end_compressed_bio_write(struct bio *bio) | |||
268 | struct page *page; | 268 | struct page *page; |
269 | unsigned long index; | 269 | unsigned long index; |
270 | 270 | ||
271 | if (bio->bi_error) | 271 | if (bio->bi_status) |
272 | cb->errors = 1; | 272 | cb->errors = 1; |
273 | 273 | ||
274 | /* if there are more bios still pending for this compressed | 274 | /* if there are more bios still pending for this compressed |
@@ -287,7 +287,7 @@ static void end_compressed_bio_write(struct bio *bio) | |||
287 | cb->start, | 287 | cb->start, |
288 | cb->start + cb->len - 1, | 288 | cb->start + cb->len - 1, |
289 | NULL, | 289 | NULL, |
290 | bio->bi_error ? 0 : 1); | 290 | bio->bi_status ? 0 : 1); |
291 | cb->compressed_pages[0]->mapping = NULL; | 291 | cb->compressed_pages[0]->mapping = NULL; |
292 | 292 | ||
293 | end_compressed_writeback(inode, cb); | 293 | end_compressed_writeback(inode, cb); |
@@ -320,7 +320,7 @@ out: | |||
320 | * This also checksums the file bytes and gets things ready for | 320 | * This also checksums the file bytes and gets things ready for |
321 | * the end io hooks. | 321 | * the end io hooks. |
322 | */ | 322 | */ |
323 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, | 323 | blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, |
324 | unsigned long len, u64 disk_start, | 324 | unsigned long len, u64 disk_start, |
325 | unsigned long compressed_len, | 325 | unsigned long compressed_len, |
326 | struct page **compressed_pages, | 326 | struct page **compressed_pages, |
@@ -335,13 +335,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
335 | struct page *page; | 335 | struct page *page; |
336 | u64 first_byte = disk_start; | 336 | u64 first_byte = disk_start; |
337 | struct block_device *bdev; | 337 | struct block_device *bdev; |
338 | int ret; | 338 | blk_status_t ret; |
339 | int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 339 | int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
340 | 340 | ||
341 | WARN_ON(start & ((u64)PAGE_SIZE - 1)); | 341 | WARN_ON(start & ((u64)PAGE_SIZE - 1)); |
342 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); | 342 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); |
343 | if (!cb) | 343 | if (!cb) |
344 | return -ENOMEM; | 344 | return BLK_STS_RESOURCE; |
345 | refcount_set(&cb->pending_bios, 0); | 345 | refcount_set(&cb->pending_bios, 0); |
346 | cb->errors = 0; | 346 | cb->errors = 0; |
347 | cb->inode = inode; | 347 | cb->inode = inode; |
@@ -358,7 +358,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
358 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); | 358 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); |
359 | if (!bio) { | 359 | if (!bio) { |
360 | kfree(cb); | 360 | kfree(cb); |
361 | return -ENOMEM; | 361 | return BLK_STS_RESOURCE; |
362 | } | 362 | } |
363 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | 363 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
364 | bio->bi_private = cb; | 364 | bio->bi_private = cb; |
@@ -368,17 +368,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
368 | /* create and submit bios for the compressed pages */ | 368 | /* create and submit bios for the compressed pages */ |
369 | bytes_left = compressed_len; | 369 | bytes_left = compressed_len; |
370 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { | 370 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { |
371 | int submit = 0; | ||
372 | |||
371 | page = compressed_pages[pg_index]; | 373 | page = compressed_pages[pg_index]; |
372 | page->mapping = inode->i_mapping; | 374 | page->mapping = inode->i_mapping; |
373 | if (bio->bi_iter.bi_size) | 375 | if (bio->bi_iter.bi_size) |
374 | ret = io_tree->ops->merge_bio_hook(page, 0, | 376 | submit = io_tree->ops->merge_bio_hook(page, 0, |
375 | PAGE_SIZE, | 377 | PAGE_SIZE, |
376 | bio, 0); | 378 | bio, 0); |
377 | else | ||
378 | ret = 0; | ||
379 | 379 | ||
380 | page->mapping = NULL; | 380 | page->mapping = NULL; |
381 | if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) < | 381 | if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) < |
382 | PAGE_SIZE) { | 382 | PAGE_SIZE) { |
383 | bio_get(bio); | 383 | bio_get(bio); |
384 | 384 | ||
@@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
400 | 400 | ||
401 | ret = btrfs_map_bio(fs_info, bio, 0, 1); | 401 | ret = btrfs_map_bio(fs_info, bio, 0, 1); |
402 | if (ret) { | 402 | if (ret) { |
403 | bio->bi_error = ret; | 403 | bio->bi_status = ret; |
404 | bio_endio(bio); | 404 | bio_endio(bio); |
405 | } | 405 | } |
406 | 406 | ||
@@ -434,7 +434,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
434 | 434 | ||
435 | ret = btrfs_map_bio(fs_info, bio, 0, 1); | 435 | ret = btrfs_map_bio(fs_info, bio, 0, 1); |
436 | if (ret) { | 436 | if (ret) { |
437 | bio->bi_error = ret; | 437 | bio->bi_status = ret; |
438 | bio_endio(bio); | 438 | bio_endio(bio); |
439 | } | 439 | } |
440 | 440 | ||
@@ -569,7 +569,7 @@ next: | |||
569 | * After the compressed pages are read, we copy the bytes into the | 569 | * After the compressed pages are read, we copy the bytes into the |
570 | * bio we were passed and then call the bio end_io calls | 570 | * bio we were passed and then call the bio end_io calls |
571 | */ | 571 | */ |
572 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | 572 | blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, |
573 | int mirror_num, unsigned long bio_flags) | 573 | int mirror_num, unsigned long bio_flags) |
574 | { | 574 | { |
575 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 575 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
@@ -586,7 +586,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
586 | u64 em_len; | 586 | u64 em_len; |
587 | u64 em_start; | 587 | u64 em_start; |
588 | struct extent_map *em; | 588 | struct extent_map *em; |
589 | int ret = -ENOMEM; | 589 | blk_status_t ret = BLK_STS_RESOURCE; |
590 | int faili = 0; | 590 | int faili = 0; |
591 | u32 *sums; | 591 | u32 *sums; |
592 | 592 | ||
@@ -600,7 +600,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
600 | PAGE_SIZE); | 600 | PAGE_SIZE); |
601 | read_unlock(&em_tree->lock); | 601 | read_unlock(&em_tree->lock); |
602 | if (!em) | 602 | if (!em) |
603 | return -EIO; | 603 | return BLK_STS_IOERR; |
604 | 604 | ||
605 | compressed_len = em->block_len; | 605 | compressed_len = em->block_len; |
606 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); | 606 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); |
@@ -659,19 +659,19 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
659 | refcount_set(&cb->pending_bios, 1); | 659 | refcount_set(&cb->pending_bios, 1); |
660 | 660 | ||
661 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { | 661 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
662 | int submit = 0; | ||
663 | |||
662 | page = cb->compressed_pages[pg_index]; | 664 | page = cb->compressed_pages[pg_index]; |
663 | page->mapping = inode->i_mapping; | 665 | page->mapping = inode->i_mapping; |
664 | page->index = em_start >> PAGE_SHIFT; | 666 | page->index = em_start >> PAGE_SHIFT; |
665 | 667 | ||
666 | if (comp_bio->bi_iter.bi_size) | 668 | if (comp_bio->bi_iter.bi_size) |
667 | ret = tree->ops->merge_bio_hook(page, 0, | 669 | submit = tree->ops->merge_bio_hook(page, 0, |
668 | PAGE_SIZE, | 670 | PAGE_SIZE, |
669 | comp_bio, 0); | 671 | comp_bio, 0); |
670 | else | ||
671 | ret = 0; | ||
672 | 672 | ||
673 | page->mapping = NULL; | 673 | page->mapping = NULL; |
674 | if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < | 674 | if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < |
675 | PAGE_SIZE) { | 675 | PAGE_SIZE) { |
676 | bio_get(comp_bio); | 676 | bio_get(comp_bio); |
677 | 677 | ||
@@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
697 | 697 | ||
698 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); | 698 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); |
699 | if (ret) { | 699 | if (ret) { |
700 | comp_bio->bi_error = ret; | 700 | comp_bio->bi_status = ret; |
701 | bio_endio(comp_bio); | 701 | bio_endio(comp_bio); |
702 | } | 702 | } |
703 | 703 | ||
@@ -726,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
726 | 726 | ||
727 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); | 727 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); |
728 | if (ret) { | 728 | if (ret) { |
729 | comp_bio->bi_error = ret; | 729 | comp_bio->bi_status = ret; |
730 | bio_endio(comp_bio); | 730 | bio_endio(comp_bio); |
731 | } | 731 | } |
732 | 732 | ||
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 39ec43ab8df1..680d4265d601 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h | |||
@@ -48,12 +48,12 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, | |||
48 | unsigned long total_out, u64 disk_start, | 48 | unsigned long total_out, u64 disk_start, |
49 | struct bio *bio); | 49 | struct bio *bio); |
50 | 50 | ||
51 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, | 51 | blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, |
52 | unsigned long len, u64 disk_start, | 52 | unsigned long len, u64 disk_start, |
53 | unsigned long compressed_len, | 53 | unsigned long compressed_len, |
54 | struct page **compressed_pages, | 54 | struct page **compressed_pages, |
55 | unsigned long nr_pages); | 55 | unsigned long nr_pages); |
56 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | 56 | blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, |
57 | int mirror_num, unsigned long bio_flags); | 57 | int mirror_num, unsigned long bio_flags); |
58 | 58 | ||
59 | enum btrfs_compression_type { | 59 | enum btrfs_compression_type { |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 643c70d2b2e6..d2da0a52d560 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3078,8 +3078,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path, | |||
3078 | struct btrfs_dio_private; | 3078 | struct btrfs_dio_private; |
3079 | int btrfs_del_csums(struct btrfs_trans_handle *trans, | 3079 | int btrfs_del_csums(struct btrfs_trans_handle *trans, |
3080 | struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); | 3080 | struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); |
3081 | int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); | 3081 | blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); |
3082 | int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, | 3082 | blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, |
3083 | u64 logical_offset); | 3083 | u64 logical_offset); |
3084 | int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, | 3084 | int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, |
3085 | struct btrfs_root *root, | 3085 | struct btrfs_root *root, |
@@ -3094,7 +3094,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, | |||
3094 | int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, | 3094 | int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, |
3095 | struct btrfs_root *root, | 3095 | struct btrfs_root *root, |
3096 | struct btrfs_ordered_sum *sums); | 3096 | struct btrfs_ordered_sum *sums); |
3097 | int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, | 3097 | blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, |
3098 | u64 file_start, int contig); | 3098 | u64 file_start, int contig); |
3099 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, | 3099 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, |
3100 | struct list_head *list, int search_commit); | 3100 | struct list_head *list, int search_commit); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8685d67185d0..46accc75ad5a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -87,7 +87,7 @@ struct btrfs_end_io_wq { | |||
87 | bio_end_io_t *end_io; | 87 | bio_end_io_t *end_io; |
88 | void *private; | 88 | void *private; |
89 | struct btrfs_fs_info *info; | 89 | struct btrfs_fs_info *info; |
90 | int error; | 90 | blk_status_t status; |
91 | enum btrfs_wq_endio_type metadata; | 91 | enum btrfs_wq_endio_type metadata; |
92 | struct list_head list; | 92 | struct list_head list; |
93 | struct btrfs_work work; | 93 | struct btrfs_work work; |
@@ -131,7 +131,7 @@ struct async_submit_bio { | |||
131 | */ | 131 | */ |
132 | u64 bio_offset; | 132 | u64 bio_offset; |
133 | struct btrfs_work work; | 133 | struct btrfs_work work; |
134 | int error; | 134 | blk_status_t status; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | /* | 137 | /* |
@@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio) | |||
799 | btrfs_work_func_t func; | 799 | btrfs_work_func_t func; |
800 | 800 | ||
801 | fs_info = end_io_wq->info; | 801 | fs_info = end_io_wq->info; |
802 | end_io_wq->error = bio->bi_error; | 802 | end_io_wq->status = bio->bi_status; |
803 | 803 | ||
804 | if (bio_op(bio) == REQ_OP_WRITE) { | 804 | if (bio_op(bio) == REQ_OP_WRITE) { |
805 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { | 805 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { |
@@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio) | |||
836 | btrfs_queue_work(wq, &end_io_wq->work); | 836 | btrfs_queue_work(wq, &end_io_wq->work); |
837 | } | 837 | } |
838 | 838 | ||
839 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, | 839 | blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, |
840 | enum btrfs_wq_endio_type metadata) | 840 | enum btrfs_wq_endio_type metadata) |
841 | { | 841 | { |
842 | struct btrfs_end_io_wq *end_io_wq; | 842 | struct btrfs_end_io_wq *end_io_wq; |
843 | 843 | ||
844 | end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS); | 844 | end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS); |
845 | if (!end_io_wq) | 845 | if (!end_io_wq) |
846 | return -ENOMEM; | 846 | return BLK_STS_RESOURCE; |
847 | 847 | ||
848 | end_io_wq->private = bio->bi_private; | 848 | end_io_wq->private = bio->bi_private; |
849 | end_io_wq->end_io = bio->bi_end_io; | 849 | end_io_wq->end_io = bio->bi_end_io; |
850 | end_io_wq->info = info; | 850 | end_io_wq->info = info; |
851 | end_io_wq->error = 0; | 851 | end_io_wq->status = 0; |
852 | end_io_wq->bio = bio; | 852 | end_io_wq->bio = bio; |
853 | end_io_wq->metadata = metadata; | 853 | end_io_wq->metadata = metadata; |
854 | 854 | ||
@@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) | |||
868 | static void run_one_async_start(struct btrfs_work *work) | 868 | static void run_one_async_start(struct btrfs_work *work) |
869 | { | 869 | { |
870 | struct async_submit_bio *async; | 870 | struct async_submit_bio *async; |
871 | int ret; | 871 | blk_status_t ret; |
872 | 872 | ||
873 | async = container_of(work, struct async_submit_bio, work); | 873 | async = container_of(work, struct async_submit_bio, work); |
874 | ret = async->submit_bio_start(async->inode, async->bio, | 874 | ret = async->submit_bio_start(async->inode, async->bio, |
875 | async->mirror_num, async->bio_flags, | 875 | async->mirror_num, async->bio_flags, |
876 | async->bio_offset); | 876 | async->bio_offset); |
877 | if (ret) | 877 | if (ret) |
878 | async->error = ret; | 878 | async->status = ret; |
879 | } | 879 | } |
880 | 880 | ||
881 | static void run_one_async_done(struct btrfs_work *work) | 881 | static void run_one_async_done(struct btrfs_work *work) |
@@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work) | |||
898 | wake_up(&fs_info->async_submit_wait); | 898 | wake_up(&fs_info->async_submit_wait); |
899 | 899 | ||
900 | /* If an error occurred we just want to clean up the bio and move on */ | 900 | /* If an error occurred we just want to clean up the bio and move on */ |
901 | if (async->error) { | 901 | if (async->status) { |
902 | async->bio->bi_error = async->error; | 902 | async->bio->bi_status = async->status; |
903 | bio_endio(async->bio); | 903 | bio_endio(async->bio); |
904 | return; | 904 | return; |
905 | } | 905 | } |
@@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work) | |||
916 | kfree(async); | 916 | kfree(async); |
917 | } | 917 | } |
918 | 918 | ||
919 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 919 | blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, |
920 | struct bio *bio, int mirror_num, | 920 | struct inode *inode, struct bio *bio, int mirror_num, |
921 | unsigned long bio_flags, | 921 | unsigned long bio_flags, u64 bio_offset, |
922 | u64 bio_offset, | 922 | extent_submit_bio_hook_t *submit_bio_start, |
923 | extent_submit_bio_hook_t *submit_bio_start, | 923 | extent_submit_bio_hook_t *submit_bio_done) |
924 | extent_submit_bio_hook_t *submit_bio_done) | ||
925 | { | 924 | { |
926 | struct async_submit_bio *async; | 925 | struct async_submit_bio *async; |
927 | 926 | ||
928 | async = kmalloc(sizeof(*async), GFP_NOFS); | 927 | async = kmalloc(sizeof(*async), GFP_NOFS); |
929 | if (!async) | 928 | if (!async) |
930 | return -ENOMEM; | 929 | return BLK_STS_RESOURCE; |
931 | 930 | ||
932 | async->inode = inode; | 931 | async->inode = inode; |
933 | async->bio = bio; | 932 | async->bio = bio; |
@@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
941 | async->bio_flags = bio_flags; | 940 | async->bio_flags = bio_flags; |
942 | async->bio_offset = bio_offset; | 941 | async->bio_offset = bio_offset; |
943 | 942 | ||
944 | async->error = 0; | 943 | async->status = 0; |
945 | 944 | ||
946 | atomic_inc(&fs_info->nr_async_submits); | 945 | atomic_inc(&fs_info->nr_async_submits); |
947 | 946 | ||
@@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
959 | return 0; | 958 | return 0; |
960 | } | 959 | } |
961 | 960 | ||
962 | static int btree_csum_one_bio(struct bio *bio) | 961 | static blk_status_t btree_csum_one_bio(struct bio *bio) |
963 | { | 962 | { |
964 | struct bio_vec *bvec; | 963 | struct bio_vec *bvec; |
965 | struct btrfs_root *root; | 964 | struct btrfs_root *root; |
@@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio) | |||
972 | break; | 971 | break; |
973 | } | 972 | } |
974 | 973 | ||
975 | return ret; | 974 | return errno_to_blk_status(ret); |
976 | } | 975 | } |
977 | 976 | ||
978 | static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, | 977 | static blk_status_t __btree_submit_bio_start(struct inode *inode, |
979 | int mirror_num, unsigned long bio_flags, | 978 | struct bio *bio, int mirror_num, unsigned long bio_flags, |
980 | u64 bio_offset) | 979 | u64 bio_offset) |
981 | { | 980 | { |
982 | /* | 981 | /* |
983 | * when we're called for a write, we're already in the async | 982 | * when we're called for a write, we're already in the async |
@@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, | |||
986 | return btree_csum_one_bio(bio); | 985 | return btree_csum_one_bio(bio); |
987 | } | 986 | } |
988 | 987 | ||
989 | static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, | 988 | static blk_status_t __btree_submit_bio_done(struct inode *inode, |
990 | int mirror_num, unsigned long bio_flags, | 989 | struct bio *bio, int mirror_num, unsigned long bio_flags, |
991 | u64 bio_offset) | 990 | u64 bio_offset) |
992 | { | 991 | { |
993 | int ret; | 992 | blk_status_t ret; |
994 | 993 | ||
995 | /* | 994 | /* |
996 | * when we're called for a write, we're already in the async | 995 | * when we're called for a write, we're already in the async |
@@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, | |||
998 | */ | 997 | */ |
999 | ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1); | 998 | ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1); |
1000 | if (ret) { | 999 | if (ret) { |
1001 | bio->bi_error = ret; | 1000 | bio->bi_status = ret; |
1002 | bio_endio(bio); | 1001 | bio_endio(bio); |
1003 | } | 1002 | } |
1004 | return ret; | 1003 | return ret; |
@@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags) | |||
1015 | return 1; | 1014 | return 1; |
1016 | } | 1015 | } |
1017 | 1016 | ||
1018 | static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, | 1017 | static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio, |
1019 | int mirror_num, unsigned long bio_flags, | 1018 | int mirror_num, unsigned long bio_flags, |
1020 | u64 bio_offset) | 1019 | u64 bio_offset) |
1021 | { | 1020 | { |
1022 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 1021 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
1023 | int async = check_async_write(bio_flags); | 1022 | int async = check_async_write(bio_flags); |
1024 | int ret; | 1023 | blk_status_t ret; |
1025 | 1024 | ||
1026 | if (bio_op(bio) != REQ_OP_WRITE) { | 1025 | if (bio_op(bio) != REQ_OP_WRITE) { |
1027 | /* | 1026 | /* |
@@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, | |||
1054 | return 0; | 1053 | return 0; |
1055 | 1054 | ||
1056 | out_w_error: | 1055 | out_w_error: |
1057 | bio->bi_error = ret; | 1056 | bio->bi_status = ret; |
1058 | bio_endio(bio); | 1057 | bio_endio(bio); |
1059 | return ret; | 1058 | return ret; |
1060 | } | 1059 | } |
@@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work) | |||
1820 | end_io_wq = container_of(work, struct btrfs_end_io_wq, work); | 1819 | end_io_wq = container_of(work, struct btrfs_end_io_wq, work); |
1821 | bio = end_io_wq->bio; | 1820 | bio = end_io_wq->bio; |
1822 | 1821 | ||
1823 | bio->bi_error = end_io_wq->error; | 1822 | bio->bi_status = end_io_wq->status; |
1824 | bio->bi_private = end_io_wq->private; | 1823 | bio->bi_private = end_io_wq->private; |
1825 | bio->bi_end_io = end_io_wq->end_io; | 1824 | bio->bi_end_io = end_io_wq->end_io; |
1826 | kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); | 1825 | kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); |
@@ -3495,11 +3494,11 @@ static void btrfs_end_empty_barrier(struct bio *bio) | |||
3495 | * any device where the flush fails with eopnotsupp are flagged as not-barrier | 3494 | * any device where the flush fails with eopnotsupp are flagged as not-barrier |
3496 | * capable | 3495 | * capable |
3497 | */ | 3496 | */ |
3498 | static int write_dev_flush(struct btrfs_device *device, int wait) | 3497 | static blk_status_t write_dev_flush(struct btrfs_device *device, int wait) |
3499 | { | 3498 | { |
3500 | struct request_queue *q = bdev_get_queue(device->bdev); | 3499 | struct request_queue *q = bdev_get_queue(device->bdev); |
3501 | struct bio *bio; | 3500 | struct bio *bio; |
3502 | int ret = 0; | 3501 | blk_status_t ret = 0; |
3503 | 3502 | ||
3504 | if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) | 3503 | if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
3505 | return 0; | 3504 | return 0; |
@@ -3511,8 +3510,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
3511 | 3510 | ||
3512 | wait_for_completion(&device->flush_wait); | 3511 | wait_for_completion(&device->flush_wait); |
3513 | 3512 | ||
3514 | if (bio->bi_error) { | 3513 | if (bio->bi_status) { |
3515 | ret = bio->bi_error; | 3514 | ret = bio->bi_status; |
3516 | btrfs_dev_stat_inc_and_print(device, | 3515 | btrfs_dev_stat_inc_and_print(device, |
3517 | BTRFS_DEV_STAT_FLUSH_ERRS); | 3516 | BTRFS_DEV_STAT_FLUSH_ERRS); |
3518 | } | 3517 | } |
@@ -3531,7 +3530,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
3531 | device->flush_bio = NULL; | 3530 | device->flush_bio = NULL; |
3532 | bio = btrfs_io_bio_alloc(GFP_NOFS, 0); | 3531 | bio = btrfs_io_bio_alloc(GFP_NOFS, 0); |
3533 | if (!bio) | 3532 | if (!bio) |
3534 | return -ENOMEM; | 3533 | return BLK_STS_RESOURCE; |
3535 | 3534 | ||
3536 | bio->bi_end_io = btrfs_end_empty_barrier; | 3535 | bio->bi_end_io = btrfs_end_empty_barrier; |
3537 | bio->bi_bdev = device->bdev; | 3536 | bio->bi_bdev = device->bdev; |
@@ -3556,7 +3555,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info) | |||
3556 | struct btrfs_device *dev; | 3555 | struct btrfs_device *dev; |
3557 | int errors_send = 0; | 3556 | int errors_send = 0; |
3558 | int errors_wait = 0; | 3557 | int errors_wait = 0; |
3559 | int ret; | 3558 | blk_status_t ret; |
3560 | 3559 | ||
3561 | /* send down all the barriers */ | 3560 | /* send down all the barriers */ |
3562 | head = &info->fs_devices->devices; | 3561 | head = &info->fs_devices->devices; |
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 21f1ceb85b76..c581927555f3 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
@@ -118,13 +118,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, | |||
118 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); | 118 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); |
119 | u32 btrfs_csum_data(const char *data, u32 seed, size_t len); | 119 | u32 btrfs_csum_data(const char *data, u32 seed, size_t len); |
120 | void btrfs_csum_final(u32 crc, u8 *result); | 120 | void btrfs_csum_final(u32 crc, u8 *result); |
121 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, | 121 | blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, |
122 | enum btrfs_wq_endio_type metadata); | 122 | enum btrfs_wq_endio_type metadata); |
123 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 123 | blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, |
124 | struct bio *bio, int mirror_num, | 124 | struct inode *inode, struct bio *bio, int mirror_num, |
125 | unsigned long bio_flags, u64 bio_offset, | 125 | unsigned long bio_flags, u64 bio_offset, |
126 | extent_submit_bio_hook_t *submit_bio_start, | 126 | extent_submit_bio_hook_t *submit_bio_start, |
127 | extent_submit_bio_hook_t *submit_bio_done); | 127 | extent_submit_bio_hook_t *submit_bio_done); |
128 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); | 128 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); |
129 | int btrfs_write_tree_block(struct extent_buffer *buf); | 129 | int btrfs_write_tree_block(struct extent_buffer *buf); |
130 | int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); | 130 | int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d8da3edf2ac3..35cbb6ceb70d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2399,6 +2399,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, | |||
2399 | struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; | 2399 | struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
2400 | struct bio *bio; | 2400 | struct bio *bio; |
2401 | int read_mode = 0; | 2401 | int read_mode = 0; |
2402 | blk_status_t status; | ||
2402 | int ret; | 2403 | int ret; |
2403 | 2404 | ||
2404 | BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); | 2405 | BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
@@ -2431,11 +2432,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, | |||
2431 | "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", | 2432 | "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", |
2432 | read_mode, failrec->this_mirror, failrec->in_validation); | 2433 | read_mode, failrec->this_mirror, failrec->in_validation); |
2433 | 2434 | ||
2434 | ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, | 2435 | status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, |
2435 | failrec->bio_flags, 0); | 2436 | failrec->bio_flags, 0); |
2436 | if (ret) { | 2437 | if (status) { |
2437 | free_io_failure(BTRFS_I(inode), failrec); | 2438 | free_io_failure(BTRFS_I(inode), failrec); |
2438 | bio_put(bio); | 2439 | bio_put(bio); |
2440 | ret = blk_status_to_errno(status); | ||
2439 | } | 2441 | } |
2440 | 2442 | ||
2441 | return ret; | 2443 | return ret; |
@@ -2474,6 +2476,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) | |||
2474 | */ | 2476 | */ |
2475 | static void end_bio_extent_writepage(struct bio *bio) | 2477 | static void end_bio_extent_writepage(struct bio *bio) |
2476 | { | 2478 | { |
2479 | int error = blk_status_to_errno(bio->bi_status); | ||
2477 | struct bio_vec *bvec; | 2480 | struct bio_vec *bvec; |
2478 | u64 start; | 2481 | u64 start; |
2479 | u64 end; | 2482 | u64 end; |
@@ -2503,7 +2506,7 @@ static void end_bio_extent_writepage(struct bio *bio) | |||
2503 | start = page_offset(page); | 2506 | start = page_offset(page); |
2504 | end = start + bvec->bv_offset + bvec->bv_len - 1; | 2507 | end = start + bvec->bv_offset + bvec->bv_len - 1; |
2505 | 2508 | ||
2506 | end_extent_writepage(page, bio->bi_error, start, end); | 2509 | end_extent_writepage(page, error, start, end); |
2507 | end_page_writeback(page); | 2510 | end_page_writeback(page); |
2508 | } | 2511 | } |
2509 | 2512 | ||
@@ -2536,7 +2539,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, | |||
2536 | static void end_bio_extent_readpage(struct bio *bio) | 2539 | static void end_bio_extent_readpage(struct bio *bio) |
2537 | { | 2540 | { |
2538 | struct bio_vec *bvec; | 2541 | struct bio_vec *bvec; |
2539 | int uptodate = !bio->bi_error; | 2542 | int uptodate = !bio->bi_status; |
2540 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | 2543 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); |
2541 | struct extent_io_tree *tree; | 2544 | struct extent_io_tree *tree; |
2542 | u64 offset = 0; | 2545 | u64 offset = 0; |
@@ -2556,7 +2559,7 @@ static void end_bio_extent_readpage(struct bio *bio) | |||
2556 | 2559 | ||
2557 | btrfs_debug(fs_info, | 2560 | btrfs_debug(fs_info, |
2558 | "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", | 2561 | "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", |
2559 | (u64)bio->bi_iter.bi_sector, bio->bi_error, | 2562 | (u64)bio->bi_iter.bi_sector, bio->bi_status, |
2560 | io_bio->mirror_num); | 2563 | io_bio->mirror_num); |
2561 | tree = &BTRFS_I(inode)->io_tree; | 2564 | tree = &BTRFS_I(inode)->io_tree; |
2562 | 2565 | ||
@@ -2615,7 +2618,7 @@ static void end_bio_extent_readpage(struct bio *bio) | |||
2615 | ret = bio_readpage_error(bio, offset, page, | 2618 | ret = bio_readpage_error(bio, offset, page, |
2616 | start, end, mirror); | 2619 | start, end, mirror); |
2617 | if (ret == 0) { | 2620 | if (ret == 0) { |
2618 | uptodate = !bio->bi_error; | 2621 | uptodate = !bio->bi_status; |
2619 | offset += len; | 2622 | offset += len; |
2620 | continue; | 2623 | continue; |
2621 | } | 2624 | } |
@@ -2673,7 +2676,7 @@ readpage_ok: | |||
2673 | endio_readpage_release_extent(tree, extent_start, extent_len, | 2676 | endio_readpage_release_extent(tree, extent_start, extent_len, |
2674 | uptodate); | 2677 | uptodate); |
2675 | if (io_bio->end_io) | 2678 | if (io_bio->end_io) |
2676 | io_bio->end_io(io_bio, bio->bi_error); | 2679 | io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status)); |
2677 | bio_put(bio); | 2680 | bio_put(bio); |
2678 | } | 2681 | } |
2679 | 2682 | ||
@@ -2743,7 +2746,7 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) | |||
2743 | static int __must_check submit_one_bio(struct bio *bio, int mirror_num, | 2746 | static int __must_check submit_one_bio(struct bio *bio, int mirror_num, |
2744 | unsigned long bio_flags) | 2747 | unsigned long bio_flags) |
2745 | { | 2748 | { |
2746 | int ret = 0; | 2749 | blk_status_t ret = 0; |
2747 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 2750 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
2748 | struct page *page = bvec->bv_page; | 2751 | struct page *page = bvec->bv_page; |
2749 | struct extent_io_tree *tree = bio->bi_private; | 2752 | struct extent_io_tree *tree = bio->bi_private; |
@@ -2761,7 +2764,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num, | |||
2761 | btrfsic_submit_bio(bio); | 2764 | btrfsic_submit_bio(bio); |
2762 | 2765 | ||
2763 | bio_put(bio); | 2766 | bio_put(bio); |
2764 | return ret; | 2767 | return blk_status_to_errno(ret); |
2765 | } | 2768 | } |
2766 | 2769 | ||
2767 | static int merge_bio(struct extent_io_tree *tree, struct page *page, | 2770 | static int merge_bio(struct extent_io_tree *tree, struct page *page, |
@@ -3707,7 +3710,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio) | |||
3707 | BUG_ON(!eb); | 3710 | BUG_ON(!eb); |
3708 | done = atomic_dec_and_test(&eb->io_pages); | 3711 | done = atomic_dec_and_test(&eb->io_pages); |
3709 | 3712 | ||
3710 | if (bio->bi_error || | 3713 | if (bio->bi_status || |
3711 | test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { | 3714 | test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { |
3712 | ClearPageUptodate(page); | 3715 | ClearPageUptodate(page); |
3713 | set_btree_ioerr(page); | 3716 | set_btree_ioerr(page); |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 1eafa2f0ede3..487ca0207cb6 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -92,9 +92,9 @@ struct btrfs_inode; | |||
92 | struct btrfs_io_bio; | 92 | struct btrfs_io_bio; |
93 | struct io_failure_record; | 93 | struct io_failure_record; |
94 | 94 | ||
95 | typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, | 95 | typedef blk_status_t (extent_submit_bio_hook_t)(struct inode *inode, |
96 | int mirror_num, unsigned long bio_flags, | 96 | struct bio *bio, int mirror_num, unsigned long bio_flags, |
97 | u64 bio_offset); | 97 | u64 bio_offset); |
98 | struct extent_io_ops { | 98 | struct extent_io_ops { |
99 | /* | 99 | /* |
100 | * The following callbacks must be allways defined, the function | 100 | * The following callbacks must be allways defined, the function |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 64fcb31d7163..5b1c7090e546 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -160,7 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err) | |||
160 | kfree(bio->csum_allocated); | 160 | kfree(bio->csum_allocated); |
161 | } | 161 | } |
162 | 162 | ||
163 | static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, | 163 | static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, |
164 | u64 logical_offset, u32 *dst, int dio) | 164 | u64 logical_offset, u32 *dst, int dio) |
165 | { | 165 | { |
166 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 166 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, | |||
182 | 182 | ||
183 | path = btrfs_alloc_path(); | 183 | path = btrfs_alloc_path(); |
184 | if (!path) | 184 | if (!path) |
185 | return -ENOMEM; | 185 | return BLK_STS_RESOURCE; |
186 | 186 | ||
187 | nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; | 187 | nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; |
188 | if (!dst) { | 188 | if (!dst) { |
@@ -191,7 +191,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, | |||
191 | csum_size, GFP_NOFS); | 191 | csum_size, GFP_NOFS); |
192 | if (!btrfs_bio->csum_allocated) { | 192 | if (!btrfs_bio->csum_allocated) { |
193 | btrfs_free_path(path); | 193 | btrfs_free_path(path); |
194 | return -ENOMEM; | 194 | return BLK_STS_RESOURCE; |
195 | } | 195 | } |
196 | btrfs_bio->csum = btrfs_bio->csum_allocated; | 196 | btrfs_bio->csum = btrfs_bio->csum_allocated; |
197 | btrfs_bio->end_io = btrfs_io_bio_endio_readpage; | 197 | btrfs_bio->end_io = btrfs_io_bio_endio_readpage; |
@@ -303,12 +303,12 @@ next: | |||
303 | return 0; | 303 | return 0; |
304 | } | 304 | } |
305 | 305 | ||
306 | int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst) | 306 | blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst) |
307 | { | 307 | { |
308 | return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0); | 308 | return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0); |
309 | } | 309 | } |
310 | 310 | ||
311 | int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) | 311 | blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) |
312 | { | 312 | { |
313 | return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1); | 313 | return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1); |
314 | } | 314 | } |
@@ -433,7 +433,7 @@ fail: | |||
433 | return ret; | 433 | return ret; |
434 | } | 434 | } |
435 | 435 | ||
436 | int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, | 436 | blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, |
437 | u64 file_start, int contig) | 437 | u64 file_start, int contig) |
438 | { | 438 | { |
439 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 439 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
@@ -452,7 +452,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, | |||
452 | sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), | 452 | sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), |
453 | GFP_NOFS); | 453 | GFP_NOFS); |
454 | if (!sums) | 454 | if (!sums) |
455 | return -ENOMEM; | 455 | return BLK_STS_RESOURCE; |
456 | 456 | ||
457 | sums->len = bio->bi_iter.bi_size; | 457 | sums->len = bio->bi_iter.bi_size; |
458 | INIT_LIST_HEAD(&sums->list); | 458 | INIT_LIST_HEAD(&sums->list); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 758b2666885e..ea7cae1003eb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -842,13 +842,12 @@ retry: | |||
842 | NULL, EXTENT_LOCKED | EXTENT_DELALLOC, | 842 | NULL, EXTENT_LOCKED | EXTENT_DELALLOC, |
843 | PAGE_UNLOCK | PAGE_CLEAR_DIRTY | | 843 | PAGE_UNLOCK | PAGE_CLEAR_DIRTY | |
844 | PAGE_SET_WRITEBACK); | 844 | PAGE_SET_WRITEBACK); |
845 | ret = btrfs_submit_compressed_write(inode, | 845 | if (btrfs_submit_compressed_write(inode, |
846 | async_extent->start, | 846 | async_extent->start, |
847 | async_extent->ram_size, | 847 | async_extent->ram_size, |
848 | ins.objectid, | 848 | ins.objectid, |
849 | ins.offset, async_extent->pages, | 849 | ins.offset, async_extent->pages, |
850 | async_extent->nr_pages); | 850 | async_extent->nr_pages)) { |
851 | if (ret) { | ||
852 | struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; | 851 | struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
853 | struct page *p = async_extent->pages[0]; | 852 | struct page *p = async_extent->pages[0]; |
854 | const u64 start = async_extent->start; | 853 | const u64 start = async_extent->start; |
@@ -1901,11 +1900,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, | |||
1901 | * At IO completion time the cums attached on the ordered extent record | 1900 | * At IO completion time the cums attached on the ordered extent record |
1902 | * are inserted into the btree | 1901 | * are inserted into the btree |
1903 | */ | 1902 | */ |
1904 | static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, | 1903 | static blk_status_t __btrfs_submit_bio_start(struct inode *inode, |
1905 | int mirror_num, unsigned long bio_flags, | 1904 | struct bio *bio, int mirror_num, unsigned long bio_flags, |
1906 | u64 bio_offset) | 1905 | u64 bio_offset) |
1907 | { | 1906 | { |
1908 | int ret = 0; | 1907 | blk_status_t ret = 0; |
1909 | 1908 | ||
1910 | ret = btrfs_csum_one_bio(inode, bio, 0, 0); | 1909 | ret = btrfs_csum_one_bio(inode, bio, 0, 0); |
1911 | BUG_ON(ret); /* -ENOMEM */ | 1910 | BUG_ON(ret); /* -ENOMEM */ |
@@ -1920,16 +1919,16 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, | |||
1920 | * At IO completion time the cums attached on the ordered extent record | 1919 | * At IO completion time the cums attached on the ordered extent record |
1921 | * are inserted into the btree | 1920 | * are inserted into the btree |
1922 | */ | 1921 | */ |
1923 | static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, | 1922 | static blk_status_t __btrfs_submit_bio_done(struct inode *inode, |
1924 | int mirror_num, unsigned long bio_flags, | 1923 | struct bio *bio, int mirror_num, unsigned long bio_flags, |
1925 | u64 bio_offset) | 1924 | u64 bio_offset) |
1926 | { | 1925 | { |
1927 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 1926 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
1928 | int ret; | 1927 | blk_status_t ret; |
1929 | 1928 | ||
1930 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); | 1929 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); |
1931 | if (ret) { | 1930 | if (ret) { |
1932 | bio->bi_error = ret; | 1931 | bio->bi_status = ret; |
1933 | bio_endio(bio); | 1932 | bio_endio(bio); |
1934 | } | 1933 | } |
1935 | return ret; | 1934 | return ret; |
@@ -1939,14 +1938,14 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, | |||
1939 | * extent_io.c submission hook. This does the right thing for csum calculation | 1938 | * extent_io.c submission hook. This does the right thing for csum calculation |
1940 | * on write, or reading the csums from the tree before a read | 1939 | * on write, or reading the csums from the tree before a read |
1941 | */ | 1940 | */ |
1942 | static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, | 1941 | static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, |
1943 | int mirror_num, unsigned long bio_flags, | 1942 | int mirror_num, unsigned long bio_flags, |
1944 | u64 bio_offset) | 1943 | u64 bio_offset) |
1945 | { | 1944 | { |
1946 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 1945 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
1947 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1946 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1948 | enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; | 1947 | enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; |
1949 | int ret = 0; | 1948 | blk_status_t ret = 0; |
1950 | int skip_sum; | 1949 | int skip_sum; |
1951 | int async = !atomic_read(&BTRFS_I(inode)->sync_writers); | 1950 | int async = !atomic_read(&BTRFS_I(inode)->sync_writers); |
1952 | 1951 | ||
@@ -1991,8 +1990,8 @@ mapit: | |||
1991 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); | 1990 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
1992 | 1991 | ||
1993 | out: | 1992 | out: |
1994 | if (ret < 0) { | 1993 | if (ret) { |
1995 | bio->bi_error = ret; | 1994 | bio->bi_status = ret; |
1996 | bio_endio(bio); | 1995 | bio_endio(bio); |
1997 | } | 1996 | } |
1998 | return ret; | 1997 | return ret; |
@@ -8037,7 +8036,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) | |||
8037 | struct bio_vec *bvec; | 8036 | struct bio_vec *bvec; |
8038 | int i; | 8037 | int i; |
8039 | 8038 | ||
8040 | if (bio->bi_error) | 8039 | if (bio->bi_status) |
8041 | goto end; | 8040 | goto end; |
8042 | 8041 | ||
8043 | ASSERT(bio->bi_vcnt == 1); | 8042 | ASSERT(bio->bi_vcnt == 1); |
@@ -8116,7 +8115,7 @@ static void btrfs_retry_endio(struct bio *bio) | |||
8116 | int ret; | 8115 | int ret; |
8117 | int i; | 8116 | int i; |
8118 | 8117 | ||
8119 | if (bio->bi_error) | 8118 | if (bio->bi_status) |
8120 | goto end; | 8119 | goto end; |
8121 | 8120 | ||
8122 | uptodate = 1; | 8121 | uptodate = 1; |
@@ -8141,8 +8140,8 @@ end: | |||
8141 | bio_put(bio); | 8140 | bio_put(bio); |
8142 | } | 8141 | } |
8143 | 8142 | ||
8144 | static int __btrfs_subio_endio_read(struct inode *inode, | 8143 | static blk_status_t __btrfs_subio_endio_read(struct inode *inode, |
8145 | struct btrfs_io_bio *io_bio, int err) | 8144 | struct btrfs_io_bio *io_bio, blk_status_t err) |
8146 | { | 8145 | { |
8147 | struct btrfs_fs_info *fs_info; | 8146 | struct btrfs_fs_info *fs_info; |
8148 | struct bio_vec *bvec; | 8147 | struct bio_vec *bvec; |
@@ -8184,7 +8183,7 @@ try_again: | |||
8184 | io_bio->mirror_num, | 8183 | io_bio->mirror_num, |
8185 | btrfs_retry_endio, &done); | 8184 | btrfs_retry_endio, &done); |
8186 | if (ret) { | 8185 | if (ret) { |
8187 | err = ret; | 8186 | err = errno_to_blk_status(ret); |
8188 | goto next; | 8187 | goto next; |
8189 | } | 8188 | } |
8190 | 8189 | ||
@@ -8211,8 +8210,8 @@ next: | |||
8211 | return err; | 8210 | return err; |
8212 | } | 8211 | } |
8213 | 8212 | ||
8214 | static int btrfs_subio_endio_read(struct inode *inode, | 8213 | static blk_status_t btrfs_subio_endio_read(struct inode *inode, |
8215 | struct btrfs_io_bio *io_bio, int err) | 8214 | struct btrfs_io_bio *io_bio, blk_status_t err) |
8216 | { | 8215 | { |
8217 | bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 8216 | bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
8218 | 8217 | ||
@@ -8232,7 +8231,7 @@ static void btrfs_endio_direct_read(struct bio *bio) | |||
8232 | struct inode *inode = dip->inode; | 8231 | struct inode *inode = dip->inode; |
8233 | struct bio *dio_bio; | 8232 | struct bio *dio_bio; |
8234 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | 8233 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); |
8235 | int err = bio->bi_error; | 8234 | blk_status_t err = bio->bi_status; |
8236 | 8235 | ||
8237 | if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) | 8236 | if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) |
8238 | err = btrfs_subio_endio_read(inode, io_bio, err); | 8237 | err = btrfs_subio_endio_read(inode, io_bio, err); |
@@ -8243,11 +8242,11 @@ static void btrfs_endio_direct_read(struct bio *bio) | |||
8243 | 8242 | ||
8244 | kfree(dip); | 8243 | kfree(dip); |
8245 | 8244 | ||
8246 | dio_bio->bi_error = bio->bi_error; | 8245 | dio_bio->bi_status = bio->bi_status; |
8247 | dio_end_io(dio_bio); | 8246 | dio_end_io(dio_bio); |
8248 | 8247 | ||
8249 | if (io_bio->end_io) | 8248 | if (io_bio->end_io) |
8250 | io_bio->end_io(io_bio, err); | 8249 | io_bio->end_io(io_bio, blk_status_to_errno(err)); |
8251 | bio_put(bio); | 8250 | bio_put(bio); |
8252 | } | 8251 | } |
8253 | 8252 | ||
@@ -8299,20 +8298,20 @@ static void btrfs_endio_direct_write(struct bio *bio) | |||
8299 | struct bio *dio_bio = dip->dio_bio; | 8298 | struct bio *dio_bio = dip->dio_bio; |
8300 | 8299 | ||
8301 | __endio_write_update_ordered(dip->inode, dip->logical_offset, | 8300 | __endio_write_update_ordered(dip->inode, dip->logical_offset, |
8302 | dip->bytes, !bio->bi_error); | 8301 | dip->bytes, !bio->bi_status); |
8303 | 8302 | ||
8304 | kfree(dip); | 8303 | kfree(dip); |
8305 | 8304 | ||
8306 | dio_bio->bi_error = bio->bi_error; | 8305 | dio_bio->bi_status = bio->bi_status; |
8307 | dio_end_io(dio_bio); | 8306 | dio_end_io(dio_bio); |
8308 | bio_put(bio); | 8307 | bio_put(bio); |
8309 | } | 8308 | } |
8310 | 8309 | ||
8311 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, | 8310 | static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode, |
8312 | struct bio *bio, int mirror_num, | 8311 | struct bio *bio, int mirror_num, |
8313 | unsigned long bio_flags, u64 offset) | 8312 | unsigned long bio_flags, u64 offset) |
8314 | { | 8313 | { |
8315 | int ret; | 8314 | blk_status_t ret; |
8316 | ret = btrfs_csum_one_bio(inode, bio, offset, 1); | 8315 | ret = btrfs_csum_one_bio(inode, bio, offset, 1); |
8317 | BUG_ON(ret); /* -ENOMEM */ | 8316 | BUG_ON(ret); /* -ENOMEM */ |
8318 | return 0; | 8317 | return 0; |
@@ -8321,7 +8320,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, | |||
8321 | static void btrfs_end_dio_bio(struct bio *bio) | 8320 | static void btrfs_end_dio_bio(struct bio *bio) |
8322 | { | 8321 | { |
8323 | struct btrfs_dio_private *dip = bio->bi_private; | 8322 | struct btrfs_dio_private *dip = bio->bi_private; |
8324 | int err = bio->bi_error; | 8323 | blk_status_t err = bio->bi_status; |
8325 | 8324 | ||
8326 | if (err) | 8325 | if (err) |
8327 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, | 8326 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, |
@@ -8351,7 +8350,7 @@ static void btrfs_end_dio_bio(struct bio *bio) | |||
8351 | if (dip->errors) { | 8350 | if (dip->errors) { |
8352 | bio_io_error(dip->orig_bio); | 8351 | bio_io_error(dip->orig_bio); |
8353 | } else { | 8352 | } else { |
8354 | dip->dio_bio->bi_error = 0; | 8353 | dip->dio_bio->bi_status = 0; |
8355 | bio_endio(dip->orig_bio); | 8354 | bio_endio(dip->orig_bio); |
8356 | } | 8355 | } |
8357 | out: | 8356 | out: |
@@ -8368,14 +8367,14 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |||
8368 | return bio; | 8367 | return bio; |
8369 | } | 8368 | } |
8370 | 8369 | ||
8371 | static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode, | 8370 | static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, |
8372 | struct btrfs_dio_private *dip, | 8371 | struct btrfs_dio_private *dip, |
8373 | struct bio *bio, | 8372 | struct bio *bio, |
8374 | u64 file_offset) | 8373 | u64 file_offset) |
8375 | { | 8374 | { |
8376 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | 8375 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); |
8377 | struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); | 8376 | struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); |
8378 | int ret; | 8377 | blk_status_t ret; |
8379 | 8378 | ||
8380 | /* | 8379 | /* |
8381 | * We load all the csum data we need when we submit | 8380 | * We load all the csum data we need when we submit |
@@ -8406,7 +8405,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
8406 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | 8405 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
8407 | struct btrfs_dio_private *dip = bio->bi_private; | 8406 | struct btrfs_dio_private *dip = bio->bi_private; |
8408 | bool write = bio_op(bio) == REQ_OP_WRITE; | 8407 | bool write = bio_op(bio) == REQ_OP_WRITE; |
8409 | int ret; | 8408 | blk_status_t ret; |
8410 | 8409 | ||
8411 | if (async_submit) | 8410 | if (async_submit) |
8412 | async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); | 8411 | async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); |
@@ -8649,7 +8648,7 @@ free_ordered: | |||
8649 | * callbacks - they require an allocated dip and a clone of dio_bio. | 8648 | * callbacks - they require an allocated dip and a clone of dio_bio. |
8650 | */ | 8649 | */ |
8651 | if (io_bio && dip) { | 8650 | if (io_bio && dip) { |
8652 | io_bio->bi_error = -EIO; | 8651 | io_bio->bi_status = BLK_STS_IOERR; |
8653 | bio_endio(io_bio); | 8652 | bio_endio(io_bio); |
8654 | /* | 8653 | /* |
8655 | * The end io callbacks free our dip, do the final put on io_bio | 8654 | * The end io callbacks free our dip, do the final put on io_bio |
@@ -8668,7 +8667,7 @@ free_ordered: | |||
8668 | unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, | 8667 | unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, |
8669 | file_offset + dio_bio->bi_iter.bi_size - 1); | 8668 | file_offset + dio_bio->bi_iter.bi_size - 1); |
8670 | 8669 | ||
8671 | dio_bio->bi_error = -EIO; | 8670 | dio_bio->bi_status = BLK_STS_IOERR; |
8672 | /* | 8671 | /* |
8673 | * Releases and cleans up our dio_bio, no need to bio_put() | 8672 | * Releases and cleans up our dio_bio, no need to bio_put() |
8674 | * nor bio_endio()/bio_io_error() against dio_bio. | 8673 | * nor bio_endio()/bio_io_error() against dio_bio. |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index d8ea0eb76325..f3d30d9ea8f9 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
@@ -871,7 +871,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio) | |||
871 | * this frees the rbio and runs through all the bios in the | 871 | * this frees the rbio and runs through all the bios in the |
872 | * bio_list and calls end_io on them | 872 | * bio_list and calls end_io on them |
873 | */ | 873 | */ |
874 | static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) | 874 | static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) |
875 | { | 875 | { |
876 | struct bio *cur = bio_list_get(&rbio->bio_list); | 876 | struct bio *cur = bio_list_get(&rbio->bio_list); |
877 | struct bio *next; | 877 | struct bio *next; |
@@ -884,7 +884,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) | |||
884 | while (cur) { | 884 | while (cur) { |
885 | next = cur->bi_next; | 885 | next = cur->bi_next; |
886 | cur->bi_next = NULL; | 886 | cur->bi_next = NULL; |
887 | cur->bi_error = err; | 887 | cur->bi_status = err; |
888 | bio_endio(cur); | 888 | bio_endio(cur); |
889 | cur = next; | 889 | cur = next; |
890 | } | 890 | } |
@@ -897,7 +897,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) | |||
897 | static void raid_write_end_io(struct bio *bio) | 897 | static void raid_write_end_io(struct bio *bio) |
898 | { | 898 | { |
899 | struct btrfs_raid_bio *rbio = bio->bi_private; | 899 | struct btrfs_raid_bio *rbio = bio->bi_private; |
900 | int err = bio->bi_error; | 900 | blk_status_t err = bio->bi_status; |
901 | int max_errors; | 901 | int max_errors; |
902 | 902 | ||
903 | if (err) | 903 | if (err) |
@@ -914,7 +914,7 @@ static void raid_write_end_io(struct bio *bio) | |||
914 | max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? | 914 | max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? |
915 | 0 : rbio->bbio->max_errors; | 915 | 0 : rbio->bbio->max_errors; |
916 | if (atomic_read(&rbio->error) > max_errors) | 916 | if (atomic_read(&rbio->error) > max_errors) |
917 | err = -EIO; | 917 | err = BLK_STS_IOERR; |
918 | 918 | ||
919 | rbio_orig_end_io(rbio, err); | 919 | rbio_orig_end_io(rbio, err); |
920 | } | 920 | } |
@@ -1092,7 +1092,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, | |||
1092 | * devices or if they are not contiguous | 1092 | * devices or if they are not contiguous |
1093 | */ | 1093 | */ |
1094 | if (last_end == disk_start && stripe->dev->bdev && | 1094 | if (last_end == disk_start && stripe->dev->bdev && |
1095 | !last->bi_error && | 1095 | !last->bi_status && |
1096 | last->bi_bdev == stripe->dev->bdev) { | 1096 | last->bi_bdev == stripe->dev->bdev) { |
1097 | ret = bio_add_page(last, page, PAGE_SIZE, 0); | 1097 | ret = bio_add_page(last, page, PAGE_SIZE, 0); |
1098 | if (ret == PAGE_SIZE) | 1098 | if (ret == PAGE_SIZE) |
@@ -1448,7 +1448,7 @@ static void raid_rmw_end_io(struct bio *bio) | |||
1448 | { | 1448 | { |
1449 | struct btrfs_raid_bio *rbio = bio->bi_private; | 1449 | struct btrfs_raid_bio *rbio = bio->bi_private; |
1450 | 1450 | ||
1451 | if (bio->bi_error) | 1451 | if (bio->bi_status) |
1452 | fail_bio_stripe(rbio, bio); | 1452 | fail_bio_stripe(rbio, bio); |
1453 | else | 1453 | else |
1454 | set_bio_pages_uptodate(bio); | 1454 | set_bio_pages_uptodate(bio); |
@@ -1991,7 +1991,7 @@ static void raid_recover_end_io(struct bio *bio) | |||
1991 | * we only read stripe pages off the disk, set them | 1991 | * we only read stripe pages off the disk, set them |
1992 | * up to date if there were no errors | 1992 | * up to date if there were no errors |
1993 | */ | 1993 | */ |
1994 | if (bio->bi_error) | 1994 | if (bio->bi_status) |
1995 | fail_bio_stripe(rbio, bio); | 1995 | fail_bio_stripe(rbio, bio); |
1996 | else | 1996 | else |
1997 | set_bio_pages_uptodate(bio); | 1997 | set_bio_pages_uptodate(bio); |
@@ -2530,7 +2530,7 @@ static void raid56_parity_scrub_end_io(struct bio *bio) | |||
2530 | { | 2530 | { |
2531 | struct btrfs_raid_bio *rbio = bio->bi_private; | 2531 | struct btrfs_raid_bio *rbio = bio->bi_private; |
2532 | 2532 | ||
2533 | if (bio->bi_error) | 2533 | if (bio->bi_status) |
2534 | fail_bio_stripe(rbio, bio); | 2534 | fail_bio_stripe(rbio, bio); |
2535 | else | 2535 | else |
2536 | set_bio_pages_uptodate(bio); | 2536 | set_bio_pages_uptodate(bio); |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c7b45eb2403d..ba5595d19de1 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -95,7 +95,7 @@ struct scrub_bio { | |||
95 | struct scrub_ctx *sctx; | 95 | struct scrub_ctx *sctx; |
96 | struct btrfs_device *dev; | 96 | struct btrfs_device *dev; |
97 | struct bio *bio; | 97 | struct bio *bio; |
98 | int err; | 98 | blk_status_t status; |
99 | u64 logical; | 99 | u64 logical; |
100 | u64 physical; | 100 | u64 physical; |
101 | #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO | 101 | #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO |
@@ -1668,14 +1668,14 @@ leave_nomem: | |||
1668 | 1668 | ||
1669 | struct scrub_bio_ret { | 1669 | struct scrub_bio_ret { |
1670 | struct completion event; | 1670 | struct completion event; |
1671 | int error; | 1671 | blk_status_t status; |
1672 | }; | 1672 | }; |
1673 | 1673 | ||
1674 | static void scrub_bio_wait_endio(struct bio *bio) | 1674 | static void scrub_bio_wait_endio(struct bio *bio) |
1675 | { | 1675 | { |
1676 | struct scrub_bio_ret *ret = bio->bi_private; | 1676 | struct scrub_bio_ret *ret = bio->bi_private; |
1677 | 1677 | ||
1678 | ret->error = bio->bi_error; | 1678 | ret->status = bio->bi_status; |
1679 | complete(&ret->event); | 1679 | complete(&ret->event); |
1680 | } | 1680 | } |
1681 | 1681 | ||
@@ -1693,7 +1693,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, | |||
1693 | int ret; | 1693 | int ret; |
1694 | 1694 | ||
1695 | init_completion(&done.event); | 1695 | init_completion(&done.event); |
1696 | done.error = 0; | 1696 | done.status = 0; |
1697 | bio->bi_iter.bi_sector = page->logical >> 9; | 1697 | bio->bi_iter.bi_sector = page->logical >> 9; |
1698 | bio->bi_private = &done; | 1698 | bio->bi_private = &done; |
1699 | bio->bi_end_io = scrub_bio_wait_endio; | 1699 | bio->bi_end_io = scrub_bio_wait_endio; |
@@ -1705,7 +1705,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, | |||
1705 | return ret; | 1705 | return ret; |
1706 | 1706 | ||
1707 | wait_for_completion(&done.event); | 1707 | wait_for_completion(&done.event); |
1708 | if (done.error) | 1708 | if (done.status) |
1709 | return -EIO; | 1709 | return -EIO; |
1710 | 1710 | ||
1711 | return 0; | 1711 | return 0; |
@@ -1937,7 +1937,7 @@ again: | |||
1937 | bio->bi_bdev = sbio->dev->bdev; | 1937 | bio->bi_bdev = sbio->dev->bdev; |
1938 | bio->bi_iter.bi_sector = sbio->physical >> 9; | 1938 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
1939 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | 1939 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
1940 | sbio->err = 0; | 1940 | sbio->status = 0; |
1941 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 1941 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
1942 | spage->physical_for_dev_replace || | 1942 | spage->physical_for_dev_replace || |
1943 | sbio->logical + sbio->page_count * PAGE_SIZE != | 1943 | sbio->logical + sbio->page_count * PAGE_SIZE != |
@@ -1992,7 +1992,7 @@ static void scrub_wr_bio_end_io(struct bio *bio) | |||
1992 | struct scrub_bio *sbio = bio->bi_private; | 1992 | struct scrub_bio *sbio = bio->bi_private; |
1993 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; | 1993 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; |
1994 | 1994 | ||
1995 | sbio->err = bio->bi_error; | 1995 | sbio->status = bio->bi_status; |
1996 | sbio->bio = bio; | 1996 | sbio->bio = bio; |
1997 | 1997 | ||
1998 | btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, | 1998 | btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, |
@@ -2007,7 +2007,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) | |||
2007 | int i; | 2007 | int i; |
2008 | 2008 | ||
2009 | WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); | 2009 | WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); |
2010 | if (sbio->err) { | 2010 | if (sbio->status) { |
2011 | struct btrfs_dev_replace *dev_replace = | 2011 | struct btrfs_dev_replace *dev_replace = |
2012 | &sbio->sctx->fs_info->dev_replace; | 2012 | &sbio->sctx->fs_info->dev_replace; |
2013 | 2013 | ||
@@ -2341,7 +2341,7 @@ again: | |||
2341 | bio->bi_bdev = sbio->dev->bdev; | 2341 | bio->bi_bdev = sbio->dev->bdev; |
2342 | bio->bi_iter.bi_sector = sbio->physical >> 9; | 2342 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
2343 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | 2343 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
2344 | sbio->err = 0; | 2344 | sbio->status = 0; |
2345 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 2345 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
2346 | spage->physical || | 2346 | spage->physical || |
2347 | sbio->logical + sbio->page_count * PAGE_SIZE != | 2347 | sbio->logical + sbio->page_count * PAGE_SIZE != |
@@ -2377,7 +2377,7 @@ static void scrub_missing_raid56_end_io(struct bio *bio) | |||
2377 | struct scrub_block *sblock = bio->bi_private; | 2377 | struct scrub_block *sblock = bio->bi_private; |
2378 | struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; | 2378 | struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; |
2379 | 2379 | ||
2380 | if (bio->bi_error) | 2380 | if (bio->bi_status) |
2381 | sblock->no_io_error_seen = 0; | 2381 | sblock->no_io_error_seen = 0; |
2382 | 2382 | ||
2383 | bio_put(bio); | 2383 | bio_put(bio); |
@@ -2588,7 +2588,7 @@ static void scrub_bio_end_io(struct bio *bio) | |||
2588 | struct scrub_bio *sbio = bio->bi_private; | 2588 | struct scrub_bio *sbio = bio->bi_private; |
2589 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; | 2589 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; |
2590 | 2590 | ||
2591 | sbio->err = bio->bi_error; | 2591 | sbio->status = bio->bi_status; |
2592 | sbio->bio = bio; | 2592 | sbio->bio = bio; |
2593 | 2593 | ||
2594 | btrfs_queue_work(fs_info->scrub_workers, &sbio->work); | 2594 | btrfs_queue_work(fs_info->scrub_workers, &sbio->work); |
@@ -2601,7 +2601,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work) | |||
2601 | int i; | 2601 | int i; |
2602 | 2602 | ||
2603 | BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); | 2603 | BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); |
2604 | if (sbio->err) { | 2604 | if (sbio->status) { |
2605 | for (i = 0; i < sbio->page_count; i++) { | 2605 | for (i = 0; i < sbio->page_count; i++) { |
2606 | struct scrub_page *spage = sbio->pagev[i]; | 2606 | struct scrub_page *spage = sbio->pagev[i]; |
2607 | 2607 | ||
@@ -3004,7 +3004,7 @@ static void scrub_parity_bio_endio(struct bio *bio) | |||
3004 | struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; | 3004 | struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; |
3005 | struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; | 3005 | struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; |
3006 | 3006 | ||
3007 | if (bio->bi_error) | 3007 | if (bio->bi_status) |
3008 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, | 3008 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, |
3009 | sparity->nsectors); | 3009 | sparity->nsectors); |
3010 | 3010 | ||
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 017b67daa3bb..84a495967e0a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -6042,9 +6042,10 @@ static void btrfs_end_bio(struct bio *bio) | |||
6042 | struct btrfs_bio *bbio = bio->bi_private; | 6042 | struct btrfs_bio *bbio = bio->bi_private; |
6043 | int is_orig_bio = 0; | 6043 | int is_orig_bio = 0; |
6044 | 6044 | ||
6045 | if (bio->bi_error) { | 6045 | if (bio->bi_status) { |
6046 | atomic_inc(&bbio->error); | 6046 | atomic_inc(&bbio->error); |
6047 | if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) { | 6047 | if (bio->bi_status == BLK_STS_IOERR || |
6048 | bio->bi_status == BLK_STS_TARGET) { | ||
6048 | unsigned int stripe_index = | 6049 | unsigned int stripe_index = |
6049 | btrfs_io_bio(bio)->stripe_index; | 6050 | btrfs_io_bio(bio)->stripe_index; |
6050 | struct btrfs_device *dev; | 6051 | struct btrfs_device *dev; |
@@ -6082,13 +6083,13 @@ static void btrfs_end_bio(struct bio *bio) | |||
6082 | * beyond the tolerance of the btrfs bio | 6083 | * beyond the tolerance of the btrfs bio |
6083 | */ | 6084 | */ |
6084 | if (atomic_read(&bbio->error) > bbio->max_errors) { | 6085 | if (atomic_read(&bbio->error) > bbio->max_errors) { |
6085 | bio->bi_error = -EIO; | 6086 | bio->bi_status = BLK_STS_IOERR; |
6086 | } else { | 6087 | } else { |
6087 | /* | 6088 | /* |
6088 | * this bio is actually up to date, we didn't | 6089 | * this bio is actually up to date, we didn't |
6089 | * go over the max number of errors | 6090 | * go over the max number of errors |
6090 | */ | 6091 | */ |
6091 | bio->bi_error = 0; | 6092 | bio->bi_status = 0; |
6092 | } | 6093 | } |
6093 | 6094 | ||
6094 | btrfs_end_bbio(bbio, bio); | 6095 | btrfs_end_bbio(bbio, bio); |
@@ -6199,7 +6200,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | |||
6199 | 6200 | ||
6200 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; | 6201 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; |
6201 | bio->bi_iter.bi_sector = logical >> 9; | 6202 | bio->bi_iter.bi_sector = logical >> 9; |
6202 | bio->bi_error = -EIO; | 6203 | bio->bi_status = BLK_STS_IOERR; |
6203 | btrfs_end_bbio(bbio, bio); | 6204 | btrfs_end_bbio(bbio, bio); |
6204 | } | 6205 | } |
6205 | } | 6206 | } |
diff --git a/fs/buffer.c b/fs/buffer.c index 161be58c5cb0..306b720f7383 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -3038,7 +3038,7 @@ static void end_bio_bh_io_sync(struct bio *bio) | |||
3038 | if (unlikely(bio_flagged(bio, BIO_QUIET))) | 3038 | if (unlikely(bio_flagged(bio, BIO_QUIET))) |
3039 | set_bit(BH_Quiet, &bh->b_state); | 3039 | set_bit(BH_Quiet, &bh->b_state); |
3040 | 3040 | ||
3041 | bh->b_end_io(bh, !bio->bi_error); | 3041 | bh->b_end_io(bh, !bio->bi_status); |
3042 | bio_put(bio); | 3042 | bio_put(bio); |
3043 | } | 3043 | } |
3044 | 3044 | ||
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index a409a84f1bca..6181e9526860 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c | |||
@@ -129,7 +129,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, | |||
129 | goto errout; | 129 | goto errout; |
130 | } | 130 | } |
131 | err = submit_bio_wait(bio); | 131 | err = submit_bio_wait(bio); |
132 | if ((err == 0) && bio->bi_error) | 132 | if (err == 0 && bio->bi_status) |
133 | err = -EIO; | 133 | err = -EIO; |
134 | bio_put(bio); | 134 | bio_put(bio); |
135 | if (err) | 135 | if (err) |
diff --git a/fs/direct-io.c b/fs/direct-io.c index bb711e4b86c2..e8baaabebf13 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -294,7 +294,7 @@ static void dio_aio_complete_work(struct work_struct *work) | |||
294 | dio_complete(dio, 0, true); | 294 | dio_complete(dio, 0, true); |
295 | } | 295 | } |
296 | 296 | ||
297 | static int dio_bio_complete(struct dio *dio, struct bio *bio); | 297 | static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); |
298 | 298 | ||
299 | /* | 299 | /* |
300 | * Asynchronous IO callback. | 300 | * Asynchronous IO callback. |
@@ -473,11 +473,11 @@ static struct bio *dio_await_one(struct dio *dio) | |||
473 | /* | 473 | /* |
474 | * Process one completed BIO. No locks are held. | 474 | * Process one completed BIO. No locks are held. |
475 | */ | 475 | */ |
476 | static int dio_bio_complete(struct dio *dio, struct bio *bio) | 476 | static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) |
477 | { | 477 | { |
478 | struct bio_vec *bvec; | 478 | struct bio_vec *bvec; |
479 | unsigned i; | 479 | unsigned i; |
480 | int err = bio->bi_error; | 480 | blk_status_t err = bio->bi_status; |
481 | 481 | ||
482 | if (err) | 482 | if (err) |
483 | dio->io_error = -EIO; | 483 | dio->io_error = -EIO; |
@@ -536,7 +536,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) | |||
536 | bio = dio->bio_list; | 536 | bio = dio->bio_list; |
537 | dio->bio_list = bio->bi_private; | 537 | dio->bio_list = bio->bi_private; |
538 | spin_unlock_irqrestore(&dio->bio_lock, flags); | 538 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
539 | ret2 = dio_bio_complete(dio, bio); | 539 | ret2 = blk_status_to_errno(dio_bio_complete(dio, bio)); |
540 | if (ret == 0) | 540 | if (ret == 0) |
541 | ret = ret2; | 541 | ret = ret2; |
542 | } | 542 | } |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 1a82138ba739..930ca0fc9a0f 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -85,7 +85,7 @@ static void ext4_finish_bio(struct bio *bio) | |||
85 | } | 85 | } |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | if (bio->bi_error) { | 88 | if (bio->bi_status) { |
89 | SetPageError(page); | 89 | SetPageError(page); |
90 | mapping_set_error(page->mapping, -EIO); | 90 | mapping_set_error(page->mapping, -EIO); |
91 | } | 91 | } |
@@ -104,7 +104,7 @@ static void ext4_finish_bio(struct bio *bio) | |||
104 | continue; | 104 | continue; |
105 | } | 105 | } |
106 | clear_buffer_async_write(bh); | 106 | clear_buffer_async_write(bh); |
107 | if (bio->bi_error) | 107 | if (bio->bi_status) |
108 | buffer_io_error(bh); | 108 | buffer_io_error(bh); |
109 | } while ((bh = bh->b_this_page) != head); | 109 | } while ((bh = bh->b_this_page) != head); |
110 | bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); | 110 | bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); |
@@ -303,24 +303,25 @@ static void ext4_end_bio(struct bio *bio) | |||
303 | bdevname(bio->bi_bdev, b), | 303 | bdevname(bio->bi_bdev, b), |
304 | (long long) bio->bi_iter.bi_sector, | 304 | (long long) bio->bi_iter.bi_sector, |
305 | (unsigned) bio_sectors(bio), | 305 | (unsigned) bio_sectors(bio), |
306 | bio->bi_error)) { | 306 | bio->bi_status)) { |
307 | ext4_finish_bio(bio); | 307 | ext4_finish_bio(bio); |
308 | bio_put(bio); | 308 | bio_put(bio); |
309 | return; | 309 | return; |
310 | } | 310 | } |
311 | bio->bi_end_io = NULL; | 311 | bio->bi_end_io = NULL; |
312 | 312 | ||
313 | if (bio->bi_error) { | 313 | if (bio->bi_status) { |
314 | struct inode *inode = io_end->inode; | 314 | struct inode *inode = io_end->inode; |
315 | 315 | ||
316 | ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu " | 316 | ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu " |
317 | "(offset %llu size %ld starting block %llu)", | 317 | "(offset %llu size %ld starting block %llu)", |
318 | bio->bi_error, inode->i_ino, | 318 | bio->bi_status, inode->i_ino, |
319 | (unsigned long long) io_end->offset, | 319 | (unsigned long long) io_end->offset, |
320 | (long) io_end->size, | 320 | (long) io_end->size, |
321 | (unsigned long long) | 321 | (unsigned long long) |
322 | bi_sector >> (inode->i_blkbits - 9)); | 322 | bi_sector >> (inode->i_blkbits - 9)); |
323 | mapping_set_error(inode->i_mapping, bio->bi_error); | 323 | mapping_set_error(inode->i_mapping, |
324 | blk_status_to_errno(bio->bi_status)); | ||
324 | } | 325 | } |
325 | 326 | ||
326 | if (io_end->flag & EXT4_IO_END_UNWRITTEN) { | 327 | if (io_end->flag & EXT4_IO_END_UNWRITTEN) { |
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index a81b829d56de..40a5497b0f60 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c | |||
@@ -73,7 +73,7 @@ static void mpage_end_io(struct bio *bio) | |||
73 | int i; | 73 | int i; |
74 | 74 | ||
75 | if (ext4_bio_encrypted(bio)) { | 75 | if (ext4_bio_encrypted(bio)) { |
76 | if (bio->bi_error) { | 76 | if (bio->bi_status) { |
77 | fscrypt_release_ctx(bio->bi_private); | 77 | fscrypt_release_ctx(bio->bi_private); |
78 | } else { | 78 | } else { |
79 | fscrypt_decrypt_bio_pages(bio->bi_private, bio); | 79 | fscrypt_decrypt_bio_pages(bio->bi_private, bio); |
@@ -83,7 +83,7 @@ static void mpage_end_io(struct bio *bio) | |||
83 | bio_for_each_segment_all(bv, bio, i) { | 83 | bio_for_each_segment_all(bv, bio, i) { |
84 | struct page *page = bv->bv_page; | 84 | struct page *page = bv->bv_page; |
85 | 85 | ||
86 | if (!bio->bi_error) { | 86 | if (!bio->bi_status) { |
87 | SetPageUptodate(page); | 87 | SetPageUptodate(page); |
88 | } else { | 88 | } else { |
89 | ClearPageUptodate(page); | 89 | ClearPageUptodate(page); |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 7c0f6bdf817d..36fe82012a33 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -58,12 +58,12 @@ static void f2fs_read_end_io(struct bio *bio) | |||
58 | #ifdef CONFIG_F2FS_FAULT_INJECTION | 58 | #ifdef CONFIG_F2FS_FAULT_INJECTION |
59 | if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) { | 59 | if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) { |
60 | f2fs_show_injection_info(FAULT_IO); | 60 | f2fs_show_injection_info(FAULT_IO); |
61 | bio->bi_error = -EIO; | 61 | bio->bi_status = BLK_STS_IOERR; |
62 | } | 62 | } |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | if (f2fs_bio_encrypted(bio)) { | 65 | if (f2fs_bio_encrypted(bio)) { |
66 | if (bio->bi_error) { | 66 | if (bio->bi_status) { |
67 | fscrypt_release_ctx(bio->bi_private); | 67 | fscrypt_release_ctx(bio->bi_private); |
68 | } else { | 68 | } else { |
69 | fscrypt_decrypt_bio_pages(bio->bi_private, bio); | 69 | fscrypt_decrypt_bio_pages(bio->bi_private, bio); |
@@ -74,7 +74,7 @@ static void f2fs_read_end_io(struct bio *bio) | |||
74 | bio_for_each_segment_all(bvec, bio, i) { | 74 | bio_for_each_segment_all(bvec, bio, i) { |
75 | struct page *page = bvec->bv_page; | 75 | struct page *page = bvec->bv_page; |
76 | 76 | ||
77 | if (!bio->bi_error) { | 77 | if (!bio->bi_status) { |
78 | if (!PageUptodate(page)) | 78 | if (!PageUptodate(page)) |
79 | SetPageUptodate(page); | 79 | SetPageUptodate(page); |
80 | } else { | 80 | } else { |
@@ -102,14 +102,14 @@ static void f2fs_write_end_io(struct bio *bio) | |||
102 | unlock_page(page); | 102 | unlock_page(page); |
103 | mempool_free(page, sbi->write_io_dummy); | 103 | mempool_free(page, sbi->write_io_dummy); |
104 | 104 | ||
105 | if (unlikely(bio->bi_error)) | 105 | if (unlikely(bio->bi_status)) |
106 | f2fs_stop_checkpoint(sbi, true); | 106 | f2fs_stop_checkpoint(sbi, true); |
107 | continue; | 107 | continue; |
108 | } | 108 | } |
109 | 109 | ||
110 | fscrypt_pullback_bio_page(&page, true); | 110 | fscrypt_pullback_bio_page(&page, true); |
111 | 111 | ||
112 | if (unlikely(bio->bi_error)) { | 112 | if (unlikely(bio->bi_status)) { |
113 | mapping_set_error(page->mapping, -EIO); | 113 | mapping_set_error(page->mapping, -EIO); |
114 | f2fs_stop_checkpoint(sbi, true); | 114 | f2fs_stop_checkpoint(sbi, true); |
115 | } | 115 | } |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 96845854e7ee..ea9f455d94ba 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -749,7 +749,7 @@ static void f2fs_submit_discard_endio(struct bio *bio) | |||
749 | { | 749 | { |
750 | struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; | 750 | struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; |
751 | 751 | ||
752 | dc->error = bio->bi_error; | 752 | dc->error = blk_status_to_errno(bio->bi_status); |
753 | dc->state = D_DONE; | 753 | dc->state = D_DONE; |
754 | complete(&dc->wait); | 754 | complete(&dc->wait); |
755 | bio_put(bio); | 755 | bio_put(bio); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 13ebf15a4db0..885d36e7a29f 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -170,7 +170,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp) | |||
170 | */ | 170 | */ |
171 | 171 | ||
172 | static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, | 172 | static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, |
173 | int error) | 173 | blk_status_t error) |
174 | { | 174 | { |
175 | struct buffer_head *bh, *next; | 175 | struct buffer_head *bh, *next; |
176 | struct page *page = bvec->bv_page; | 176 | struct page *page = bvec->bv_page; |
@@ -209,13 +209,13 @@ static void gfs2_end_log_write(struct bio *bio) | |||
209 | struct page *page; | 209 | struct page *page; |
210 | int i; | 210 | int i; |
211 | 211 | ||
212 | if (bio->bi_error) | 212 | if (bio->bi_status) |
213 | fs_err(sdp, "Error %d writing to log\n", bio->bi_error); | 213 | fs_err(sdp, "Error %d writing to log\n", bio->bi_status); |
214 | 214 | ||
215 | bio_for_each_segment_all(bvec, bio, i) { | 215 | bio_for_each_segment_all(bvec, bio, i) { |
216 | page = bvec->bv_page; | 216 | page = bvec->bv_page; |
217 | if (page_has_buffers(page)) | 217 | if (page_has_buffers(page)) |
218 | gfs2_end_log_write_bh(sdp, bvec, bio->bi_error); | 218 | gfs2_end_log_write_bh(sdp, bvec, bio->bi_status); |
219 | else | 219 | else |
220 | mempool_free(page, gfs2_page_pool); | 220 | mempool_free(page, gfs2_page_pool); |
221 | } | 221 | } |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 663ffc135ef3..fabe1614f879 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -201,7 +201,7 @@ static void gfs2_meta_read_endio(struct bio *bio) | |||
201 | do { | 201 | do { |
202 | struct buffer_head *next = bh->b_this_page; | 202 | struct buffer_head *next = bh->b_this_page; |
203 | len -= bh->b_size; | 203 | len -= bh->b_size; |
204 | bh->b_end_io(bh, !bio->bi_error); | 204 | bh->b_end_io(bh, !bio->bi_status); |
205 | bh = next; | 205 | bh = next; |
206 | } while (bh && len); | 206 | } while (bh && len); |
207 | } | 207 | } |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index ed67548b286c..83953cdbbc6c 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -176,10 +176,10 @@ static void end_bio_io_page(struct bio *bio) | |||
176 | { | 176 | { |
177 | struct page *page = bio->bi_private; | 177 | struct page *page = bio->bi_private; |
178 | 178 | ||
179 | if (!bio->bi_error) | 179 | if (!bio->bi_status) |
180 | SetPageUptodate(page); | 180 | SetPageUptodate(page); |
181 | else | 181 | else |
182 | pr_warn("error %d reading superblock\n", bio->bi_error); | 182 | pr_warn("error %d reading superblock\n", bio->bi_status); |
183 | unlock_page(page); | 183 | unlock_page(page); |
184 | } | 184 | } |
185 | 185 | ||
diff --git a/fs/iomap.c b/fs/iomap.c index 4b10892967a5..18f2f2b8ba2c 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -672,8 +672,8 @@ static void iomap_dio_bio_end_io(struct bio *bio) | |||
672 | struct iomap_dio *dio = bio->bi_private; | 672 | struct iomap_dio *dio = bio->bi_private; |
673 | bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); | 673 | bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); |
674 | 674 | ||
675 | if (bio->bi_error) | 675 | if (bio->bi_status) |
676 | iomap_dio_set_error(dio, bio->bi_error); | 676 | iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); |
677 | 677 | ||
678 | if (atomic_dec_and_test(&dio->ref)) { | 678 | if (atomic_dec_and_test(&dio->ref)) { |
679 | if (is_sync_kiocb(dio->iocb)) { | 679 | if (is_sync_kiocb(dio->iocb)) { |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index bb1da1feafeb..a21f0e9eecd4 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -2205,7 +2205,7 @@ static void lbmIODone(struct bio *bio) | |||
2205 | 2205 | ||
2206 | bp->l_flag |= lbmDONE; | 2206 | bp->l_flag |= lbmDONE; |
2207 | 2207 | ||
2208 | if (bio->bi_error) { | 2208 | if (bio->bi_status) { |
2209 | bp->l_flag |= lbmERROR; | 2209 | bp->l_flag |= lbmERROR; |
2210 | 2210 | ||
2211 | jfs_err("lbmIODone: I/O error in JFS log"); | 2211 | jfs_err("lbmIODone: I/O error in JFS log"); |
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 489aaa1403e5..ce93db3aef3c 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -280,7 +280,7 @@ static void metapage_read_end_io(struct bio *bio) | |||
280 | { | 280 | { |
281 | struct page *page = bio->bi_private; | 281 | struct page *page = bio->bi_private; |
282 | 282 | ||
283 | if (bio->bi_error) { | 283 | if (bio->bi_status) { |
284 | printk(KERN_ERR "metapage_read_end_io: I/O error\n"); | 284 | printk(KERN_ERR "metapage_read_end_io: I/O error\n"); |
285 | SetPageError(page); | 285 | SetPageError(page); |
286 | } | 286 | } |
@@ -337,7 +337,7 @@ static void metapage_write_end_io(struct bio *bio) | |||
337 | 337 | ||
338 | BUG_ON(!PagePrivate(page)); | 338 | BUG_ON(!PagePrivate(page)); |
339 | 339 | ||
340 | if (bio->bi_error) { | 340 | if (bio->bi_status) { |
341 | printk(KERN_ERR "metapage_write_end_io: I/O error\n"); | 341 | printk(KERN_ERR "metapage_write_end_io: I/O error\n"); |
342 | SetPageError(page); | 342 | SetPageError(page); |
343 | } | 343 | } |
diff --git a/fs/mpage.c b/fs/mpage.c index baff8f820c29..9524fdde00c2 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -50,7 +50,8 @@ static void mpage_end_io(struct bio *bio) | |||
50 | 50 | ||
51 | bio_for_each_segment_all(bv, bio, i) { | 51 | bio_for_each_segment_all(bv, bio, i) { |
52 | struct page *page = bv->bv_page; | 52 | struct page *page = bv->bv_page; |
53 | page_endio(page, op_is_write(bio_op(bio)), bio->bi_error); | 53 | page_endio(page, op_is_write(bio_op(bio)), |
54 | blk_status_to_errno(bio->bi_status)); | ||
54 | } | 55 | } |
55 | 56 | ||
56 | bio_put(bio); | 57 | bio_put(bio); |
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 0ca370d23ddb..d8863a804b15 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
@@ -188,7 +188,7 @@ static void bl_end_io_read(struct bio *bio) | |||
188 | { | 188 | { |
189 | struct parallel_io *par = bio->bi_private; | 189 | struct parallel_io *par = bio->bi_private; |
190 | 190 | ||
191 | if (bio->bi_error) { | 191 | if (bio->bi_status) { |
192 | struct nfs_pgio_header *header = par->data; | 192 | struct nfs_pgio_header *header = par->data; |
193 | 193 | ||
194 | if (!header->pnfs_error) | 194 | if (!header->pnfs_error) |
@@ -319,7 +319,7 @@ static void bl_end_io_write(struct bio *bio) | |||
319 | struct parallel_io *par = bio->bi_private; | 319 | struct parallel_io *par = bio->bi_private; |
320 | struct nfs_pgio_header *header = par->data; | 320 | struct nfs_pgio_header *header = par->data; |
321 | 321 | ||
322 | if (bio->bi_error) { | 322 | if (bio->bi_status) { |
323 | if (!header->pnfs_error) | 323 | if (!header->pnfs_error) |
324 | header->pnfs_error = -EIO; | 324 | header->pnfs_error = -EIO; |
325 | pnfs_set_lo_fail(header->lseg); | 325 | pnfs_set_lo_fail(header->lseg); |
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 6f87b2ac1aeb..e73c86d9855c 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -338,7 +338,7 @@ static void nilfs_end_bio_write(struct bio *bio) | |||
338 | { | 338 | { |
339 | struct nilfs_segment_buffer *segbuf = bio->bi_private; | 339 | struct nilfs_segment_buffer *segbuf = bio->bi_private; |
340 | 340 | ||
341 | if (bio->bi_error) | 341 | if (bio->bi_status) |
342 | atomic_inc(&segbuf->sb_err); | 342 | atomic_inc(&segbuf->sb_err); |
343 | 343 | ||
344 | bio_put(bio); | 344 | bio_put(bio); |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 0da0332725aa..ffe003982d95 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -516,9 +516,9 @@ static void o2hb_bio_end_io(struct bio *bio) | |||
516 | { | 516 | { |
517 | struct o2hb_bio_wait_ctxt *wc = bio->bi_private; | 517 | struct o2hb_bio_wait_ctxt *wc = bio->bi_private; |
518 | 518 | ||
519 | if (bio->bi_error) { | 519 | if (bio->bi_status) { |
520 | mlog(ML_ERROR, "IO Error %d\n", bio->bi_error); | 520 | mlog(ML_ERROR, "IO Error %d\n", bio->bi_status); |
521 | wc->wc_error = bio->bi_error; | 521 | wc->wc_error = blk_status_to_errno(bio->bi_status); |
522 | } | 522 | } |
523 | 523 | ||
524 | o2hb_bio_wait_dec(wc, 1); | 524 | o2hb_bio_wait_dec(wc, 1); |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 09af0f7cd55e..76b6f988e2fa 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -276,7 +276,7 @@ xfs_end_io( | |||
276 | struct xfs_inode *ip = XFS_I(ioend->io_inode); | 276 | struct xfs_inode *ip = XFS_I(ioend->io_inode); |
277 | xfs_off_t offset = ioend->io_offset; | 277 | xfs_off_t offset = ioend->io_offset; |
278 | size_t size = ioend->io_size; | 278 | size_t size = ioend->io_size; |
279 | int error = ioend->io_bio->bi_error; | 279 | int error; |
280 | 280 | ||
281 | /* | 281 | /* |
282 | * Just clean up the in-memory strutures if the fs has been shut down. | 282 | * Just clean up the in-memory strutures if the fs has been shut down. |
@@ -289,6 +289,7 @@ xfs_end_io( | |||
289 | /* | 289 | /* |
290 | * Clean up any COW blocks on an I/O error. | 290 | * Clean up any COW blocks on an I/O error. |
291 | */ | 291 | */ |
292 | error = blk_status_to_errno(ioend->io_bio->bi_status); | ||
292 | if (unlikely(error)) { | 293 | if (unlikely(error)) { |
293 | switch (ioend->io_type) { | 294 | switch (ioend->io_type) { |
294 | case XFS_IO_COW: | 295 | case XFS_IO_COW: |
@@ -332,7 +333,7 @@ xfs_end_bio( | |||
332 | else if (ioend->io_append_trans) | 333 | else if (ioend->io_append_trans) |
333 | queue_work(mp->m_data_workqueue, &ioend->io_work); | 334 | queue_work(mp->m_data_workqueue, &ioend->io_work); |
334 | else | 335 | else |
335 | xfs_destroy_ioend(ioend, bio->bi_error); | 336 | xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status)); |
336 | } | 337 | } |
337 | 338 | ||
338 | STATIC int | 339 | STATIC int |
@@ -500,7 +501,7 @@ xfs_submit_ioend( | |||
500 | * time. | 501 | * time. |
501 | */ | 502 | */ |
502 | if (status) { | 503 | if (status) { |
503 | ioend->io_bio->bi_error = status; | 504 | ioend->io_bio->bi_status = errno_to_blk_status(status); |
504 | bio_endio(ioend->io_bio); | 505 | bio_endio(ioend->io_bio); |
505 | return status; | 506 | return status; |
506 | } | 507 | } |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 62fa39276a24..15c7a484a5d2 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1213,8 +1213,11 @@ xfs_buf_bio_end_io( | |||
1213 | * don't overwrite existing errors - otherwise we can lose errors on | 1213 | * don't overwrite existing errors - otherwise we can lose errors on |
1214 | * buffers that require multiple bios to complete. | 1214 | * buffers that require multiple bios to complete. |
1215 | */ | 1215 | */ |
1216 | if (bio->bi_error) | 1216 | if (bio->bi_status) { |
1217 | cmpxchg(&bp->b_io_error, 0, bio->bi_error); | 1217 | int error = blk_status_to_errno(bio->bi_status); |
1218 | |||
1219 | cmpxchg(&bp->b_io_error, 0, error); | ||
1220 | } | ||
1218 | 1221 | ||
1219 | if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) | 1222 | if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) |
1220 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); | 1223 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); |
diff --git a/include/linux/bio.h b/include/linux/bio.h index d1b04b0e99cf..9455aada1399 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -414,7 +414,7 @@ extern void bio_endio(struct bio *); | |||
414 | 414 | ||
415 | static inline void bio_io_error(struct bio *bio) | 415 | static inline void bio_io_error(struct bio *bio) |
416 | { | 416 | { |
417 | bio->bi_error = -EIO; | 417 | bio->bi_status = BLK_STS_IOERR; |
418 | bio_endio(bio); | 418 | bio_endio(bio); |
419 | } | 419 | } |
420 | 420 | ||
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 59378939a8cd..dcd45b15a3a5 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -33,6 +33,9 @@ typedef u8 __bitwise blk_status_t; | |||
33 | #define BLK_STS_RESOURCE ((__force blk_status_t)9) | 33 | #define BLK_STS_RESOURCE ((__force blk_status_t)9) |
34 | #define BLK_STS_IOERR ((__force blk_status_t)10) | 34 | #define BLK_STS_IOERR ((__force blk_status_t)10) |
35 | 35 | ||
36 | /* hack for device mapper, don't use elsewhere: */ | ||
37 | #define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) | ||
38 | |||
36 | struct blk_issue_stat { | 39 | struct blk_issue_stat { |
37 | u64 stat; | 40 | u64 stat; |
38 | }; | 41 | }; |
@@ -44,7 +47,7 @@ struct blk_issue_stat { | |||
44 | struct bio { | 47 | struct bio { |
45 | struct bio *bi_next; /* request queue link */ | 48 | struct bio *bi_next; /* request queue link */ |
46 | struct block_device *bi_bdev; | 49 | struct block_device *bi_bdev; |
47 | int bi_error; | 50 | blk_status_t bi_status; |
48 | unsigned int bi_opf; /* bottom bits req flags, | 51 | unsigned int bi_opf; /* bottom bits req flags, |
49 | * top bits REQ_OP. Use | 52 | * top bits REQ_OP. Use |
50 | * accessors. | 53 | * accessors. |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2a8871638453..76b6df862a12 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -1782,7 +1782,7 @@ struct blk_integrity_iter { | |||
1782 | const char *disk_name; | 1782 | const char *disk_name; |
1783 | }; | 1783 | }; |
1784 | 1784 | ||
1785 | typedef int (integrity_processing_fn) (struct blk_integrity_iter *); | 1785 | typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); |
1786 | 1786 | ||
1787 | struct blk_integrity_profile { | 1787 | struct blk_integrity_profile { |
1788 | integrity_processing_fn *generate_fn; | 1788 | integrity_processing_fn *generate_fn; |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 5de5c53251ec..456da5017b32 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -72,7 +72,7 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone); | |||
72 | * 2 : The target wants to push back the io | 72 | * 2 : The target wants to push back the io |
73 | */ | 73 | */ |
74 | typedef int (*dm_endio_fn) (struct dm_target *ti, | 74 | typedef int (*dm_endio_fn) (struct dm_target *ti, |
75 | struct bio *bio, int *error); | 75 | struct bio *bio, blk_status_t *error); |
76 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, | 76 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, |
77 | struct request *clone, blk_status_t error, | 77 | struct request *clone, blk_status_t error, |
78 | union map_info *map_context); | 78 | union map_info *map_context); |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index f80fd33639e0..57d22571f306 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -225,14 +225,14 @@ static struct block_device *hib_resume_bdev; | |||
225 | struct hib_bio_batch { | 225 | struct hib_bio_batch { |
226 | atomic_t count; | 226 | atomic_t count; |
227 | wait_queue_head_t wait; | 227 | wait_queue_head_t wait; |
228 | int error; | 228 | blk_status_t error; |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static void hib_init_batch(struct hib_bio_batch *hb) | 231 | static void hib_init_batch(struct hib_bio_batch *hb) |
232 | { | 232 | { |
233 | atomic_set(&hb->count, 0); | 233 | atomic_set(&hb->count, 0); |
234 | init_waitqueue_head(&hb->wait); | 234 | init_waitqueue_head(&hb->wait); |
235 | hb->error = 0; | 235 | hb->error = BLK_STS_OK; |
236 | } | 236 | } |
237 | 237 | ||
238 | static void hib_end_io(struct bio *bio) | 238 | static void hib_end_io(struct bio *bio) |
@@ -240,7 +240,7 @@ static void hib_end_io(struct bio *bio) | |||
240 | struct hib_bio_batch *hb = bio->bi_private; | 240 | struct hib_bio_batch *hb = bio->bi_private; |
241 | struct page *page = bio->bi_io_vec[0].bv_page; | 241 | struct page *page = bio->bi_io_vec[0].bv_page; |
242 | 242 | ||
243 | if (bio->bi_error) { | 243 | if (bio->bi_status) { |
244 | printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", | 244 | printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", |
245 | imajor(bio->bi_bdev->bd_inode), | 245 | imajor(bio->bi_bdev->bd_inode), |
246 | iminor(bio->bi_bdev->bd_inode), | 246 | iminor(bio->bi_bdev->bd_inode), |
@@ -253,8 +253,8 @@ static void hib_end_io(struct bio *bio) | |||
253 | flush_icache_range((unsigned long)page_address(page), | 253 | flush_icache_range((unsigned long)page_address(page), |
254 | (unsigned long)page_address(page) + PAGE_SIZE); | 254 | (unsigned long)page_address(page) + PAGE_SIZE); |
255 | 255 | ||
256 | if (bio->bi_error && !hb->error) | 256 | if (bio->bi_status && !hb->error) |
257 | hb->error = bio->bi_error; | 257 | hb->error = bio->bi_status; |
258 | if (atomic_dec_and_test(&hb->count)) | 258 | if (atomic_dec_and_test(&hb->count)) |
259 | wake_up(&hb->wait); | 259 | wake_up(&hb->wait); |
260 | 260 | ||
@@ -293,10 +293,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, | |||
293 | return error; | 293 | return error; |
294 | } | 294 | } |
295 | 295 | ||
296 | static int hib_wait_io(struct hib_bio_batch *hb) | 296 | static blk_status_t hib_wait_io(struct hib_bio_batch *hb) |
297 | { | 297 | { |
298 | wait_event(hb->wait, atomic_read(&hb->count) == 0); | 298 | wait_event(hb->wait, atomic_read(&hb->count) == 0); |
299 | return hb->error; | 299 | return blk_status_to_errno(hb->error); |
300 | } | 300 | } |
301 | 301 | ||
302 | /* | 302 | /* |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 193c5f5e3f79..bc364f86100a 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -867,7 +867,7 @@ static void blk_add_trace_split(void *ignore, | |||
867 | 867 | ||
868 | __blk_add_trace(bt, bio->bi_iter.bi_sector, | 868 | __blk_add_trace(bt, bio->bi_iter.bi_sector, |
869 | bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, | 869 | bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, |
870 | BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), | 870 | BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), |
871 | &rpdu); | 871 | &rpdu); |
872 | } | 872 | } |
873 | } | 873 | } |
@@ -900,7 +900,7 @@ static void blk_add_trace_bio_remap(void *ignore, | |||
900 | r.sector_from = cpu_to_be64(from); | 900 | r.sector_from = cpu_to_be64(from); |
901 | 901 | ||
902 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, | 902 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
903 | bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error, | 903 | bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, |
904 | sizeof(r), &r); | 904 | sizeof(r), &r); |
905 | } | 905 | } |
906 | 906 | ||
diff --git a/mm/page_io.c b/mm/page_io.c index 23f6d0d3470f..2da71e627812 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -45,7 +45,7 @@ void end_swap_bio_write(struct bio *bio) | |||
45 | { | 45 | { |
46 | struct page *page = bio->bi_io_vec[0].bv_page; | 46 | struct page *page = bio->bi_io_vec[0].bv_page; |
47 | 47 | ||
48 | if (bio->bi_error) { | 48 | if (bio->bi_status) { |
49 | SetPageError(page); | 49 | SetPageError(page); |
50 | /* | 50 | /* |
51 | * We failed to write the page out to swap-space. | 51 | * We failed to write the page out to swap-space. |
@@ -118,7 +118,7 @@ static void end_swap_bio_read(struct bio *bio) | |||
118 | { | 118 | { |
119 | struct page *page = bio->bi_io_vec[0].bv_page; | 119 | struct page *page = bio->bi_io_vec[0].bv_page; |
120 | 120 | ||
121 | if (bio->bi_error) { | 121 | if (bio->bi_status) { |
122 | SetPageError(page); | 122 | SetPageError(page); |
123 | ClearPageUptodate(page); | 123 | ClearPageUptodate(page); |
124 | pr_alert("Read-error on swap-device (%u:%u:%llu)\n", | 124 | pr_alert("Read-error on swap-device (%u:%u:%llu)\n", |